pangea-sdk 6.3.0__py3-none-any.whl → 6.5.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,13 +1,13 @@
1
1
  from __future__ import annotations
2
2
 
3
- from collections.abc import Sequence
4
- from typing import Generic, Literal, Optional, overload
3
+ from collections.abc import Mapping, Sequence
4
+ from typing import Annotated, Any, Generic, Literal, Optional, Union, overload
5
5
 
6
- from pydantic import BaseModel, ConfigDict
7
- from typing_extensions import TypeVar
6
+ from pydantic import BaseModel, ConfigDict, Field, RootModel
7
+ from typing_extensions import TypeAlias, TypedDict, TypeVar
8
8
 
9
9
  from pangea.config import PangeaConfig
10
- from pangea.response import APIRequestModel, APIResponseModel, PangeaResponse, PangeaResponseResult
10
+ from pangea.response import APIRequestModel, APIResponseModel, PangeaDateTime, PangeaResponse, PangeaResponseResult
11
11
  from pangea.services.base import ServiceBase
12
12
 
13
13
  # This is named "prompt injection" in the API spec even though it is also used
@@ -21,9 +21,7 @@ MaliciousEntityAction = Literal["report", "defang", "disabled", "block"]
21
21
  PiiEntityAction = Literal["disabled", "report", "block", "mask", "partial_masking", "replacement", "hash", "fpe"]
22
22
 
23
23
 
24
- class Message(BaseModel):
25
- model_config = ConfigDict(extra="forbid")
26
-
24
+ class Message(APIRequestModel):
27
25
  role: str
28
26
  content: str
29
27
 
@@ -31,18 +29,21 @@ class Message(BaseModel):
31
29
  class CodeDetectionOverride(APIRequestModel):
32
30
  disabled: Optional[bool] = None
33
31
  action: Optional[Literal["report", "block"]] = None
32
+ threshold: Optional[float] = None
34
33
 
35
34
 
36
35
  class LanguageDetectionOverride(APIRequestModel):
37
36
  disabled: Optional[bool] = None
38
- allow: Optional[list[str]] = None
39
- block: Optional[list[str]] = None
40
- report: Optional[list[str]] = None
37
+ action: Optional[Literal["", "report", "allow", "block"]] = ""
38
+ languages: Optional[list[str]] = None
39
+ threshold: Optional[float] = None
41
40
 
42
41
 
43
42
  class TopicDetectionOverride(APIRequestModel):
44
43
  disabled: Optional[bool] = None
45
- block: Optional[list[str]] = None
44
+ action: Optional[Literal["", "report", "block"]] = ""
45
+ topics: Optional[list[str]] = None
46
+ threshold: Optional[float] = None
46
47
 
47
48
 
48
49
  class PromptInjectionOverride(APIRequestModel):
@@ -145,6 +146,8 @@ class SecretsDetectionOverride(APIRequestModel):
145
146
 
146
147
 
147
148
  class Overrides(APIRequestModel):
149
+ """Overrides flags."""
150
+
148
151
  ignore_recipe: Optional[bool] = None
149
152
  """Bypass existing Recipe content and create an on-the-fly Recipe."""
150
153
 
@@ -159,7 +162,7 @@ class Overrides(APIRequestModel):
159
162
  secrets_detection: Optional[SecretsDetectionOverride] = None
160
163
  selfharm: Optional[SelfHarmOverride] = None
161
164
  sentiment: Optional[SentimentOverride] = None
162
- topic_detection: Optional[TopicDetectionOverride] = None
165
+ topic: Optional[TopicDetectionOverride] = None
163
166
 
164
167
 
165
168
  class LogFields(APIRequestModel):
@@ -249,15 +252,24 @@ class SecretsEntityResult(APIResponseModel):
249
252
 
250
253
 
251
254
  class LanguageDetectionResult(APIResponseModel):
252
- language: str
253
- action: str
255
+ action: Optional[str] = None
254
256
  """The action taken by this Detector"""
255
257
 
258
+ language: Optional[str] = None
259
+
260
+
261
+ class Topic(APIResponseModel):
262
+ topic: str
263
+ confidence: float
264
+
256
265
 
257
266
  class TopicDetectionResult(APIResponseModel):
258
- action: str
267
+ action: Optional[str] = None
259
268
  """The action taken by this Detector"""
260
269
 
270
+ topics: Optional[list[Topic]] = None
271
+ """List of topics detected"""
272
+
261
273
 
262
274
  class CodeDetectionResult(APIResponseModel):
263
275
  language: str
@@ -274,39 +286,583 @@ class TextGuardDetector(APIResponseModel, Generic[_T]):
274
286
 
275
287
 
276
288
  class TextGuardDetectors(APIResponseModel):
277
- prompt_injection: Optional[TextGuardDetector[PromptInjectionResult]] = None
278
- pii_entity: Optional[TextGuardDetector[PiiEntityResult]] = None
279
- malicious_entity: Optional[TextGuardDetector[MaliciousEntityResult]] = None
289
+ code_detection: Optional[TextGuardDetector[CodeDetectionResult]] = None
290
+ competitors: Optional[TextGuardDetector[object]] = None
280
291
  custom_entity: Optional[TextGuardDetector[object]] = None
281
- secrets_detection: Optional[TextGuardDetector[SecretsEntityResult]] = None
282
- profanity_and_toxicity: Optional[TextGuardDetector[object]] = None
292
+ gibberish: Optional[TextGuardDetector[object]] = None
293
+ hardening: Optional[TextGuardDetector[object]] = None
283
294
  language_detection: Optional[TextGuardDetector[LanguageDetectionResult]] = None
284
- topic_detection: Optional[TextGuardDetector[TopicDetectionResult]] = None
285
- code_detection: Optional[TextGuardDetector[CodeDetectionResult]] = None
295
+ malicious_entity: Optional[TextGuardDetector[MaliciousEntityResult]] = None
296
+ pii_entity: Optional[TextGuardDetector[PiiEntityResult]] = None
297
+ profanity_and_toxicity: Optional[TextGuardDetector[object]] = None
298
+ prompt_injection: Optional[TextGuardDetector[PromptInjectionResult]] = None
299
+ secrets_detection: Optional[TextGuardDetector[SecretsEntityResult]] = None
300
+ selfharm: Optional[TextGuardDetector[object]] = None
301
+ sentiment: Optional[TextGuardDetector[object]] = None
302
+ topic: Optional[TextGuardDetector[TopicDetectionResult]] = None
286
303
 
287
304
 
288
305
  class TextGuardResult(PangeaResponseResult):
289
306
  detectors: TextGuardDetectors
290
307
  """Result of the recipe analyzing and input prompt."""
291
308
 
292
- prompt_text: Optional[str] = None
293
- """Updated prompt text, if applicable."""
309
+ access_rules: Optional[object] = None
310
+ """Result of the recipe evaluating configured rules"""
311
+
312
+ blocked: Optional[bool] = None
313
+ """Whether or not the prompt triggered a block detection."""
314
+
315
+ fpe_context: Optional[str] = None
316
+ """
317
+ If an FPE redaction method returned results, this will be the context passed to
318
+ unredact.
319
+ """
294
320
 
295
321
  prompt_messages: Optional[object] = None
296
322
  """Updated structured prompt, if applicable."""
297
323
 
298
- blocked: bool
299
- """Whether or not the prompt triggered a block detection."""
324
+ prompt_text: Optional[str] = None
325
+ """Updated prompt text, if applicable."""
300
326
 
301
- recipe: str
327
+ recipe: Optional[str] = None
302
328
  """The Recipe that was used."""
303
329
 
330
+ transformed: Optional[bool] = None
331
+ """Whether or not the original input was transformed."""
332
+
333
+
334
+ class GuardDetectors(APIResponseModel):
335
+ code: Optional[object] = None
336
+ competitors: Optional[object] = None
337
+ confidential_and_pii_entity: Optional[object] = None
338
+ custom_entity: Optional[object] = None
339
+ language: Optional[object] = None
340
+ malicious_entity: Optional[object] = None
341
+ malicious_prompt: Optional[object] = None
342
+ prompt_hardening: Optional[object] = None
343
+ secret_and_key_entity: Optional[object] = None
344
+ topic: Optional[object] = None
345
+
346
+
347
+ class GuardResult(PangeaResponseResult):
348
+ detectors: GuardDetectors
349
+ """Result of the recipe analyzing and input prompt."""
350
+
351
+ access_rules: Optional[object] = None
352
+ """Result of the recipe evaluating configured rules"""
353
+
354
+ blocked: Optional[bool] = None
355
+ """Whether or not the prompt triggered a block detection."""
356
+
304
357
  fpe_context: Optional[str] = None
305
358
  """
306
359
  If an FPE redaction method returned results, this will be the context passed
307
360
  to unredact.
308
361
  """
309
362
 
363
+ input_token_count: Optional[float] = None
364
+ """Number of tokens counted in the input"""
365
+
366
+ output: Optional[object] = None
367
+ """Updated structured prompt."""
368
+
369
+ output_token_count: Optional[float] = None
370
+ """Number of tokens counted in the output"""
371
+
372
+ recipe: Optional[str] = None
373
+ """The Recipe that was used."""
374
+
375
+ transformed: Optional[bool] = None
376
+ """Whether or not the original input was transformed."""
377
+
378
+
379
+ class Areas(BaseModel):
380
+ model_config = ConfigDict(extra="forbid")
381
+
382
+ text_guard: bool
383
+
384
+
385
+ class AuditDataActivityConfig(BaseModel):
386
+ model_config = ConfigDict(extra="forbid")
387
+
388
+ enabled: bool
389
+ audit_service_config_id: str
390
+ areas: Areas
391
+
392
+
393
+ class PromptGuard(BaseModel):
394
+ model_config = ConfigDict(extra="forbid")
395
+
396
+ enabled: Optional[bool] = None
397
+ config_id: Optional[str] = None
398
+ confidence_threshold: Optional[float] = None
399
+
400
+
401
+ class IpIntel(BaseModel):
402
+ model_config = ConfigDict(extra="forbid")
403
+
404
+ enabled: Optional[bool] = None
405
+ config_id: Optional[str] = None
406
+ reputation_provider: Optional[str] = None
407
+ risk_threshold: Optional[float] = None
408
+
409
+
410
+ class UserIntel(BaseModel):
411
+ model_config = ConfigDict(extra="forbid")
412
+
413
+ enabled: Optional[bool] = None
414
+ config_id: Optional[str] = None
415
+ breach_provider: Optional[str] = None
416
+
417
+
418
+ class UrlIntel(BaseModel):
419
+ model_config = ConfigDict(extra="forbid")
420
+
421
+ enabled: Optional[bool] = None
422
+ config_id: Optional[str] = None
423
+ reputation_provider: Optional[str] = None
424
+ risk_threshold: Optional[float] = None
425
+
426
+
427
+ class DomainIntel(BaseModel):
428
+ model_config = ConfigDict(extra="forbid")
429
+
430
+ enabled: Optional[bool] = None
431
+ config_id: Optional[str] = None
432
+ reputation_provider: Optional[str] = None
433
+ risk_threshold: Optional[float] = None
434
+
435
+
436
+ class FileScan(BaseModel):
437
+ model_config = ConfigDict(extra="forbid")
438
+
439
+ enabled: Optional[bool] = None
440
+ config_id: Optional[str] = None
441
+ scan_provider: Optional[str] = None
442
+ risk_threshold: Optional[float] = None
443
+
444
+
445
+ class Redact(BaseModel):
446
+ model_config = ConfigDict(extra="forbid")
447
+
448
+ enabled: Optional[bool] = None
449
+ config_id: Optional[str] = None
450
+
451
+
452
+ class Vault(BaseModel):
453
+ model_config = ConfigDict(extra="forbid")
454
+
455
+ config_id: Optional[str] = None
456
+
457
+
458
+ class Lingua(BaseModel):
459
+ model_config = ConfigDict(extra="forbid")
460
+
461
+ enabled: Optional[bool] = None
462
+
463
+
464
+ class Code(BaseModel):
465
+ model_config = ConfigDict(extra="forbid")
466
+
467
+ enabled: Optional[bool] = None
468
+
469
+
470
+ class ConnectionsConfig(BaseModel):
471
+ model_config = ConfigDict(extra="forbid")
472
+
473
+ prompt_guard: Optional[PromptGuard] = None
474
+ ip_intel: Optional[IpIntel] = None
475
+ user_intel: Optional[UserIntel] = None
476
+ url_intel: Optional[UrlIntel] = None
477
+ domain_intel: Optional[DomainIntel] = None
478
+ file_scan: Optional[FileScan] = None
479
+ redact: Optional[Redact] = None
480
+ vault: Optional[Vault] = None
481
+ lingua: Optional[Lingua] = None
482
+ code: Optional[Code] = None
483
+
484
+
485
+ class PartialMasking(BaseModel):
486
+ masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
487
+ unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
488
+ unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
489
+ masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
490
+ masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
491
+ chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
492
+ masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
493
+
494
+
495
+ class RuleRedactionConfig1(APIResponseModel):
496
+ redaction_type: Literal[
497
+ "mask",
498
+ "partial_masking",
499
+ "replacement",
500
+ "hash",
501
+ "detect_only",
502
+ "fpe",
503
+ "mask",
504
+ "detect_only",
505
+ ]
506
+ """Redaction method to apply for this rule"""
507
+ redaction_value: Optional[str] = None
508
+ partial_masking: Optional[PartialMasking] = None
509
+ hash: Optional[Hash] = None
510
+ fpe_alphabet: Optional[
511
+ Literal[
512
+ "numeric",
513
+ "alphalower",
514
+ "alphaupper",
515
+ "alpha",
516
+ "alphanumericlower",
517
+ "alphanumericupper",
518
+ "alphanumeric",
519
+ ]
520
+ ] = None
521
+
522
+
523
+ class PartialMasking1(BaseModel):
524
+ masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
525
+ unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
526
+ unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
527
+ masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
528
+ masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
529
+ chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
530
+ masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
531
+
532
+
533
+ class RuleRedactionConfig2(BaseModel):
534
+ model_config = ConfigDict(extra="forbid")
535
+
536
+ redaction_type: Literal["replacement"]
537
+ redaction_value: str
538
+ partial_masking: Optional[PartialMasking1] = None
539
+ hash: Optional[Hash] = None
540
+ fpe_alphabet: Optional[
541
+ Literal[
542
+ "numeric",
543
+ "alphalower",
544
+ "alphaupper",
545
+ "alpha",
546
+ "alphanumericlower",
547
+ "alphanumericupper",
548
+ "alphanumeric",
549
+ ]
550
+ ] = None
551
+
552
+
553
+ class PartialMasking2(BaseModel):
554
+ masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
555
+ unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
556
+ unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
557
+ masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
558
+ masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
559
+ chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
560
+ masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
561
+
562
+
563
+ class RuleRedactionConfig3(BaseModel):
564
+ model_config = ConfigDict(extra="forbid")
565
+
566
+ redaction_type: Literal["partial_masking"]
567
+ redaction_value: str
568
+ partial_masking: PartialMasking2
569
+ hash: Optional[Hash] = None
570
+ fpe_alphabet: Optional[
571
+ Literal[
572
+ "numeric",
573
+ "alphalower",
574
+ "alphaupper",
575
+ "alpha",
576
+ "alphanumericlower",
577
+ "alphanumericupper",
578
+ "alphanumeric",
579
+ ]
580
+ ] = None
581
+
582
+
583
+ class PartialMasking3(BaseModel):
584
+ masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
585
+ unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
586
+ unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
587
+ masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
588
+ masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
589
+ chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
590
+ masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
591
+
592
+
593
+ class RuleRedactionConfig4(BaseModel):
594
+ model_config = ConfigDict(extra="forbid")
595
+
596
+ redaction_type: Literal["hash"]
597
+ redaction_value: str
598
+ partial_masking: PartialMasking3
599
+ hash: Optional[Hash] = None
600
+ fpe_alphabet: Optional[
601
+ Literal[
602
+ "numeric",
603
+ "alphalower",
604
+ "alphaupper",
605
+ "alpha",
606
+ "alphanumericlower",
607
+ "alphanumericupper",
608
+ "alphanumeric",
609
+ ]
610
+ ] = None
611
+
612
+
613
+ class CharsToIgnoreItem(RootModel[str]):
614
+ root: Annotated[str, Field(max_length=1, min_length=1)]
615
+
616
+
617
+ class PartialMasking4(BaseModel):
618
+ masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
619
+ unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
620
+ unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
621
+ masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
622
+ masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
623
+ chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
624
+ masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
625
+
626
+
627
+ class Hash(BaseModel):
628
+ hash_type: Literal["md5", "sha256"]
629
+ """The type of hashing algorithm"""
630
+
631
+
632
+ class RuleRedactionConfig5(BaseModel):
633
+ model_config = ConfigDict(extra="forbid")
634
+
635
+ redaction_type: Literal["fpe"]
636
+ redaction_value: str
637
+ partial_masking: PartialMasking4
638
+ hash: Optional[Hash] = None
639
+ fpe_alphabet: Optional[
640
+ Literal[
641
+ "numeric",
642
+ "alphalower",
643
+ "alphaupper",
644
+ "alpha",
645
+ "alphanumericlower",
646
+ "alphanumericupper",
647
+ "alphanumeric",
648
+ ]
649
+ ] = None
650
+
651
+
652
+ class Rule(APIResponseModel):
653
+ redact_rule_id: str
654
+ """
655
+ Identifier of the redaction rule to apply. This should match a rule defined
656
+ in the [Redact service](https://pangea.cloud/docs/redact/using-redact/using-redact).
657
+ """
658
+ redaction: Union[
659
+ RuleRedactionConfig1,
660
+ RuleRedactionConfig2,
661
+ RuleRedactionConfig3,
662
+ RuleRedactionConfig4,
663
+ RuleRedactionConfig5,
664
+ ]
665
+ """
666
+ Configuration for the redaction method applied to detected values.
667
+
668
+ Each rule supports one redaction type, such as masking, replacement,
669
+ hashing, Format-Preserving Encryption (FPE), or detection-only mode.
670
+ Additional parameters may be required depending on the selected redaction
671
+ type.
672
+
673
+ For more details, see the [AI Guard Recipe Actions](https://pangea.cloud/docs/ai-guard/recipes#actions)
674
+ documentation.
675
+ """
676
+ block: Optional[bool] = None
677
+ """
678
+ If `true`, indicates that further processing should be stopped when this
679
+ rule is triggered
680
+ """
681
+ disabled: Optional[bool] = None
682
+ """
683
+ If `true`, disables this specific rule even if the detector is enabled
684
+ """
685
+ reputation_check: Optional[bool] = None
686
+ """
687
+ If `true`, performs a reputation check using the configured intel provider.
688
+ Applies to the Malicious Entity detector when using IP, URL, or Domain Intel
689
+ services.
690
+ """
691
+ transform_if_malicious: Optional[bool] = None
692
+ """
693
+ If `true`, applies redaction or transformation when the detected value is
694
+ determined to be malicious by intel analysis
695
+ """
696
+
697
+
698
+ class Settings(BaseModel):
699
+ rules: Optional[list[Rule]] = None
700
+
701
+
702
+ class DetectorSetting(BaseModel):
703
+ model_config = ConfigDict(extra="forbid")
704
+
705
+ detector_name: str
706
+ state: Literal["disabled", "enabled"]
707
+ settings: Settings
708
+
709
+
710
+ class RedactConnectorSettings(BaseModel):
711
+ fpe_tweak_vault_secret_id: Optional[str] = None
712
+
713
+
714
+ class ConnectorSettings(BaseModel):
715
+ model_config = ConfigDict(extra="forbid")
716
+
717
+ redact: Optional[RedactConnectorSettings] = None
718
+
719
+
720
+ class AccessRuleSettings(APIResponseModel):
721
+ """
722
+ Configuration for an individual access rule used in an AI Guard recipe. Each
723
+ rule defines its matching logic and the action to apply when the logic
724
+ evaluates to true.
725
+ """
726
+
727
+ rule_key: Annotated[str, Field(pattern="^([a-zA-Z0-9_][a-zA-Z0-9/|_]*)$")]
728
+ """
729
+ Unique identifier for this rule. Should be user-readable and consistent
730
+ across recipe updates.
731
+ """
732
+ name: str
733
+ """Display label for the rule shown in user interfaces."""
734
+ state: Literal["block", "report"]
735
+ """
736
+ Action to apply if the rule matches. Use 'block' to stop further processing
737
+ or 'report' to simply log the match.
738
+ """
739
+
740
+
741
+ class RecipeConfig(APIResponseModel):
742
+ name: str
743
+ """Human-readable name of the recipe"""
744
+ description: str
745
+ """Detailed description of the recipe's purpose or use case"""
746
+ version: Optional[str] = "v1"
747
+ """Optional version identifier for the recipe. Can be used to track changes."""
748
+ detectors: Optional[list[DetectorSetting]] = None
749
+ """Setting for Detectors"""
750
+ access_rules: Optional[list[AccessRuleSettings]] = None
751
+ """Configuration for access rules used in an AI Guard recipe."""
752
+ connector_settings: Optional[ConnectorSettings] = None
753
+
754
+
755
+ class ServiceConfig(PangeaResponseResult):
756
+ id: Optional[str] = None
757
+ """ID of an AI Guard service configuration"""
758
+ name: Optional[str] = None
759
+ """Human-readable name of the AI Guard service configuration"""
760
+ audit_data_activity: Optional[AuditDataActivityConfig] = None
761
+ connections: Optional[ConnectionsConfig] = None
762
+ recipes: Optional[dict[str, RecipeConfig]] = None
763
+
764
+
765
+ class ServiceConfigFilter(BaseModel):
766
+ model_config = ConfigDict(extra="forbid")
767
+
768
+ id: Optional[str] = None
769
+ """
770
+ Only records where id equals this value.
771
+ """
772
+ id__contains: Optional[list[str]] = None
773
+ """
774
+ Only records where id includes each substring.
775
+ """
776
+ id__in: Optional[list[str]] = None
777
+ """
778
+ Only records where id equals one of the provided substrings.
779
+ """
780
+ created_at: Optional[PangeaDateTime] = None
781
+ """
782
+ Only records where created_at equals this value.
783
+ """
784
+ created_at__gt: Optional[PangeaDateTime] = None
785
+ """
786
+ Only records where created_at is greater than this value.
787
+ """
788
+ created_at__gte: Optional[PangeaDateTime] = None
789
+ """
790
+ Only records where created_at is greater than or equal to this value.
791
+ """
792
+ created_at__lt: Optional[PangeaDateTime] = None
793
+ """
794
+ Only records where created_at is less than this value.
795
+ """
796
+ created_at__lte: Optional[PangeaDateTime] = None
797
+ """
798
+ Only records where created_at is less than or equal to this value.
799
+ """
800
+ updated_at: Optional[PangeaDateTime] = None
801
+ """
802
+ Only records where updated_at equals this value.
803
+ """
804
+ updated_at__gt: Optional[PangeaDateTime] = None
805
+ """
806
+ Only records where updated_at is greater than this value.
807
+ """
808
+ updated_at__gte: Optional[PangeaDateTime] = None
809
+ """
810
+ Only records where updated_at is greater than or equal to this value.
811
+ """
812
+ updated_at__lt: Optional[PangeaDateTime] = None
813
+ """
814
+ Only records where updated_at is less than this value.
815
+ """
816
+ updated_at__lte: Optional[PangeaDateTime] = None
817
+ """
818
+ Only records where updated_at is less than or equal to this value.
819
+ """
820
+
821
+
822
+ class ServiceConfigsPage(PangeaResponseResult):
823
+ count: Optional[int] = None
824
+ """The total number of service configs matched by the list request."""
825
+ last: Optional[str] = None
826
+ """
827
+ Used to fetch the next page of the current listing when provided in a
828
+ repeated request's last parameter.
829
+ """
830
+ items: Optional[list[ServiceConfig]] = None
831
+
832
+
833
+ class ExtraInfoTyped(TypedDict, total=False):
834
+ """(AIDR) Logging schema."""
835
+
836
+ app_name: str
837
+ """Name of source application."""
838
+
839
+ app_group: str
840
+ """The group of source application."""
841
+
842
+ app_version: str
843
+ """Version of the source application."""
844
+
845
+ actor_name: str
846
+ """Name of subject actor."""
847
+
848
+ actor_group: str
849
+ """The group of subject actor."""
850
+
851
+ source_region: str
852
+ """Geographic region or data center."""
853
+
854
+ data_sensitivity: str
855
+ """Sensitivity level of data involved"""
856
+
857
+ customer_tier: str
858
+ """Tier of the user or organization"""
859
+
860
+ use_case: str
861
+ """Business-specific use case"""
862
+
863
+
864
+ ExtraInfo: TypeAlias = Union[ExtraInfoTyped, dict[str, object]]
865
+
310
866
 
311
867
  class AIGuard(ServiceBase):
312
868
  """AI Guard service client.
@@ -314,11 +870,9 @@ class AIGuard(ServiceBase):
314
870
  Provides methods to interact with Pangea's AI Guard service.
315
871
 
316
872
  Examples:
317
- from pangea import PangeaConfig
318
873
  from pangea.services import AIGuard
319
874
 
320
- config = PangeaConfig(domain="aws.us.pangea.cloud")
321
- ai_guard = AIGuard(token="pangea_token", config=config)
875
+ ai_guard = AIGuard(token="pangea_token")
322
876
  """
323
877
 
324
878
  service_name = "ai-guard"
@@ -338,11 +892,9 @@ class AIGuard(ServiceBase):
338
892
  config_id: Configuration ID.
339
893
 
340
894
  Examples:
341
- from pangea import PangeaConfig
342
895
  from pangea.services import AIGuard
343
896
 
344
- config = PangeaConfig(domain="aws.us.pangea.cloud")
345
- ai_guard = AIGuard(token="pangea_token", config=config)
897
+ ai_guard = AIGuard(token="pangea_token")
346
898
  """
347
899
 
348
900
  super().__init__(token, config, logger_name, config_id)
@@ -352,29 +904,31 @@ class AIGuard(ServiceBase):
352
904
  self,
353
905
  text: str,
354
906
  *,
355
- recipe: str | None = None,
356
907
  debug: bool | None = None,
357
- overrides: Overrides | None = None,
358
908
  log_fields: LogFields | None = None,
909
+ overrides: Overrides | None = None,
910
+ recipe: str | None = None,
359
911
  ) -> PangeaResponse[TextGuardResult]:
360
912
  """
361
- Text Guard for scanning LLM inputs and outputs
913
+ Guard LLM input and output text
362
914
 
363
- Analyze and redact text to avoid manipulation of the model, addition of
364
- malicious content, and other undesirable data transfers.
915
+ Detect, remove, or block malicious content and intent in LLM inputs and
916
+ outputs to prevent model manipulation and data leakage.
365
917
 
366
918
  OperationId: ai_guard_post_v1_text_guard
367
919
 
368
920
  Args:
369
921
  text: Text to be scanned by AI Guard for PII, sensitive data,
370
922
  malicious content, and other data types defined by the
371
- configuration. Supports processing up to 10KB of text.
372
- recipe: Recipe key of a configuration of data types and settings
373
- defined in the Pangea User Console. It specifies the rules that
374
- are to be applied to the text, such as defang malicious URLs.
923
+ configuration. Supports processing up to 20 KiB of text.
375
924
  debug: Setting this value to true will provide a detailed analysis
376
925
  of the text data
377
926
  log_field: Additional fields to include in activity log
927
+ overrides: Overrides flags. Note: This parameter has no effect when
928
+ the request is made by AIDR
929
+ recipe: Recipe key of a configuration of data types and settings
930
+ defined in the Pangea User Console. It specifies the rules that
931
+ are to be applied to the text, such as defang malicious URLs.
378
932
 
379
933
  Examples:
380
934
  response = ai_guard.guard_text("text")
@@ -391,24 +945,26 @@ class AIGuard(ServiceBase):
391
945
  log_fields: LogFields | None = None,
392
946
  ) -> PangeaResponse[TextGuardResult]:
393
947
  """
394
- Text Guard for scanning LLM inputs and outputs
948
+ Guard LLM input and output text
395
949
 
396
- Analyze and redact text to avoid manipulation of the model, addition of
397
- malicious content, and other undesirable data transfers.
950
+ Detect, remove, or block malicious content and intent in LLM inputs and
951
+ outputs to prevent model manipulation and data leakage.
398
952
 
399
953
  OperationId: ai_guard_post_v1_text_guard
400
954
 
401
955
  Args:
402
956
  messages: Structured messages data to be scanned by AI Guard for
403
957
  PII, sensitive data, malicious content, and other data types
404
- defined by the configuration. Supports processing up to 10KB of
405
- JSON text
406
- recipe: Recipe key of a configuration of data types and settings
407
- defined in the Pangea User Console. It specifies the rules that
408
- are to be applied to the text, such as defang malicious URLs.
958
+ defined by the configuration. Supports processing up to 20 KiB
959
+ of JSON text using Pangea message format.
409
960
  debug: Setting this value to true will provide a detailed analysis
410
961
  of the text data
411
962
  log_field: Additional fields to include in activity log
963
+ overrides: Overrides flags. Note: This parameter has no effect when
964
+ the request is made by AIDR
965
+ recipe: Recipe key of a configuration of data types and settings
966
+ defined in the Pangea User Console. It specifies the rules that
967
+ are to be applied to the text, such as defang malicious URLs.
412
968
 
413
969
  Examples:
414
970
  response = ai_guard.guard_text(messages=[Message(role="user", content="hello world")])
@@ -419,16 +975,16 @@ class AIGuard(ServiceBase):
419
975
  text: str | None = None,
420
976
  *,
421
977
  messages: Sequence[Message] | None = None,
422
- recipe: str | None = None,
423
978
  debug: bool | None = None,
424
- overrides: Overrides | None = None,
425
979
  log_fields: LogFields | None = None,
980
+ overrides: Overrides | None = None,
981
+ recipe: str | None = None,
426
982
  ) -> PangeaResponse[TextGuardResult]:
427
983
  """
428
- Text Guard for scanning LLM inputs and outputs
984
+ Guard LLM input and output text
429
985
 
430
- Analyze and redact text to avoid manipulation of the model, addition of
431
- malicious content, and other undesirable data transfers.
986
+ Detect, remove, or block malicious content and intent in LLM inputs and
987
+ outputs to prevent model manipulation and data leakage.
432
988
 
433
989
  OperationId: ai_guard_post_v1_text_guard
434
990
 
@@ -440,12 +996,14 @@ class AIGuard(ServiceBase):
440
996
  PII, sensitive data, malicious content, and other data types
441
997
  defined by the configuration. Supports processing up to 10KB of
442
998
  JSON text
443
- recipe: Recipe key of a configuration of data types and settings
444
- defined in the Pangea User Console. It specifies the rules that
445
- are to be applied to the text, such as defang malicious URLs.
446
999
  debug: Setting this value to true will provide a detailed analysis
447
1000
  of the text data
448
1001
  log_field: Additional fields to include in activity log
1002
+ overrides: Overrides flags. Note: This parameter has no effect when
1003
+ the request is made by AIDR
1004
+ recipe: Recipe key of a configuration of data types and settings
1005
+ defined in the Pangea User Console. It specifies the rules that
1006
+ are to be applied to the text, such as defang malicious URLs.
449
1007
 
450
1008
  Examples:
451
1009
  response = ai_guard.guard_text("text")
@@ -466,3 +1024,157 @@ class AIGuard(ServiceBase):
466
1024
  "log_fields": log_fields,
467
1025
  },
468
1026
  )
1027
+
1028
def guard(
    self,
    input: Mapping[str, Any],
    *,
    recipe: str | None = None,
    debug: bool | None = None,
    overrides: Overrides | None = None,
    app_id: str | None = None,
    actor_id: str | None = None,
    llm_provider: str | None = None,
    model: str | None = None,
    model_version: str | None = None,
    request_token_count: int | None = None,
    response_token_count: int | None = None,
    source_ip: str | None = None,
    source_location: str | None = None,
    tenant_id: str | None = None,
    event_type: Literal["input", "output"] | None = None,
    sensor_instance_id: str | None = None,
    extra_info: ExtraInfo | None = None,
    count_tokens: bool | None = None,
) -> PangeaResponse[GuardResult]:
    """
    Guard LLM input and output

    Analyze and redact content to avoid manipulation of the model, addition
    of malicious content, and other undesirable data transfers.

    OperationId: ai_guard_post_v1beta_guard

    Args:
        input: 'messages' (required) contains Prompt content and role array
            in JSON format. The `content` is the multimodal text or image
            input that will be analyzed. Additional properties such as
            'tools' may be provided for analysis.
        recipe: Recipe key of a configuration of data types and settings
            defined in the Pangea User Console. It specifies the rules that
            are to be applied to the text, such as defang malicious URLs.
        debug: Setting this value to true will provide a detailed analysis
            of the text data.
        overrides: Overrides flags.
        app_id: ID of source application.
        actor_id: ID of the actor (user or agent) associated with the event.
        llm_provider: Underlying LLM. Example: 'OpenAI'.
        model: Model used to perform the event. Example: 'gpt'.
        model_version: Model version used to perform the event. Example: '3.5'.
        request_token_count: Number of tokens in the request.
        response_token_count: Number of tokens in the response.
        source_ip: IP address of user or app or agent.
        source_location: Location of user or app or agent.
        tenant_id: For gateway-like integrations with multi-tenant support.
        event_type: (AIDR) Event Type.
        sensor_instance_id: (AIDR) sensor instance id.
        extra_info: (AIDR) Logging schema.
        count_tokens: Provide input and output token count.
    """
    return self.request.post(
        "v1beta/guard",
        GuardResult,
        data={
            "input": input,
            "recipe": recipe,
            "debug": debug,
            "overrides": overrides,
            "app_id": app_id,
            "actor_id": actor_id,
            "llm_provider": llm_provider,
            "model": model,
            "model_version": model_version,
            "request_token_count": request_token_count,
            "response_token_count": response_token_count,
            "source_ip": source_ip,
            "source_location": source_location,
            "tenant_id": tenant_id,
            "event_type": event_type,
            "sensor_instance_id": sensor_instance_id,
            "extra_info": extra_info,
            "count_tokens": count_tokens,
        },
    )
1103
+
1104
def get_service_config(self, id: str) -> PangeaResponse[ServiceConfig]:
    """
    Retrieve an AI Guard service configuration by its ID.

    OperationId: ai_guard_post_v1beta_config
    """
    payload = {"id": id}
    return self.request.post("v1beta/config", data=payload, result_class=ServiceConfig)
1109
+
1110
def create_service_config(
    self,
    name: str,
    *,
    id: str | None = None,
    audit_data_activity: AuditDataActivityConfig | None = None,
    connections: ConnectionsConfig | None = None,
    recipes: Mapping[str, RecipeConfig] | None = None,
) -> PangeaResponse[ServiceConfig]:
    """
    Create a new AI Guard service configuration.

    OperationId: ai_guard_post_v1beta_config_create
    """
    # Assemble the request payload; None-valued fields are handled by the
    # request layer the same way as inline literals.
    payload = {
        "name": name,
        "id": id,
        "audit_data_activity": audit_data_activity,
        "connections": connections,
        "recipes": recipes,
    }
    return self.request.post("v1beta/config/create", data=payload, result_class=ServiceConfig)
1133
+
1134
def update_service_config(
    self,
    id: str,
    name: str,
    *,
    audit_data_activity: AuditDataActivityConfig | None = None,
    connections: ConnectionsConfig | None = None,
    recipes: Mapping[str, RecipeConfig] | None = None,
) -> PangeaResponse[ServiceConfig]:
    """
    Update an existing AI Guard service configuration.

    OperationId: ai_guard_post_v1beta_config_update
    """
    # Assemble the request payload; None-valued fields are handled by the
    # request layer the same way as inline literals.
    payload = {
        "id": id,
        "name": name,
        "audit_data_activity": audit_data_activity,
        "connections": connections,
        "recipes": recipes,
    }
    return self.request.post("v1beta/config/update", data=payload, result_class=ServiceConfig)
1157
+
1158
def delete_service_config(self, id: str) -> PangeaResponse[ServiceConfig]:
    """
    Delete an AI Guard service configuration by its ID.

    OperationId: ai_guard_post_v1beta_config_delete
    """
    payload = {"id": id}
    return self.request.post("v1beta/config/delete", data=payload, result_class=ServiceConfig)
1163
+
1164
def list_service_configs(
    self,
    *,
    filter: ServiceConfigFilter | None = None,
    last: str | None = None,
    order: Literal["asc", "desc"] | None = None,
    order_by: Literal["id", "created_at", "updated_at"] | None = None,
    size: int | None = None,
) -> PangeaResponse[ServiceConfigsPage]:
    """
    List AI Guard service configurations, with optional filtering,
    ordering, and pagination.

    OperationId: ai_guard_post_v1beta_config_list
    """
    payload = {
        "filter": filter,
        "last": last,
        "order": order,
        "order_by": order_by,
        "size": size,
    }
    return self.request.post("v1beta/config/list", data=payload, result_class=ServiceConfigsPage)