enkryptai-sdk 1.0.13__tar.gz → 1.0.15__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. {enkryptai_sdk-1.0.13/src/enkryptai_sdk.egg-info → enkryptai_sdk-1.0.15}/PKG-INFO +327 -12
  2. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/README.md +326 -11
  3. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/setup.py +1 -1
  4. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/dto/guardrails.py +13 -0
  5. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/dto/models.py +1 -0
  6. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/dto/red_team.py +132 -0
  7. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/red_team.py +28 -0
  8. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15/src/enkryptai_sdk.egg-info}/PKG-INFO +327 -12
  9. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_all_v2.py +29 -203
  10. enkryptai_sdk-1.0.15/tests/test_redteam.py +258 -0
  11. enkryptai_sdk-1.0.13/tests/test_redteam.py +0 -434
  12. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/LICENSE +0 -0
  13. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/setup.cfg +0 -0
  14. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/__init__.py +0 -0
  15. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/ai_proxy.py +0 -0
  16. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/base.py +0 -0
  17. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/coc.py +0 -0
  18. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/config.py +0 -0
  19. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/datasets.py +0 -0
  20. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/deployments.py +0 -0
  21. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/dto/__init__.py +0 -0
  22. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/dto/ai_proxy.py +0 -0
  23. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/dto/base.py +0 -0
  24. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/dto/coc.py +0 -0
  25. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/dto/datasets.py +0 -0
  26. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/dto/deployments.py +0 -0
  27. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/evals.py +0 -0
  28. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/guardrails.py +0 -0
  29. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/guardrails_old.py +0 -0
  30. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/models.py +0 -0
  31. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk/response.py +0 -0
  32. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk.egg-info/SOURCES.txt +0 -0
  33. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk.egg-info/dependency_links.txt +0 -0
  34. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/src/enkryptai_sdk.egg-info/top_level.txt +0 -0
  35. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_ai_proxy.py +0 -0
  36. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_all.py +0 -0
  37. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_basic.py +0 -0
  38. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_coc.py +0 -0
  39. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_datasets.py +0 -0
  40. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_deployments.py +0 -0
  41. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_detect_policy.py +0 -0
  42. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_guardrails.py +0 -0
  43. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_injection_attack.py +0 -0
  44. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_model.py +0 -0
  45. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_openai.py +0 -0
  46. {enkryptai_sdk-1.0.13 → enkryptai_sdk-1.0.15}/tests/test_policy_violation.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: enkryptai-sdk
3
- Version: 1.0.13
3
+ Version: 1.0.15
4
4
  Summary: A Python SDK with guardrails and red teaming functionality for API interactions
5
5
  Home-page: https://github.com/enkryptai/enkryptai-sdk
6
6
  Author: Enkrypt AI Team
@@ -49,6 +49,8 @@ Also see the API documentation at [https://docs.enkryptai.com](https://docs.enkr
49
49
  - [Sample Redteam Model Config](#sample-redteam-model-config)
50
50
  - [Sample Custom Redteam Target Config](#sample-custom-redteam-target-config)
51
51
  - [Sample Custom Redteam Model Config](#sample-custom-redteam-model-config)
52
+ - [Sample Redteam Risk Mitigation Guardrails Policy Config](#sample-redteam-risk-mitigation-guardrails-policy-config)
53
+ - [Sample Redteam Risk Mitigation System Prompt Config](#sample-redteam-risk-mitigation-system-prompt-config)
52
54
  - [Health Checks](#health-checks)
53
55
  - [Guardrails Health](#guardrails-health)
54
56
  - [Guardrails Status](#guardrails-status)
@@ -124,6 +126,8 @@ Also see the API documentation at [https://docs.enkryptai.com](https://docs.enkr
124
126
  - [Get Redteam Task Results Summary of Test Type](#get-redteam-task-results-summary-of-test-type)
125
127
  - [Get Redteam Task Results Details](#get-redteam-task-results-details)
126
128
  - [Get Redteam Task Results Details of Test Type](#get-redteam-task-results-details-of-test-type)
129
+ - [Mitigate Risks with Guardrails Policy](#mitigate-risks-with-guardrails-policy)
130
+ - [Mitigate Risks with System Prompt](#mitigate-risks-with-system-prompt)
127
131
  - [Copyright, License, and Terms of Use](#copyright-license-and-terms-of-use)
128
132
 
129
133
  ## Installation
@@ -271,14 +275,7 @@ sample_detectors = {
271
275
  },
272
276
  "bias": {
273
277
  "enabled": False
274
- },
275
- "copyright_ip": {
276
- "enabled": False
277
- },
278
- "system_prompt": {
279
- "enabled": False,
280
- "index": "system"
281
- },
278
+ }
282
279
  }
283
280
  ```
284
281
 
@@ -525,6 +522,292 @@ sample_custom_redteam_model_config = {
525
522
  }
526
523
  ```
527
524
 
525
+ ### [Sample Redteam Risk Mitigation Guardrails Policy Config](https://docs.enkryptai.com/redteam-api-reference/endpoint/risk-mitigation-guardrails-policy)
526
+
527
+ ```python Python
528
+ sample_redteam_risk_mitigation_guardrails_policy_config = {
529
+ "required_detectors": [
530
+ "policy_violation"
531
+ ],
532
+ "redteam_summary": {
533
+ "category": [
534
+ {
535
+ "Biology": {
536
+ "total": 50,
537
+ "test_type": "cbrn_test",
538
+ "success(%)": 92
539
+ }
540
+ },
541
+ {
542
+ "Chemistry": {
543
+ "total": 50,
544
+ "test_type": "cbrn_test",
545
+ "success(%)": 98
546
+ }
547
+ },
548
+ {
549
+ "Fraud": {
550
+ "total": 100,
551
+ "test_type": "harmful_test",
552
+ "success(%)": 73
553
+ }
554
+ },
555
+ {
556
+ "Guns & Illegal Substances": {
557
+ "total": 100,
558
+ "test_type": "harmful_test",
559
+ "success(%)": 87
560
+ }
561
+ },
562
+ {
563
+ "Hate Speech and Discrimination": {
564
+ "total": 100,
565
+ "test_type": "harmful_test",
566
+ "success(%)": 53
567
+ }
568
+ },
569
+ {
570
+ "Nuclear Science": {
571
+ "total": 50,
572
+ "test_type": "cbrn_test",
573
+ "success(%)": 94
574
+ }
575
+ },
576
+ {
577
+ "Radiology": {
578
+ "total": 50,
579
+ "test_type": "cbrn_test",
580
+ "success(%)": 84
581
+ }
582
+ },
583
+ {
584
+ "Regulated or Controlled Substances": {
585
+ "total": 100,
586
+ "test_type": "harmful_test",
587
+ "success(%)": 74
588
+ }
589
+ },
590
+ {
591
+ "Sexual Content": {
592
+ "total": 100,
593
+ "test_type": "harmful_test",
594
+ "success(%)": 79
595
+ }
596
+ },
597
+ {
598
+ "Suicide & Self-Harm": {
599
+ "total": 100,
600
+ "test_type": "harmful_test",
601
+ "success(%)": 72
602
+ }
603
+ },
604
+ {
605
+ "Unethical Crimes": {
606
+ "total": 100,
607
+ "test_type": "harmful_test",
608
+ "success(%)": 83
609
+ }
610
+ },
611
+ {
612
+ "Violent Crimes": {
613
+ "total": 100,
614
+ "test_type": "harmful_test",
615
+ "success(%)": 70
616
+ }
617
+ }
618
+ ],
619
+ "scenario": [],
620
+ "test_date": "2020-01-01T17:14:33.607823+00:00",
621
+ "test_name": "Mistral Small Latest x Red Team",
622
+ "test_type": [
623
+ {
624
+ "cbrn_test": {
625
+ "total": 200,
626
+ "success(%)": 92
627
+ }
628
+ },
629
+ {
630
+ "harmful_test": {
631
+ "total": 800,
632
+ "success(%)": 73.88
633
+ }
634
+ }
635
+ ],
636
+ "model_name": "mistral-small-latest",
637
+ "risk_score": 82.9375,
638
+ "dataset_name": "Standard",
639
+ "model_source": "",
640
+ "attack_method": [
641
+ {
642
+ "Basic": {
643
+ "total": 1000,
644
+ "success(%)": 77.5
645
+ }
646
+ }
647
+ ],
648
+ "nist_category": [
649
+ {
650
+ "CBRN Information or Capabilities": {
651
+ "total": 200,
652
+ "success(%)": 92
653
+ }
654
+ },
655
+ {
656
+ "Dangerous, Violent, or Hateful Content": {
657
+ "total": 800,
658
+ "success(%)": 73.88
659
+ }
660
+ }
661
+ ],
662
+ "model_provider": "mistral",
663
+ "model_endpoint_url": "https://api.mistral.ai/v1/chat/completions",
664
+ "custom_test_category_risks": []
665
+ }
666
+ }
667
+ ```
668
+
669
+ ### [Sample Redteam Risk Mitigation System Prompt Config](https://docs.enkryptai.com/redteam-api-reference/endpoint/risk-mitigation-system-prompt)
670
+
671
+ ```python Python
672
+ sample_redteam_risk_mitigation_system_prompt_config = {
673
+ "system_prompt": "You are a helpful AI Assistant",
674
+ "redteam_summary": {
675
+ "category": [
676
+ {
677
+ "Biology": {
678
+ "total": 50,
679
+ "test_type": "cbrn_test",
680
+ "success(%)": 92
681
+ }
682
+ },
683
+ {
684
+ "Chemistry": {
685
+ "total": 50,
686
+ "test_type": "cbrn_test",
687
+ "success(%)": 98
688
+ }
689
+ },
690
+ {
691
+ "Fraud": {
692
+ "total": 100,
693
+ "test_type": "harmful_test",
694
+ "success(%)": 73
695
+ }
696
+ },
697
+ {
698
+ "Guns & Illegal Substances": {
699
+ "total": 100,
700
+ "test_type": "harmful_test",
701
+ "success(%)": 87
702
+ }
703
+ },
704
+ {
705
+ "Hate Speech and Discrimination": {
706
+ "total": 100,
707
+ "test_type": "harmful_test",
708
+ "success(%)": 53
709
+ }
710
+ },
711
+ {
712
+ "Nuclear Science": {
713
+ "total": 50,
714
+ "test_type": "cbrn_test",
715
+ "success(%)": 94
716
+ }
717
+ },
718
+ {
719
+ "Radiology": {
720
+ "total": 50,
721
+ "test_type": "cbrn_test",
722
+ "success(%)": 84
723
+ }
724
+ },
725
+ {
726
+ "Regulated or Controlled Substances": {
727
+ "total": 100,
728
+ "test_type": "harmful_test",
729
+ "success(%)": 74
730
+ }
731
+ },
732
+ {
733
+ "Sexual Content": {
734
+ "total": 100,
735
+ "test_type": "harmful_test",
736
+ "success(%)": 79
737
+ }
738
+ },
739
+ {
740
+ "Suicide & Self-Harm": {
741
+ "total": 100,
742
+ "test_type": "harmful_test",
743
+ "success(%)": 72
744
+ }
745
+ },
746
+ {
747
+ "Unethical Crimes": {
748
+ "total": 100,
749
+ "test_type": "harmful_test",
750
+ "success(%)": 83
751
+ }
752
+ },
753
+ {
754
+ "Violent Crimes": {
755
+ "total": 100,
756
+ "test_type": "harmful_test",
757
+ "success(%)": 70
758
+ }
759
+ }
760
+ ],
761
+ "scenario": [],
762
+ "test_date": "2020-01-01T17:14:33.607823+00:00",
763
+ "test_name": "Mistral Small Latest x Red Team",
764
+ "test_type": [
765
+ {
766
+ "cbrn_test": {
767
+ "total": 200,
768
+ "success(%)": 92
769
+ }
770
+ },
771
+ {
772
+ "harmful_test": {
773
+ "total": 800,
774
+ "success(%)": 73.88
775
+ }
776
+ }
777
+ ],
778
+ "model_name": "mistral-small-latest",
779
+ "risk_score": 82.9375,
780
+ "dataset_name": "Standard",
781
+ "model_source": "",
782
+ "attack_method": [
783
+ {
784
+ "Basic": {
785
+ "total": 1000,
786
+ "success(%)": 77.5
787
+ }
788
+ }
789
+ ],
790
+ "nist_category": [
791
+ {
792
+ "CBRN Information or Capabilities": {
793
+ "total": 200,
794
+ "success(%)": 92
795
+ }
796
+ },
797
+ {
798
+ "Dangerous, Violent, or Hateful Content": {
799
+ "total": 800,
800
+ "success(%)": 73.88
801
+ }
802
+ }
803
+ ],
804
+ "model_provider": "mistral",
805
+ "model_endpoint_url": "https://api.mistral.ai/v1/chat/completions",
806
+ "custom_test_category_risks": []
807
+ }
808
+ }
809
+ ```
810
+
528
811
  ## Health Checks
529
812
 
530
813
  ### [Guardrails Health](https://docs.enkryptai.com/guardrails-api-reference/endpoint/health-check)
@@ -701,8 +984,8 @@ print(batch_detect_response.to_dict())
701
984
  - `nsfw`: Filter inappropriate content
702
985
  - `toxicity`: Detect toxic language
703
986
  - `pii`: Detect personal information
704
- - `copyright_ip`: Check for copyright/IP violations
705
- - `system_prompt`: Detect system prompt leaks
987
+ - `copyright_ip`: Check for copyright/IP violations ***(Coming soon)***
988
+ - `system_prompt`: Detect system prompt leaks ***(Coming soon)***
706
989
  - `keyword_detector`: Check for specific keywords
707
990
 
708
991
  Each detector can be enabled/disabled and configured with specific options as documented in the [API docs](https://docs.enkryptai.com/guardrails-api-reference/introduction).
@@ -764,12 +1047,16 @@ guardrails_config = GuardrailsConfig.keyword(keywords=["secret", "password"])
764
1047
 
765
1048
  ### [Copyright IP](https://docs.enkryptai.com/guardrails-api-reference/Copyright_IP_Leak_Detector)
766
1049
 
1050
+ - ***(Coming soon)***
1051
+
767
1052
  ```python Python
768
1053
  guardrails_config = GuardrailsConfig.copyright_ip()
769
1054
  ```
770
1055
 
771
1056
  ### [System Prompt](https://docs.enkryptai.com/guardrails-api-reference/System_Prompt_Leak_Detector)
772
1057
 
1058
+ - ***(Coming soon)***
1059
+
773
1060
  ```python Python
774
1061
  guardrails_config = GuardrailsConfig.system_prompt(index="system")
775
1062
  ```
@@ -972,6 +1259,8 @@ print(relevancy_response.to_dict())
972
1259
 
973
1260
  ### [Check Hallucination](https://docs.enkryptai.com/guardrails-api-reference/Hallucination)
974
1261
 
1262
+ - ***(Coming soon)***
1263
+
975
1264
  Detect hallucinations in an LLM's response:
976
1265
 
977
1266
  ```python Python
@@ -1057,7 +1346,7 @@ print(atomize_response.to_dict())
1057
1346
  # Add a code of conduct policy
1058
1347
  add_policy_response = coc_client.add_policy(
1059
1348
  policy_name=test_coc_policy_name,
1060
- policy_rules=example_coc_policy_rules, # Can also be a list of rules
1349
+ policy_rules=example_coc_policy_rules, # Can also be a list of rules
1061
1350
  total_rules=4,
1062
1351
  policy_file="/path/to/your/policy.pdf"
1063
1352
  # policy_text=example_coc_policy_text, # Optional: Use this if you want to add a policy text instead of a file
@@ -1661,6 +1950,32 @@ print(redteam_results_details_test_type.task_status)
1661
1950
  print(redteam_results_details_test_type.to_dict())
1662
1951
  ```
1663
1952
 
1953
+ ### [Mitigate Risks with Guardrails Policy](https://docs.enkryptai.com/redteam-api-reference/endpoint/risk-mitigation-guardrails-policy)
1954
+
1955
+ ```python Python
1956
+ # Mitigate risks with guardrails policy
1957
+ risk_mitigation_guardrails_policy_response = redteam_client.risk_mitigation_guardrails_policy(config=copy.deepcopy(sample_redteam_risk_mitigation_guardrails_policy_config))
1958
+
1959
+ print(risk_mitigation_guardrails_policy_response)
1960
+ print(risk_mitigation_guardrails_policy_response.guardrails_policy)
1961
+
1962
+ # Print as a dictionary
1963
+ print(risk_mitigation_guardrails_policy_response.to_dict())
1964
+ ```
1965
+
1966
+ ### [Mitigate Risks with System Prompt](https://docs.enkryptai.com/redteam-api-reference/endpoint/risk-mitigation-system-prompt)
1967
+
1968
+ ```python Python
1969
+ # Mitigate risks with system prompt
1970
+ risk_mitigation_system_prompt_response = redteam_client.risk_mitigation_system_prompt(config=copy.deepcopy(sample_redteam_risk_mitigation_system_prompt_config))
1971
+
1972
+ print(risk_mitigation_system_prompt_response)
1973
+ print(risk_mitigation_system_prompt_response.system_prompt)
1974
+
1975
+ # Print as a dictionary
1976
+ print(risk_mitigation_system_prompt_response.to_dict())
1977
+ ```
1978
+
1664
1979
  ## Copyright, License and Terms of Use
1665
1980
 
1666
1981
  © 2025 Enkrypt AI. All rights reserved.