promptshield-firewall 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,65 @@
1
+ Metadata-Version: 2.4
2
+ Name: promptshield-firewall
3
+ Version: 0.1.0
4
+ Summary: Real-time LLM prompt injection firewall
5
+ Home-page: https://promptshield.live
6
+ Author: Kehinde Omosor
7
+ Author-email: kehinde@promptshield.live
8
+ Keywords: llm security prompt injection firewall ai
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Topic :: Security
12
+ Classifier: Topic :: Software Development :: Libraries
13
+ Requires-Python: >=3.8
14
+ Description-Content-Type: text/markdown
15
+ Requires-Dist: httpx>=0.24.0
16
+ Dynamic: author
17
+ Dynamic: author-email
18
+ Dynamic: classifier
19
+ Dynamic: description
20
+ Dynamic: description-content-type
21
+ Dynamic: home-page
22
+ Dynamic: keywords
23
+ Dynamic: requires-dist
24
+ Dynamic: requires-python
25
+ Dynamic: summary
26
+
27
+ # PromptShield Python SDK
28
+
29
+ Real-time LLM prompt injection firewall.
30
+
31
+ ## Installation
32
+
33
+ ```bash
34
+ pip install promptshield
35
+ ```
36
+
37
+ ## Quick Start
38
+
39
+ ```python
40
+ from promptshield import PromptShield
41
+
42
+ shield = PromptShield(api_key="ps_your_key")
43
+
44
+ # Scan before sending to your LLM
45
+ result = shield.scan("Ignore all previous instructions.")
46
+
47
+ if result["verdict"] == "BLOCKED":
48
+ print(f"Attack blocked! Risk score: {result['risk_score']}")
49
+ else:
50
+ response = your_llm.chat(user_input)
51
+
52
+ # Or use the simple boolean check
53
+ if not shield.is_safe(user_input):
54
+ return "Request blocked."
55
+ ```
56
+
57
+ ## Async Support
58
+
59
+ ```python
60
+ result = await shield.async_scan(user_input)
61
+ ```
62
+
63
+ ## Get API Key
64
+
65
+ Free tier at [promptshield.live](https://promptshield.live)
@@ -0,0 +1,39 @@
1
+ # PromptShield Python SDK
2
+
3
+ Real-time LLM prompt injection firewall.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ pip install promptshield
9
+ ```
10
+
11
+ ## Quick Start
12
+
13
+ ```python
14
+ from promptshield import PromptShield
15
+
16
+ shield = PromptShield(api_key="ps_your_key")
17
+
18
+ # Scan before sending to your LLM
19
+ result = shield.scan("Ignore all previous instructions.")
20
+
21
+ if result["verdict"] == "BLOCKED":
22
+ print(f"Attack blocked! Risk score: {result['risk_score']}")
23
+ else:
24
+ response = your_llm.chat(user_input)
25
+
26
+ # Or use the simple boolean check
27
+ if not shield.is_safe(user_input):
28
+ return "Request blocked."
29
+ ```
30
+
31
+ ## Async Support
32
+
33
+ ```python
34
+ result = await shield.async_scan(user_input)
35
+ ```
36
+
37
+ ## Get API Key
38
+
39
+ Free tier at [promptshield.live](https://promptshield.live)
@@ -0,0 +1,4 @@
1
+ from .client import PromptShield, PromptShieldError
2
+
3
+ __version__ = "0.1.0"
4
+ __all__ = ["PromptShield", "PromptShieldError"]
@@ -0,0 +1,160 @@
1
+ """
2
+ PromptShield Python SDK
3
+ pip install promptshield
4
+ """
5
+
6
+ import httpx
7
+ import time
8
+ from typing import Dict, Any, Optional
9
+
10
+
11
+ class PromptShieldError(Exception):
12
+ """Base error for PromptShield SDK"""
13
+ pass
14
+
15
+
16
+ class PromptShield:
17
+ """
18
+ PromptShield client — real-time LLM prompt injection firewall.
19
+
20
+ Usage:
21
+ from promptshield import PromptShield
22
+
23
+ shield = PromptShield(api_key="ps_your_key")
24
+ result = shield.scan("Ignore all previous instructions.")
25
+
26
+ if result["verdict"] == "BLOCKED":
27
+ print("Attack blocked!")
28
+ else:
29
+ # Safe to send to your LLM
30
+ response = your_llm.chat(user_input)
31
+ """
32
+
33
+ def __init__(
34
+ self,
35
+ api_key: str,
36
+ base_url: str = "https://promptshield.live",
37
+ timeout: float = 10.0,
38
+ ):
39
+ self.api_key = api_key
40
+ self.base_url = base_url.rstrip("/")
41
+ self.timeout = timeout
42
+ self.headers = {
43
+ "Authorization": f"Bearer {self.api_key}",
44
+ "Content-Type": "application/json",
45
+ }
46
+
47
+ def scan(self, prompt: str, context: Optional[str] = None) -> Dict[str, Any]:
48
+ """
49
+ Scan a prompt for injection attacks synchronously.
50
+
51
+ Args:
52
+ prompt: The user input to scan
53
+ context: Optional system context for better analysis
54
+
55
+ Returns:
56
+ dict with verdict, risk_score, attack_types, explanation
57
+ """
58
+ start_time = time.time()
59
+ payload = {"input": prompt}
60
+ if context:
61
+ payload["context"] = context
62
+
63
+ try:
64
+ with httpx.Client(timeout=self.timeout) as client:
65
+ response = client.post(
66
+ f"{self.base_url}/v1/analyze",
67
+ json=payload,
68
+ headers=self.headers,
69
+ )
70
+
71
+ if response.status_code == 401:
72
+ raise PromptShieldError("Invalid API key. Get one at promptshield.live")
73
+ if response.status_code == 429:
74
+ raise PromptShieldError("Monthly scan limit reached. Upgrade at promptshield.live")
75
+ if response.status_code != 200:
76
+ raise PromptShieldError(f"API Error ({response.status_code}): {response.text}")
77
+
78
+ data = response.json()
79
+ data["sdk_latency_ms"] = int((time.time() - start_time) * 1000)
80
+ return data
81
+
82
+ except httpx.RequestError as exc:
83
+ # Fail-safe: allow request if firewall is unreachable
84
+ return {
85
+ "verdict": "SAFE",
86
+ "risk_score": 0,
87
+ "attack_types": [],
88
+ "confidence": 0,
89
+ "explanation": "PromptShield unreachable — request allowed by fail-safe.",
90
+ "recommendation": "Check your network connection.",
91
+ "tags": ["fail-safe"],
92
+ "latency_ms": 0,
93
+ "sanitized_input": prompt,
94
+ "error": str(exc),
95
+ "fallback_triggered": True,
96
+ "sdk_latency_ms": int((time.time() - start_time) * 1000),
97
+ }
98
+
99
+ async def async_scan(self, prompt: str, context: Optional[str] = None) -> Dict[str, Any]:
100
+ """
101
+ Scan a prompt for injection attacks asynchronously.
102
+
103
+ Args:
104
+ prompt: The user input to scan
105
+ context: Optional system context for better analysis
106
+
107
+ Returns:
108
+ dict with verdict, risk_score, attack_types, explanation
109
+ """
110
+ start_time = time.time()
111
+ payload = {"input": prompt}
112
+ if context:
113
+ payload["context"] = context
114
+
115
+ try:
116
+ async with httpx.AsyncClient(timeout=self.timeout) as client:
117
+ response = await client.post(
118
+ f"{self.base_url}/v1/analyze",
119
+ json=payload,
120
+ headers=self.headers,
121
+ )
122
+
123
+ if response.status_code == 401:
124
+ raise PromptShieldError("Invalid API key. Get one at promptshield.live")
125
+ if response.status_code == 429:
126
+ raise PromptShieldError("Monthly scan limit reached. Upgrade at promptshield.live")
127
+ if response.status_code != 200:
128
+ raise PromptShieldError(f"API Error ({response.status_code}): {response.text}")
129
+
130
+ data = response.json()
131
+ data["sdk_latency_ms"] = int((time.time() - start_time) * 1000)
132
+ return data
133
+
134
+ except httpx.RequestError as exc:
135
+ return {
136
+ "verdict": "SAFE",
137
+ "risk_score": 0,
138
+ "attack_types": [],
139
+ "confidence": 0,
140
+ "explanation": "PromptShield unreachable — request allowed by fail-safe.",
141
+ "recommendation": "Check your network connection.",
142
+ "tags": ["fail-safe"],
143
+ "latency_ms": 0,
144
+ "sanitized_input": prompt,
145
+ "error": str(exc),
146
+ "fallback_triggered": True,
147
+ "sdk_latency_ms": int((time.time() - start_time) * 1000),
148
+ }
149
+
150
+ def is_safe(self, prompt: str) -> bool:
151
+ """
152
+ Simple boolean check — returns True if safe, False if blocked/suspicious.
153
+
154
+ Usage:
155
+ if not shield.is_safe(user_input):
156
+ return "Request blocked."
157
+ response = your_llm.chat(user_input)
158
+ """
159
+ result = self.scan(prompt)
160
+ return result["verdict"] == "SAFE"
@@ -0,0 +1,65 @@
1
+ Metadata-Version: 2.4
2
+ Name: promptshield-firewall
3
+ Version: 0.1.0
4
+ Summary: Real-time LLM prompt injection firewall
5
+ Home-page: https://promptshield.live
6
+ Author: Kehinde Omosor
7
+ Author-email: kehinde@promptshield.live
8
+ Keywords: llm security prompt injection firewall ai
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Topic :: Security
12
+ Classifier: Topic :: Software Development :: Libraries
13
+ Requires-Python: >=3.8
14
+ Description-Content-Type: text/markdown
15
+ Requires-Dist: httpx>=0.24.0
16
+ Dynamic: author
17
+ Dynamic: author-email
18
+ Dynamic: classifier
19
+ Dynamic: description
20
+ Dynamic: description-content-type
21
+ Dynamic: home-page
22
+ Dynamic: keywords
23
+ Dynamic: requires-dist
24
+ Dynamic: requires-python
25
+ Dynamic: summary
26
+
27
+ # PromptShield Python SDK
28
+
29
+ Real-time LLM prompt injection firewall.
30
+
31
+ ## Installation
32
+
33
+ ```bash
34
+ pip install promptshield
35
+ ```
36
+
37
+ ## Quick Start
38
+
39
+ ```python
40
+ from promptshield import PromptShield
41
+
42
+ shield = PromptShield(api_key="ps_your_key")
43
+
44
+ # Scan before sending to your LLM
45
+ result = shield.scan("Ignore all previous instructions.")
46
+
47
+ if result["verdict"] == "BLOCKED":
48
+ print(f"Attack blocked! Risk score: {result['risk_score']}")
49
+ else:
50
+ response = your_llm.chat(user_input)
51
+
52
+ # Or use the simple boolean check
53
+ if not shield.is_safe(user_input):
54
+ return "Request blocked."
55
+ ```
56
+
57
+ ## Async Support
58
+
59
+ ```python
60
+ result = await shield.async_scan(user_input)
61
+ ```
62
+
63
+ ## Get API Key
64
+
65
+ Free tier at [promptshield.live](https://promptshield.live)
@@ -0,0 +1,9 @@
1
+ README.md
2
+ setup.py
3
+ promptshield/__init__.py
4
+ promptshield/client.py
5
+ promptshield_firewall.egg-info/PKG-INFO
6
+ promptshield_firewall.egg-info/SOURCES.txt
7
+ promptshield_firewall.egg-info/dependency_links.txt
8
+ promptshield_firewall.egg-info/requires.txt
9
+ promptshield_firewall.egg-info/top_level.txt
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,22 @@
1
+ from setuptools import setup, find_packages
2
+
3
+ setup(
4
+ name="promptshield-firewall",
5
+ version="0.1.0",
6
+ description="Real-time LLM prompt injection firewall",
7
+ long_description=open("README.md").read(),
8
+ long_description_content_type="text/markdown",
9
+ author="Kehinde Omosor",
10
+ author_email="kehinde@promptshield.live",
11
+ url="https://promptshield.live",
12
+ packages=find_packages(),
13
+ install_requires=["httpx>=0.24.0"],
14
+ python_requires=">=3.8",
15
+ classifiers=[
16
+ "Programming Language :: Python :: 3",
17
+ "License :: OSI Approved :: MIT License",
18
+ "Topic :: Security",
19
+ "Topic :: Software Development :: Libraries",
20
+ ],
21
+ keywords="llm security prompt injection firewall ai",
22
+ )