prompt-firewall 0.1.4-py3-none-any.whl → 0.1.5-py3-none-any.whl
This diff compares the contents of two package versions as publicly released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- {prompt_firewall-0.1.4.dist-info → prompt_firewall-0.1.5.dist-info}/METADATA +1 -3
- {prompt_firewall-0.1.4.dist-info → prompt_firewall-0.1.5.dist-info}/RECORD +6 -6
- prompt_guard/config.py +69 -22
- prompt_guard/pipeline.py +4 -4
- {prompt_firewall-0.1.4.dist-info → prompt_firewall-0.1.5.dist-info}/WHEEL +0 -0
- {prompt_firewall-0.1.4.dist-info → prompt_firewall-0.1.5.dist-info}/top_level.txt +0 -0
{prompt_firewall-0.1.4.dist-info → prompt_firewall-0.1.5.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompt-firewall
-Version: 0.1.4
+Version: 0.1.5
 Summary: Prompt guard library for composing multiple safety checks.
 Requires-Python: >=3.13
 Description-Content-Type: text/markdown
@@ -22,8 +22,6 @@ Composable prompt-safety guards as a lightweight Python library. It provides:
 from prompt_guard import GuardPipeline, PromptGuardConfig
 
 cfg = PromptGuardConfig(
-    phrases_path="phrases.txt",
-    sentences_path="sentences.txt",
     embed_model_name="mxbai-embed-large",
     judge_model_name="qwen2.5:3b-instruct",
 )
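The removed lines reflect the headline change in 0.1.5: the phrase and sentence corpora now ship inside the package, so the README example no longer passes file paths. A short sketch of both styles, based on the field definitions in the config.py diff below; the custom paths are illustrative, not from the package:

from prompt_guard import PromptGuardConfig

# 0.1.5 default: the bundled data/phrases.txt and data/sentences.txt are used.
cfg = PromptGuardConfig()

# Custom corpora still work through the now-optional path fields.
cfg_custom = PromptGuardConfig(
    phrases_path="/srv/guard/phrases.txt",      # illustrative path
    sentences_path="/srv/guard/sentences.txt",  # illustrative path
)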
{prompt_firewall-0.1.4.dist-info → prompt_firewall-0.1.5.dist-info}/RECORD
CHANGED

@@ -1,6 +1,6 @@
 prompt_guard/__init__.py,sha256=MMcONUgjoE5sG8LQX8n6pF7mvawil6XLp4Mxa3KX1Vw,174
-prompt_guard/config.py,sha256=
-prompt_guard/pipeline.py,sha256=
+prompt_guard/config.py,sha256=MCewtkFFL51yXm0I1smFvcEQCTRcYz7Kl1xWij4OQ-g,2144
+prompt_guard/pipeline.py,sha256=zR9a958oG8faAH1hXBcZIpCAxl42e-Gw9coj5-_2tPI,3227
 prompt_guard/protocols.py,sha256=xHOKaIyexvl1HPp7dAdswr2lLDdGc542VtF1SQOMGqE,396
 prompt_guard/data/phrases.txt,sha256=HRP32xImm2AxB42oNVhYXAUX2ikrJWpyjw4BbkN6vnU,348
 prompt_guard/data/sentences.txt,sha256=sbrzWRh8jirNX_xY32PPjp7VGEWxWorN9t-IyVjH4_4,1275
@@ -8,7 +8,7 @@ prompt_guard/guards/__init__.py,sha256=DZp9g5I-H6AM3MkNyZnLi48kc7-bAECP48b6SO6YD
 prompt_guard/guards/judge_guard.py,sha256=dtlcZjG8s8l_6SijyRfVwE_yz7gqFiKtO8uwL0Jlo_E,1870
 prompt_guard/guards/rag_guard.py,sha256=bfDDdLZQvWTqCk6j7N8uCeCNzpkOTOFqh-jUYk4pyPk,960
 prompt_guard/guards/tfidf_guard.py,sha256=9nis8bobAHCgBtpUAYHXOHaQHvs4WQUUXvHWme-OET0,1351
-prompt_firewall-0.1.4.dist-info/METADATA,sha256=
-prompt_firewall-0.1.4.dist-info/WHEEL,sha256=
-prompt_firewall-0.1.4.dist-info/top_level.txt,sha256=
-prompt_firewall-0.1.4.dist-info/RECORD,,
+prompt_firewall-0.1.5.dist-info/METADATA,sha256=krtbsTgMufdnmgS0EWfZMvcY4ig0zDsg5bDGfGegWAw,1261
+prompt_firewall-0.1.5.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+prompt_firewall-0.1.5.dist-info/top_level.txt,sha256=5XNPQyMJgYV0nzEbniQJ3NjJcthIHv4h0WcPXOErxsk,13
+prompt_firewall-0.1.5.dist-info/RECORD,,
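For reference, RECORD entries follow the wheel format "path,sha256=<digest>,<size>", where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing "=" padding stripped. A sketch of how an entry above can be reproduced; the local file path is illustrative:

import base64
import hashlib
from pathlib import Path


def record_entry(path: str) -> str:
    # Read the file, hash it, and encode the digest the way RECORD expects.
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"


print(record_entry("prompt_guard/config.py"))
# e.g. prompt_guard/config.py,sha256=MCewtkFFL51yXm0I1smFvcEQCTRcYz7Kl1xWij4OQ-g,2144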
prompt_guard/config.py
CHANGED
@@ -1,30 +1,77 @@
-from
+from pydantic import BaseModel, Field
 
 
-
-
+BUILTIN_DEFAULT_PHRASES_RESOURCE = "data/phrases.txt"
+BUILTIN_DEFAULT_SENTENCES_RESOURCE = "data/sentences.txt"
 
 
-
-
-
-
+class PromptGuardConfig(BaseModel):
+    phrases_path: str | None = Field(
+        default=None,
+        examples=["/path/to/phrases.txt"],
+        description="Path to phrases file (one phrase per line).",
+    )
+    sentences_path: str | None = Field(
+        default=None,
+        examples=["/path/to/sentences.txt"],
+        description="Path to sentences file (one sentence per line).",
+    )
 
-    tfidf_top_k: int =
-
-
+    tfidf_top_k: int = Field(
+        default=5,
+        examples=[5],
+        description="Number of top TF-IDF matches to keep.",
+    )
+    tfidf_ngram_range: tuple[int, int] = Field(
+        default=(1, 3),
+        examples=[(1, 3)],
+        description="N-gram range for TF-IDF vectorizer.",
+    )
+    rag_top_k: int = Field(
+        default=5,
+        examples=[5],
+        description="Number of top RAG matches to retrieve.",
+    )
 
-    embed_model_name: str =
-
-
-
+    embed_model_name: str = Field(
+        default="mxbai-embed-large",
+        examples=["mxbai-embed-large"],
+        description="Ollama embedding model name.",
+    )
+    judge_model_name: str = Field(
+        default="qwen2.5:3b-instruct",
+        examples=["qwen2.5:3b-instruct"],
+        description="Ollama judge model name.",
+    )
+    judge_temperature: float = Field(
+        default=0.0,
+        examples=[0.0],
+        description="LLM judge temperature.",
+    )
+    judge_max_tokens: int = Field(
+        default=256,
+        examples=[256],
+        description="LLM judge max tokens.",
+    )
 
-    base_url: str | None =
+    base_url: str | None = Field(
+        default=None,
+        examples=["http://localhost:11434"],
+        description="Optional base URL for Ollama.",
+    )
 
-
-
-
-
-
-    enable_rag: bool =
-
+    enable_tfidf: bool = Field(
+        default=True,
+        examples=[True],
+        description="Enable TF-IDF guard.",
+    )
+    enable_rag: bool = Field(
+        default=True,
+        examples=[True],
+        description="Enable RAG guard.",
+    )
+    enable_judge: bool = Field(
+        default=True,
+        examples=[True],
+        description="Enable LLM judge guard.",
+    )
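A consequence of this rewrite is that every field carries a default, so the model is constructible with no arguments, and the Field descriptions and examples surface in the generated JSON schema. A minimal sketch; the field names and defaults are taken from the diff, and pydantic v2 is assumed (the Field(examples=...) keyword implies it):

from prompt_guard import PromptGuardConfig

cfg = PromptGuardConfig()      # valid in 0.1.5: every field has a default
print(cfg.embed_model_name)    # mxbai-embed-large
print(cfg.tfidf_ngram_range)   # (1, 3)

# Pydantic v2 carries the Field metadata into the schema.
schema = PromptGuardConfig.model_json_schema()
print(schema["properties"]["rag_top_k"]["description"])
# Number of top RAG matches to retrieve.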
prompt_guard/pipeline.py
CHANGED
@@ -6,8 +6,8 @@ from llama_index.embeddings.ollama import OllamaEmbedding
 from sklearn.feature_extraction.text import TfidfVectorizer
 
 from .config import (
-
-
+    BUILTIN_DEFAULT_PHRASES_RESOURCE,
+    BUILTIN_DEFAULT_SENTENCES_RESOURCE,
     PromptGuardConfig,
 )
 from .guards.judge_guard import LlmJudgeGuard
@@ -43,7 +43,7 @@ class GuardPipeline:
     def _build_tfidf_guard(self) -> TfIdfGuard:
         phrases = self._load_lines(
             self._config.phrases_path,
-            default_resource=
+            default_resource=BUILTIN_DEFAULT_PHRASES_RESOURCE,
         )
         vectorizer = TfidfVectorizer(ngram_range=self._config.tfidf_ngram_range)
         phrase_matrix = vectorizer.fit_transform(phrases)
@@ -57,7 +57,7 @@ class GuardPipeline:
     def _build_rag_guard(self) -> RagGuard:
         lines = self._load_lines(
             self._config.sentences_path,
-            default_resource=
+            default_resource=BUILTIN_DEFAULT_SENTENCES_RESOURCE,
        )
         docs = [Document(text=line) for line in lines]
         embed_kwargs = {"model_name": self._config.embed_model_name}
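Both hunks swap the old default_resource values (truncated in this view) for the new config constants. The diff does not show _load_lines itself; the following is a hedged sketch of the path-or-packaged-resource fallback those constants suggest, using importlib.resources. The function name and body are assumptions, not the package's code:

from importlib import resources


def load_lines(path: str | None, default_resource: str) -> list[str]:
    if path is not None:
        # An explicit path from the config wins.
        with open(path, encoding="utf-8") as f:
            text = f.read()
    else:
        # Fall back to the data bundled in the wheel, e.g.
        # default_resource = "data/phrases.txt" inside prompt_guard.
        text = (
            resources.files("prompt_guard")
            .joinpath(default_resource)
            .read_text(encoding="utf-8")
        )
    return [line.strip() for line in text.splitlines() if line.strip()]

The RECORD diff above confirms the matching resources exist in the wheel: prompt_guard/data/phrases.txt and prompt_guard/data/sentences.txt.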
{prompt_firewall-0.1.4.dist-info → prompt_firewall-0.1.5.dist-info}/WHEEL
File without changes

{prompt_firewall-0.1.4.dist-info → prompt_firewall-0.1.5.dist-info}/top_level.txt
File without changes