prompt-firewall 0.1.4.tar.gz → 0.1.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20)
  1. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/PKG-INFO +1 -3
  2. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/README.md +0 -2
  3. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/pyproject.toml +1 -1
  4. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_firewall.egg-info/PKG-INFO +1 -3
  5. prompt_firewall-0.1.5/src/prompt_guard/config.py +77 -0
  6. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_guard/pipeline.py +4 -4
  7. prompt_firewall-0.1.4/src/prompt_guard/config.py +0 -30
  8. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/setup.cfg +0 -0
  9. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_firewall.egg-info/SOURCES.txt +0 -0
  10. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_firewall.egg-info/dependency_links.txt +0 -0
  11. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_firewall.egg-info/requires.txt +0 -0
  12. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_firewall.egg-info/top_level.txt +0 -0
  13. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_guard/__init__.py +0 -0
  14. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_guard/data/phrases.txt +0 -0
  15. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_guard/data/sentences.txt +0 -0
  16. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_guard/guards/__init__.py +0 -0
  17. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_guard/guards/judge_guard.py +0 -0
  18. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_guard/guards/rag_guard.py +0 -0
  19. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_guard/guards/tfidf_guard.py +0 -0
  20. {prompt_firewall-0.1.4 → prompt_firewall-0.1.5}/src/prompt_guard/protocols.py +0 -0
--- prompt_firewall-0.1.4/PKG-INFO
+++ prompt_firewall-0.1.5/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompt-firewall
-Version: 0.1.4
+Version: 0.1.5
 Summary: Prompt guard library for composing multiple safety checks.
 Requires-Python: >=3.13
 Description-Content-Type: text/markdown
@@ -22,8 +22,6 @@ Composable prompt-safety guards as a lightweight Python library. It provides:
 from prompt_guard import GuardPipeline, PromptGuardConfig
 
 cfg = PromptGuardConfig(
-    phrases_path="phrases.txt",
-    sentences_path="sentences.txt",
     embed_model_name="mxbai-embed-large",
     judge_model_name="qwen2.5:3b-instruct",
 )
--- prompt_firewall-0.1.4/README.md
+++ prompt_firewall-0.1.5/README.md
@@ -11,8 +11,6 @@ Composable prompt-safety guards as a lightweight Python library. It provides:
 from prompt_guard import GuardPipeline, PromptGuardConfig
 
 cfg = PromptGuardConfig(
-    phrases_path="phrases.txt",
-    sentences_path="sentences.txt",
     embed_model_name="mxbai-embed-large",
     judge_model_name="qwen2.5:3b-instruct",
 )
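
The two removed README lines reflect the behavior change in 0.1.5: `phrases_path` and `sentences_path` are now optional and fall back to the phrase/sentence lists bundled with the package, so the quick-start no longer passes them. A minimal usage sketch of the updated example; the `GuardPipeline(cfg)` call is an assumption based on the import shown above, since this diff does not include the pipeline's public constructor or methods:

    from prompt_guard import GuardPipeline, PromptGuardConfig

    # 0.1.5 default: omit the corpus paths and the pipeline falls back to
    # the data/phrases.txt and data/sentences.txt files shipped with the package.
    cfg = PromptGuardConfig(
        embed_model_name="mxbai-embed-large",
        judge_model_name="qwen2.5:3b-instruct",
    )

    # Explicit paths still override the bundled defaults.
    custom = PromptGuardConfig(
        phrases_path="/path/to/phrases.txt",
        sentences_path="/path/to/sentences.txt",
    )

    pipeline = GuardPipeline(cfg)  # assumed constructor shape; not shown in this diff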
--- prompt_firewall-0.1.4/pyproject.toml
+++ prompt_firewall-0.1.5/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "prompt-firewall"
-version = "0.1.4"
+version = "0.1.5"
 description = "Prompt guard library for composing multiple safety checks."
 readme = "README.md"
 requires-python = ">=3.13"
--- prompt_firewall-0.1.4/src/prompt_firewall.egg-info/PKG-INFO
+++ prompt_firewall-0.1.5/src/prompt_firewall.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompt-firewall
-Version: 0.1.4
+Version: 0.1.5
 Summary: Prompt guard library for composing multiple safety checks.
 Requires-Python: >=3.13
 Description-Content-Type: text/markdown
@@ -22,8 +22,6 @@ Composable prompt-safety guards as a lightweight Python library. It provides:
 from prompt_guard import GuardPipeline, PromptGuardConfig
 
 cfg = PromptGuardConfig(
-    phrases_path="phrases.txt",
-    sentences_path="sentences.txt",
     embed_model_name="mxbai-embed-large",
     judge_model_name="qwen2.5:3b-instruct",
 )
--- /dev/null
+++ prompt_firewall-0.1.5/src/prompt_guard/config.py
@@ -0,0 +1,77 @@
+from pydantic import BaseModel, Field
+
+
+BUILTIN_DEFAULT_PHRASES_RESOURCE = "data/phrases.txt"
+BUILTIN_DEFAULT_SENTENCES_RESOURCE = "data/sentences.txt"
+
+
+class PromptGuardConfig(BaseModel):
+    phrases_path: str | None = Field(
+        default=None,
+        examples=["/path/to/phrases.txt"],
+        description="Path to phrases file (one phrase per line).",
+    )
+    sentences_path: str | None = Field(
+        default=None,
+        examples=["/path/to/sentences.txt"],
+        description="Path to sentences file (one sentence per line).",
+    )
+
+    tfidf_top_k: int = Field(
+        default=5,
+        examples=[5],
+        description="Number of top TF-IDF matches to keep.",
+    )
+    tfidf_ngram_range: tuple[int, int] = Field(
+        default=(1, 3),
+        examples=[(1, 3)],
+        description="N-gram range for TF-IDF vectorizer.",
+    )
+    rag_top_k: int = Field(
+        default=5,
+        examples=[5],
+        description="Number of top RAG matches to retrieve.",
+    )
+
+    embed_model_name: str = Field(
+        default="mxbai-embed-large",
+        examples=["mxbai-embed-large"],
+        description="Ollama embedding model name.",
+    )
+    judge_model_name: str = Field(
+        default="qwen2.5:3b-instruct",
+        examples=["qwen2.5:3b-instruct"],
+        description="Ollama judge model name.",
+    )
+    judge_temperature: float = Field(
+        default=0.0,
+        examples=[0.0],
+        description="LLM judge temperature.",
+    )
+    judge_max_tokens: int = Field(
+        default=256,
+        examples=[256],
+        description="LLM judge max tokens.",
+    )
+
+    base_url: str | None = Field(
+        default=None,
+        examples=["http://localhost:11434"],
+        description="Optional base URL for Ollama.",
+    )
+
+    enable_tfidf: bool = Field(
+        default=True,
+        examples=[True],
+        description="Enable TF-IDF guard.",
+    )
+    enable_rag: bool = Field(
+        default=True,
+        examples=[True],
+        description="Enable RAG guard.",
+    )
+    enable_judge: bool = Field(
+        default=True,
+        examples=[True],
+        description="Enable LLM judge guard.",
+    )
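
The new `config.py` replaces the old `@dataclass(slots=True)` definition (removed below) with a pydantic `BaseModel`, so field values are validated at construction time and the `description`/`examples` metadata becomes machine-readable. A small standalone sketch of what that buys, assuming pydantic v2 (which provides `Field(examples=...)` and `model_json_schema()`):

    from pydantic import ValidationError

    from prompt_guard.config import PromptGuardConfig

    cfg = PromptGuardConfig()          # every field has a default
    print(cfg.tfidf_ngram_range)       # (1, 3)

    # The old dataclass accepted any value silently; the pydantic model
    # rejects values it cannot coerce to the declared type.
    try:
        PromptGuardConfig(tfidf_top_k="not a number")
    except ValidationError as exc:
        print(exc.errors()[0]["type"])  # int_parsing

    # Field descriptions and examples now surface in the JSON schema.
    schema = PromptGuardConfig.model_json_schema()
    print(schema["properties"]["embed_model_name"]["description"])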
--- prompt_firewall-0.1.4/src/prompt_guard/pipeline.py
+++ prompt_firewall-0.1.5/src/prompt_guard/pipeline.py
@@ -6,8 +6,8 @@ from llama_index.embeddings.ollama import OllamaEmbedding
 from sklearn.feature_extraction.text import TfidfVectorizer
 
 from .config import (
-    DEFAULT_PHRASES_RESOURCE,
-    DEFAULT_SENTENCES_RESOURCE,
+    BUILTIN_DEFAULT_PHRASES_RESOURCE,
+    BUILTIN_DEFAULT_SENTENCES_RESOURCE,
     PromptGuardConfig,
 )
 from .guards.judge_guard import LlmJudgeGuard
@@ -43,7 +43,7 @@ class GuardPipeline:
     def _build_tfidf_guard(self) -> TfIdfGuard:
         phrases = self._load_lines(
             self._config.phrases_path,
-            default_resource=DEFAULT_PHRASES_RESOURCE,
+            default_resource=BUILTIN_DEFAULT_PHRASES_RESOURCE,
         )
         vectorizer = TfidfVectorizer(ngram_range=self._config.tfidf_ngram_range)
         phrase_matrix = vectorizer.fit_transform(phrases)
@@ -57,7 +57,7 @@ class GuardPipeline:
     def _build_rag_guard(self) -> RagGuard:
         lines = self._load_lines(
             self._config.sentences_path,
-            default_resource=DEFAULT_SENTENCES_RESOURCE,
+            default_resource=BUILTIN_DEFAULT_SENTENCES_RESOURCE,
         )
         docs = [Document(text=line) for line in lines]
         embed_kwargs = {"model_name": self._config.embed_model_name}
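
The constant rename (`DEFAULT_*` → `BUILTIN_DEFAULT_*`) is mechanical; the values are still resource names relative to the `prompt_guard` package. The `_load_lines` helper itself is not included in this diff, so the following is only a hypothetical sketch of how such a path-or-packaged-resource fallback is typically written with `importlib.resources`; the real implementation may differ:

    from importlib import resources
    from pathlib import Path


    def load_lines(path: str | None, default_resource: str) -> list[str]:
        """Return non-empty lines from an explicit file or a packaged resource.

        Illustrative only; not the package's actual _load_lines.
        """
        if path is not None:
            text = Path(path).read_text(encoding="utf-8")
        else:
            # default_resource is e.g. "data/phrases.txt", relative to prompt_guard
            text = (
                resources.files("prompt_guard")
                .joinpath(default_resource)
                .read_text(encoding="utf-8")
            )
        return [line.strip() for line in text.splitlines() if line.strip()]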
@@ -1,30 +0,0 @@
1
- from dataclasses import dataclass
2
-
3
-
4
- DEFAULT_PHRASES_RESOURCE = "data/phrases.txt"
5
- DEFAULT_SENTENCES_RESOURCE = "data/sentences.txt"
6
-
7
-
8
- @dataclass(slots=True)
9
- class PromptGuardConfig:
10
- phrases_path: str | None = None
11
- sentences_path: str | None = None
12
-
13
- tfidf_top_k: int = 5
14
- tfidf_ngram_range: tuple[int, int] = (1, 3)
15
- rag_top_k: int = 5
16
-
17
- embed_model_name: str = "mxbai-embed-large"
18
- judge_model_name: str = "qwen2.5:3b-instruct"
19
- judge_temperature: float = 0.0
20
- judge_max_tokens: int = 256
21
-
22
- base_url: str | None = None
23
-
24
- tfidf_limit: float = 0.0
25
- rag_limit: float = 0.0
26
- judge_limit: float = 0.0
27
-
28
- enable_tfidf: bool = True
29
- enable_rag: bool = True
30
- enable_judge: bool = True