llm_guardrail 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1 @@
1
+ # llm_Guardrails
package/index.js ADDED
@@ -0,0 +1,42 @@
1
+ import { execFile } from "child_process";
2
+ import path from "path";
3
+ import { fileURLToPath } from "url";
4
+
5
+ const __filename = fileURLToPath(import.meta.url);
6
+ const __dirname = path.dirname(__filename);
7
+
8
+ const PYTHON = "D:/npm/llm_Guardrails/venv/Scripts/python.exe";
9
+
10
+
11
+ export function check(prompt) {
12
+ return new Promise((resolve, reject) => {
13
+ if (typeof prompt !== "string") {
14
+ return reject(new Error("Prompt must be a string"));
15
+ }
16
+
17
+ const scriptPath = path.join(__dirname, "model", "predict.py");
18
+
19
+
20
+ execFile(
21
+ PYTHON,
22
+ [scriptPath, prompt],
23
+ {},
24
+ (err, stdout, stderr) => {
25
+ if (err) return reject(err);
26
+ if (stderr) return reject(new Error(stderr));
27
+
28
+ try {
29
+ const result = JSON.parse(stdout);
30
+ resolve({
31
+ allowed: result.injective === 0,
32
+ ...result
33
+ });
34
+ } catch (e) {
35
+ reject(new Error("Invalid JSON from Python"));
36
+ }
37
+ }
38
+ );
39
+ });
40
+ }
41
+
42
+ export default { check };
File without changes
Binary file
Binary file
Binary file
@@ -0,0 +1,32 @@
1
+ import sys
2
+ import json
3
+ import joblib
4
+ from pathlib import Path
5
+
6
+ # ---- safety check ----
7
+ if len(sys.argv) < 2:
8
+ print(json.dumps({"error": "No input text provided"}))
9
+ sys.exit(1)
10
+
11
+ BASE_DIR = Path(__file__).resolve().parent
12
+ text = sys.argv[1]
13
+
14
+ # ---- load combined artifact ----
15
+ artifact = joblib.load(BASE_DIR / "prompt_injection_detector.joblib")
16
+
17
+ model = artifact["model"]
18
+ vectorizer = artifact["vectorizer"]
19
+ threshold = artifact.get("threshold", 0.5)
20
+
21
+ # ---- predict ----
22
+ X = vectorizer.transform([text])
23
+ prob = model.predict_proba(X)[0][1]
24
+ flagged = prob >= threshold
25
+
26
+ result = {
27
+ "injective": int(flagged),
28
+ "probability": round(float(prob), 4),
29
+ "threshold": threshold
30
+ }
31
+
32
+ print(json.dumps(result))
package/package.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "name": "llm_guardrail",
3
+ "version": "1.0.0",
4
+ "description": "A lightweight, low-latency ML-powered guardrail to stop prompt injection attacks before they reach your LLM.",
5
+ "homepage": "https://github.com/Frank2006x/llm_Guardrails#readme",
6
+ "bugs": {
7
+ "url": "https://github.com/Frank2006x/llm_Guardrails/issues"
8
+ },
9
+ "keywords": [
10
+ "llm",
11
+ "ai",
12
+ "llm-security",
13
+ "prompt-injection",
14
+ "guardrails",
15
+ "ai-safety",
16
+ "llm-guardrail",
17
+ "jailbreak-detection",
18
+ "rag-security",
19
+ "prompt-security",
20
+ "ml-security",
21
+ "low-latency"
22
+ ],
23
+ "repository": {
24
+ "type": "git",
25
+ "url": "git+https://github.com/Frank2006x/llm_Guardrails.git"
26
+ },
27
+ "license": "ISC",
28
+ "author": "Frank2006x",
29
+ "type": "module",
30
+ "main": "index.js",
31
+ "scripts": {
32
+ "test": "echo \"Error: no test specified\" && exit 1"
33
+ }
34
+ }