network-ai 3.2.0 → 3.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  **The plug-and-play AI agent orchestrator for TypeScript/Node.js -- connect 12 agent frameworks with zero glue code**
4
4
 
5
- [![Release](https://img.shields.io/badge/release-v3.2.0-blue.svg)](https://github.com/jovanSAPFIONEER/Network-AI/releases)
5
+ [![Release](https://img.shields.io/badge/release-v3.2.2-blue.svg)](https://github.com/jovanSAPFIONEER/Network-AI/releases)
6
6
  [![ClawHub](https://img.shields.io/badge/ClawHub-network--ai-orange.svg)](https://clawhub.ai/skills/network-ai)
7
7
  [![Node.js](https://img.shields.io/badge/node-%3E%3D18.0.0-brightgreen.svg)](https://nodejs.org)
8
8
  [![TypeScript](https://img.shields.io/badge/TypeScript-5.x-3178C6.svg)](https://typescriptlang.org)
@@ -441,7 +441,7 @@ The AuthGuardian evaluates requests using:
441
441
 
442
442
  | Factor | Weight | Description |
443
443
  |--------|--------|-------------|
444
- | Justification | 40% | Quality of business reason |
444
+ | Justification | 40% | Quality of business reason (hardened against prompt injection) |
445
445
  | Trust Level | 30% | Agent's established trust |
446
446
  | Risk Assessment | 30% | Resource sensitivity + scope |
447
447
 
@@ -530,10 +530,11 @@ python scripts/revoke_token.py --list-expired
530
530
  python scripts/revoke_token.py --cleanup
531
531
  ```
532
532
 
533
- **Test results (251 total):**
533
+ **Test results (315 total):**
534
534
  - `test-standalone.ts` -- 79 passed (blackboard, auth, integration, persistence, parallelization, coding domain, quality gate)
535
535
  - `test-security.ts` -- 33 passed (tokens, sanitization, rate limiting, encryption, permissions, audit)
536
536
  - `test-adapters.ts` -- 139 passed (12 adapters: Custom, LangChain, AutoGen, CrewAI, MCP, LlamaIndex, Semantic Kernel, OpenAI Assistants, Haystack, DSPy, Agno + registry routing, integration, edge cases)
537
+ - `test-priority.ts` -- 64 passed (priority-based preemption, conflict resolution, constructor overloads, backward compatibility)
537
538
 
538
539
  ## Audit Trail
539
540
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "network-ai",
3
- "version": "3.2.0",
3
+ "version": "3.2.2",
4
4
  "description": "AI agent orchestration framework for TypeScript/Node.js - plug-and-play multi-agent coordination with 12 frameworks (LangChain, AutoGen, CrewAI, OpenAI Assistants, LlamaIndex, Semantic Kernel, Haystack, DSPy, Agno, MCP, OpenClaw). Built-in security, swarm intelligence, and agentic workflow patterns.",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
@@ -61,31 +61,135 @@ def ensure_data_dir():
61
61
  return data_dir
62
62
 
63
63
 
64
+ def detect_injection(justification: str) -> bool:
65
+ """
66
+ Detect prompt-injection and manipulation patterns in justifications.
67
+
68
+ Returns True if the justification looks like a prompt-injection attempt.
69
+ """
70
+ injection_patterns = [
71
+ r'ignore\s+(previous|above|prior|all)',
72
+ r'override\s+(policy|restriction|rule|permission|security)',
73
+ r'system\s*prompt',
74
+ r'you\s+are\s+(now|a)',
75
+ r'act\s+as\s+(if|a|an)',
76
+ r'pretend\s+(to|that|you)',
77
+ r'bypass\s+(security|check|restriction|auth)',
78
+ r'grant\s+(me|access|permission)\s+(anyway|regardless|now)',
79
+ r'disregard\s+(policy|rule|restriction|previous)',
80
+ r'admin\s+(mode|access|override)',
81
+ r'sudo\b',
82
+ r'jailbreak',
83
+ r'do\s+not\s+(check|verify|validate|restrict)',
84
+ r'skip\s+(validation|verification|check)',
85
+ r'trust\s+level\s*[:=]',
86
+ r'score\s*[:=]+\s*[\d.]',
87
+ ]
88
+ text = justification.lower()
89
+ for pattern in injection_patterns:
90
+ if re.search(pattern, text):
91
+ return True
92
+ return False
93
+
94
+
64
95
  def score_justification(justification: str) -> float:
65
96
  """
66
- Score the quality of a justification.
67
-
68
- Criteria:
69
- - Length (more detail = better)
70
- - Contains task-related keywords
71
- - Contains specificity keywords
72
- - Doesn't contain test/debug keywords
97
+ Score the quality of a justification with hardened validation.
98
+
99
+ Defenses against prompt injection and keyword stuffing:
100
+ - Injection pattern detection (immediate reject)
101
+ - Maximum length cap (prevents obfuscation in long text)
102
+ - Keyword-stuffing detection (penalises unnatural keyword density)
103
+ - Unique-word ratio check (catches copy-paste padding)
104
+ - Structural coherence (requires natural sentence structure)
105
+
106
+ Criteria (after safety checks):
107
+ - Length (more detail = better, but capped)
108
+ - Contains task-related keywords (capped contribution)
109
+ - Contains specificity keywords (capped contribution)
110
+ - No test/debug keywords
111
+ - Structural coherence bonus
73
112
  """
113
+ # ----- Hard reject: injection patterns -----
114
+ if detect_injection(justification):
115
+ return 0.0
116
+
117
+ # ----- Hard reject: empty or whitespace-only -----
118
+ stripped = justification.strip()
119
+ if not stripped:
120
+ return 0.0
121
+
122
+ # ----- Hard cap: excessively long justifications are suspicious -----
123
+ MAX_JUSTIFICATION_LENGTH = 500
124
+ if len(stripped) > MAX_JUSTIFICATION_LENGTH:
125
+ return 0.1 # Suspiciously long — allow re-submission with concise text
126
+
127
+ words = stripped.split()
128
+ word_count = len(words)
129
+
130
+ # ----- Hard reject: too few words to be meaningful -----
131
+ if word_count < 3:
132
+ return 0.1
133
+
134
+ # ----- Repetition / padding detection -----
135
+ unique_words = set(w.lower() for w in words)
136
+ unique_ratio = len(unique_words) / word_count if word_count > 0 else 0
137
+ if unique_ratio < 0.4:
138
+ return 0.1 # More than 60% repeated words — likely padding
139
+
140
+ # ----- Keyword-stuffing detection -----
141
+ task_keywords = re.findall(
142
+ r'\b(task|purpose|need|require|generate|analyze|create|process)\b',
143
+ stripped, re.IGNORECASE,
144
+ )
145
+ specificity_keywords = re.findall(
146
+ r'\b(specific|particular|exact|quarterly|annual|report|summary)\b',
147
+ stripped, re.IGNORECASE,
148
+ )
149
+ total_matched = len(task_keywords) + len(specificity_keywords)
150
+ keyword_density = total_matched / word_count if word_count > 0 else 0
151
+ if keyword_density > 0.5:
152
+ return 0.1 # More than half the words are scoring keywords — stuffing
153
+
154
+ # ----- Scoring (defensive caps per category) -----
74
155
  score = 0.0
75
-
76
- if len(justification) > 20:
77
- score += 0.2
78
- if len(justification) > 50:
79
- score += 0.2
80
- if re.search(r'\b(task|purpose|need|require|generate|analyze|create|process)\b',
81
- justification, re.IGNORECASE):
82
- score += 0.2
83
- if re.search(r'\b(specific|particular|exact|quarterly|annual|report|summary)\b',
84
- justification, re.IGNORECASE):
85
- score += 0.2
86
- if not re.search(r'\b(test|debug|try|experiment)\b', justification, re.IGNORECASE):
87
- score += 0.2
88
-
156
+
157
+ # Length contribution (max 0.25)
158
+ if len(stripped) > 20:
159
+ score += 0.15
160
+ if len(stripped) > 50:
161
+ score += 0.10
162
+
163
+ # Task keyword presence (max 0.20, but only first match counts)
164
+ if task_keywords:
165
+ score += 0.20
166
+
167
+ # Specificity keyword presence (max 0.20, but only first match counts)
168
+ if specificity_keywords:
169
+ score += 0.20
170
+
171
+ # No test/debug markers (max 0.15)
172
+ if not re.search(r'\b(test|debug|try|experiment)\b', stripped, re.IGNORECASE):
173
+ score += 0.15
174
+
175
+ # Structural coherence: sentence-like structure (max 0.20)
176
+ # Must contain at least one verb-like pattern and read like prose
177
+ has_verb = bool(re.search(
178
+ r'\b(is|are|was|were|need|needs|require|requires|must|should|will|'
179
+ r'generate|generating|analyze|analyzing|create|creating|process|processing|'
180
+ r'prepare|preparing|compile|compiling|review|reviewing|access|accessing|'
181
+ r'retrieve|retrieving|export|exporting|send|sending|run|running)\b',
182
+ stripped, re.IGNORECASE,
183
+ ))
184
+ has_noun_object = bool(re.search(
185
+ r'\b(data|report|records|invoices?|orders?|customers?|accounts?|'
186
+ r'transactions?|files?|emails?|results?|metrics?|statistics?|'
187
+ r'analysis|documents?|exports?|payments?|entries|logs?|summaries)\b',
188
+ stripped, re.IGNORECASE,
189
+ ))
190
+ if has_verb and has_noun_object:
191
+ score += 0.20
192
+
89
193
  return min(score, 1.0)
90
194
 
91
195