cipher-security 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/cipher.js +566 -0
- package/lib/api/billing.js +321 -0
- package/lib/api/compliance.js +693 -0
- package/lib/api/controls.js +1401 -0
- package/lib/api/index.js +49 -0
- package/lib/api/marketplace.js +467 -0
- package/lib/api/openai-proxy.js +383 -0
- package/lib/api/server.js +685 -0
- package/lib/autonomous/feedback-loop.js +554 -0
- package/lib/autonomous/framework.js +512 -0
- package/lib/autonomous/index.js +97 -0
- package/lib/autonomous/leaderboard.js +594 -0
- package/lib/autonomous/modes/architect.js +412 -0
- package/lib/autonomous/modes/blue.js +386 -0
- package/lib/autonomous/modes/incident.js +684 -0
- package/lib/autonomous/modes/privacy.js +369 -0
- package/lib/autonomous/modes/purple.js +294 -0
- package/lib/autonomous/modes/recon.js +250 -0
- package/lib/autonomous/parallel.js +587 -0
- package/lib/autonomous/researcher.js +583 -0
- package/lib/autonomous/runner.js +955 -0
- package/lib/autonomous/scheduler.js +615 -0
- package/lib/autonomous/task-parser.js +127 -0
- package/lib/autonomous/validators/forensic.js +266 -0
- package/lib/autonomous/validators/osint.js +216 -0
- package/lib/autonomous/validators/privacy.js +296 -0
- package/lib/autonomous/validators/purple.js +298 -0
- package/lib/autonomous/validators/sigma.js +248 -0
- package/lib/autonomous/validators/threat-model.js +363 -0
- package/lib/benchmark/agent.js +119 -0
- package/lib/benchmark/baselines.js +43 -0
- package/lib/benchmark/builder.js +143 -0
- package/lib/benchmark/config.js +35 -0
- package/lib/benchmark/coordinator.js +91 -0
- package/lib/benchmark/index.js +20 -0
- package/lib/benchmark/llm.js +58 -0
- package/lib/benchmark/models.js +137 -0
- package/lib/benchmark/reporter.js +103 -0
- package/lib/benchmark/runner.js +103 -0
- package/lib/benchmark/sandbox.js +96 -0
- package/lib/benchmark/scorer.js +32 -0
- package/lib/benchmark/solver.js +166 -0
- package/lib/benchmark/tools.js +62 -0
- package/lib/bot/bot.js +238 -0
- package/lib/brand.js +105 -0
- package/lib/commands.js +100 -0
- package/lib/complexity.js +377 -0
- package/lib/config.js +213 -0
- package/lib/gateway/client.js +309 -0
- package/lib/gateway/commands.js +991 -0
- package/lib/gateway/config-validate.js +109 -0
- package/lib/gateway/gateway.js +367 -0
- package/lib/gateway/index.js +62 -0
- package/lib/gateway/mode.js +309 -0
- package/lib/gateway/plugins.js +222 -0
- package/lib/gateway/prompt.js +214 -0
- package/lib/mcp/server.js +262 -0
- package/lib/memory/compressor.js +425 -0
- package/lib/memory/engine.js +763 -0
- package/lib/memory/evolution.js +668 -0
- package/lib/memory/index.js +58 -0
- package/lib/memory/orchestrator.js +506 -0
- package/lib/memory/retriever.js +515 -0
- package/lib/memory/synthesizer.js +333 -0
- package/lib/pipeline/async-scanner.js +510 -0
- package/lib/pipeline/binary-analysis.js +1043 -0
- package/lib/pipeline/dom-xss-scanner.js +435 -0
- package/lib/pipeline/github-actions.js +792 -0
- package/lib/pipeline/index.js +124 -0
- package/lib/pipeline/osint.js +498 -0
- package/lib/pipeline/sarif.js +373 -0
- package/lib/pipeline/scanner.js +880 -0
- package/lib/pipeline/template-manager.js +525 -0
- package/lib/pipeline/xss-scanner.js +353 -0
- package/lib/setup-wizard.js +288 -0
- package/package.json +31 -0
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
// Copyright (c) 2026 defconxt. All rights reserved.
|
|
2
|
+
// Licensed under AGPL-3.0 — see LICENSE file for details.
|
|
3
|
+
// CIPHER is a trademark of defconxt.
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* config-validate.js — Validates raw config from loadConfig() into a
|
|
7
|
+
* flat GatewayConfig-like object.
|
|
8
|
+
*
|
|
9
|
+
* Mirrors Python's GatewayConfig.__post_init__ validation:
|
|
10
|
+
* - backend must be one of ['ollama', 'claude', 'litellm']
|
|
11
|
+
* - required model field per backend
|
|
12
|
+
* - extracts nested sections into flat validated fields
|
|
13
|
+
*
|
|
14
|
+
* API keys are treated as opaque strings — never logged or included
|
|
15
|
+
* in error messages.
|
|
16
|
+
*
|
|
17
|
+
* @module gateway/config-validate
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
const VALID_BACKENDS = ['ollama', 'claude', 'litellm'];

/**
 * @typedef {Object} GatewayConfig
 * @property {string} backend - "ollama" | "claude" | "litellm"
 * @property {string} ollama_base_url
 * @property {string} ollama_model
 * @property {number} ollama_timeout
 * @property {string} claude_api_key
 * @property {string} claude_model
 * @property {number} claude_timeout
 * @property {string} litellm_model
 * @property {string} litellm_api_key
 * @property {number} litellm_timeout
 * @property {string} litellm_api_base
 */

/**
 * Parse a timeout setting into an integer.
 *
 * Falls back to `fallback` when the value is missing (null/undefined) or
 * cannot be parsed as an integer, so a malformed config value (e.g.
 * `timeout: "oops"`) never produces a NaN timeout downstream.
 *
 * @param {*} value - Raw timeout value from a config section.
 * @param {number} fallback - Default used when value is absent or invalid.
 * @returns {number}
 */
function parseTimeoutSetting(value, fallback) {
  const parsed = Number.parseInt(value ?? fallback, 10);
  return Number.isNaN(parsed) ? fallback : parsed;
}

/**
 * Validate a raw config object and produce a flat GatewayConfig.
 *
 * Mirrors the Python GatewayConfig.__post_init__ validation:
 * - backend must be one of VALID_BACKENDS
 * - the backend-specific model field is required
 * - nested config sections are flattened into validated fields
 *
 * API keys are treated as opaque strings — never logged or included
 * in error messages.
 *
 * @param {Record<string, any>} raw - Raw config from loadConfig()
 * @returns {GatewayConfig}
 * @throws {Error} If backend is invalid or required fields are missing
 */
export function validateConfig(raw) {
  if (!raw || typeof raw !== 'object') {
    throw new Error(
      "No configuration found.\nrun: cipher setup"
    );
  }

  const backend = raw.llm_backend || '';

  if (!backend) {
    throw new Error(
      "No LLM backend configured. Set 'llm_backend' in config.yaml or run: cipher setup"
    );
  }

  if (!VALID_BACKENDS.includes(backend)) {
    throw new Error(
      `Invalid backend '${backend}'. Must be one of ${VALID_BACKENDS.join(', ')}.\n` +
      "Set 'llm_backend' in config.yaml or run: cipher setup"
    );
  }

  // Non-object sections (missing, or a scalar from a malformed YAML) are
  // treated as empty so every field falls back to its default.
  const ollamaSection = (raw.ollama && typeof raw.ollama === 'object') ? raw.ollama : {};
  const claudeSection = (raw.claude && typeof raw.claude === 'object') ? raw.claude : {};
  const litellmSection = (raw.litellm && typeof raw.litellm === 'object') ? raw.litellm : {};

  const config = {
    backend,
    ollama_base_url: ollamaSection.base_url || 'http://127.0.0.1:11434',
    ollama_model: ollamaSection.model || '',
    ollama_timeout: parseTimeoutSetting(ollamaSection.timeout, 300),
    claude_api_key: claudeSection.api_key || '',
    claude_model: claudeSection.model || '',
    claude_timeout: parseTimeoutSetting(claudeSection.timeout, 60),
    litellm_model: litellmSection.model || '',
    litellm_api_key: litellmSection.api_key || '',
    litellm_timeout: parseTimeoutSetting(litellmSection.timeout, 120),
    litellm_api_base: litellmSection.api_base || '',
  };

  // Backend-specific required field validation
  if (backend === 'ollama' && !config.ollama_model) {
    throw new Error(
      "ollama_model must be set when backend is 'ollama'.\n" +
      "Set 'ollama.model' in config.yaml or run: cipher setup"
    );
  }

  if (backend === 'claude' && !config.claude_model) {
    throw new Error(
      "claude_model must be set when backend is 'claude'.\n" +
      "Set 'claude.model' in config.yaml or run: cipher setup"
    );
  }

  if (backend === 'litellm' && !config.litellm_model) {
    throw new Error(
      "litellm_model must be set when backend is 'litellm'.\n" +
      "Set 'litellm.model' in config.yaml or run: cipher setup"
    );
  }

  return config;
}

export { VALID_BACKENDS };
|
|
@@ -0,0 +1,367 @@
|
|
|
1
|
+
// Copyright (c) 2026 defconxt. All rights reserved.
|
|
2
|
+
// Licensed under AGPL-3.0 — see LICENSE file for details.
|
|
3
|
+
// CIPHER is a trademark of defconxt.
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* gateway.js — CIPHER Gateway orchestrator.
|
|
7
|
+
*
|
|
8
|
+
* Ports Python gateway/gateway.py:
|
|
9
|
+
* - Gateway class with send() and async *sendStream()
|
|
10
|
+
* - Config → mode detection → prompt assembly → RAG → LLM call → response
|
|
11
|
+
* - Error handling for timeout, connection, status, and generic errors
|
|
12
|
+
*
|
|
13
|
+
* @module gateway/gateway
|
|
14
|
+
*/
|
|
15
|
+
|
|
16
|
+
import { loadConfig } from '../config.js';
|
|
17
|
+
import { validateConfig } from './config-validate.js';
|
|
18
|
+
import { makeClient } from './client.js';
|
|
19
|
+
import { parseKeywordTable, detectMode, stripModePrefix } from './mode.js';
|
|
20
|
+
import { assembleSystemPrompt, loadRawClaudeMd } from './prompt.js';
|
|
21
|
+
import { PluginManager } from './plugins.js';
|
|
22
|
+
|
|
23
|
+
/**
 * Stderr debug logger for the Node bridge.
 *
 * Resolved once at module load: when CIPHER_DEBUG=1 it writes prefixed
 * lines to stderr, otherwise it is a no-op function.
 * @type {(msg: string) => void}
 */
const debug = (() => {
  if (process.env.CIPHER_DEBUG !== '1') {
    return () => {};
  }
  return (/** @type {string} */ msg) => process.stderr.write(`[bridge:node] ${msg}\n`);
})();

/**
 * Lazy-load the RAG retriever. Returns null if unavailable.
 *
 * A retriever is only returned when the memory module can be imported
 * AND its collection reports a positive count; every failure path falls
 * through to null so the gateway keeps working without retrieval.
 *
 * @returns {Promise<Object|null>}
 */
async function loadRetriever() {
  let retriever = null;
  try {
    const memory = await import('../memory/index.js');
    const candidate = new memory.AdaptiveRetriever();
    // `count` may be a method or a plain property depending on the
    // retriever implementation — normalize to a number first.
    const size = typeof candidate.count === 'function'
      ? await candidate.count()
      : candidate.count;
    if (size > 0) {
      retriever = candidate;
    } else {
      debug('Gateway: RAG collection empty — running without retrieval');
    }
  } catch (exc) {
    debug(`Gateway: RAG unavailable (${exc.message}) — running without retrieval`);
  }
  return retriever;
}
|
|
45
|
+
|
|
46
|
+
export class Gateway {
  /**
   * @param {Object} [opts]
   * @param {string} [opts.backendOverride] - Override config.backend
   * @param {boolean} [opts.rag=true] - Enable RAG retrieval
   * @param {Object} [opts.configOpts] - Options passed to loadConfig
   * @param {Object} [opts.client] - Pre-built client (for testing)
   * @param {string} [opts.model] - Pre-set model (for testing)
   * @param {Record<string, string[]>} [opts.keywordTable] - Pre-built keyword table (for testing)
   * @param {string} [opts.repoRoot] - Explicit repo root for prompt loading
   */
  constructor(opts = {}) {
    this._opts = opts;
    // Pre-supplied client/model/keywordTable short-circuit the matching
    // setup steps in _doInit(); they exist primarily for tests.
    this._client = opts.client || null;
    this._model = opts.model || null;
    this._keywordTable = opts.keywordTable || null;
    this._retriever = null;
    this._pluginManager = new PluginManager();
    // Lazy-init bookkeeping — init() runs on first send()/sendStream().
    this._initialized = false;
    this._initPromise = null;
    this._repoRoot = opts.repoRoot || undefined;
  }

  /**
   * Initialize the gateway — loads config, client, keyword table, and retriever.
   * Called lazily on first send/sendStream. Safe to call multiple times;
   * concurrent callers share the same in-flight _initPromise.
   *
   * NOTE(review): if _doInit() rejects, _initPromise stays cached in the
   * rejected state, so init() never retries — confirm this is intended.
   *
   * @returns {Promise<void>}
   */
  async init() {
    if (this._initialized) return;
    if (this._initPromise) return this._initPromise;

    this._initPromise = this._doInit();
    await this._initPromise;
    this._initialized = true;
  }

  /**
   * One-time setup: config load + validation, client construction,
   * keyword-table parse, and optional RAG retriever load.
   * @private
   */
  async _doInit() {
    // Skip config/client setup if pre-built (testing)
    if (!this._client) {
      const rawConfig = loadConfig(this._opts.configOpts);
      const config = validateConfig(rawConfig);

      if (this._opts.backendOverride) {
        config.backend = this._opts.backendOverride;
        debug(`Gateway: backend overridden to '${this._opts.backendOverride}'`);
      }

      const { client, model } = await makeClient(config);
      this._client = client;
      this._model = model;
      debug(`Gateway: client ready (model=${this._model})`);
    }

    // Cache keyword table at init so mode detection never re-parses CLAUDE.md.
    if (!this._keywordTable) {
      const rawClaudeMd = loadRawClaudeMd(this._repoRoot);
      this._keywordTable = parseKeywordTable(rawClaudeMd);
      debug(`Gateway: keyword table loaded (${Object.keys(this._keywordTable).length} modes)`);
    }

    // RAG retriever (lazy, graceful fallback) — loadRetriever() returns
    // null when the memory module is unavailable or the collection is empty.
    if (this._opts.rag !== false) {
      this._retriever = await loadRetriever();
      if (this._retriever) {
        debug('Gateway: RAG enabled');
      }
    }
  }

  /**
   * Send a message through the full CIPHER pipeline.
   *
   * Pipeline:
   * 1. Detect mode (explicit prefix or keyword scoring)
   * 2. If needs_clarification → return disambiguation string
   * 3. Assemble system prompt for detected mode
   * 4. Strip mode prefix from message
   * 5. Build messages list (history + new user message)
   * 6. Call LLM
   * 7. Prepend [MODE: X] header if missing
   * 8. Return response
   *
   * LLM failures do not throw: they are formatted into a user-visible
   * "[ERROR] ..." string via _handleLLMError.
   *
   * @param {string} message - User message text
   * @param {Array<{role: string, content: string}>} [history] - Prior messages
   * @returns {Promise<string>}
   */
  async send(message, history) {
    await this.init();

    // 1. Detect mode
    const [mode, needsClarification] = detectMode(message, this._keywordTable);

    // 2. Disambiguation short-circuit — ask the user instead of guessing.
    if (needsClarification) {
      debug(`Gateway: mode overlap detected (${mode}) — requesting clarification`);
      return `Multiple modes detected (${mode}). Are you approaching this offensively or defensively?`;
    }

    debug(`Gateway: routing to mode ${mode}`);

    // 3. Assemble system prompt
    let systemPrompt = assembleSystemPrompt(mode, this._repoRoot, this._pluginManager);

    // 3b. RAG retrieval — best-effort: any failure is logged and ignored.
    if (this._retriever) {
      try {
        const chunks = typeof this._retriever.query === 'function'
          ? await this._retriever.query(message, { topK: 5 })
          : [];
        if (chunks && chunks.length > 0) {
          // Chunks may carry text under `content` or `text`; drop empties.
          const contextBlock = chunks.map(c => c.content || c.text || '').filter(Boolean).join('\n\n');
          if (contextBlock) {
            systemPrompt = systemPrompt + '\n\n---\n\n' + contextBlock;
            debug(`Gateway: injected ${chunks.length} RAG chunks`);
          }
        }
      } catch (e) {
        debug(`Gateway: RAG query failed: ${e.message}`);
      }
    }

    // 4. Strip mode prefix
    const cleanMessage = stripModePrefix(message);

    // 5. Prepend [MODE: X] for LLM context
    const userContent = `[MODE: ${mode}] ${cleanMessage}`;

    // 6. Build messages list
    const messages = [];
    if (history) messages.push(...history);
    messages.push({ role: 'user', content: userContent });

    // 7. Call LLM
    try {
      const response = await this._client.messages.create({
        model: this._model,
        max_tokens: 4096,
        system: systemPrompt,
        messages,
      });

      // NOTE(review): assumes content[0] exists and is a text block —
      // an empty response would throw here; confirm upstream guarantees.
      let responseText = response.content[0].text;

      // 8. Ensure [MODE: X] header
      if (!responseText.startsWith('[MODE:')) {
        responseText = `[MODE: ${mode}]\n${responseText}`;
      }

      // Strip duplicate header if LLM echoed it twice
      // (split limit 3 keeps only the first lines needed for the check).
      const lines = responseText.split('\n', 3);
      if (lines.length >= 2 && lines[0].startsWith('[MODE:') && lines[1].startsWith('[MODE:')) {
        responseText = lines.length > 2
          ? [lines[0], ...responseText.split('\n').slice(2)].join('\n')
          : lines[0];
      }

      return responseText;

    } catch (err) {
      return _handleLLMError(err);
    }
  }

  /**
   * Send a message and yield response tokens as they arrive.
   *
   * Same pipeline as send() but uses streaming API. Yields strings.
   * The first yield is the [MODE: X] header (emitted lazily, just before
   * the first token, so an empty stream yields nothing).
   *
   * @param {string} message
   * @param {Array<{role: string, content: string}>} [history]
   * @returns {AsyncGenerator<string>}
   */
  async *sendStream(message, history) {
    await this.init();

    const [mode, needsClarification] = detectMode(message, this._keywordTable);

    if (needsClarification) {
      yield `Multiple modes detected (${mode}). Are you approaching this offensively or defensively?`;
      return;
    }

    let systemPrompt = assembleSystemPrompt(mode, this._repoRoot, this._pluginManager);

    // RAG retrieval — best-effort, mirrors send(); failures are swallowed
    // (no debug here, unlike send()).
    if (this._retriever) {
      try {
        const chunks = typeof this._retriever.query === 'function'
          ? await this._retriever.query(message, { topK: 5 })
          : [];
        if (chunks && chunks.length > 0) {
          const contextBlock = chunks.map(c => c.content || c.text || '').filter(Boolean).join('\n\n');
          if (contextBlock) {
            systemPrompt = systemPrompt + '\n\n---\n\n' + contextBlock;
          }
        }
      } catch {
        // RAG query failed — continue without it
      }
    }

    const cleanMessage = stripModePrefix(message);
    const userContent = `[MODE: ${mode}] ${cleanMessage}`;

    const messages = [];
    if (history) messages.push(...history);
    messages.push({ role: 'user', content: userContent });

    let headerSent = false;

    try {
      // Use the Anthropic SDK streaming pattern:
      // stream = client.messages.stream({...})
      // Then consume via .on('text', cb) + await stream.finalMessage()
      // Or for LiteLLM adapter, iterate the async iterator.
      const stream = this._client.messages.stream({
        model: this._model,
        max_tokens: 4096,
        system: systemPrompt,
        messages,
      });

      // Check if the stream supports async iteration (LiteLLM adapter)
      if (stream[Symbol.asyncIterator]) {
        for await (const text of stream) {
          if (!headerSent) {
            yield `[MODE: ${mode}]\n`;
            headerSent = true;
          }
          yield text;
        }
      } else {
        // Anthropic SDK: use event-to-queue bridge via Promise chain.
        // .on('text') events are pushed into `queue` when no consumer is
        // waiting, or handed directly to the pending `resolve` otherwise.
        const queue = [];
        let resolve = null;   // pending consumer resolver, if any
        let done = false;     // stream finished (success or error)
        let error = null;     // terminal error, surfaced after drain

        stream.on('text', (text) => {
          if (resolve) {
            // A consumer is parked in the while-loop below: wake it directly.
            const r = resolve;
            resolve = null;
            r({ value: text, done: false });
          } else {
            queue.push(text);
          }
        });

        // Drive the stream to completion; on settle, wake any parked consumer.
        const finalPromise = stream.finalMessage().then(() => {
          done = true;
          if (resolve) {
            const r = resolve;
            resolve = null;
            r({ value: undefined, done: true });
          }
        }).catch((err) => {
          error = err;
          done = true;
          if (resolve) {
            const r = resolve;
            resolve = null;
            r({ value: undefined, done: true });
          }
        });

        // Consumer loop: drain the queue first, then park on a fresh Promise
        // until the next event or completion. Single-threaded JS guarantees
        // no event fires between the `done` check and `resolve = r`.
        while (true) {
          let text;
          if (queue.length > 0) {
            text = queue.shift();
          } else if (done) {
            break;
          } else {
            const result = await new Promise(r => { resolve = r; });
            if (result.done) break;
            text = result.value;
          }

          if (!headerSent) {
            yield `[MODE: ${mode}]\n`;
            headerSent = true;
          }
          yield text;
        }

        // Await so a rejection is not left floating; error was captured above.
        await finalPromise;
        if (error) {
          yield _handleLLMError(error);
        }
      }

    } catch (err) {
      yield _handleLLMError(err);
    }
  }
}
|
|
347
|
+
|
|
348
|
+
/**
|
|
349
|
+
* Format an LLM error into a user-visible [ERROR] string.
|
|
350
|
+
*
|
|
351
|
+
* @param {Error} err
|
|
352
|
+
* @returns {string}
|
|
353
|
+
*/
|
|
354
|
+
/**
 * Format an LLM error into a user-visible [ERROR] string.
 *
 * Never throws: tolerates null/undefined and non-Error values, since
 * callers funnel arbitrary caught values here. Classification checks the
 * constructor name first (SDK-typed errors), then falls back to message
 * sniffing / a status property.
 *
 * @param {*} err - The caught value (usually an Error subclass).
 * @returns {string} A "[ERROR] ..." message safe to show the user.
 */
function _handleLLMError(err) {
  const name = err?.constructor?.name || 'Error';

  // Timeout: SDK APITimeoutError or any message mentioning a timeout.
  if (name === 'APITimeoutError' || err?.message?.includes('timed out')) {
    return '[ERROR] Backend timed out. Is the LLM backend running?';
  }
  // Connection failure: SDK APIConnectionError or a refused TCP connect.
  if (name === 'APIConnectionError' || err?.message?.includes('ECONNREFUSED')) {
    return `[ERROR] Cannot reach backend: ${err.message}`;
  }
  // HTTP-level failure carrying a status code.
  if (name === 'APIStatusError' || err?.status) {
    return `[ERROR] Backend returned ${err.status}: ${err.message}`;
  }
  return `[ERROR] LLM call failed: ${err?.message || err}`;
}
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
// Copyright (c) 2026 defconxt. All rights reserved.
// Licensed under AGPL-3.0 — see LICENSE file for details.
// CIPHER is a trademark of defconxt.

/**
 * CIPHER Gateway — Public API surface.
 *
 * All downstream consumers (cipher.js, API, MCP) import from this barrel.
 * Re-exports only — no logic lives in this module.
 *
 * @module gateway
 */

// Config validation
export { validateConfig, VALID_BACKENDS } from './config-validate.js';

// Mode detection
export { parseKeywordTable, detectMode, stripModePrefix } from './mode.js';

// Prompt assembly
export {
  loadBasePrompt,
  loadRawClaudeMd,
  loadSkill,
  assembleSystemPrompt,
  MODE_SKILL_MAP,
  REPO_ROOT,
} from './prompt.js';

// Plugin management
export { PluginManager } from './plugins.js';

// LLM client factory
export { makeClient } from './client.js';

// Gateway orchestrator
export { Gateway } from './gateway.js';

// Command handlers (one per CLI subcommand)
export {
  handleSearch,
  handleStore,
  handleStats,
  handleScore,
  handleMemoryExport,
  handleMemoryImport,
  handleIngest,
  handleStatus,
  handleDiff,
  handleWorkflow,
  handleSarif,
  handleOsint,
  handleDomains,
  handleSkills,
  handleVersion,
  handleDoctor,
  handlePlugin,
  handleQuery,
  handleLeaderboard,
  handleFeedback,
  handleMarketplace,
  handleCompliance,
} from './commands.js';
|