cipher-security 5.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/cipher.js +465 -0
- package/lib/api/billing.js +321 -0
- package/lib/api/compliance.js +693 -0
- package/lib/api/controls.js +1401 -0
- package/lib/api/index.js +49 -0
- package/lib/api/marketplace.js +467 -0
- package/lib/api/openai-proxy.js +383 -0
- package/lib/api/server.js +685 -0
- package/lib/autonomous/feedback-loop.js +554 -0
- package/lib/autonomous/framework.js +512 -0
- package/lib/autonomous/index.js +97 -0
- package/lib/autonomous/leaderboard.js +594 -0
- package/lib/autonomous/modes/architect.js +412 -0
- package/lib/autonomous/modes/blue.js +386 -0
- package/lib/autonomous/modes/incident.js +684 -0
- package/lib/autonomous/modes/privacy.js +369 -0
- package/lib/autonomous/modes/purple.js +294 -0
- package/lib/autonomous/modes/recon.js +250 -0
- package/lib/autonomous/parallel.js +587 -0
- package/lib/autonomous/researcher.js +583 -0
- package/lib/autonomous/runner.js +955 -0
- package/lib/autonomous/scheduler.js +615 -0
- package/lib/autonomous/task-parser.js +127 -0
- package/lib/autonomous/validators/forensic.js +266 -0
- package/lib/autonomous/validators/osint.js +216 -0
- package/lib/autonomous/validators/privacy.js +296 -0
- package/lib/autonomous/validators/purple.js +298 -0
- package/lib/autonomous/validators/sigma.js +248 -0
- package/lib/autonomous/validators/threat-model.js +363 -0
- package/lib/benchmark/agent.js +119 -0
- package/lib/benchmark/baselines.js +43 -0
- package/lib/benchmark/builder.js +143 -0
- package/lib/benchmark/config.js +35 -0
- package/lib/benchmark/coordinator.js +91 -0
- package/lib/benchmark/index.js +20 -0
- package/lib/benchmark/llm.js +58 -0
- package/lib/benchmark/models.js +137 -0
- package/lib/benchmark/reporter.js +103 -0
- package/lib/benchmark/runner.js +103 -0
- package/lib/benchmark/sandbox.js +96 -0
- package/lib/benchmark/scorer.js +32 -0
- package/lib/benchmark/solver.js +166 -0
- package/lib/benchmark/tools.js +62 -0
- package/lib/bot/bot.js +130 -0
- package/lib/commands.js +99 -0
- package/lib/complexity.js +377 -0
- package/lib/config.js +213 -0
- package/lib/gateway/client.js +309 -0
- package/lib/gateway/commands.js +830 -0
- package/lib/gateway/config-validate.js +109 -0
- package/lib/gateway/gateway.js +367 -0
- package/lib/gateway/index.js +62 -0
- package/lib/gateway/mode.js +309 -0
- package/lib/gateway/plugins.js +222 -0
- package/lib/gateway/prompt.js +214 -0
- package/lib/mcp/server.js +262 -0
- package/lib/memory/compressor.js +425 -0
- package/lib/memory/engine.js +763 -0
- package/lib/memory/evolution.js +668 -0
- package/lib/memory/index.js +58 -0
- package/lib/memory/orchestrator.js +506 -0
- package/lib/memory/retriever.js +515 -0
- package/lib/memory/synthesizer.js +333 -0
- package/lib/pipeline/async-scanner.js +510 -0
- package/lib/pipeline/binary-analysis.js +1043 -0
- package/lib/pipeline/dom-xss-scanner.js +435 -0
- package/lib/pipeline/github-actions.js +792 -0
- package/lib/pipeline/index.js +124 -0
- package/lib/pipeline/osint.js +498 -0
- package/lib/pipeline/sarif.js +373 -0
- package/lib/pipeline/scanner.js +880 -0
- package/lib/pipeline/template-manager.js +525 -0
- package/lib/pipeline/xss-scanner.js +353 -0
- package/lib/setup-wizard.js +229 -0
- package/package.json +30 -0
package/lib/config.js
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
// Copyright (c) 2026 defconxt. All rights reserved.
|
|
2
|
+
// Licensed under AGPL-3.0 — see LICENSE file for details.
|
|
3
|
+
// CIPHER is a trademark of defconxt.
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* config.js — YAML config reader/writer for CIPHER CLI.
|
|
7
|
+
*
|
|
8
|
+
* Mirrors the Python gateway/config.py load_config() format and precedence:
|
|
9
|
+
* 1. Environment variables (LLM_BACKEND, ANTHROPIC_API_KEY) — highest
|
|
10
|
+
* 2. Project-root config.yaml (where pyproject.toml lives)
|
|
11
|
+
* 3. ~/.config/cipher/config.yaml — lowest
|
|
12
|
+
*
|
|
13
|
+
* Produces YAML that Python's yaml.safe_load() can parse identically.
|
|
14
|
+
*
|
|
15
|
+
* @module config
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'node:fs';
|
|
19
|
+
import { join, dirname, resolve } from 'node:path';
|
|
20
|
+
import { homedir } from 'node:os';
|
|
21
|
+
import { parse, stringify } from 'yaml';
|
|
22
|
+
|
|
23
|
+
// ---------------------------------------------------------------------------
|
|
24
|
+
// Path resolution
|
|
25
|
+
// ---------------------------------------------------------------------------
|
|
26
|
+
|
|
27
|
+
/**
 * Walk up from `startDir` looking for pyproject.toml — same logic as Python's
 * _find_project_root(). Returns the directory containing pyproject.toml, or null.
 *
 * @param {string} [startDir=process.cwd()] - Directory to start the upward walk from.
 * @returns {string | null} Absolute path of the directory holding pyproject.toml, or null.
 */
function findProjectRoot(startDir = process.cwd()) {
  let dir = resolve(startDir);
  // Bounded walk (max 20 hops) so an unexpected fs layout cannot loop forever.
  for (let depth = 0; depth < 20; depth++) {
    if (existsSync(join(dir, 'pyproject.toml'))) {
      return dir;
    }
    const up = dirname(dir);
    if (up === dir) break; // reached the filesystem root
    dir = up;
  }
  return null;
}
|
|
46
|
+
|
|
47
|
+
/**
 * Return the two config file paths CIPHER checks.
 *
 * @param {{ home?: string, cwd?: string }} [opts] — overrides for testing
 * @returns {{ userConfig: string, projectConfig: string }}
 */
export function getConfigPaths(opts = {}) {
  const home = opts.home || homedir();
  const cwd = opts.cwd || process.cwd();

  const userConfig = join(home, '.config', 'cipher', 'config.yaml');

  // Project config lives next to pyproject.toml when one is found; otherwise
  // fall back to a config.yaml directly under the starting directory.
  const root = findProjectRoot(cwd);
  const projectConfig = join(root ?? cwd, 'config.yaml');

  return { userConfig, projectConfig };
}
|
|
63
|
+
|
|
64
|
+
/**
 * Check whether any config file exists (user-home or project-root).
 *
 * @param {{ home?: string, cwd?: string }} [opts] — overrides for testing
 * @returns {boolean} True when at least one of the two config files is present.
 */
export function configExists(opts) {
  const { userConfig, projectConfig } = getConfigPaths(opts);
  return [userConfig, projectConfig].some((path) => existsSync(path));
}
|
|
74
|
+
|
|
75
|
+
// ---------------------------------------------------------------------------
|
|
76
|
+
// YAML file I/O
|
|
77
|
+
// ---------------------------------------------------------------------------
|
|
78
|
+
|
|
79
|
+
/**
 * Safely load a YAML file. Returns empty object if missing or empty.
 * Wraps parse errors with the file path for diagnostics.
 *
 * @param {string} filePath - Path to the YAML file to read.
 * @returns {Record<string, any>} Parsed mapping, or {} when absent/empty/non-mapping.
 */
function loadYamlFile(filePath) {
  if (!existsSync(filePath)) return {};

  let parsed;
  try {
    parsed = parse(readFileSync(filePath, 'utf-8'));
  } catch (err) {
    // Surface the offending path so the user knows which file is broken.
    throw new Error(
      `Failed to parse config at ${filePath}: ${err.message}`
    );
  }

  // Only a plain mapping is acceptable — scalars, arrays, and null become {}.
  const isMapping = parsed && typeof parsed === 'object' && !Array.isArray(parsed);
  return isMapping ? parsed : {};
}
|
|
98
|
+
|
|
99
|
+
// ---------------------------------------------------------------------------
|
|
100
|
+
// Deep merge helper
|
|
101
|
+
// ---------------------------------------------------------------------------
|
|
102
|
+
|
|
103
|
+
/**
 * Deep-merge `source` into `target`. Source values override target.
 * Only merges plain objects — arrays and primitives are replaced wholesale.
 *
 * Keys named "__proto__" are skipped entirely: assigning them via
 * `result[key] = …` on an object literal rewrites the result's prototype
 * instead of adding a data property — a prototype-pollution vector, since
 * this function is fed YAML parsed from config files.
 *
 * @param {Record<string, any>} target - Base object (not mutated).
 * @param {Record<string, any>} source - Overriding object (not mutated).
 * @returns {Record<string, any>} New merged object.
 */
function deepMerge(target, source) {
  const result = { ...target };
  const isPlainObject = (v) => v && typeof v === 'object' && !Array.isArray(v);
  for (const key of Object.keys(source)) {
    if (key === '__proto__') continue; // never merge prototype keys
    if (isPlainObject(source[key]) && isPlainObject(target[key])) {
      // Both sides are mappings — recurse so nested keys survive.
      result[key] = deepMerge(target[key], source[key]);
    } else {
      // Arrays and primitives are replaced wholesale by the source value.
      result[key] = source[key];
    }
  }
  return result;
}
|
|
129
|
+
|
|
130
|
+
// ---------------------------------------------------------------------------
|
|
131
|
+
// Load config
|
|
132
|
+
// ---------------------------------------------------------------------------
|
|
133
|
+
|
|
134
|
+
/**
 * Load configuration from YAML files and environment variable overrides.
 *
 * Precedence (highest → lowest):
 *   1. Environment variables
 *   2. Project-root config.yaml
 *   3. ~/.config/cipher/config.yaml
 *
 * @param {{ home?: string, cwd?: string }} [opts] — overrides for testing
 * @returns {Record<string, any>} Plain config object matching the Python shape.
 */
export function loadConfig(opts) {
  const { userConfig, projectConfig } = getConfigPaths(opts);

  // Project-root settings win over user-home settings via deep merge.
  const config = deepMerge(loadYamlFile(userConfig), loadYamlFile(projectConfig));

  // Environment variables take the highest precedence.
  const { LLM_BACKEND, ANTHROPIC_API_KEY } = process.env;
  if (LLM_BACKEND) {
    config.llm_backend = LLM_BACKEND;
  }
  if (ANTHROPIC_API_KEY) {
    // Key goes under the claude section; create it if the files didn't.
    if (!config.claude || typeof config.claude !== 'object') {
      config.claude = {};
    }
    config.claude.api_key = ANTHROPIC_API_KEY;
  }

  return config;
}
|
|
166
|
+
|
|
167
|
+
// ---------------------------------------------------------------------------
|
|
168
|
+
// Write config
|
|
169
|
+
// ---------------------------------------------------------------------------
|
|
170
|
+
|
|
171
|
+
/**
 * Write a config object as YAML. Creates parent directories if needed.
 *
 * @param {Record<string, any>} config — config object to write
 * @param {string} [targetPath] — defaults to ~/.config/cipher/config.yaml
 * @returns {string} The path that was written.
 * @throws {Error} When the directory cannot be created or the file cannot be written.
 */
export function writeConfig(config, targetPath) {
  const dest = targetPath || join(homedir(), '.config', 'cipher', 'config.yaml');

  // Ensure the parent directory exists before writing.
  const dir = dirname(dest);
  try {
    mkdirSync(dir, { recursive: true });
  } catch (err) {
    throw new Error(
      `Failed to create config directory ${dir}: ${err.code || err.message}`
    );
  }

  const yamlStr = stringify(config, {
    lineWidth: 0, // no line wrapping — matches Python's default_flow_style=False
    nullStr: '""', // null values serialize as "" rather than the bare word null
  });

  // Standard header comment — the trailing '' yields a final newline on join.
  const headerLines = [
    '# CIPHER Gateway Configuration',
    `# Written by cipher setup — ${new Date().toISOString()}`,
    '#',
    '# Precedence: env vars > project-root config.yaml > ~/.config/cipher/config.yaml',
    '',
  ];
  const header = headerLines.join('\n');

  try {
    writeFileSync(dest, header + yamlStr, 'utf-8');
  } catch (err) {
    throw new Error(
      `Failed to write config to ${dest}: ${err.code || err.message}`
    );
  }

  return dest;
}
|
|
@@ -0,0 +1,309 @@
|
|
|
1
|
+
// Copyright (c) 2026 defconxt. All rights reserved.
|
|
2
|
+
// Licensed under AGPL-3.0 — see LICENSE file for details.
|
|
3
|
+
// CIPHER is a trademark of defconxt.
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* client.js — LLM client factory for CIPHER gateway.
|
|
7
|
+
*
|
|
8
|
+
* Ports Python gateway/client.py:
|
|
9
|
+
* - makeClient(config) → { client, model }
|
|
10
|
+
* - Claude: Anthropic SDK
|
|
11
|
+
* - Ollama: Anthropic SDK with baseURL
|
|
12
|
+
* - LiteLLM: OpenAI SDK with adapter presenting Anthropic-compatible interface
|
|
13
|
+
*
|
|
14
|
+
* All SDK imports are lazy (dynamic import()) to preserve cold start <200ms.
|
|
15
|
+
* API keys are treated as opaque strings — never logged.
|
|
16
|
+
*
|
|
17
|
+
* @module gateway/client
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
// Debug logging to stderr; enabled only when CIPHER_DEBUG=1 is set at module
// load time (the check happens once, not per call).
const debug = (() => {
  if (process.env.CIPHER_DEBUG !== '1') return () => {};
  return (/** @type {string} */ msg) => {
    process.stderr.write(`[bridge:node] ${msg}\n`);
  };
})();
|
|
23
|
+
|
|
24
|
+
// ---------------------------------------------------------------------------
|
|
25
|
+
// LiteLLM → Anthropic adapter
|
|
26
|
+
// ---------------------------------------------------------------------------
|
|
27
|
+
|
|
28
|
+
/**
 * Converts Anthropic-style (system + messages) to OpenAI-style messages.
 *
 * Anthropic keeps the system prompt separate; OpenAI expects it as the first
 * message with role "system".
 *
 * @param {string|null} system - System prompt, or null/empty for none.
 * @param {Array<{role: string, content: string}>} messages
 * @returns {Array<{role: string, content: string}>}
 */
function toOpenAIMessages(system, messages) {
  const prefix = system ? [{ role: 'system', content: system }] : [];
  const converted = messages.map(({ role, content }) => ({ role, content }));
  return [...prefix, ...converted];
}
|
|
43
|
+
|
|
44
|
+
/**
 * Convert Anthropic tool schema to OpenAI function-calling format.
 *
 * Anthropic: { name, description, input_schema: {...} }
 * OpenAI:    { type: "function", function: { name, description, parameters: {...} } }
 *
 * @param {Array<{name: string, description: string, input_schema: Object}>} tools
 * @returns {Array<{type: string, function: {name: string, description: string, parameters: Object}}>}
 */
function toOpenAITools(tools) {
  const converted = [];
  for (const tool of tools) {
    converted.push({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description || '',
        // OpenAI requires a parameters schema; default to an empty object schema.
        parameters: tool.input_schema || { type: 'object', properties: {} },
      },
    });
  }
  return converted;
}
|
|
63
|
+
|
|
64
|
+
/**
 * Map OpenAI finish_reason to Anthropic stop_reason.
 *
 * Unrecognized values (including 'stop') map to 'end_turn'.
 *
 * @param {string} finishReason
 * @returns {string}
 */
function mapStopReason(finishReason) {
  switch (finishReason) {
    case 'tool_calls':
      return 'tool_use';
    case 'length':
      return 'max_tokens';
    default:
      return 'end_turn';
  }
}
|
|
74
|
+
|
|
75
|
+
/**
 * Build a LiteLLM adapter that presents an Anthropic-SDK-compatible interface.
 *
 * client.messages.create() and client.messages.stream() work identically
 * to the Anthropic SDK from the Gateway's perspective.
 *
 * NOTE(review): the `model` and `timeout` parameters are accepted but never
 * referenced in this body — every call site passes a per-request model `m`,
 * and no timeout is applied to the OpenAI requests here. Confirm whether
 * `timeout` should be forwarded (the OpenAI client is constructed with its
 * own timeout by the caller).
 *
 * @param {Object} openaiClient - OpenAI SDK instance
 * @param {string} model
 * @param {number} timeout
 * @returns {Object}
 */
function buildLiteLLMAdapter(openaiClient, model, timeout) {
  return {
    messages: {
      /**
       * Non-streaming completion with optional tool-use support.
       *
       * When `tools` is provided, forwards them to the OpenAI API in
       * function-calling format and maps any tool_calls in the response
       * back to Anthropic-style content blocks.
       *
       * @param {Object} opts
       * @param {string} opts.model
       * @param {number} opts.max_tokens
       * @param {string} [opts.system]
       * @param {Array} opts.messages
       * @param {Array} [opts.tools] - Anthropic-format tool schemas
       * @returns {Promise<Object>} Anthropic-shaped response
       */
      async create({ model: m, max_tokens, system, messages, tools }) {
        const reqParams = {
          model: m,
          messages: toOpenAIMessages(system || null, messages),
          max_tokens,
        };

        // Forward tools when provided
        if (tools && tools.length > 0) {
          reqParams.tools = toOpenAITools(tools);
        }

        const response = await openaiClient.chat.completions.create(reqParams);

        // Only the first choice is inspected; additional choices are ignored.
        const choice = response.choices?.[0];
        const message = choice?.message || {};
        const finishReason = choice?.finish_reason || 'stop';

        // Build Anthropic-style content blocks
        const content = [];

        // Text content
        if (message.content) {
          content.push({ text: message.content, type: 'text' });
        }

        // Tool calls → Anthropic tool_use blocks
        if (message.tool_calls && message.tool_calls.length > 0) {
          for (const tc of message.tool_calls) {
            let input = {};
            try {
              // OpenAI delivers arguments as a JSON string; tolerate either form.
              input = typeof tc.function.arguments === 'string'
                ? JSON.parse(tc.function.arguments)
                : tc.function.arguments || {};
            } catch {
              // Malformed JSON arguments degrade to an empty input object
              // rather than failing the whole response.
              input = {};
            }
            content.push({
              type: 'tool_use',
              id: tc.id,
              name: tc.function.name,
              input,
            });
          }
        }

        // If no content blocks at all, add empty text
        if (content.length === 0) {
          content.push({ text: '', type: 'text' });
        }

        return {
          content,
          model: m,
          role: 'assistant',
          stop_reason: mapStopReason(finishReason),
          usage: {
            input_tokens: response.usage?.prompt_tokens || 0,
            output_tokens: response.usage?.completion_tokens || 0,
          },
        };
      },

      /**
       * Streaming completion — returns object with .on('text', cb) pattern.
       *
       * The Anthropic Node.js SDK uses .on('text', cb) for streaming.
       * This adapter internally iterates the OpenAI stream and calls the cb.
       *
       * NOTE(review): the async iterator below and finalMessage() each issue
       * their own independent completions request — consuming both would hit
       * the API twice. Text listeners registered via .on('text') only fire on
       * the finalMessage() path, not during direct async iteration.
       *
       * @param {Object} opts
       * @param {string} opts.model
       * @param {number} opts.max_tokens
       * @param {string} [opts.system]
       * @param {Array<{role: string, content: string}>} opts.messages
       * @returns {Object} Stream-like object with .on() and async iteration
       */
      stream({ model: m, max_tokens, system, messages }) {
        /** @type {Array<{event: string, cb: Function}>} */
        const listeners = [];
        // Single finalMessage callback; registering again replaces the previous one.
        let finalMessageCb = null;
        // Memoized so repeated finalMessage() calls share one request.
        let streamPromise = null;

        const streamObj = {
          /**
           * Register event listener. Supported events: 'text', 'finalMessage'.
           */
          on(event, cb) {
            if (event === 'finalMessage') {
              finalMessageCb = cb;
            } else {
              listeners.push({ event, cb });
            }
            // Returns streamObj so registrations can be chained.
            return streamObj;
          },

          /**
           * Consume the stream — awaiting this drives the iteration.
           */
          async finalMessage() {
            if (!streamPromise) {
              streamPromise = _consumeStream();
            }
            return streamPromise;
          },

          /**
           * Async iterator for text chunks.
           */
          async *[Symbol.asyncIterator]() {
            // Independent request — does not share state with _consumeStream().
            const response = await openaiClient.chat.completions.create({
              model: m,
              messages: toOpenAIMessages(system || null, messages),
              max_tokens,
              stream: true,
            });

            for await (const chunk of response) {
              const delta = chunk.choices?.[0]?.delta?.content;
              if (delta) yield delta;
            }
          },
        };

        // Drives the streaming request, fanning each text delta out to
        // 'text' listeners and accumulating the full response text.
        async function _consumeStream() {
          const fullText = [];
          const response = await openaiClient.chat.completions.create({
            model: m,
            messages: toOpenAIMessages(system || null, messages),
            max_tokens,
            stream: true,
          });

          for await (const chunk of response) {
            const delta = chunk.choices?.[0]?.delta?.content;
            if (delta) {
              fullText.push(delta);
              for (const { event, cb } of listeners) {
                if (event === 'text') cb(delta);
              }
            }
          }

          // Anthropic-shaped final message (no stop_reason/usage on this path).
          const msg = {
            content: [{ text: fullText.join(''), type: 'text' }],
            model: m,
            role: 'assistant',
          };

          if (finalMessageCb) finalMessageCb(msg);
          return msg;
        }

        return streamObj;
      },
    },
  };
}
|
|
261
|
+
|
|
262
|
+
// ---------------------------------------------------------------------------
|
|
263
|
+
// Public factory
|
|
264
|
+
// ---------------------------------------------------------------------------
|
|
265
|
+
|
|
266
|
+
/**
 * Build a client for the configured backend.
 *
 * All SDK imports are lazy — called only when this function runs.
 *
 * @param {import('./config-validate.js').GatewayConfig} config
 * @returns {Promise<{client: Object, model: string}>}
 */
export async function makeClient(config) {
  switch (config.backend) {
    case 'ollama': {
      const { default: Anthropic } = await import('@anthropic-ai/sdk');
      const client = new Anthropic({
        baseURL: config.ollama_base_url,
        apiKey: 'ollama', // Ollama ignores the key; SDK constructor requires it
        timeout: config.ollama_timeout * 1000, // SDK expects milliseconds
      });
      debug(`client: ollama at ${config.ollama_base_url}, model=${config.ollama_model}`);
      return { client, model: config.ollama_model };
    }

    case 'litellm': {
      const { default: OpenAI } = await import('openai');
      const openaiClient = new OpenAI({
        apiKey: config.litellm_api_key || 'unused',
        baseURL: config.litellm_api_base || undefined,
        timeout: config.litellm_timeout * 1000,
      });
      const client = buildLiteLLMAdapter(openaiClient, config.litellm_model, config.litellm_timeout);
      debug(`client: litellm, model=${config.litellm_model}`);
      return { client, model: config.litellm_model };
    }

    default: {
      // Any other backend value is treated as 'claude', matching the original
      // fall-through behavior.
      const { default: Anthropic } = await import('@anthropic-ai/sdk');
      const client = new Anthropic({
        apiKey: config.claude_api_key,
        timeout: config.claude_timeout * 1000,
      });
      debug(`client: claude, model=${config.claude_model}`);
      return { client, model: config.claude_model };
    }
  }
}
|
|
307
|
+
|
|
308
|
+
// Exported for testing only
|
|
309
|
+
export { buildLiteLLMAdapter as _buildLiteLLMAdapter };
|