cipher-security 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/bin/cipher.js +566 -0
  2. package/lib/api/billing.js +321 -0
  3. package/lib/api/compliance.js +693 -0
  4. package/lib/api/controls.js +1401 -0
  5. package/lib/api/index.js +49 -0
  6. package/lib/api/marketplace.js +467 -0
  7. package/lib/api/openai-proxy.js +383 -0
  8. package/lib/api/server.js +685 -0
  9. package/lib/autonomous/feedback-loop.js +554 -0
  10. package/lib/autonomous/framework.js +512 -0
  11. package/lib/autonomous/index.js +97 -0
  12. package/lib/autonomous/leaderboard.js +594 -0
  13. package/lib/autonomous/modes/architect.js +412 -0
  14. package/lib/autonomous/modes/blue.js +386 -0
  15. package/lib/autonomous/modes/incident.js +684 -0
  16. package/lib/autonomous/modes/privacy.js +369 -0
  17. package/lib/autonomous/modes/purple.js +294 -0
  18. package/lib/autonomous/modes/recon.js +250 -0
  19. package/lib/autonomous/parallel.js +587 -0
  20. package/lib/autonomous/researcher.js +583 -0
  21. package/lib/autonomous/runner.js +955 -0
  22. package/lib/autonomous/scheduler.js +615 -0
  23. package/lib/autonomous/task-parser.js +127 -0
  24. package/lib/autonomous/validators/forensic.js +266 -0
  25. package/lib/autonomous/validators/osint.js +216 -0
  26. package/lib/autonomous/validators/privacy.js +296 -0
  27. package/lib/autonomous/validators/purple.js +298 -0
  28. package/lib/autonomous/validators/sigma.js +248 -0
  29. package/lib/autonomous/validators/threat-model.js +363 -0
  30. package/lib/benchmark/agent.js +119 -0
  31. package/lib/benchmark/baselines.js +43 -0
  32. package/lib/benchmark/builder.js +143 -0
  33. package/lib/benchmark/config.js +35 -0
  34. package/lib/benchmark/coordinator.js +91 -0
  35. package/lib/benchmark/index.js +20 -0
  36. package/lib/benchmark/llm.js +58 -0
  37. package/lib/benchmark/models.js +137 -0
  38. package/lib/benchmark/reporter.js +103 -0
  39. package/lib/benchmark/runner.js +103 -0
  40. package/lib/benchmark/sandbox.js +96 -0
  41. package/lib/benchmark/scorer.js +32 -0
  42. package/lib/benchmark/solver.js +166 -0
  43. package/lib/benchmark/tools.js +62 -0
  44. package/lib/bot/bot.js +238 -0
  45. package/lib/brand.js +105 -0
  46. package/lib/commands.js +100 -0
  47. package/lib/complexity.js +377 -0
  48. package/lib/config.js +213 -0
  49. package/lib/gateway/client.js +309 -0
  50. package/lib/gateway/commands.js +991 -0
  51. package/lib/gateway/config-validate.js +109 -0
  52. package/lib/gateway/gateway.js +367 -0
  53. package/lib/gateway/index.js +62 -0
  54. package/lib/gateway/mode.js +309 -0
  55. package/lib/gateway/plugins.js +222 -0
  56. package/lib/gateway/prompt.js +214 -0
  57. package/lib/mcp/server.js +262 -0
  58. package/lib/memory/compressor.js +425 -0
  59. package/lib/memory/engine.js +763 -0
  60. package/lib/memory/evolution.js +668 -0
  61. package/lib/memory/index.js +58 -0
  62. package/lib/memory/orchestrator.js +506 -0
  63. package/lib/memory/retriever.js +515 -0
  64. package/lib/memory/synthesizer.js +333 -0
  65. package/lib/pipeline/async-scanner.js +510 -0
  66. package/lib/pipeline/binary-analysis.js +1043 -0
  67. package/lib/pipeline/dom-xss-scanner.js +435 -0
  68. package/lib/pipeline/github-actions.js +792 -0
  69. package/lib/pipeline/index.js +124 -0
  70. package/lib/pipeline/osint.js +498 -0
  71. package/lib/pipeline/sarif.js +373 -0
  72. package/lib/pipeline/scanner.js +880 -0
  73. package/lib/pipeline/template-manager.js +525 -0
  74. package/lib/pipeline/xss-scanner.js +353 -0
  75. package/lib/setup-wizard.js +288 -0
  76. package/package.json +31 -0
@@ -0,0 +1,309 @@
1
+ // Copyright (c) 2026 defconxt. All rights reserved.
2
+ // Licensed under AGPL-3.0 — see LICENSE file for details.
3
+ // CIPHER is a trademark of defconxt.
4
+
5
+ /**
6
+ * client.js — LLM client factory for CIPHER gateway.
7
+ *
8
+ * Ports Python gateway/client.py:
9
+ * - makeClient(config) → { client, model }
10
+ * - Claude: Anthropic SDK
11
+ * - Ollama: Anthropic SDK with baseURL
12
+ * - LiteLLM: OpenAI SDK with adapter presenting Anthropic-compatible interface
13
+ *
14
+ * All SDK imports are lazy (dynamic import()) to preserve cold start <200ms.
15
+ * API keys are treated as opaque strings — never logged.
16
+ *
17
+ * @module gateway/client
18
+ */
19
+
20
/**
 * Debug logger for the gateway bridge.
 *
 * Active only when the CIPHER_DEBUG environment variable is exactly '1';
 * otherwise a shared no-op, so call sites never need to guard debug() calls.
 * Output goes to stderr so stdout stays clean for protocol traffic.
 */
const DEBUG_ENABLED = process.env.CIPHER_DEBUG === '1';
const debug = DEBUG_ENABLED
  ? (/** @type {string} */ msg) => process.stderr.write(`[bridge:node] ${msg}\n`)
  : () => {};
23
+
24
+ // ---------------------------------------------------------------------------
25
+ // LiteLLM → Anthropic adapter
26
+ // ---------------------------------------------------------------------------
27
+
28
/**
 * Converts Anthropic-style (system + messages) to OpenAI-style messages.
 *
 * An optional system prompt becomes a leading `role: 'system'` entry;
 * each remaining message is copied through with its role and content.
 *
 * @param {string|null} system
 * @param {Array<{role: string, content: string}>} messages
 * @returns {Array<{role: string, content: string}>}
 */
function toOpenAIMessages(system, messages) {
  const converted = system ? [{ role: 'system', content: system }] : [];
  for (const { role, content } of messages) {
    converted.push({ role, content });
  }
  return converted;
}
43
+
44
/**
 * Convert Anthropic tool schema to OpenAI function-calling format.
 *
 * Anthropic: { name, description, input_schema: {...} }
 * OpenAI:    { type: "function", function: { name, description, parameters: {...} } }
 *
 * Missing description falls back to '' and a missing input_schema falls back
 * to an empty object schema, matching what the OpenAI API expects.
 *
 * @param {Array<{name: string, description: string, input_schema: Object}>} tools
 * @returns {Array<{type: string, function: {name: string, description: string, parameters: Object}}>}
 */
function toOpenAITools(tools) {
  const converted = [];
  for (const tool of tools) {
    converted.push({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description || '',
        parameters: tool.input_schema || { type: 'object', properties: {} },
      },
    });
  }
  return converted;
}
63
+
64
/**
 * Map an OpenAI finish_reason to the equivalent Anthropic stop_reason.
 *
 * 'tool_calls' → 'tool_use', 'length' → 'max_tokens', and any other value
 * (including 'stop') maps to 'end_turn'.
 *
 * @param {string} finishReason
 * @returns {string}
 */
function mapStopReason(finishReason) {
  switch (finishReason) {
    case 'tool_calls':
      return 'tool_use';
    case 'length':
      return 'max_tokens';
    default:
      return 'end_turn';
  }
}
74
+
75
/**
 * Build a LiteLLM adapter that presents an Anthropic-SDK-compatible interface.
 *
 * From the Gateway's perspective, client.messages.create() and
 * client.messages.stream() behave like the Anthropic SDK equivalents.
 *
 * @param {Object} openaiClient - OpenAI SDK instance
 * @param {string} model - Configured model identifier (callers still pass a model per request)
 * @param {number} timeout - Timeout in seconds; kept for interface parity —
 *   the OpenAI client is already constructed with its own timeout
 * @returns {Object} Anthropic-shaped client exposing a `messages` namespace
 */
function buildLiteLLMAdapter(openaiClient, model, timeout) {
  /**
   * Parse an OpenAI tool-call `arguments` payload into a plain object.
   * Malformed JSON degrades to an empty object rather than throwing.
   * @param {string|Object|undefined} args
   * @returns {Object}
   */
  const parseToolArgs = (args) => {
    if (typeof args !== 'string') return args || {};
    try {
      return JSON.parse(args);
    } catch {
      return {};
    }
  };

  return {
    messages: {
      /**
       * Non-streaming completion with optional tool-use support.
       *
       * When `tools` is provided, forwards them to the OpenAI API in
       * function-calling format and maps any tool_calls in the response
       * back to Anthropic-style content blocks.
       *
       * @param {Object} opts
       * @param {string} opts.model
       * @param {number} opts.max_tokens
       * @param {string} [opts.system]
       * @param {Array} opts.messages
       * @param {Array} [opts.tools] - Anthropic-format tool schemas
       * @returns {Promise<Object>} Anthropic-shaped response
       */
      async create({ model: m, max_tokens, system, messages, tools }) {
        const request = {
          model: m,
          messages: toOpenAIMessages(system || null, messages),
          max_tokens,
        };
        if (tools && tools.length > 0) {
          request.tools = toOpenAITools(tools);
        }

        const response = await openaiClient.chat.completions.create(request);
        const firstChoice = response.choices?.[0];
        const assistantMsg = firstChoice?.message || {};
        const finishReason = firstChoice?.finish_reason || 'stop';

        // Assemble Anthropic-style content blocks: text first, then any
        // tool_use blocks derived from OpenAI tool_calls.
        const content = [];
        if (assistantMsg.content) {
          content.push({ text: assistantMsg.content, type: 'text' });
        }
        for (const call of assistantMsg.tool_calls || []) {
          content.push({
            type: 'tool_use',
            id: call.id,
            name: call.function.name,
            input: parseToolArgs(call.function.arguments),
          });
        }
        // Guarantee at least one content block, mirroring Anthropic responses.
        if (content.length === 0) {
          content.push({ text: '', type: 'text' });
        }

        return {
          content,
          model: m,
          role: 'assistant',
          stop_reason: mapStopReason(finishReason),
          usage: {
            input_tokens: response.usage?.prompt_tokens || 0,
            output_tokens: response.usage?.completion_tokens || 0,
          },
        };
      },

      /**
       * Streaming completion — returns an object supporting the Anthropic
       * SDK's .on('text', cb) pattern as well as async iteration.
       *
       * NOTE(review): the async-iterator path and the finalMessage()/listener
       * path each open their own API request; using both on one stream object
       * would issue two completions (faithful to the original port).
       *
       * @param {Object} opts
       * @param {string} opts.model
       * @param {number} opts.max_tokens
       * @param {string} [opts.system]
       * @param {Array<{role: string, content: string}>} opts.messages
       * @returns {Object} Stream-like object with .on() and async iteration
       */
      stream({ model: m, max_tokens, system, messages }) {
        /** @type {Array<{event: string, cb: Function}>} */
        const registered = [];
        let onFinalMessage = null;
        let consumption = null;

        // Open a fresh streaming request against the OpenAI endpoint.
        const openStream = () =>
          openaiClient.chat.completions.create({
            model: m,
            messages: toOpenAIMessages(system || null, messages),
            max_tokens,
            stream: true,
          });

        // Drain the stream: dispatch each text delta to 'text' listeners and
        // resolve with the assembled Anthropic-shaped final message.
        const drain = async () => {
          const pieces = [];
          const response = await openStream();
          for await (const chunk of response) {
            const delta = chunk.choices?.[0]?.delta?.content;
            if (!delta) continue;
            pieces.push(delta);
            for (const { event, cb } of registered) {
              if (event === 'text') cb(delta);
            }
          }

          const finalMsg = {
            content: [{ text: pieces.join(''), type: 'text' }],
            model: m,
            role: 'assistant',
          };
          if (onFinalMessage) onFinalMessage(finalMsg);
          return finalMsg;
        };

        const streamObj = {
          /**
           * Register an event listener. Supported events: 'text', 'finalMessage'.
           * Returns the stream object for chaining.
           */
          on(event, cb) {
            if (event === 'finalMessage') {
              onFinalMessage = cb;
            } else {
              registered.push({ event, cb });
            }
            return streamObj;
          },

          /**
           * Consume the stream — awaiting this drives the iteration.
           * The underlying request is started at most once.
           */
          async finalMessage() {
            if (!consumption) {
              consumption = drain();
            }
            return consumption;
          },

          /**
           * Async iterator over raw text chunks (opens its own request).
           */
          async *[Symbol.asyncIterator]() {
            const response = await openStream();
            for await (const chunk of response) {
              const delta = chunk.choices?.[0]?.delta?.content;
              if (delta) yield delta;
            }
          },
        };

        return streamObj;
      },
    },
  };
}
261
+
262
+ // ---------------------------------------------------------------------------
263
+ // Public factory
264
+ // ---------------------------------------------------------------------------
265
+
266
/**
 * Build a client for the configured backend.
 *
 * All SDK imports are lazy — resolved only when this function runs — to
 * preserve fast cold start when the gateway is not exercised.
 *
 * Backends:
 * - 'ollama'  → Anthropic SDK pointed at the Ollama base URL
 * - 'litellm' → OpenAI SDK wrapped in an Anthropic-compatible adapter
 * - any other value is treated as 'claude' → Anthropic SDK
 *
 * @param {import('./config-validate.js').GatewayConfig} config
 * @returns {Promise<{client: Object, model: string}>}
 */
export async function makeClient(config) {
  switch (config.backend) {
    case 'ollama': {
      const { default: Anthropic } = await import('@anthropic-ai/sdk');
      const client = new Anthropic({
        baseURL: config.ollama_base_url,
        apiKey: 'ollama', // Ollama ignores the key; SDK constructor requires it
        timeout: config.ollama_timeout * 1000, // SDK expects milliseconds
      });
      debug(`client: ollama at ${config.ollama_base_url}, model=${config.ollama_model}`);
      return { client, model: config.ollama_model };
    }

    case 'litellm': {
      const { default: OpenAI } = await import('openai');
      const openaiClient = new OpenAI({
        apiKey: config.litellm_api_key || 'unused',
        baseURL: config.litellm_api_base || undefined,
        timeout: config.litellm_timeout * 1000,
      });
      const client = buildLiteLLMAdapter(openaiClient, config.litellm_model, config.litellm_timeout);
      debug(`client: litellm, model=${config.litellm_model}`);
      return { client, model: config.litellm_model };
    }

    default: {
      // backend === 'claude'
      const { default: Anthropic } = await import('@anthropic-ai/sdk');
      const client = new Anthropic({
        apiKey: config.claude_api_key,
        timeout: config.claude_timeout * 1000,
      });
      debug(`client: claude, model=${config.claude_model}`);
      return { client, model: config.claude_model };
    }
  }
}
307
+
308
+ // Exported for testing only
309
+ export { buildLiteLLMAdapter as _buildLiteLLMAdapter };