guardlink 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/CHANGELOG.md +37 -0
  2. package/dist/agents/config.d.ts +6 -0
  3. package/dist/agents/config.d.ts.map +1 -1
  4. package/dist/agents/config.js +27 -4
  5. package/dist/agents/config.js.map +1 -1
  6. package/dist/agents/index.d.ts +2 -1
  7. package/dist/agents/index.d.ts.map +1 -1
  8. package/dist/agents/index.js +1 -1
  9. package/dist/agents/index.js.map +1 -1
  10. package/dist/agents/launcher.d.ts +14 -0
  11. package/dist/agents/launcher.d.ts.map +1 -1
  12. package/dist/agents/launcher.js +126 -1
  13. package/dist/agents/launcher.js.map +1 -1
  14. package/dist/agents/prompts.d.ts.map +1 -1
  15. package/dist/agents/prompts.js +34 -6
  16. package/dist/agents/prompts.js.map +1 -1
  17. package/dist/analyze/index.d.ts +34 -1
  18. package/dist/analyze/index.d.ts.map +1 -1
  19. package/dist/analyze/index.js +281 -8
  20. package/dist/analyze/index.js.map +1 -1
  21. package/dist/analyze/llm.d.ts +54 -3
  22. package/dist/analyze/llm.d.ts.map +1 -1
  23. package/dist/analyze/llm.js +418 -97
  24. package/dist/analyze/llm.js.map +1 -1
  25. package/dist/analyze/prompts.d.ts +3 -2
  26. package/dist/analyze/prompts.d.ts.map +1 -1
  27. package/dist/analyze/prompts.js +227 -111
  28. package/dist/analyze/prompts.js.map +1 -1
  29. package/dist/analyze/tools.d.ts +22 -0
  30. package/dist/analyze/tools.d.ts.map +1 -0
  31. package/dist/analyze/tools.js +230 -0
  32. package/dist/analyze/tools.js.map +1 -0
  33. package/dist/cli/index.d.ts +15 -7
  34. package/dist/cli/index.d.ts.map +1 -1
  35. package/dist/cli/index.js +289 -95
  36. package/dist/cli/index.js.map +1 -1
  37. package/dist/dashboard/data.d.ts +5 -0
  38. package/dist/dashboard/data.d.ts.map +1 -1
  39. package/dist/dashboard/data.js +5 -0
  40. package/dist/dashboard/data.js.map +1 -1
  41. package/dist/dashboard/generate.d.ts.map +1 -1
  42. package/dist/dashboard/generate.js +176 -59
  43. package/dist/dashboard/generate.js.map +1 -1
  44. package/dist/init/templates.d.ts.map +1 -1
  45. package/dist/init/templates.js +51 -31
  46. package/dist/init/templates.js.map +1 -1
  47. package/dist/mcp/server.d.ts.map +1 -1
  48. package/dist/mcp/server.js +6 -2
  49. package/dist/mcp/server.js.map +1 -1
  50. package/dist/parser/index.d.ts +1 -1
  51. package/dist/parser/index.d.ts.map +1 -1
  52. package/dist/parser/index.js +1 -1
  53. package/dist/parser/index.js.map +1 -1
  54. package/dist/parser/validate.d.ts +12 -0
  55. package/dist/parser/validate.d.ts.map +1 -1
  56. package/dist/parser/validate.js +44 -0
  57. package/dist/parser/validate.js.map +1 -1
  58. package/dist/report/report.d.ts.map +1 -1
  59. package/dist/report/report.js +64 -0
  60. package/dist/report/report.js.map +1 -1
  61. package/dist/tui/commands.d.ts +6 -1
  62. package/dist/tui/commands.d.ts.map +1 -1
  63. package/dist/tui/commands.js +411 -102
  64. package/dist/tui/commands.js.map +1 -1
  65. package/dist/tui/format.d.ts +7 -0
  66. package/dist/tui/format.d.ts.map +1 -1
  67. package/dist/tui/format.js +59 -0
  68. package/dist/tui/format.js.map +1 -1
  69. package/dist/tui/index.d.ts.map +1 -1
  70. package/dist/tui/index.js +19 -2
  71. package/dist/tui/index.js.map +1 -1
  72. package/package.json +1 -1
@@ -2,8 +2,11 @@
2
2
  * GuardLink Threat Reports — Lightweight LLM client using raw fetch.
3
3
  *
4
4
  * Supports:
5
- * - Anthropic Messages API (claude-sonnet-4-5-20250929, etc.)
6
- * - OpenAI-compatible Chat Completions (GPT-4o, DeepSeek, OpenRouter)
5
+ * - Anthropic Messages API (claude-sonnet-4-6, claude-opus-4-6, etc.) with extended thinking + tool use
6
+ * - OpenAI Responses API (gpt-5.2, o3, etc.) with web search, tools, structured output
7
+ * - Google Gemini API (gemini-2.5-flash, gemini-3-pro, etc.) via OpenAI-compatible endpoint
8
+ * - OpenAI-compatible Chat Completions (DeepSeek, OpenRouter, Ollama)
9
+ * - DeepSeek reasoning mode (deepseek-reasoner)
7
10
  *
8
11
  * Zero dependencies — uses Node 20+ built-in fetch.
9
12
  *
@@ -18,51 +21,43 @@
18
21
  * @flows #llm-client -> External_LLM_APIs via fetch -- "HTTP POST with auth headers and prompt payload"
19
22
  * @flows External_LLM_APIs -> #llm-client via response -- "Streaming or complete response from LLM provider"
20
23
  */
24
+ // ─── Defaults ────────────────────────────────────────────────────────
21
25
  const DEFAULT_MODELS = {
22
- anthropic: 'claude-sonnet-4-5-20250929',
23
- openai: 'gpt-4o',
24
- openrouter: 'anthropic/claude-sonnet-4-5-20250929',
26
+ anthropic: 'claude-sonnet-4-6',
27
+ openai: 'gpt-5.2',
28
+ google: 'gemini-2.5-flash',
29
+ openrouter: 'anthropic/claude-sonnet-4-6',
25
30
  deepseek: 'deepseek-chat',
31
+ ollama: 'llama3.2',
26
32
  };
27
33
  const BASE_URLS = {
28
34
  anthropic: 'https://api.anthropic.com',
29
35
  openai: 'https://api.openai.com',
36
+ google: 'https://generativelanguage.googleapis.com/v1beta/openai',
30
37
  openrouter: 'https://openrouter.ai/api',
31
38
  deepseek: 'https://api.deepseek.com',
39
+ ollama: 'http://localhost:11434',
32
40
  };
41
+ // ─── Auto-detect ─────────────────────────────────────────────────────
33
42
  /**
34
43
  * Auto-detect provider from environment variables.
35
44
  * Returns null if no API key found.
36
45
  */
37
46
  export function autoDetectConfig() {
38
- // Priority: Anthropic > OpenAI > OpenRouter > DeepSeek
39
47
  if (process.env.ANTHROPIC_API_KEY) {
40
- return {
41
- provider: 'anthropic',
42
- model: DEFAULT_MODELS.anthropic,
43
- apiKey: process.env.ANTHROPIC_API_KEY,
44
- };
48
+ return { provider: 'anthropic', model: DEFAULT_MODELS.anthropic, apiKey: process.env.ANTHROPIC_API_KEY };
45
49
  }
46
50
  if (process.env.OPENAI_API_KEY) {
47
- return {
48
- provider: 'openai',
49
- model: DEFAULT_MODELS.openai,
50
- apiKey: process.env.OPENAI_API_KEY,
51
- };
51
+ return { provider: 'openai', model: DEFAULT_MODELS.openai, apiKey: process.env.OPENAI_API_KEY };
52
52
  }
53
53
  if (process.env.OPENROUTER_API_KEY) {
54
- return {
55
- provider: 'openrouter',
56
- model: DEFAULT_MODELS.openrouter,
57
- apiKey: process.env.OPENROUTER_API_KEY,
58
- };
54
+ return { provider: 'openrouter', model: DEFAULT_MODELS.openrouter, apiKey: process.env.OPENROUTER_API_KEY };
55
+ }
56
+ if (process.env.GOOGLE_API_KEY || process.env.GEMINI_API_KEY) {
57
+ return { provider: 'google', model: DEFAULT_MODELS.google, apiKey: (process.env.GOOGLE_API_KEY || process.env.GEMINI_API_KEY) };
59
58
  }
60
59
  if (process.env.DEEPSEEK_API_KEY) {
61
- return {
62
- provider: 'deepseek',
63
- model: DEFAULT_MODELS.deepseek,
64
- apiKey: process.env.DEEPSEEK_API_KEY,
65
- };
60
+ return { provider: 'deepseek', model: DEFAULT_MODELS.deepseek, apiKey: process.env.DEEPSEEK_API_KEY };
66
61
  }
67
62
  return null;
68
63
  }
@@ -70,13 +65,13 @@ export function autoDetectConfig() {
70
65
  * Build config from explicit flags + env vars.
71
66
  */
72
67
  export function buildConfig(opts) {
73
- // If provider specified, use it
74
68
  if (opts.provider) {
75
69
  const provider = opts.provider;
76
70
  const envKeyMap = {
77
71
  anthropic: 'ANTHROPIC_API_KEY',
78
72
  openai: 'OPENAI_API_KEY',
79
73
  openrouter: 'OPENROUTER_API_KEY',
74
+ google: 'GOOGLE_API_KEY',
80
75
  deepseek: 'DEEPSEEK_API_KEY',
81
76
  };
82
77
  const apiKey = opts.apiKey || process.env[envKeyMap[provider] || ''];
@@ -84,58 +79,120 @@ export function buildConfig(opts) {
84
79
  return null;
85
80
  return {
86
81
  provider,
87
- model: opts.model || DEFAULT_MODELS[provider] || 'gpt-4o',
82
+ model: opts.model || DEFAULT_MODELS[provider] || 'gpt-5.2',
88
83
  apiKey,
89
84
  };
90
85
  }
91
- // Auto-detect
92
86
  const config = autoDetectConfig();
93
87
  if (!config)
94
88
  return null;
95
- // Override model if specified
96
89
  if (opts.model)
97
90
  config.model = opts.model;
98
91
  return config;
99
92
  }
93
+ // ─── Main entry point ────────────────────────────────────────────────
100
94
  /**
101
95
  * Send a message to the LLM and return the response.
96
+ * Supports streaming, tool use (agentic loop), extended thinking,
97
+ * web search, and structured output.
102
98
  */
103
99
  export async function chatCompletion(config, systemPrompt, userMessage, onChunk) {
104
100
  if (config.provider === 'anthropic') {
105
- return callAnthropic(config, systemPrompt, userMessage, onChunk);
101
+ return callAnthropicWithTools(config, systemPrompt, userMessage, onChunk);
102
+ }
103
+ else if (config.provider === 'openai') {
104
+ return callOpenAIResponses(config, systemPrompt, userMessage, onChunk);
106
105
  }
107
106
  else {
107
+ // Google Gemini, DeepSeek, OpenRouter, Ollama all use OpenAI-compatible Chat Completions
108
108
  return callOpenAICompatible(config, systemPrompt, userMessage, onChunk);
109
109
  }
110
110
  }
111
- // ─── Anthropic Messages API ──────────────────────────────────────────
112
- async function callAnthropic(config, systemPrompt, userMessage, onChunk) {
111
+ // ─── Anthropic Messages API (2025) ──────────────────────────────────
112
+ const ANTHROPIC_API_VERSION = '2025-04-14';
113
+ /** Wrapper with agentic tool-call loop */
114
+ async function callAnthropicWithTools(config, systemPrompt, userMessage, onChunk) {
115
+ const maxRounds = config.maxToolRounds ?? 5;
116
+ let messages = [{ role: 'user', content: userMessage }];
117
+ const allToolCalls = [];
118
+ let finalResponse = null;
119
+ for (let round = 0; round <= maxRounds; round++) {
120
+ const response = await callAnthropic(config, systemPrompt, messages, round === 0 ? onChunk : undefined);
121
+ if (response.toolCalls?.length)
122
+ allToolCalls.push(...response.toolCalls);
123
+ if (!response.toolCalls?.length || !config.toolExecutor) {
124
+ finalResponse = response;
125
+ break;
126
+ }
127
+ // Add assistant response and tool results for next round
128
+ messages.push({ role: 'assistant', content: response._rawContent });
129
+ for (const tc of response.toolCalls) {
130
+ let resultText;
131
+ try {
132
+ resultText = await config.toolExecutor(tc.name, tc.arguments);
133
+ }
134
+ catch (err) {
135
+ resultText = `Error: ${err.message}`;
136
+ }
137
+ messages.push({
138
+ role: 'user',
139
+ content: [{ type: 'tool_result', tool_use_id: tc.id, content: resultText }],
140
+ });
141
+ }
142
+ }
143
+ if (!finalResponse)
144
+ throw new Error('Max tool call rounds exceeded');
145
+ finalResponse.toolCalls = allToolCalls.length ? allToolCalls : undefined;
146
+ return finalResponse;
147
+ }
148
+ async function callAnthropic(config, systemPrompt, messages, onChunk) {
113
149
  const baseUrl = config.baseUrl || BASE_URLS.anthropic;
114
150
  const maxTokens = config.maxTokens || 8192;
151
+ const headers = {
152
+ 'Content-Type': 'application/json',
153
+ 'x-api-key': config.apiKey,
154
+ 'anthropic-version': ANTHROPIC_API_VERSION,
155
+ };
156
+ if (config.extendedThinking) {
157
+ headers['anthropic-beta'] = 'interleaved-thinking-2025-05-14';
158
+ }
159
+ const body = {
160
+ model: config.model,
161
+ max_tokens: maxTokens,
162
+ system: systemPrompt,
163
+ messages,
164
+ };
165
+ if (config.extendedThinking) {
166
+ body.thinking = { type: 'enabled', budget_tokens: config.thinkingBudget || 10000 };
167
+ }
168
+ if (config.tools?.length) {
169
+ body.tools = config.tools.map(t => ({
170
+ name: t.name,
171
+ description: t.description,
172
+ input_schema: {
173
+ type: 'object',
174
+ properties: t.parameters.properties,
175
+ required: t.parameters.required,
176
+ },
177
+ }));
178
+ }
115
179
  if (onChunk) {
116
- // Streaming
180
+ body.stream = true;
117
181
  const res = await fetch(`${baseUrl}/v1/messages`, {
118
- method: 'POST',
119
- headers: {
120
- 'Content-Type': 'application/json',
121
- 'x-api-key': config.apiKey,
122
- 'anthropic-version': '2023-06-01',
123
- },
124
- body: JSON.stringify({
125
- model: config.model,
126
- max_tokens: maxTokens,
127
- system: systemPrompt,
128
- stream: true,
129
- messages: [{ role: 'user', content: userMessage }],
130
- }),
182
+ method: 'POST', headers, body: JSON.stringify(body),
131
183
  });
132
184
  if (!res.ok) {
133
185
  const err = await res.text();
134
186
  throw new Error(`Anthropic API error ${res.status}: ${err}`);
135
187
  }
136
188
  let content = '';
189
+ let thinking = '';
137
190
  let inputTokens = 0;
138
191
  let outputTokens = 0;
192
+ const toolCalls = [];
193
+ let curToolId = '';
194
+ let curToolName = '';
195
+ let curToolArgs = '';
139
196
  const reader = res.body?.getReader();
140
197
  if (!reader)
141
198
  throw new Error('No response body');
@@ -155,52 +212,313 @@ async function callAnthropic(config, systemPrompt, userMessage, onChunk) {
155
212
  if (data === '[DONE]')
156
213
  continue;
157
214
  try {
158
- const event = JSON.parse(data);
159
- if (event.type === 'content_block_delta' && event.delta?.text) {
160
- content += event.delta.text;
161
- onChunk(event.delta.text);
215
+ const ev = JSON.parse(data);
216
+ if (ev.type === 'content_block_start' && ev.content_block?.type === 'tool_use') {
217
+ curToolId = ev.content_block.id || '';
218
+ curToolName = ev.content_block.name || '';
219
+ curToolArgs = '';
162
220
  }
163
- if (event.type === 'message_delta' && event.usage) {
164
- outputTokens = event.usage.output_tokens || 0;
221
+ if (ev.type === 'content_block_delta') {
222
+ if (ev.delta?.type === 'text_delta' && ev.delta?.text) {
223
+ content += ev.delta.text;
224
+ onChunk(ev.delta.text);
225
+ }
226
+ if (ev.delta?.type === 'thinking_delta' && ev.delta?.thinking) {
227
+ thinking += ev.delta.thinking;
228
+ }
229
+ if (ev.delta?.type === 'input_json_delta' && ev.delta?.partial_json) {
230
+ curToolArgs += ev.delta.partial_json;
231
+ }
165
232
  }
166
- if (event.type === 'message_start' && event.message?.usage) {
167
- inputTokens = event.message.usage.input_tokens || 0;
233
+ if (ev.type === 'content_block_stop' && curToolId) {
234
+ try {
235
+ toolCalls.push({ id: curToolId, name: curToolName, arguments: JSON.parse(curToolArgs || '{}') });
236
+ }
237
+ catch { /* skip */ }
238
+ curToolId = '';
239
+ curToolName = '';
240
+ curToolArgs = '';
168
241
  }
242
+ if (ev.type === 'message_delta' && ev.usage)
243
+ outputTokens = ev.usage.output_tokens || 0;
244
+ if (ev.type === 'message_start' && ev.message?.usage)
245
+ inputTokens = ev.message.usage.input_tokens || 0;
169
246
  }
170
- catch { /* skip non-JSON lines */ }
247
+ catch { /* skip */ }
171
248
  }
172
249
  }
173
- return { content, model: config.model, inputTokens, outputTokens };
250
+ return {
251
+ content, model: config.model, inputTokens, outputTokens,
252
+ thinking: thinking || undefined, thinkingTokens: undefined,
253
+ toolCalls: toolCalls.length ? toolCalls : undefined,
254
+ _rawContent: buildRawContent(content, thinking, toolCalls),
255
+ };
174
256
  }
175
257
  else {
176
258
  // Non-streaming
177
259
  const res = await fetch(`${baseUrl}/v1/messages`, {
178
- method: 'POST',
179
- headers: {
180
- 'Content-Type': 'application/json',
181
- 'x-api-key': config.apiKey,
182
- 'anthropic-version': '2023-06-01',
183
- },
184
- body: JSON.stringify({
185
- model: config.model,
186
- max_tokens: maxTokens,
187
- system: systemPrompt,
188
- messages: [{ role: 'user', content: userMessage }],
189
- }),
260
+ method: 'POST', headers, body: JSON.stringify(body),
190
261
  });
191
262
  if (!res.ok) {
192
263
  const err = await res.text();
193
264
  throw new Error(`Anthropic API error ${res.status}: ${err}`);
194
265
  }
195
266
  const data = await res.json();
267
+ let content = '';
268
+ let thinking = '';
269
+ const toolCalls = [];
270
+ for (const block of (data.content || [])) {
271
+ if (block.type === 'text')
272
+ content += block.text;
273
+ if (block.type === 'thinking')
274
+ thinking += block.thinking;
275
+ if (block.type === 'tool_use') {
276
+ toolCalls.push({ id: block.id, name: block.name, arguments: block.input || {} });
277
+ }
278
+ }
196
279
  return {
197
- content: data.content?.[0]?.text || '',
198
- model: data.model || config.model,
280
+ content, model: data.model || config.model,
199
281
  inputTokens: data.usage?.input_tokens,
200
282
  outputTokens: data.usage?.output_tokens,
283
+ thinking: thinking || undefined,
284
+ toolCalls: toolCalls.length ? toolCalls : undefined,
285
+ _rawContent: data.content,
201
286
  };
202
287
  }
203
288
  }
289
+ function buildRawContent(content, thinking, toolCalls) {
290
+ const blocks = [];
291
+ if (thinking)
292
+ blocks.push({ type: 'thinking', thinking });
293
+ if (content)
294
+ blocks.push({ type: 'text', text: content });
295
+ for (const tc of toolCalls)
296
+ blocks.push({ type: 'tool_use', id: tc.id, name: tc.name, input: tc.arguments });
297
+ return blocks;
298
+ }
299
+ // ─── OpenAI Responses API ────────────────────────────────────────────
300
+ async function callOpenAIResponses(config, systemPrompt, userMessage, onChunk) {
301
+ const baseUrl = config.baseUrl || BASE_URLS.openai;
302
+ const maxTokens = config.maxTokens || 8192;
303
+ const headers = {
304
+ 'Content-Type': 'application/json',
305
+ 'Authorization': `Bearer ${config.apiKey}`,
306
+ };
307
+ const input = [
308
+ { role: 'developer', content: systemPrompt },
309
+ { role: 'user', content: userMessage },
310
+ ];
311
+ const tools = [];
312
+ if (config.webSearch)
313
+ tools.push({ type: 'web_search' });
314
+ if (config.tools?.length) {
315
+ for (const t of config.tools) {
316
+ tools.push({
317
+ type: 'function', name: t.name, description: t.description,
318
+ parameters: t.parameters, strict: true,
319
+ });
320
+ }
321
+ }
322
+ const body = { model: config.model, input, max_output_tokens: maxTokens };
323
+ if (tools.length)
324
+ body.tools = tools;
325
+ if (config.responseFormat === 'json')
326
+ body.text = { format: { type: 'json_object' } };
327
+ if (onChunk) {
328
+ body.stream = true;
329
+ const res = await fetch(`${baseUrl}/v1/responses`, {
330
+ method: 'POST', headers, body: JSON.stringify(body),
331
+ });
332
+ if (!res.ok) {
333
+ const err = await res.text();
334
+ // Fallback to Chat Completions if Responses API not available
335
+ if (res.status === 404)
336
+ return callOpenAICompatible(config, systemPrompt, userMessage, onChunk);
337
+ throw new Error(`OpenAI API error ${res.status}: ${err}`);
338
+ }
339
+ let content = '';
340
+ let inputTokens = 0;
341
+ let outputTokens = 0;
342
+ const toolCalls = [];
343
+ const reader = res.body?.getReader();
344
+ if (!reader)
345
+ throw new Error('No response body');
346
+ const decoder = new TextDecoder();
347
+ let buffer = '';
348
+ while (true) {
349
+ const { done, value } = await reader.read();
350
+ if (done)
351
+ break;
352
+ buffer += decoder.decode(value, { stream: true });
353
+ const lines = buffer.split('\n');
354
+ buffer = lines.pop() || '';
355
+ for (const line of lines) {
356
+ if (!line.startsWith('data: '))
357
+ continue;
358
+ const d = line.slice(6).trim();
359
+ if (d === '[DONE]')
360
+ continue;
361
+ try {
362
+ const ev = JSON.parse(d);
363
+ if (ev.type === 'response.output_text.delta' && ev.delta) {
364
+ content += ev.delta;
365
+ onChunk(ev.delta);
366
+ }
367
+ if (ev.type === 'response.function_call_arguments.done') {
368
+ try {
369
+ toolCalls.push({ id: ev.call_id || '', name: ev.name || '', arguments: JSON.parse(ev.arguments || '{}') });
370
+ }
371
+ catch { /* skip */ }
372
+ }
373
+ if (ev.type === 'response.completed' && ev.response?.usage) {
374
+ inputTokens = ev.response.usage.input_tokens || 0;
375
+ outputTokens = ev.response.usage.output_tokens || 0;
376
+ }
377
+ }
378
+ catch { /* skip */ }
379
+ }
380
+ }
381
+ if (toolCalls.length && config.toolExecutor) {
382
+ return handleOpenAIToolLoop(config, baseUrl, headers, body, content, toolCalls, inputTokens, outputTokens, onChunk);
383
+ }
384
+ return { content, model: config.model, inputTokens, outputTokens, toolCalls: toolCalls.length ? toolCalls : undefined };
385
+ }
386
+ else {
387
+ // Non-streaming
388
+ const res = await fetch(`${baseUrl}/v1/responses`, {
389
+ method: 'POST', headers, body: JSON.stringify(body),
390
+ });
391
+ if (!res.ok) {
392
+ const err = await res.text();
393
+ if (res.status === 404)
394
+ return callOpenAICompatible(config, systemPrompt, userMessage, undefined);
395
+ throw new Error(`OpenAI API error ${res.status}: ${err}`);
396
+ }
397
+ const data = await res.json();
398
+ let content = '';
399
+ const toolCalls = [];
400
+ for (const item of (data.output || [])) {
401
+ if (item.type === 'message') {
402
+ for (const part of (item.content || [])) {
403
+ if (part.type === 'output_text')
404
+ content += part.text;
405
+ }
406
+ }
407
+ if (item.type === 'function_call') {
408
+ try {
409
+ toolCalls.push({ id: item.call_id || item.id || '', name: item.name || '', arguments: JSON.parse(item.arguments || '{}') });
410
+ }
411
+ catch { /* skip */ }
412
+ }
413
+ }
414
+ if (!content && data.output_text)
415
+ content = data.output_text;
416
+ if (toolCalls.length && config.toolExecutor) {
417
+ return handleOpenAIToolLoop(config, baseUrl, headers, body, content, toolCalls, data.usage?.input_tokens, data.usage?.output_tokens, undefined);
418
+ }
419
+ return {
420
+ content, model: data.model || config.model,
421
+ inputTokens: data.usage?.input_tokens, outputTokens: data.usage?.output_tokens,
422
+ toolCalls: toolCalls.length ? toolCalls : undefined,
423
+ };
424
+ }
425
+ }
426
+ /** Agentic tool-call loop for OpenAI Responses API */
427
+ async function handleOpenAIToolLoop(config, baseUrl, headers, origBody, partialContent, pending, inTok, outTok, onChunk) {
428
+ const maxRounds = config.maxToolRounds ?? 5;
429
+ const all = [...pending];
430
+ let content = partialContent;
431
+ let inputTokens = inTok;
432
+ let outputTokens = outTok;
433
+ for (let round = 0; round < maxRounds && pending.length; round++) {
434
+ const results = [];
435
+ for (const tc of pending) {
436
+ let r;
437
+ try {
438
+ r = await config.toolExecutor(tc.name, tc.arguments);
439
+ }
440
+ catch (e) {
441
+ r = `Error: ${e.message}`;
442
+ }
443
+ results.push({ type: 'function_call_output', call_id: tc.id, output: r });
444
+ }
445
+ const followUp = { ...origBody, input: results, stream: !!onChunk };
446
+ const res = await fetch(`${baseUrl}/v1/responses`, { method: 'POST', headers, body: JSON.stringify(followUp) });
447
+ if (!res.ok) {
448
+ const err = await res.text();
449
+ throw new Error(`OpenAI tool follow-up error ${res.status}: ${err}`);
450
+ }
451
+ pending = [];
452
+ if (onChunk) {
453
+ const reader = res.body?.getReader();
454
+ if (!reader)
455
+ throw new Error('No response body');
456
+ const dec = new TextDecoder();
457
+ let buf = '';
458
+ while (true) {
459
+ const { done, value } = await reader.read();
460
+ if (done)
461
+ break;
462
+ buf += dec.decode(value, { stream: true });
463
+ const lines = buf.split('\n');
464
+ buf = lines.pop() || '';
465
+ for (const ln of lines) {
466
+ if (!ln.startsWith('data: '))
467
+ continue;
468
+ const d = ln.slice(6).trim();
469
+ if (d === '[DONE]')
470
+ continue;
471
+ try {
472
+ const ev = JSON.parse(d);
473
+ if (ev.type === 'response.output_text.delta' && ev.delta) {
474
+ content += ev.delta;
475
+ onChunk(ev.delta);
476
+ }
477
+ if (ev.type === 'response.function_call_arguments.done') {
478
+ try {
479
+ const tc = { id: ev.call_id || '', name: ev.name || '', arguments: JSON.parse(ev.arguments || '{}') };
480
+ pending.push(tc);
481
+ all.push(tc);
482
+ }
483
+ catch { /* skip */ }
484
+ }
485
+ if (ev.type === 'response.completed' && ev.response?.usage) {
486
+ inputTokens = (inputTokens || 0) + (ev.response.usage.input_tokens || 0);
487
+ outputTokens = (outputTokens || 0) + (ev.response.usage.output_tokens || 0);
488
+ }
489
+ }
490
+ catch { /* skip */ }
491
+ }
492
+ }
493
+ }
494
+ else {
495
+ const data = await res.json();
496
+ for (const item of (data.output || [])) {
497
+ if (item.type === 'message') {
498
+ for (const p of (item.content || [])) {
499
+ if (p.type === 'output_text')
500
+ content += p.text;
501
+ }
502
+ }
503
+ if (item.type === 'function_call') {
504
+ try {
505
+ const tc = { id: item.call_id || item.id || '', name: item.name || '', arguments: JSON.parse(item.arguments || '{}') };
506
+ pending.push(tc);
507
+ all.push(tc);
508
+ }
509
+ catch { /* skip */ }
510
+ }
511
+ }
512
+ if (data.output_text && !content)
513
+ content = data.output_text;
514
+ if (data.usage) {
515
+ inputTokens = (inputTokens || 0) + (data.usage.input_tokens || 0);
516
+ outputTokens = (outputTokens || 0) + (data.usage.output_tokens || 0);
517
+ }
518
+ }
519
+ }
520
+ return { content, model: config.model, inputTokens, outputTokens, toolCalls: all.length ? all : undefined };
521
+ }
204
522
  // ─── OpenAI-compatible Chat Completions ──────────────────────────────
205
523
  async function callOpenAICompatible(config, systemPrompt, userMessage, onChunk) {
206
524
  const baseUrl = config.baseUrl || BASE_URLS[config.provider] || BASE_URLS.openai;
@@ -209,31 +527,39 @@ async function callOpenAICompatible(config, systemPrompt, userMessage, onChunk)
209
527
  'Content-Type': 'application/json',
210
528
  'Authorization': `Bearer ${config.apiKey}`,
211
529
  };
212
- // OpenRouter requires extra headers
213
530
  if (config.provider === 'openrouter') {
214
531
  headers['HTTP-Referer'] = 'https://guardlink.bugb.io';
215
532
  headers['X-Title'] = 'GuardLink CLI';
216
533
  }
534
+ const isDeepSeekReasoner = config.provider === 'deepseek' && config.model.includes('reasoner');
535
+ const body = {
536
+ model: config.model,
537
+ max_tokens: maxTokens,
538
+ messages: [
539
+ { role: 'system', content: systemPrompt },
540
+ { role: 'user', content: userMessage },
541
+ ],
542
+ };
543
+ if (config.responseFormat === 'json') {
544
+ body.response_format = { type: 'json_object' };
545
+ }
546
+ if (config.tools?.length) {
547
+ body.tools = config.tools.map(t => ({
548
+ type: 'function',
549
+ function: { name: t.name, description: t.description, parameters: t.parameters },
550
+ }));
551
+ }
217
552
  if (onChunk) {
218
- // Streaming
553
+ body.stream = true;
219
554
  const res = await fetch(`${baseUrl}/v1/chat/completions`, {
220
- method: 'POST',
221
- headers,
222
- body: JSON.stringify({
223
- model: config.model,
224
- max_tokens: maxTokens,
225
- stream: true,
226
- messages: [
227
- { role: 'system', content: systemPrompt },
228
- { role: 'user', content: userMessage },
229
- ],
230
- }),
555
+ method: 'POST', headers, body: JSON.stringify(body),
231
556
  });
232
557
  if (!res.ok) {
233
558
  const err = await res.text();
234
559
  throw new Error(`${config.provider} API error ${res.status}: ${err}`);
235
560
  }
236
561
  let content = '';
562
+ let reasoning = '';
237
563
  const reader = res.body?.getReader();
238
564
  if (!reader)
239
565
  throw new Error('No response body');
@@ -259,36 +585,31 @@ async function callOpenAICompatible(config, systemPrompt, userMessage, onChunk)
259
585
  content += delta;
260
586
  onChunk(delta);
261
587
  }
588
+ const reasoningDelta = event.choices?.[0]?.delta?.reasoning_content;
589
+ if (reasoningDelta)
590
+ reasoning += reasoningDelta;
262
591
  }
263
592
  catch { /* skip */ }
264
593
  }
265
594
  }
266
- return { content, model: config.model };
595
+ return { content, model: config.model, thinking: reasoning || undefined };
267
596
  }
268
597
  else {
269
- // Non-streaming
270
598
  const res = await fetch(`${baseUrl}/v1/chat/completions`, {
271
- method: 'POST',
272
- headers,
273
- body: JSON.stringify({
274
- model: config.model,
275
- max_tokens: maxTokens,
276
- messages: [
277
- { role: 'system', content: systemPrompt },
278
- { role: 'user', content: userMessage },
279
- ],
280
- }),
599
+ method: 'POST', headers, body: JSON.stringify(body),
281
600
  });
282
601
  if (!res.ok) {
283
602
  const err = await res.text();
284
603
  throw new Error(`${config.provider} API error ${res.status}: ${err}`);
285
604
  }
286
605
  const data = await res.json();
606
+ const choice = data.choices?.[0];
287
607
  return {
288
- content: data.choices?.[0]?.message?.content || '',
608
+ content: choice?.message?.content || '',
289
609
  model: data.model || config.model,
290
610
  inputTokens: data.usage?.prompt_tokens,
291
611
  outputTokens: data.usage?.completion_tokens,
612
+ thinking: isDeepSeekReasoner ? (choice?.message?.reasoning_content || undefined) : undefined,
292
613
  };
293
614
  }
294
615
  }