agentic-ai-framework 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,78 @@
1
/**
 * Abstract base class defining the interface all LLM providers must implement.
 *
 * Message format (OpenAI-style) — every provider accepts and returns messages
 * in this shape, regardless of its native wire format:
 *
 *   System:    { role: 'system', content: string }
 *   User:      { role: 'user', content: string }
 *   Assistant: { role: 'assistant', content: string | null, tool_calls?: ToolCall[] }
 *   Tool:      { role: 'tool', tool_call_id: string, name: string, content: string }
 *
 * A provider whose native format differs (e.g., Anthropic) converts internally
 * before calling its API and converts back before returning.
 *
 * @typedef {Object} ToolCall
 * @property {string} id - Provider-assigned tool call ID
 * @property {string} name - Tool name
 * @property {Object} arguments - Parsed arguments object
 *
 * @typedef {Object} CompletionResponse
 * @property {string} content - Text content of the response
 * @property {Object|null} parsed - Parsed JSON (when schema mode used)
 * @property {Object} usage - { promptTokens, completionTokens, totalTokens }
 * @property {string} model - Model ID that produced the response
 * @property {string} finishReason - 'stop', 'tool_calls', 'length', etc.
 * @property {ToolCall[]|undefined} toolCalls - Present when LLM requests tool execution
 * @property {Array|undefined} messages - Updated message array for loop continuation
 */
export class BaseLLMProvider {
  /**
   * Build the standard error thrown by unimplemented abstract methods.
   * @param {string} method - Name of the abstract method
   * @returns {Error}
   */
  #unimplemented(method) {
    return new Error(`${this.constructor.name}.${method}() is not implemented`);
  }

  /**
   * Generate a completion.
   *
   * @param {string} prompt - User prompt (ignored when options.messages is set)
   * @param {Object} [options={}]
   * @param {string} [options.systemPrompt] - System prompt text
   * @param {Array} [options.messages] - Full message array (takes precedence)
   * @param {number} [options.temperature]
   * @param {number} [options.maxTokens]
   * @param {boolean} [options.enableTools]
   * @param {Array} [options.tools] - Tool definitions [{name, description, parameters}]
   * @param {boolean} [options.jsonMode]
   * @returns {Promise<CompletionResponse>}
   */
  async complete(prompt, options = {}) {
    throw this.#unimplemented('complete');
  }

  /**
   * Generate a completion with a JSON schema for structured output.
   * The provider must parse the response as JSON and validate required fields.
   *
   * @param {string} prompt
   * @param {Object} schema - JSON Schema { type, properties, required }
   * @param {Object} [options={}]
   * @returns {Promise<CompletionResponse>}
   */
  async completeWithSchema(prompt, schema, options = {}) {
    throw this.#unimplemented('completeWithSchema');
  }

  /**
   * Test connectivity to the provider API.
   * @returns {Promise<boolean>}
   */
  async testConnection() {
    throw this.#unimplemented('testConnection');
  }

  /**
   * Return provider metadata.
   * @returns {{ provider: string, model: string }}
   */
  getInfo() {
    const provider = this.constructor.name;
    const model = this.model ?? 'unknown';
    return { provider, model };
  }
}
@@ -0,0 +1,80 @@
1
import { createHash } from 'crypto';
import { GrokProvider } from './providers/GrokProvider.js';
import { ClaudeProvider } from './providers/ClaudeProvider.js';
import { OpenAIProvider } from './providers/OpenAIProvider.js';
import { LLMError } from '../utils/errors.js';

// Built-in provider registry: name → provider class
const REGISTRY = new Map([
  ['grok', GrokProvider],
  ['claude', ClaudeProvider],
  ['openai', OpenAIProvider],
]);

// Instance cache: "providerName:model:apiKeyHash" → provider instance
const _cache = new Map();

/**
 * Creates and caches LLM provider instances by name, model, and API key.
 *
 * Usage:
 *   const llm = LLMRouter.get('grok', 'grok-code-fast-1', process.env.GROK_API_KEY);
 *   const llm = LLMRouter.get('claude', null, process.env.ANTHROPIC_API_KEY); // uses default model
 */
export class LLMRouter {
  /**
   * Get a cached provider instance.
   * The cache key includes a hash of the API key so different keys produce separate instances.
   *
   * @param {string} providerName - 'grok' | 'claude' | 'openai' | custom registered name
   * @param {string | null} model - Model ID (null = provider default)
   * @param {string} apiKey - API key for the provider
   * @returns {import('./BaseLLMProvider.js').BaseLLMProvider}
   * @throws {LLMError} When the provider name is unknown or apiKey is missing/empty
   */
  static get(providerName, model, apiKey) {
    // Resolve the provider class before doing anything else so an unknown
    // name fails with a clear LLMError rather than a hashing TypeError.
    const ProviderClass = REGISTRY.get(providerName);
    if (!ProviderClass) {
      throw new LLMError(
        `Unknown LLM provider: "${providerName}". ` +
          `Registered providers: ${[...REGISTRY.keys()].join(', ')}`
      );
    }

    // createHash().update(undefined) throws an opaque TypeError; validate here
    // so a missing env var surfaces as an actionable configuration error.
    if (typeof apiKey !== 'string' || apiKey.length === 0) {
      throw new LLMError(
        `LLMRouter.get: apiKey is required for provider "${providerName}"`
      );
    }

    // Hash (truncated) so the raw key never appears in cache keys or logs,
    // while different keys still map to separate instances.
    const keyHash = createHash('sha256').update(apiKey).digest('hex').slice(0, 8);
    const cacheKey = `${providerName}:${model ?? 'default'}:${keyHash}`;

    if (!_cache.has(cacheKey)) {
      const instance = model
        ? new ProviderClass(apiKey, model)
        : new ProviderClass(apiKey); // let the provider pick its default model
      _cache.set(cacheKey, instance);
    }

    return _cache.get(cacheKey);
  }

  /**
   * Register a custom provider class under a name.
   * Call this before creating agents that use the custom provider.
   *
   * @param {string} name
   * @param {typeof import('./BaseLLMProvider.js').BaseLLMProvider} ProviderClass
   */
  static register(name, ProviderClass) {
    REGISTRY.set(name, ProviderClass);
  }

  /**
   * Clear the instance cache. Useful in tests.
   */
  static clearCache() {
    _cache.clear();
  }

  /**
   * List all registered provider names.
   * @returns {string[]}
   */
  static listProviders() {
    return [...REGISTRY.keys()];
  }
}
@@ -0,0 +1,307 @@
1
// Claude (Anthropic) LLM Provider
// Uses Anthropic Messages API with full tool-calling support

import { BaseLLMProvider } from '../BaseLLMProvider.js';

const ANTHROPIC_API_BASE = 'https://api.anthropic.com/v1';
const DEFAULT_MODEL = 'claude-sonnet-4-20250514';
const DEFAULT_TIMEOUT = 120000; // 120 seconds
const ANTHROPIC_VERSION = '2023-06-01';

export class ClaudeProvider extends BaseLLMProvider {
  /**
   * @param {string} apiKey - Anthropic API key
   * @param {string} [model] - Model to use
   * @param {number} [timeout] - Request timeout in ms
   */
  constructor(apiKey, model = DEFAULT_MODEL, timeout = DEFAULT_TIMEOUT) {
    super();
    if (!apiKey) throw new Error('ClaudeProvider: apiKey is required');
    this.apiKey = apiKey;
    this.model = model;
    this.timeout = timeout;
    this.baseUrl = ANTHROPIC_API_BASE;
  }

  /**
   * POST a JSON body to the Anthropic API and return the parsed response.
   * Aborts the request after `this.timeout` ms.
   *
   * @param {string} endpoint - Path appended to the base URL (e.g. '/messages')
   * @param {Object} body - Request payload
   * @returns {Promise<Object>} Parsed JSON response body
   * @throws {Error} On timeout, non-2xx status, or network failure
   */
  async _makeRequest(endpoint, body) {
    const url = `${this.baseUrl}${endpoint}`;
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), this.timeout);

    try {
      const response = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'x-api-key': this.apiKey,
          'anthropic-version': ANTHROPIC_VERSION,
        },
        body: JSON.stringify(body),
        signal: controller.signal,
      });

      if (!response.ok) {
        const errorBody = await response.text();
        let errorMessage;
        try {
          const errorJson = JSON.parse(errorBody);
          errorMessage = errorJson.error?.message || errorJson.message || errorBody;
        } catch {
          errorMessage = errorBody; // non-JSON error body — surface it raw
        }
        throw new Error(`Anthropic API error (${response.status}): ${errorMessage}`);
      }

      return await response.json();
    } catch (error) {
      if (error.name === 'AbortError') throw new Error('Anthropic API request timed out');
      throw error;
    } finally {
      // Single cleanup point (was duplicated in the try and catch paths).
      clearTimeout(timeoutId);
    }
  }

  /**
   * Convert OpenAI-format messages to Anthropic format.
   *
   * Differences:
   * - System messages are extracted (returned separately as the system field)
   * - role:'tool' messages become role:'user' with content array [{type:'tool_result',...}]
   * - Consecutive tool results are merged into one user message
   * - Assistant messages with tool_calls become content array [{type:'tool_use',...}]
   *
   * @param {Array} messages - OpenAI-format messages
   * @returns {{ system: string, messages: Array }}
   */
  _convertMessages(messages) {
    let system = '';
    const converted = [];

    for (const msg of messages) {
      if (msg.role === 'system') {
        // Multiple system messages are concatenated with a blank line between.
        system = system ? `${system}\n\n${msg.content}` : msg.content;
        continue;
      }

      if (msg.role === 'tool') {
        // Tool result → append to last user message if it's already a content array,
        // or create a new user message. Merging keeps the strict user/assistant
        // alternation Anthropic requires when several tools ran in one turn.
        const toolResult = {
          type: 'tool_result',
          tool_use_id: msg.tool_call_id,
          content: msg.content,
        };

        const last = converted[converted.length - 1];
        if (last && last.role === 'user' && Array.isArray(last.content)) {
          last.content.push(toolResult);
        } else {
          converted.push({ role: 'user', content: [toolResult] });
        }
        continue;
      }

      if (msg.role === 'assistant' && msg.tool_calls?.length > 0) {
        // Assistant message with tool calls → content array with tool_use blocks
        const content = [];
        if (msg.content) {
          content.push({ type: 'text', text: msg.content });
        }
        for (const tc of msg.tool_calls) {
          // Arguments may be a JSON string (OpenAI wire format) or an object
          // (our own _processResponse output) — accept both; fall back to {}.
          let input;
          try {
            input = typeof tc.function?.arguments === 'string'
              ? JSON.parse(tc.function.arguments)
              : (tc.function?.arguments ?? tc.arguments ?? {});
          } catch {
            input = {};
          }
          content.push({
            type: 'tool_use',
            id: tc.id,
            name: tc.function?.name ?? tc.name,
            input,
          });
        }
        converted.push({ role: 'assistant', content });
        continue;
      }

      // Regular user or assistant message
      converted.push({ role: msg.role, content: msg.content });
    }

    return { system, messages: converted };
  }

  /**
   * Convert tool definitions from framework format to Anthropic format.
   * Framework: { name, description, parameters } (parameters = JSON Schema)
   * Anthropic: { name, description, input_schema } (same schema, different key)
   */
  _formatTools(tools) {
    if (!tools || !Array.isArray(tools)) return [];
    return tools.map(tool => ({
      name: tool.name,
      description: tool.description,
      input_schema: tool.parameters || { type: 'object', properties: {} },
    }));
  }

  /**
   * Convert Anthropic response to standardized format + update messages array.
   * @param {Object} response - Raw Anthropic API response
   * @param {Array} messages - OpenAI-format messages array (for continuation)
   * @returns {import('../BaseLLMProvider.js').CompletionResponse}
   */
  _processResponse(response, messages) {
    const textBlocks = response.content.filter(b => b.type === 'text');
    const toolUseBlocks = response.content.filter(b => b.type === 'tool_use');

    const content = textBlocks.map(b => b.text).join('');
    const hasToolCalls = toolUseBlocks.length > 0;

    // Extract tool calls in standardized format (arguments always an object)
    const toolCalls = hasToolCalls
      ? toolUseBlocks.map(b => ({
          id: b.id,
          name: b.name,
          arguments: b.input,
        }))
      : undefined;

    // Build updated OpenAI-format message array
    // Store arguments as objects (not strings) so _convertMessages doesn't need to JSON.parse
    const updatedMessages = [...messages];
    if (hasToolCalls) {
      updatedMessages.push({
        role: 'assistant',
        content: content || null,
        tool_calls: toolUseBlocks.map(b => ({
          id: b.id,
          type: 'function',
          function: {
            name: b.name,
            arguments: b.input, // keep as object — _convertMessages handles both
          },
        })),
      });
    } else {
      updatedMessages.push({ role: 'assistant', content });
    }

    return {
      content,
      parsed: null,
      usage: {
        promptTokens: response.usage?.input_tokens || 0,
        completionTokens: response.usage?.output_tokens || 0,
        totalTokens: (response.usage?.input_tokens || 0) + (response.usage?.output_tokens || 0),
      },
      model: response.model || this.model,
      // NOTE(review): stop_reason is passed through Anthropic-native
      // ('end_turn', 'tool_use', 'max_tokens'), not normalized to the
      // 'stop'/'tool_calls'/'length' names in the base-class docs — callers
      // should accept both; confirm the intended contract.
      finishReason: response.stop_reason || 'unknown',
      toolCalls,
      messages: updatedMessages,
    };
  }

  /**
   * Generate a completion. See BaseLLMProvider.complete for the options contract.
   * Builds an OpenAI-format message array, converts it to Anthropic format,
   * calls /messages, and normalizes the response.
   *
   * @param {string} prompt - User prompt (ignored when options.messages is set)
   * @param {Object} [options={}]
   * @returns {Promise<import('../BaseLLMProvider.js').CompletionResponse>}
   */
  async complete(prompt, options = {}) {
    // Build OpenAI-format message array, then convert to Anthropic format
    let openAiMessages;
    if (options.messages && Array.isArray(options.messages)) {
      openAiMessages = options.messages;
    } else {
      openAiMessages = [];
      if (options.systemPrompt) {
        openAiMessages.push({ role: 'system', content: options.systemPrompt });
      }
      openAiMessages.push({ role: 'user', content: prompt });
    }

    const { system, messages: anthropicMessages } = this._convertMessages(openAiMessages);

    const requestBody = {
      model: options.model || this.model,
      max_tokens: options.maxTokens ?? 4096, // Anthropic requires max_tokens
      temperature: options.temperature ?? 0.1,
      messages: anthropicMessages,
    };

    if (system) requestBody.system = system;

    if (options.enableTools && options.tools) {
      requestBody.tools = this._formatTools(options.tools);
    }

    const response = await this._makeRequest('/messages', requestBody);
    return this._processResponse(response, openAiMessages);
  }

  /**
   * Generate a completion with a JSON schema for structured output.
   * With enableTools, the schema applies only to the final (non-tool) answer;
   * a JSON parse failure there is non-fatal so the agent loop can continue
   * (consistent with GrokProvider). Without tools, the schema is appended to
   * the prompt and a parse failure throws.
   *
   * @param {string} prompt
   * @param {Object} schema - JSON Schema { type, properties, required }
   * @param {Object} [options={}]
   * @returns {Promise<import('../BaseLLMProvider.js').CompletionResponse>}
   */
  async completeWithSchema(prompt, schema, options = {}) {
    // For tool-calling mode, schema is used for the final answer — don't add schema to prompt
    if (options.enableTools) {
      const response = await this.complete(prompt, options);
      // If we got a final text answer (no tool calls), try to parse it as JSON
      if (response.content && !response.toolCalls) {
        try {
          response.parsed = this._parseJsonContent(response.content);
        } catch {
          // Non-fatal in tool-calling mode: the final answer may legitimately
          // be prose. Leave parsed null (matches GrokProvider's behavior).
          response.parsed = null;
        }
        if (schema?.required && response.parsed) {
          const missing = schema.required.filter(f => !(f in response.parsed));
          if (missing.length > 0) {
            console.warn(`[ClaudeProvider] Response missing fields: ${missing.join(', ')}`);
          }
        }
      }
      return response;
    }

    // Standard schema completion (no tools)
    const schemaPrompt = `${prompt}\n\n## Required JSON Schema:\n\`\`\`json\n${JSON.stringify(schema, null, 2)}\n\`\`\`\n\nRespond with a JSON object that matches this schema exactly.`;

    const systemPrompt = options.systemPrompt
      ? `${options.systemPrompt}\n\nIMPORTANT: You must respond with valid JSON only. No markdown, no code blocks, no explanation — just the JSON object.`
      : 'You must respond with valid JSON only. No markdown, no code blocks, no explanation — just the JSON object.';

    const response = await this.complete(schemaPrompt, { ...options, systemPrompt });
    response.parsed = this._parseJsonContent(response.content);
    return response;
  }

  /**
   * Parse content string as JSON, stripping markdown code fences if present.
   * @param {string} content
   * @returns {Object}
   * @throws {Error} When the (de-fenced) content is not valid JSON
   */
  _parseJsonContent(content) {
    let text = content.trim();
    if (text.startsWith('```json')) text = text.slice(7);
    else if (text.startsWith('```')) text = text.slice(3);
    if (text.endsWith('```')) text = text.slice(0, -3);
    text = text.trim();

    try {
      return JSON.parse(text);
    } catch (err) {
      throw new Error(`ClaudeProvider: invalid JSON response — ${err.message}`);
    }
  }

  /**
   * Test connectivity to the Anthropic API with a minimal completion.
   * @returns {Promise<boolean>} true when a non-empty response came back
   */
  async testConnection() {
    try {
      const response = await this.complete("Say 'ok'", { maxTokens: 10, temperature: 0 });
      return !!response.content;
    } catch {
      return false;
    }
  }

  /**
   * Known model IDs for this provider (informational; not validated server-side).
   * @returns {string[]}
   */
  listModels() {
    return [
      'claude-sonnet-4-20250514',
      'claude-opus-4-20250514',
      'claude-3-5-sonnet-20241022',
      'claude-3-5-haiku-20241022',
      'claude-3-opus-20240229',
      'claude-3-haiku-20240307',
    ];
  }
}

export default ClaudeProvider;
@@ -0,0 +1,208 @@
1
// Grok (xAI) LLM Provider
// Uses OpenAI-compatible API format

import { BaseLLMProvider } from '../BaseLLMProvider.js';

const GROK_API_BASE = 'https://api.x.ai/v1';
const DEFAULT_MODEL = 'grok-code-fast-1';
const DEFAULT_TIMEOUT = 60000; // 60 seconds

export class GrokProvider extends BaseLLMProvider {
  /**
   * @param {string} apiKey - xAI API key
   * @param {string} [model] - Model to use
   * @param {number} [timeout] - Request timeout in ms
   */
  constructor(apiKey, model = DEFAULT_MODEL, timeout = DEFAULT_TIMEOUT) {
    super();
    if (!apiKey) throw new Error('GrokProvider: apiKey is required');
    this.apiKey = apiKey;
    this.model = model;
    this.timeout = timeout;
    this.baseUrl = GROK_API_BASE;
  }

  /**
   * POST a JSON body to the xAI API and return the parsed response.
   * The request is aborted after `this.timeout` ms.
   *
   * @param {string} endpoint - Path appended to the base URL
   * @param {Object} body - Request payload
   * @returns {Promise<Object>} Parsed JSON response body
   * @throws {Error} On timeout, non-2xx status, or network failure
   */
  async _makeRequest(endpoint, body) {
    const abort = new AbortController();
    const timer = setTimeout(() => abort.abort(), this.timeout);

    try {
      const res = await fetch(`${this.baseUrl}${endpoint}`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${this.apiKey}`,
        },
        body: JSON.stringify(body),
        signal: abort.signal,
      });
      clearTimeout(timer);

      if (res.ok) return res.json();

      // Prefer the structured error message when the body is JSON.
      const raw = await res.text();
      let message = raw;
      try {
        const parsedError = JSON.parse(raw);
        message = parsedError.error?.message || parsedError.message || raw;
      } catch {
        // body was not JSON — keep the raw text
      }
      throw new Error(`Grok API error (${res.status}): ${message}`);
    } catch (error) {
      clearTimeout(timer);
      if (error.name === 'AbortError') throw new Error('Grok API request timed out');
      throw error;
    }
  }

  /**
   * Assemble the OpenAI-format message array for a request.
   * A caller-supplied options.messages array wins over prompt/systemPrompt.
   */
  _buildMessages(prompt, options) {
    if (Array.isArray(options.messages)) {
      return options.messages;
    }
    const result = options.systemPrompt
      ? [{ role: 'system', content: options.systemPrompt }]
      : [];
    result.push({ role: 'user', content: prompt });
    return result;
  }

  /**
   * Wrap framework tool definitions in the OpenAI function-calling envelope.
   * Framework: { name, description, parameters } → { type:'function', function:{...} }
   */
  _formatTools(tools) {
    if (!Array.isArray(tools)) return [];
    return tools.map(({ name, description, parameters }) => ({
      type: 'function',
      function: {
        name,
        description,
        parameters: parameters || { type: 'object', properties: {} },
      },
    }));
  }

  /** True when the choice carries at least one tool call. */
  _hasToolCalls(choice) {
    return (choice?.message?.tool_calls?.length ?? 0) > 0;
  }

  /**
   * Normalize the choice's tool calls to { id, name, arguments } with
   * arguments always an object (unparseable argument strings become {}).
   */
  _extractToolCalls(choice) {
    if (!this._hasToolCalls(choice)) return [];

    const toArgsObject = (rawArgs) => {
      if (typeof rawArgs !== 'string') return rawArgs ?? {};
      try {
        return JSON.parse(rawArgs);
      } catch {
        return {};
      }
    };

    return choice.message.tool_calls.map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      arguments: toArgsObject(tc.function.arguments),
    }));
  }

  /**
   * Generate a completion. See BaseLLMProvider.complete for the options contract.
   * @param {string} prompt - User prompt (ignored when options.messages is set)
   * @param {Object} [options={}]
   * @returns {Promise<import('../BaseLLMProvider.js').CompletionResponse>}
   */
  async complete(prompt, options = {}) {
    const messages = this._buildMessages(prompt, options);

    const payload = {
      model: options.model || this.model,
      messages,
      temperature: options.temperature ?? 0.1,
      max_tokens: options.maxTokens ?? 2048,
    };
    if (options.enableTools && options.tools) {
      payload.tools = this._formatTools(options.tools);
    }
    // response_format and tool calling are mutually exclusive here.
    if (options.jsonMode && !options.enableTools) {
      payload.response_format = { type: 'json_object' };
    }

    const data = await this._makeRequest('/chat/completions', payload);
    const choice = data.choices?.[0];
    if (!choice) throw new Error('No completion returned from Grok API');

    const content = choice.message?.content || '';
    const wantsTools = this._hasToolCalls(choice);

    // Extend the conversation with the assistant turn for loop continuation.
    const history = [...messages];
    if (wantsTools) {
      history.push({
        role: 'assistant',
        content: content || null,
        tool_calls: choice.message.tool_calls, // raw OpenAI format round-trips
      });
    } else {
      history.push({ role: 'assistant', content });
    }

    let parsed = null;
    if (options.jsonMode && content && !wantsTools) {
      try {
        parsed = JSON.parse(content);
      } catch (error) {
        throw new Error(`Failed to parse JSON response: ${error.message}`);
      }
    }

    const calls = wantsTools ? this._extractToolCalls(choice) : [];

    return {
      content,
      parsed,
      usage: {
        promptTokens: data.usage?.prompt_tokens || 0,
        completionTokens: data.usage?.completion_tokens || 0,
        totalTokens: data.usage?.total_tokens || 0,
      },
      model: data.model || this.model,
      finishReason: choice.finish_reason || 'unknown',
      toolCalls: calls.length > 0 ? calls : undefined,
      messages: history,
    };
  }

  /**
   * Generate a completion constrained by a JSON schema.
   * With enableTools, the schema only applies to the final (non-tool) answer
   * and a parse failure is non-fatal; without tools, the schema is appended
   * to the prompt, jsonMode is forced on, and missing required fields throw.
   *
   * @param {string} prompt
   * @param {Object} schema - JSON Schema { type, properties, required }
   * @param {Object} [options={}]
   * @returns {Promise<import('../BaseLLMProvider.js').CompletionResponse>}
   */
  async completeWithSchema(prompt, schema, options = {}) {
    if (options.enableTools) {
      const result = await this.complete(prompt, { ...options, jsonMode: false });
      if (result.content && !result.toolCalls) {
        try {
          result.parsed = JSON.parse(result.content);
        } catch {
          // non-fatal in tool-calling mode
        }
      }
      return result;
    }

    const schemaPrompt = `${prompt}\n\nYou MUST respond with a valid JSON object that matches this structure:\n${JSON.stringify(schema, null, 2)}\n\nRespond ONLY with the JSON object, no additional text.`;

    const result = await this.complete(schemaPrompt, { ...options, jsonMode: true });

    if (result.parsed && schema?.required) {
      const missing = schema.required.filter((field) => !(field in result.parsed));
      if (missing.length > 0) {
        throw new Error(`Response missing required fields: ${missing.join(', ')}`);
      }
    }

    return result;
  }

  /**
   * Test connectivity to the xAI API with a minimal completion.
   * @returns {Promise<boolean>} true when a non-empty response came back
   */
  async testConnection() {
    try {
      const { content } = await this.complete("Say 'ok'", { maxTokens: 10, temperature: 0 });
      return !!content;
    } catch {
      return false;
    }
  }

  /**
   * Known model IDs for this provider (informational; not validated server-side).
   * @returns {string[]}
   */
  listModels() {
    return ['grok-code-fast-1', 'grok-2', 'grok-2-mini'];
  }
}

export default GrokProvider;