tachibot-mcp 2.0.2 → 2.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +28 -0
  2. package/dist/src/collaborative-orchestrator.js +2 -1
  3. package/dist/src/config/model-constants.js +55 -28
  4. package/dist/src/config/model-defaults.js +14 -14
  5. package/dist/src/memory/memory-manager.js +3 -2
  6. package/dist/src/sequential-thinking.js +2 -1
  7. package/dist/src/tools/openai-tools.js +210 -118
  8. package/dist/src/tools/perplexity-tools.js +2 -6
  9. package/dist/src/tools/unified-ai-provider.js +11 -12
  10. package/dist/src/workflows/engine/handlers/StepExecutionHandler.js +1 -1
  11. package/dist/src/workflows/tool-mapper.js +20 -24
  12. package/package.json +1 -3
  13. package/tools.config.json +1 -1
  14. package/workflows/core/iterative-problem-solver.yaml +2 -2
  15. package/workflows/system/scout.yaml +1 -1
  16. package/workflows/ultra-creative-brainstorm.yaml +2 -2
  17. package/dist/personality/komaai-expressions.js +0 -12
  18. package/dist/profiles/balanced.json +0 -33
  19. package/dist/profiles/code_focus.json +0 -33
  20. package/dist/profiles/full.json +0 -33
  21. package/dist/profiles/minimal.json +0 -33
  22. package/dist/profiles/research_power.json +0 -33
  23. package/dist/src/application/services/focus/ModeRegistry.js +0 -46
  24. package/dist/src/application/services/focus/modes/status.mode.js +0 -50
  25. package/dist/src/profiles/debug_intensive.js +0 -59
  26. package/dist/src/profiles/research_code.js +0 -59
  27. package/dist/src/profiles/workflow_builder.js +0 -53
  28. package/dist/src/tools/consolidated/ai-router.js +0 -174
  29. package/dist/src/tools/consolidated/ai-tool.js +0 -48
  30. package/dist/src/tools/consolidated/brainstorm-tool.js +0 -87
  31. package/dist/src/tools/consolidated/environment-detector.js +0 -80
  32. package/dist/src/tools/consolidated/index.js +0 -50
  33. package/dist/src/tools/consolidated/search-tool.js +0 -110
  34. package/dist/src/tools/consolidated/workflow-tool.js +0 -238
  35. package/dist/src/tools/pingpong-tool.js +0 -343
  36. package/dist/src/workflows/orchestrator-integration.js +0 -200
  37. package/dist/src/workflows/workflow-engine.js +0 -573
  38. package/dist/src/workflows/workflow-parser.js +0 -283
  39. package/dist/test-workflow-file-output.js +0 -93
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * OpenAI Tools Implementation
3
- * Provides GPT-5, GPT-5-mini, and GPT-5-nano model capabilities
3
+ * Provides GPT-5.1 model capabilities with reasoning_effort control
4
4
  */
5
5
  import { z } from "zod";
6
6
  import { config } from "dotenv";
@@ -12,20 +12,77 @@ const __dirname = path.dirname(__filename);
12
12
  config({ path: path.resolve(__dirname, '../../../.env') });
13
13
  // OpenAI API configuration
14
14
  const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
15
- const OPENAI_API_URL = "https://api.openai.com/v1/chat/completions";
16
- // Available OpenAI models (GPT-5 family only)
17
- export var OpenAIModel;
18
- (function (OpenAIModel) {
19
- OpenAIModel["GPT5"] = "gpt-5";
20
- OpenAIModel["GPT5_MINI"] = "gpt-5-mini";
21
- OpenAIModel["GPT5_NANO"] = "gpt-5-nano";
22
- })(OpenAIModel || (OpenAIModel = {}));
15
+ const OPENAI_CHAT_URL = "https://api.openai.com/v1/chat/completions";
16
+ const OPENAI_RESPONSES_URL = "https://api.openai.com/v1/responses";
17
+ // Zod schemas for API responses
18
+ const ChatCompletionResponseSchema = z.object({
19
+ id: z.string(),
20
+ object: z.string(),
21
+ created: z.number(),
22
+ model: z.string(),
23
+ choices: z.array(z.object({
24
+ index: z.number(),
25
+ message: z.object({
26
+ role: z.string(),
27
+ content: z.string()
28
+ }),
29
+ finish_reason: z.string().optional()
30
+ })),
31
+ usage: z.object({
32
+ prompt_tokens: z.number().optional(),
33
+ completion_tokens: z.number().optional(),
34
+ total_tokens: z.number().optional()
35
+ }).optional()
36
+ });
37
+ const ResponsesAPIOutputSchema = z.object({
38
+ id: z.string(),
39
+ type: z.string(),
40
+ status: z.string().optional(),
41
+ content: z.array(z.object({
42
+ type: z.string(), // Can be "output_text", "text", etc.
43
+ text: z.string().optional(), // Make optional to handle different content types
44
+ annotations: z.array(z.unknown()).optional(),
45
+ logprobs: z.array(z.unknown()).optional()
46
+ })).optional(),
47
+ summary: z.array(z.unknown()).optional(),
48
+ role: z.string().optional()
49
+ });
50
+ const ResponsesAPISchema = z.object({
51
+ id: z.string(),
52
+ object: z.literal("response"),
53
+ created_at: z.number(),
54
+ status: z.string(),
55
+ model: z.string(),
56
+ output: z.array(ResponsesAPIOutputSchema),
57
+ reasoning: z.object({
58
+ effort: z.string(),
59
+ summary: z.string().nullable().optional()
60
+ }).optional(),
61
+ usage: z.object({
62
+ input_tokens: z.number().optional(), // Fixed: was prompt_tokens
63
+ input_tokens_details: z.object({
64
+ cached_tokens: z.number().optional()
65
+ }).optional(),
66
+ output_tokens: z.number().optional(), // Fixed: was completion_tokens
67
+ output_tokens_details: z.object({
68
+ reasoning_tokens: z.number().optional()
69
+ }).optional(),
70
+ total_tokens: z.number().optional()
71
+ }).optional()
72
+ });
73
+ // Available OpenAI models (GPT-5.1 family)
74
+ export var OpenAI51Model;
75
+ (function (OpenAI51Model) {
76
+ OpenAI51Model["FULL"] = "gpt-5.1";
77
+ OpenAI51Model["CODEX_MINI"] = "gpt-5.1-codex-mini";
78
+ OpenAI51Model["CODEX"] = "gpt-5.1-codex";
79
+ })(OpenAI51Model || (OpenAI51Model = {}));
23
80
  /**
24
81
  * Call OpenAI API with model fallback support
25
- * Automatically detects GPT-5 models and uses correct endpoint + format
82
+ * Automatically detects GPT-5.1 models and uses correct endpoint + format
26
83
  */
27
- export async function callOpenAI(messages, model = OpenAIModel.GPT5_MINI, temperature = 1, maxTokens = 16384, // Increased default for comprehensive responses
28
- requireConfirmation = false, skipValidation = false) {
84
+ export async function callOpenAI(messages, model = OpenAI51Model.CODEX_MINI, temperature = 0.7, maxTokens = 16384, // Increased default for comprehensive responses
85
+ reasoningEffort = "low", requireConfirmation = false, skipValidation = false) {
29
86
  console.error(`🔍 TRACE: callOpenAI called with model: ${model}`);
30
87
  if (!OPENAI_API_KEY) {
31
88
  console.error(`🔍 TRACE: No API key found`);
@@ -42,11 +99,11 @@ requireConfirmation = false, skipValidation = false) {
42
99
  }
43
100
  return { ...msg, content: validation.sanitized };
44
101
  });
45
- // Model fallback chain - GPT-5 models have no fallbacks to test actual availability
102
+ // Model fallback chain - GPT-5.1 models have no fallbacks to test actual availability
46
103
  const modelFallbacks = {
47
- [OpenAIModel.GPT5]: [], // No fallback - test actual GPT-5
48
- [OpenAIModel.GPT5_MINI]: [], // No fallback - test actual GPT-5-mini
49
- [OpenAIModel.GPT5_NANO]: [] // No fallback - test actual GPT-5-nano
104
+ [OpenAI51Model.FULL]: [], // No fallback - test actual GPT-5.1
105
+ [OpenAI51Model.CODEX_MINI]: [], // No fallback - test actual GPT-5.1-codex-mini
106
+ [OpenAI51Model.CODEX]: [] // No fallback - test actual GPT-5.1-codex
50
107
  };
51
108
  const modelsToTry = [model, ...(modelFallbacks[model] || [])];
52
109
  console.error(`🔍 TRACE: Models to try: ${modelsToTry.join(', ')}`);
@@ -54,32 +111,34 @@ requireConfirmation = false, skipValidation = false) {
54
111
  for (const currentModel of modelsToTry) {
55
112
  console.error(`🔍 TRACE: Trying model: ${currentModel}`);
56
113
  try {
57
- // Detect if this is a GPT-5 model (uses /v1/responses endpoint)
58
- const isGPT5 = currentModel.startsWith('gpt-5');
59
- const endpoint = isGPT5
60
- ? "https://api.openai.com/v1/responses"
61
- : OPENAI_API_URL;
62
- // For GPT-5: convert messages to input string
63
- const input = isGPT5
64
- ? validatedMessages.map(m => m.role === 'system' ? `System: ${m.content}` : m.content).join('\n\n')
65
- : undefined;
66
- const requestBody = isGPT5 ? {
67
- model: currentModel,
68
- input: input,
69
- reasoning: {
70
- effort: "minimal" // minimal/low/medium/high
71
- },
72
- text: {
73
- verbosity: "medium" // silent/minimal/concise/balanced/medium/detailed/exhaustive
74
- }
75
- } : {
76
- model: currentModel,
77
- messages: validatedMessages,
78
- temperature,
79
- max_tokens: maxTokens,
80
- stream: false
81
- };
82
- console.error(`🔍 TRACE: Using ${isGPT5 ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
114
+ // GPT-5.1 models use /v1/responses, others use /v1/chat/completions
115
+ const isGPT51 = currentModel.startsWith('gpt-5.1');
116
+ const endpoint = isGPT51 ? OPENAI_RESPONSES_URL : OPENAI_CHAT_URL;
117
+ let requestBody;
118
+ // GPT-5.1 uses Responses API format, others use Chat Completions format
119
+ if (isGPT51) {
120
+ // Responses API format - NO temperature, use reasoning.effort instead
121
+ requestBody = {
122
+ model: currentModel,
123
+ input: validatedMessages,
124
+ max_output_tokens: maxTokens,
125
+ stream: false,
126
+ reasoning: {
127
+ effort: reasoningEffort // "none", "low", "medium", "high"
128
+ }
129
+ };
130
+ }
131
+ else {
132
+ // Chat Completions format
133
+ requestBody = {
134
+ model: currentModel,
135
+ messages: validatedMessages,
136
+ temperature,
137
+ max_tokens: maxTokens,
138
+ stream: false
139
+ };
140
+ }
141
+ console.error(`🔍 TRACE: Using ${isGPT51 ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
83
142
  const response = await fetch(endpoint, {
84
143
  method: "POST",
85
144
  headers: {
@@ -99,19 +158,38 @@ requireConfirmation = false, skipValidation = false) {
99
158
  }
100
159
  throw new Error(lastError);
101
160
  }
102
- const data = await response.json();
103
- // Parse response based on endpoint type
104
- let result;
105
- if (isGPT5) {
106
- // GPT-5 /v1/responses format: output array with message objects
107
- const messageOutput = data.output?.find((o) => o.type === 'message');
108
- const textContent = messageOutput?.content?.find((c) => c.type === 'output_text');
109
- result = textContent?.text || "No response from OpenAI";
161
+ const rawData = await response.json();
162
+ // Parse based on API type - they have DIFFERENT response formats!
163
+ let rawContent;
164
+ if (isGPT51) {
165
+ // Validate and parse Responses API format
166
+ const parseResult = ResponsesAPISchema.safeParse(rawData);
167
+ if (parseResult.success) {
168
+ const data = parseResult.data;
169
+ const messageOutput = data.output.find(item => item.type === 'message');
170
+ rawContent = messageOutput?.content?.[0]?.text;
171
+ // Capture reasoning info
172
+ if (data.reasoning) {
173
+ console.error(`🔍 TRACE: Reasoning effort: ${data.reasoning.effort}`);
174
+ }
175
+ }
176
+ else {
177
+ console.error(`🔍 TRACE: Failed to parse Responses API response:`, parseResult.error);
178
+ }
110
179
  }
111
180
  else {
112
- // GPT-4 /v1/chat/completions format
113
- result = data.choices?.[0]?.message?.content || "No response from OpenAI";
181
+ // Validate and parse Chat Completions API format
182
+ const parseResult = ChatCompletionResponseSchema.safeParse(rawData);
183
+ if (parseResult.success) {
184
+ const chatData = parseResult.data;
185
+ rawContent = chatData.choices[0]?.message?.content;
186
+ }
187
+ else {
188
+ console.error(`🔍 TRACE: Failed to parse Chat Completions response:`, parseResult.error);
189
+ }
114
190
  }
191
+ // Ensure result is always a string
192
+ const result = rawContent || "No response from OpenAI";
115
193
  console.error(`🔍 TRACE: ${currentModel} SUCCESS - Response length: ${result.length}`);
116
194
  return result;
117
195
  }
@@ -122,13 +200,13 @@ requireConfirmation = false, skipValidation = false) {
122
200
  }
123
201
  }
124
202
  console.error(`🔍 TRACE: ALL MODELS FAILED - Last error: ${lastError}`);
125
- return `[GPT-5 model "${model}" not available. Error: ${lastError}]`;
203
+ return `[GPT-5.1 model "${model}" not available. Error: ${lastError}]`;
126
204
  }
127
205
  /**
128
206
  * Call OpenAI API with custom parameters for specific models
129
- * Automatically detects GPT-5 models and uses correct endpoint + format
207
+ * Automatically detects GPT-5.1 models and uses correct endpoint + format
130
208
  */
131
- async function callOpenAIWithCustomParams(messages, model, temperature = 0.8, maxTokens = 16384, // Increased for detailed brainstorming
209
+ async function callOpenAIWithCustomParams(messages, model, temperature = 0.7, maxTokens = 16384, // Increased for detailed brainstorming
132
210
  reasoningEffort = "low", skipValidation = false) {
133
211
  console.error(`🔍 TRACE: callOpenAIWithCustomParams called with model: ${model}, reasoning_effort: ${reasoningEffort}`);
134
212
  if (!OPENAI_API_KEY) {
@@ -147,37 +225,35 @@ reasoningEffort = "low", skipValidation = false) {
147
225
  return { ...msg, content: validation.sanitized };
148
226
  });
149
227
  try {
150
- const isGPT5 = model.startsWith('gpt-5');
151
- const endpoint = isGPT5
152
- ? "https://api.openai.com/v1/responses"
153
- : OPENAI_API_URL;
154
- // For GPT-5: convert messages to input string
155
- const input = isGPT5
156
- ? validatedMessages.map(m => m.role === 'system' ? `System: ${m.content}` : m.content).join('\n\n')
157
- : undefined;
158
- const requestBody = isGPT5 ? {
159
- model: model,
160
- input: input,
161
- reasoning: {
162
- effort: reasoningEffort // minimal/low/medium/high
163
- },
164
- text: {
165
- verbosity: "medium"
166
- }
167
- } : {
168
- model: model,
169
- messages: validatedMessages,
170
- temperature,
171
- max_tokens: maxTokens,
172
- stream: false
173
- };
174
- console.error(`🔍 TRACE: Using ${isGPT5 ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
175
- if (isGPT5) {
176
- console.error(`🔍 TRACE: GPT-5 params: reasoning_effort=${reasoningEffort}`);
228
+ // GPT-5.1 models use /v1/responses, others use /v1/chat/completions
229
+ const isGPT51 = model.startsWith('gpt-5.1');
230
+ const endpoint = isGPT51 ? OPENAI_RESPONSES_URL : OPENAI_CHAT_URL;
231
+ let requestBody;
232
+ // GPT-5.1 uses Responses API format, others use Chat Completions format
233
+ if (isGPT51) {
234
+ // Responses API format - NO temperature, use reasoning.effort instead
235
+ requestBody = {
236
+ model: model,
237
+ input: validatedMessages,
238
+ max_output_tokens: maxTokens,
239
+ stream: false,
240
+ reasoning: {
241
+ effort: reasoningEffort // "none", "low", "medium", "high"
242
+ }
243
+ };
177
244
  }
178
245
  else {
179
- console.error(`🔍 TRACE: GPT-4 params: max_tokens=${maxTokens}, temperature=${temperature}`);
246
+ // Chat Completions format
247
+ requestBody = {
248
+ model: model,
249
+ messages: validatedMessages,
250
+ temperature,
251
+ max_tokens: maxTokens,
252
+ stream: false
253
+ };
180
254
  }
255
+ console.error(`🔍 TRACE: Using ${isGPT51 ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
256
+ console.error(`🔍 TRACE: Model params: max_tokens=${maxTokens}, temperature=${temperature}${isGPT51 ? `, reasoning_effort=${reasoningEffort}` : ''}`);
181
257
  const response = await fetch(endpoint, {
182
258
  method: "POST",
183
259
  headers: {
@@ -191,19 +267,38 @@ reasoningEffort = "low", skipValidation = false) {
191
267
  console.error(`🔍 TRACE: ${model} failed - Status: ${response.status}, Error: ${error}`);
192
268
  return `[${model} failed: ${response.status} - ${error}]`;
193
269
  }
194
- const data = await response.json();
195
- // Parse response based on endpoint type
196
- let result;
197
- if (isGPT5) {
198
- // GPT-5 /v1/responses format
199
- const messageOutput = data.output?.find((o) => o.type === 'message');
200
- const textContent = messageOutput?.content?.find((c) => c.type === 'output_text');
201
- result = textContent?.text || "No response from OpenAI";
270
+ const rawData = await response.json();
271
+ // Parse based on API type - they have DIFFERENT response formats!
272
+ let rawContent;
273
+ if (isGPT51) {
274
+ // Validate and parse Responses API format
275
+ const parseResult = ResponsesAPISchema.safeParse(rawData);
276
+ if (parseResult.success) {
277
+ const data = parseResult.data;
278
+ const messageOutput = data.output.find(item => item.type === 'message');
279
+ rawContent = messageOutput?.content?.[0]?.text;
280
+ // Capture reasoning info
281
+ if (data.reasoning) {
282
+ console.error(`🔍 TRACE: Reasoning effort: ${data.reasoning.effort}`);
283
+ }
284
+ }
285
+ else {
286
+ console.error(`🔍 TRACE: Failed to parse Responses API response:`, parseResult.error);
287
+ }
202
288
  }
203
289
  else {
204
- // GPT-4 /v1/chat/completions format
205
- result = data.choices?.[0]?.message?.content || "No response from OpenAI";
290
+ // Validate and parse Chat Completions API format
291
+ const parseResult = ChatCompletionResponseSchema.safeParse(rawData);
292
+ if (parseResult.success) {
293
+ const chatData = parseResult.data;
294
+ rawContent = chatData.choices[0]?.message?.content;
295
+ }
296
+ else {
297
+ console.error(`🔍 TRACE: Failed to parse Chat Completions response:`, parseResult.error);
298
+ }
206
299
  }
300
+ // Ensure result is always a string
301
+ const result = rawContent || "No response from OpenAI";
207
302
  console.error(`🔍 TRACE: ${model} SUCCESS - Response length: ${result.length}`);
208
303
  return result;
209
304
  }
@@ -246,8 +341,8 @@ export const gpt5ReasonTool = {
246
341
  content: args.query
247
342
  }
248
343
  ];
249
- // Use GPT-5; callOpenAI has fallback to 5-mini and 4o if unavailable
250
- return await callOpenAI(messages, OpenAIModel.GPT5, 0.7, 4000);
344
+ // Use GPT-5.1 with high reasoning
345
+ return await callOpenAI(messages, OpenAI51Model.FULL, 0.7, 4000, "high");
251
346
  }
252
347
  };
253
348
  /**
@@ -278,13 +373,13 @@ export const gpt5MiniReasonTool = {
278
373
  content: args.query
279
374
  }
280
375
  ];
281
- // Use GPT-5-mini directly; fallback chain will handle unavailability
282
- return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 3000);
376
+ // Use GPT-5.1-codex-mini with medium reasoning
377
+ return await callOpenAI(messages, OpenAI51Model.CODEX_MINI, 0.7, 3000, "medium");
283
378
  }
284
379
  };
285
380
  export const openaiGpt5ReasonTool = {
286
381
  name: "openai_gpt5_reason",
287
- description: "Mathematical reasoning using GPT-5-mini",
382
+ description: "Mathematical reasoning using GPT-5.1 with high reasoning effort",
288
383
  parameters: z.object({
289
384
  query: z.string(),
290
385
  context: z.string().optional(),
@@ -310,13 +405,13 @@ ${args.context ? `Context: ${args.context}` : ''}`
310
405
  content: args.query
311
406
  }
312
407
  ];
313
- // Use GPT-5-mini for reasoning
314
- return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 4000);
408
+ // Use GPT-5.1 with high reasoning effort for complex reasoning
409
+ return await callOpenAI(messages, OpenAI51Model.FULL, 0.7, 4000, "high");
315
410
  }
316
411
  };
317
412
  /**
318
413
  * OpenAI Compare Tool
319
- * Multi-option comparison and consensus building using GPT-5-mini
414
+ * Multi-option comparison and consensus building using GPT-5.1-codex-mini
320
415
  */
321
416
  export const openaiCompareTool = {
322
417
  name: "openai_compare",
@@ -342,7 +437,7 @@ ${args.includeRecommendation ? 'Provide a clear recommendation with justificatio
342
437
  content: `Topic: ${args.topic}\n\nOptions:\n${optionsList}`
343
438
  }
344
439
  ];
345
- return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 3000);
440
+ return await callOpenAI(messages, OpenAI51Model.CODEX_MINI, 0.7, 3000, "low");
346
441
  }
347
442
  };
348
443
  /**
@@ -357,13 +452,12 @@ export const openAIBrainstormTool = {
357
452
  constraints: z.string().optional(),
358
453
  quantity: z.number().optional(),
359
454
  style: z.enum(["innovative", "practical", "wild", "systematic"]).optional(),
360
- model: z.enum(["gpt-5", "gpt-5-mini", "gpt-5-nano"]).optional(),
361
- reasoning_effort: z.enum(["minimal", "low", "medium", "high"]).optional(),
362
- verbosity: z.enum(["silent", "minimal", "concise", "balanced", "detailed", "exhaustive"]).optional(),
455
+ model: z.enum(["gpt-5.1", "gpt-5.1-codex-mini", "gpt-5.1-codex"]).optional(),
456
+ reasoning_effort: z.enum(["none", "low", "medium", "high"]).optional(),
363
457
  max_tokens: z.number().optional()
364
458
  }),
365
459
  execute: async (args, options = {}) => {
366
- const { problem, constraints, quantity = 5, style = "innovative", model = "gpt-5-mini", reasoning_effort = "low", verbosity = "balanced", max_tokens = 4000 } = args;
460
+ const { problem, constraints, quantity = 5, style = "innovative", model = "gpt-5.1-codex-mini", reasoning_effort = "medium", max_tokens = 4000 } = args;
367
461
  console.error('🚀 TOOL CALLED: openai_brainstorm');
368
462
  console.error('📥 ARGS RECEIVED:', JSON.stringify(args, null, 2));
369
463
  console.error('📥 OPTIONS RECEIVED:', JSON.stringify(options, null, 2));
@@ -394,9 +488,7 @@ Format: Number each idea and provide a brief explanation.`
394
488
  // Convert string model to OpenAIModel enum
395
489
  const modelEnum = model;
396
490
  console.error(`🔍 CALLING: callOpenAIWithCustomParams with ${modelEnum}, skipValidation: ${options.skipValidation || false}`);
397
- // Use temperature=1 (default) for GPT-5, 0.8 for others
398
- const temperature = model.startsWith('gpt-5') ? 1.0 : 0.8;
399
- const result = await callOpenAIWithCustomParams(messages, modelEnum, temperature, maxTokens, reasoningEffort, options.skipValidation || false);
491
+ const result = await callOpenAIWithCustomParams(messages, modelEnum, 0.9, maxTokens, reasoningEffort, options.skipValidation || false);
400
492
  console.error('🔍 DEBUG: Got result from callOpenAI:', result.substring(0, 100));
401
493
  console.error('✅ TOOL COMPLETE: openai_brainstorm');
402
494
  return result;
@@ -404,7 +496,7 @@ Format: Number each idea and provide a brief explanation.`
404
496
  };
405
497
  /**
406
498
  * OpenAI Code Review Tool
407
- * Comprehensive code review
499
+ * Comprehensive code review using GPT-5.1-codex-mini
408
500
  */
409
501
  export const openaiCodeReviewTool = {
410
502
  name: "openai_code_review",
@@ -432,12 +524,12 @@ Format: Use sections for different aspects, be specific about line numbers or fu
432
524
  content: `Review this code:\n\`\`\`${args.language || ''}\n${args.code}\n\`\`\``
433
525
  }
434
526
  ];
435
- return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 4000);
527
+ return await callOpenAI(messages, OpenAI51Model.CODEX_MINI, 0.3, 4000, "medium");
436
528
  }
437
529
  };
438
530
  /**
439
531
  * OpenAI Explain Tool
440
- * Clear explanations for complex topics
532
+ * Clear explanations for complex topics using GPT-5.1-codex-mini
441
533
  */
442
534
  export const openaiExplainTool = {
443
535
  name: "openai_explain",
@@ -472,7 +564,7 @@ Make the explanation clear, engaging, and memorable.`
472
564
  content: `Explain: ${args.topic}`
473
565
  }
474
566
  ];
475
- return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 2500);
567
+ return await callOpenAI(messages, OpenAI51Model.CODEX_MINI, 0.7, 2500, "low");
476
568
  }
477
569
  };
478
570
  /**
@@ -489,10 +581,10 @@ export function getAllOpenAITools() {
489
581
  return [];
490
582
  }
491
583
  return [
492
- openaiGpt5ReasonTool, // GPT-5-mini reasoning
493
- openaiCompareTool, // GPT-5-mini comparison
494
- openAIBrainstormTool, // GPT-5-mini/GPT-5 brainstorming
495
- openaiCodeReviewTool, // GPT-5-mini code review
496
- openaiExplainTool // GPT-5-mini explanations
584
+ openaiGpt5ReasonTool, // GPT-5.1 reasoning (high effort)
585
+ openaiCompareTool, // GPT-5.1-codex-mini comparison (low effort)
586
+ openAIBrainstormTool, // GPT-5.1-codex-mini brainstorming (medium effort)
587
+ openaiCodeReviewTool, // GPT-5.1-codex-mini code review (medium effort)
588
+ openaiExplainTool // GPT-5.1-codex-mini explanations (low effort)
497
589
  ];
498
590
  }
@@ -14,12 +14,8 @@ function getPerplexityApiKey() {
14
14
  function debugApiKey() {
15
15
  const apiKey = getPerplexityApiKey();
16
16
  console.error('[PERPLEXITY DEBUG] API Key present:', !!apiKey);
17
- if (apiKey) {
18
- console.error('[PERPLEXITY DEBUG] Key length:', apiKey.length);
19
- console.error('[PERPLEXITY DEBUG] Key prefix:', apiKey.substring(0, 8) + '...');
20
- }
21
- else {
22
- console.error('[PERPLEXITY DEBUG] process.env keys:', Object.keys(process.env).filter(k => k.includes('PERP') || k.includes('API')));
17
+ if (!apiKey) {
18
+ console.error('[PERPLEXITY DEBUG] Environment variables containing PERP or API found:', Object.keys(process.env).filter(k => k.includes('PERP') || k.includes('API')).length);
23
19
  }
24
20
  }
25
21
  // Available Perplexity models (2025 latest)
@@ -15,13 +15,13 @@ const PROVIDER_CONFIGS = {
15
15
  openai: {
16
16
  base: 'https://api.openai.com/v1',
17
17
  key: process.env.OPENAI_API_KEY,
18
- models: ['gpt-5', 'gpt-5-mini', 'gpt-5-nano']
18
+ models: ['gpt-5.1', 'gpt-5.1-codex-mini', 'gpt-5.1-codex']
19
19
  },
20
- gpt5: {
20
+ gpt51: {
21
21
  base: 'https://api.openai.com/v1', // Uses /responses endpoint internally
22
22
  key: process.env.OPENAI_API_KEY,
23
- models: ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'],
24
- special: true // Needs special handling
23
+ models: ['gpt-5.1', 'gpt-5.1-codex-mini', 'gpt-5.1-codex'],
24
+ special: true // Needs special handling for reasoning_effort
25
25
  },
26
26
  mistral: {
27
27
  base: 'https://api.mistral.ai/v1',
@@ -86,7 +86,7 @@ export async function queryAI(prompt, options) {
86
86
  throw new Error(`Provider ${options.provider} is not configured. Please set the appropriate API key.`);
87
87
  }
88
88
  // Handle GPT-5 special case
89
- if (options.provider === 'gpt5' && 'special' in config && config.special) {
89
+ if (options.provider === 'gpt51' && 'special' in config && config.special) {
90
90
  return await handleGPT5(prompt, options);
91
91
  }
92
92
  // Standard OpenAI-compatible handling
@@ -118,18 +118,16 @@ export async function queryAI(prompt, options) {
118
118
  * Special handling for GPT-5 (uses /responses endpoint)
119
119
  */
120
120
  async function handleGPT5(prompt, options) {
121
- const config = PROVIDER_CONFIGS.gpt5;
121
+ const config = PROVIDER_CONFIGS.gpt51;
122
122
  const endpoint = 'https://api.openai.com/v1/responses';
123
- const model = options.model || 'gpt-5-nano'; // Default to cheapest
123
+ const model = options.model || 'gpt-5.1-codex-mini'; // Default to cheapest
124
124
  const requestBody = {
125
125
  model,
126
126
  input: prompt,
127
127
  reasoning: {
128
- effort: model === 'gpt-5' ? 'high' : 'low'
128
+ effort: model === 'gpt-5.1' ? 'high' : 'low'
129
129
  },
130
- text: {
131
- verbosity: 'medium'
132
- }
130
+ max_output_tokens: 4000
133
131
  };
134
132
  try {
135
133
  const response = await fetch(endpoint, {
@@ -145,7 +143,8 @@ async function handleGPT5(prompt, options) {
145
143
  throw new Error(`GPT-5 API error: ${error}`);
146
144
  }
147
145
  const data = await response.json();
148
- return data.output || data.response || 'No response generated';
146
+ const messageOutput = data.output.find(item => item.type === 'message');
147
+ return messageOutput?.content?.[0]?.text || 'No response generated';
149
148
  }
150
149
  catch (error) {
151
150
  console.error('GPT-5 error:', error);
@@ -161,7 +161,7 @@ export class StepExecutionHandler {
161
161
  if (match) {
162
162
  const [, variable, expectedValue] = match;
163
163
  const actualValue = variables[variable];
164
- return String(actualValue).trim() === expectedValue.trim().replace(/['"]/, '');
164
+ return String(actualValue).trim() === expectedValue.trim().replace(/['"]/g, '');
165
165
  }
166
166
  // Default: check if variable exists and is truthy
167
167
  return !!variables[condition];