tachibot-mcp 2.0.6 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/.env.example +13 -3
  2. package/README.md +88 -44
  3. package/dist/src/config/model-constants.js +121 -91
  4. package/dist/src/config/model-defaults.js +35 -21
  5. package/dist/src/config/model-preferences.js +5 -4
  6. package/dist/src/config.js +2 -1
  7. package/dist/src/mcp-client.js +3 -3
  8. package/dist/src/modes/scout.js +2 -1
  9. package/dist/src/optimization/model-router.js +19 -16
  10. package/dist/src/orchestrator-instructions.js +1 -1
  11. package/dist/src/orchestrator-lite.js +1 -1
  12. package/dist/src/orchestrator.js +1 -1
  13. package/dist/src/profiles/balanced.js +1 -2
  14. package/dist/src/profiles/code_focus.js +1 -2
  15. package/dist/src/profiles/full.js +1 -2
  16. package/dist/src/profiles/minimal.js +1 -2
  17. package/dist/src/profiles/research_power.js +1 -2
  18. package/dist/src/server.js +13 -12
  19. package/dist/src/tools/gemini-tools.js +32 -16
  20. package/dist/src/tools/grok-enhanced.js +18 -17
  21. package/dist/src/tools/grok-tools.js +34 -20
  22. package/dist/src/tools/openai-tools.js +52 -61
  23. package/dist/src/tools/tool-router.js +53 -52
  24. package/dist/src/tools/unified-ai-provider.js +90 -9
  25. package/dist/src/tools/workflow-runner.js +16 -0
  26. package/dist/src/tools/workflow-validator-tool.js +1 -1
  27. package/dist/src/utils/api-keys.js +20 -0
  28. package/dist/src/utils/openrouter-gateway.js +117 -0
  29. package/dist/src/validators/interpolation-validator.js +4 -0
  30. package/dist/src/validators/tool-registry-validator.js +1 -1
  31. package/dist/src/validators/tool-types.js +0 -1
  32. package/dist/src/workflows/custom-workflows.js +4 -3
  33. package/dist/src/workflows/engine/VariableInterpolator.js +30 -3
  34. package/dist/src/workflows/engine/WorkflowExecutionEngine.js +2 -2
  35. package/dist/src/workflows/engine/WorkflowOutputFormatter.js +27 -4
  36. package/dist/src/workflows/fallback-strategies.js +2 -2
  37. package/dist/src/workflows/model-router.js +20 -11
  38. package/dist/src/workflows/tool-mapper.js +51 -24
  39. package/docs/API_KEYS.md +52 -18
  40. package/docs/CONFIGURATION.md +25 -8
  41. package/docs/TOOLS_REFERENCE.md +12 -48
  42. package/docs/TOOL_PARAMETERS.md +19 -16
  43. package/docs/WORKFLOWS.md +7 -7
  44. package/package.json +1 -1
  45. package/profiles/balanced.json +1 -2
  46. package/profiles/code_focus.json +1 -2
  47. package/profiles/debug_intensive.json +0 -1
  48. package/profiles/full.json +2 -3
  49. package/profiles/minimal.json +1 -2
  50. package/profiles/research_power.json +1 -2
  51. package/profiles/workflow_builder.json +1 -2
  52. package/tools.config.json +15 -3
  53. package/workflows/code-architecture-review.yaml +5 -3
  54. package/workflows/creative-brainstorm-yaml.yaml +1 -1
  55. package/workflows/pingpong.yaml +5 -3
  56. package/workflows/system/README.md +1 -1
  57. package/workflows/system/verifier.yaml +8 -5
  58. package/workflows/ultra-creative-brainstorm.yaml +3 -3
@@ -7,6 +7,7 @@ import { config } from "dotenv";
7
7
  import * as path from 'path';
8
8
  import { fileURLToPath } from 'url';
9
9
  import { validateToolInput } from "../utils/input-validator.js";
10
+ import { tryOpenRouterGateway, isGatewayEnabled } from "../utils/openrouter-gateway.js";
10
11
  const __filename = fileURLToPath(import.meta.url);
11
12
  const __dirname = path.dirname(__filename);
12
13
  config({ path: path.resolve(__dirname, '../../../.env') });
@@ -70,12 +71,14 @@ const ResponsesAPISchema = z.object({
70
71
  total_tokens: z.number().optional()
71
72
  }).optional()
72
73
  });
73
- // Available OpenAI models (GPT-5.1 family)
74
+ // Available OpenAI GPT-5 models (optimized for Claude Code)
74
75
  export var OpenAI51Model;
75
76
  (function (OpenAI51Model) {
76
77
  OpenAI51Model["FULL"] = "gpt-5.1";
78
+ OpenAI51Model["PRO"] = "gpt-5-pro";
77
79
  OpenAI51Model["CODEX_MINI"] = "gpt-5.1-codex-mini";
78
80
  OpenAI51Model["CODEX"] = "gpt-5.1-codex";
81
+ OpenAI51Model["CODEX_MAX"] = "gpt-5.1-codex-max";
79
82
  })(OpenAI51Model || (OpenAI51Model = {}));
80
83
  /**
81
84
  * Call OpenAI API with model fallback support
@@ -84,6 +87,18 @@ export var OpenAI51Model;
84
87
  export async function callOpenAI(messages, model = OpenAI51Model.CODEX_MINI, temperature = 0.7, maxTokens = 16384, // Increased default for comprehensive responses
85
88
  reasoningEffort = "low", requireConfirmation = false, skipValidation = false) {
86
89
  console.error(`🔍 TRACE: callOpenAI called with model: ${model}`);
90
+ // Try OpenRouter gateway first if enabled
91
+ if (isGatewayEnabled()) {
92
+ const gatewayResult = await tryOpenRouterGateway(model, messages, {
93
+ temperature,
94
+ max_tokens: maxTokens
95
+ });
96
+ if (gatewayResult) {
97
+ return gatewayResult;
98
+ }
99
+ // Gateway failed or returned null, fall through to direct API
100
+ console.error(`🔍 TRACE: Gateway returned null, falling back to direct OpenAI API`);
101
+ }
87
102
  if (!OPENAI_API_KEY) {
88
103
  console.error(`🔍 TRACE: No API key found`);
89
104
  return `[OpenAI API key not configured. Add OPENAI_API_KEY to .env file]`;
@@ -111,34 +126,33 @@ reasoningEffort = "low", requireConfirmation = false, skipValidation = false) {
111
126
  for (const currentModel of modelsToTry) {
112
127
  console.error(`🔍 TRACE: Trying model: ${currentModel}`);
113
128
  try {
114
- // GPT-5.1 models use /v1/responses, others use /v1/chat/completions
115
- const isGPT51 = currentModel.startsWith('gpt-5.1');
116
- const endpoint = isGPT51 ? OPENAI_RESPONSES_URL : OPENAI_CHAT_URL;
129
+ // Codex models use /v1/responses, non-codex use /v1/chat/completions
130
+ const isCodex = currentModel.includes('codex');
131
+ const endpoint = isCodex ? OPENAI_RESPONSES_URL : OPENAI_CHAT_URL;
117
132
  let requestBody;
118
- // GPT-5.1 uses Responses API format, others use Chat Completions format
119
- if (isGPT51) {
120
- // Responses API format - NO temperature, use reasoning.effort instead
133
+ if (isCodex) {
134
+ // Responses API format for codex models
121
135
  requestBody = {
122
136
  model: currentModel,
123
137
  input: validatedMessages,
124
138
  max_output_tokens: maxTokens,
125
139
  stream: false,
126
140
  reasoning: {
127
- effort: reasoningEffort // "none", "low", "medium", "high"
141
+ effort: reasoningEffort
128
142
  }
129
143
  };
130
144
  }
131
145
  else {
132
- // Chat Completions format
146
+ // Chat Completions format for non-codex GPT-5 models (gpt-5.1, gpt-5-pro)
133
147
  requestBody = {
134
148
  model: currentModel,
135
149
  messages: validatedMessages,
136
150
  temperature,
137
- max_tokens: maxTokens,
151
+ max_completion_tokens: maxTokens, // GPT-5 requires max_completion_tokens (not max_tokens)
138
152
  stream: false
139
153
  };
140
154
  }
141
- console.error(`🔍 TRACE: Using ${isGPT51 ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
155
+ console.error(`🔍 TRACE: Using ${isCodex ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
142
156
  const response = await fetch(endpoint, {
143
157
  method: "POST",
144
158
  headers: {
@@ -159,16 +173,15 @@ reasoningEffort = "low", requireConfirmation = false, skipValidation = false) {
159
173
  throw new Error(lastError);
160
174
  }
161
175
  const rawData = await response.json();
162
- // Parse based on API type - they have DIFFERENT response formats!
176
+ // Parse based on API type
163
177
  let rawContent;
164
- if (isGPT51) {
165
- // Validate and parse Responses API format
178
+ if (isCodex) {
179
+ // Responses API format
166
180
  const parseResult = ResponsesAPISchema.safeParse(rawData);
167
181
  if (parseResult.success) {
168
182
  const data = parseResult.data;
169
183
  const messageOutput = data.output.find(item => item.type === 'message');
170
184
  rawContent = messageOutput?.content?.[0]?.text;
171
- // Capture reasoning info
172
185
  if (data.reasoning) {
173
186
  console.error(`🔍 TRACE: Reasoning effort: ${data.reasoning.effort}`);
174
187
  }
@@ -178,7 +191,7 @@ reasoningEffort = "low", requireConfirmation = false, skipValidation = false) {
178
191
  }
179
192
  }
180
193
  else {
181
- // Validate and parse Chat Completions API format
194
+ // Chat Completions format
182
195
  const parseResult = ChatCompletionResponseSchema.safeParse(rawData);
183
196
  if (parseResult.success) {
184
197
  const chatData = parseResult.data;
@@ -209,6 +222,17 @@ reasoningEffort = "low", requireConfirmation = false, skipValidation = false) {
209
222
  async function callOpenAIWithCustomParams(messages, model, temperature = 0.7, maxTokens = 16384, // Increased for detailed brainstorming
210
223
  reasoningEffort = "low", skipValidation = false) {
211
224
  console.error(`🔍 TRACE: callOpenAIWithCustomParams called with model: ${model}, reasoning_effort: ${reasoningEffort}`);
225
+ // Try OpenRouter gateway first if enabled
226
+ if (isGatewayEnabled()) {
227
+ const gatewayResult = await tryOpenRouterGateway(model, messages, {
228
+ temperature,
229
+ max_tokens: maxTokens
230
+ });
231
+ if (gatewayResult) {
232
+ return gatewayResult;
233
+ }
234
+ console.error(`🔍 TRACE: Gateway returned null, falling back to direct OpenAI API`);
235
+ }
212
236
  if (!OPENAI_API_KEY) {
213
237
  console.error(`🔍 TRACE: No API key found`);
214
238
  return `[OpenAI API key not configured. Add OPENAI_API_KEY to .env file]`;
@@ -225,17 +249,16 @@ reasoningEffort = "low", skipValidation = false) {
225
249
  return { ...msg, content: validation.sanitized };
226
250
  });
227
251
  try {
228
- // GPT-5.1 models use /v1/responses, others use /v1/chat/completions
229
- const isGPT51 = model.startsWith('gpt-5.1');
230
- const endpoint = isGPT51 ? OPENAI_RESPONSES_URL : OPENAI_CHAT_URL;
252
+ // Codex models use /v1/responses, non-codex use /v1/chat/completions
253
+ const isCodex = model.includes('codex');
254
+ const endpoint = isCodex ? OPENAI_RESPONSES_URL : OPENAI_CHAT_URL;
231
255
  let requestBody;
232
- // GPT-5.1 uses Responses API format, others use Chat Completions format
233
- if (isGPT51) {
234
- // Responses API format - NO temperature, use reasoning.effort instead
256
+ if (isCodex) {
257
+ // Responses API format for codex models
235
258
  requestBody = {
236
259
  model: model,
237
260
  input: validatedMessages,
238
- max_output_tokens: maxTokens,
261
+ max_output_tokens: maxTokens, // NOT max_completion_tokens or max_tokens!
239
262
  stream: false,
240
263
  reasoning: {
241
264
  effort: reasoningEffort // "none", "low", "medium", "high"
@@ -243,17 +266,17 @@ reasoningEffort = "low", skipValidation = false) {
243
266
  };
244
267
  }
245
268
  else {
246
- // Chat Completions format
269
+ // Chat Completions format for non-codex GPT-5 models (gpt-5.1, gpt-5-pro)
247
270
  requestBody = {
248
271
  model: model,
249
272
  messages: validatedMessages,
250
273
  temperature,
251
- max_tokens: maxTokens,
274
+ max_completion_tokens: maxTokens, // GPT-5 requires max_completion_tokens (not max_tokens)
252
275
  stream: false
253
276
  };
254
277
  }
255
- console.error(`🔍 TRACE: Using ${isGPT51 ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
256
- console.error(`🔍 TRACE: Model params: max_tokens=${maxTokens}, temperature=${temperature}${isGPT51 ? `, reasoning_effort=${reasoningEffort}` : ''}`);
278
+ console.error(`🔍 TRACE: Using ${isCodex ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
279
+ console.error(`🔍 TRACE: Model params: ${isCodex ? `max_output_tokens=${maxTokens}, reasoning_effort=${reasoningEffort}` : `max_completion_tokens=${maxTokens}, temperature=${temperature}`}`);
257
280
  const response = await fetch(endpoint, {
258
281
  method: "POST",
259
282
  headers: {
@@ -270,7 +293,7 @@ reasoningEffort = "low", skipValidation = false) {
270
293
  const rawData = await response.json();
271
294
  // Parse based on API type - they have DIFFERENT response formats!
272
295
  let rawContent;
273
- if (isGPT51) {
296
+ if (isCodex) {
274
297
  // Validate and parse Responses API format
275
298
  const parseResult = ResponsesAPISchema.safeParse(rawData);
276
299
  if (parseResult.success) {
@@ -378,7 +401,7 @@ export const gpt5MiniReasonTool = {
378
401
  }
379
402
  };
380
403
  export const openaiGpt5ReasonTool = {
381
- name: "openai_gpt5_reason",
404
+ name: "openai_reason",
382
405
  description: "Mathematical reasoning using GPT-5.1 with high reasoning effort",
383
406
  parameters: z.object({
384
407
  query: z.string(),
@@ -409,37 +432,6 @@ ${args.context ? `Context: ${args.context}` : ''}`
409
432
  return await callOpenAI(messages, OpenAI51Model.FULL, 0.7, 4000, "high");
410
433
  }
411
434
  };
412
- /**
413
- * OpenAI Compare Tool
414
- * Multi-option comparison and consensus building using GPT-5.1-codex-mini
415
- */
416
- export const openaiCompareTool = {
417
- name: "openai_compare",
418
- description: "Multi-model consensus",
419
- parameters: z.object({
420
- topic: z.string(),
421
- options: z.array(z.string()),
422
- criteria: z.string().optional(),
423
- includeRecommendation: z.boolean().optional().default(true)
424
- }),
425
- execute: async (args, { log }) => {
426
- const optionsList = args.options.map((opt, i) => `${i + 1}. ${opt}`).join('\n');
427
- const messages = [
428
- {
429
- role: "system",
430
- content: `You are an expert at comparative analysis and decision-making.
431
- Compare the given options systematically.
432
- ${args.criteria ? `Criteria: ${args.criteria}` : 'Consider: pros, cons, trade-offs, and suitability'}
433
- ${args.includeRecommendation ? 'Provide a clear recommendation with justification.' : ''}`
434
- },
435
- {
436
- role: "user",
437
- content: `Topic: ${args.topic}\n\nOptions:\n${optionsList}`
438
- }
439
- ];
440
- return await callOpenAI(messages, OpenAI51Model.CODEX_MINI, 0.7, 3000, "low");
441
- }
442
- };
443
435
  /**
444
436
  * OpenAI Brainstorm Tool
445
437
  * Creative ideation and brainstorming
@@ -582,7 +574,6 @@ export function getAllOpenAITools() {
582
574
  }
583
575
  return [
584
576
  openaiGpt5ReasonTool, // GPT-5.1 reasoning (high effort)
585
- openaiCompareTool, // GPT-5.1-codex-mini comparison (low effort)
586
577
  openAIBrainstormTool, // GPT-5.1-codex-mini brainstorming (medium effort)
587
578
  openaiCodeReviewTool, // GPT-5.1-codex-mini code review (medium effort)
588
579
  openaiExplainTool // GPT-5.1-codex-mini explanations (low effort)
@@ -2,6 +2,7 @@
2
2
  * Smart Tool Router for Focus MCP Server
3
3
  * Manages tool selection based on availability, preferences, and capabilities
4
4
  */
5
+ import { hasGrokApiKey, hasOpenAIApiKey, hasPerplexityApiKey, hasGeminiApiKey, hasOpenRouterApiKey } from "../utils/api-keys.js";
5
6
  // Tool capability categories
6
7
  export var ToolCategory;
7
8
  (function (ToolCategory) {
@@ -46,64 +47,54 @@ export class ToolRouter {
46
47
  return {
47
48
  [ToolCategory.REASONING]: [
48
49
  {
49
- name: "gpt5_reason",
50
+ name: "openai_reason",
50
51
  provider: ToolProvider.OPENAI,
51
52
  categories: [ToolCategory.REASONING],
52
53
  priority: 1,
53
54
  costTier: "high",
54
55
  speedTier: "medium",
55
56
  qualityTier: "excellent",
56
- checkAvailability: () => !!process.env.OPENAI_API_KEY && process.env.ENABLE_GPT5 === 'true'
57
+ checkAvailability: () => hasOpenAIApiKey()
57
58
  },
58
59
  {
59
- name: "gpt5_mini_reason",
60
- provider: ToolProvider.OPENAI,
60
+ name: "grok_reason",
61
+ provider: ToolProvider.GROK,
61
62
  categories: [ToolCategory.REASONING],
62
63
  priority: 2,
63
64
  costTier: "medium",
64
65
  speedTier: "fast",
65
66
  qualityTier: "excellent",
66
- checkAvailability: () => !!process.env.OPENAI_API_KEY
67
+ checkAvailability: () => hasGrokApiKey()
67
68
  },
68
69
  {
69
- name: "qwq_reason",
70
- provider: ToolProvider.OPENROUTER,
71
- categories: [ToolCategory.REASONING],
70
+ name: "perplexity_reason",
71
+ provider: ToolProvider.PERPLEXITY,
72
+ categories: [ToolCategory.REASONING, ToolCategory.SEARCH],
72
73
  priority: 3,
73
74
  costTier: "medium",
74
- speedTier: "medium",
75
- qualityTier: "excellent",
76
- checkAvailability: () => !!process.env.OPENROUTER_API_KEY
75
+ speedTier: "fast",
76
+ qualityTier: "good",
77
+ checkAvailability: () => hasPerplexityApiKey()
77
78
  },
78
79
  {
79
- name: "grok_reason",
80
- provider: ToolProvider.GROK,
80
+ name: "kimi_thinking",
81
+ provider: ToolProvider.OPENROUTER,
81
82
  categories: [ToolCategory.REASONING],
82
83
  priority: 4,
83
84
  costTier: "medium",
84
- speedTier: "fast",
85
+ speedTier: "medium",
85
86
  qualityTier: "excellent",
86
- checkAvailability: () => !!process.env.GROK_API_KEY
87
- },
88
- {
89
- name: "perplexity_reason",
90
- provider: ToolProvider.PERPLEXITY,
91
- categories: [ToolCategory.REASONING, ToolCategory.SEARCH],
92
- priority: 5,
93
- costTier: "medium",
94
- speedTier: "fast",
95
- qualityTier: "good",
96
- checkAvailability: () => !!process.env.PERPLEXITY_API_KEY
87
+ checkAvailability: () => hasOpenRouterApiKey()
97
88
  },
98
89
  {
99
- name: "gemini_query",
90
+ name: "gemini_brainstorm",
100
91
  provider: ToolProvider.GEMINI,
101
92
  categories: [ToolCategory.REASONING],
102
- priority: 6,
93
+ priority: 5,
103
94
  costTier: "low",
104
95
  speedTier: "fast",
105
96
  qualityTier: "good",
106
- checkAvailability: () => !!process.env.GOOGLE_API_KEY
97
+ checkAvailability: () => hasGeminiApiKey()
107
98
  }
108
99
  ],
109
100
  [ToolCategory.CODE]: [
@@ -115,7 +106,7 @@ export class ToolRouter {
115
106
  costTier: "medium",
116
107
  speedTier: "medium",
117
108
  qualityTier: "excellent",
118
- checkAvailability: () => !!process.env.OPENROUTER_API_KEY
109
+ checkAvailability: () => hasOpenRouterApiKey()
119
110
  },
120
111
  {
121
112
  name: "grok_code",
@@ -125,17 +116,17 @@ export class ToolRouter {
125
116
  costTier: "medium",
126
117
  speedTier: "fast",
127
118
  qualityTier: "excellent",
128
- checkAvailability: () => !!process.env.GROK_API_KEY
119
+ checkAvailability: () => hasGrokApiKey()
129
120
  },
130
121
  {
131
- name: "gpt5_code",
122
+ name: "openai_code_review",
132
123
  provider: ToolProvider.OPENAI,
133
124
  categories: [ToolCategory.CODE, ToolCategory.ANALYSIS],
134
125
  priority: 3,
135
126
  costTier: "medium",
136
127
  speedTier: "fast",
137
128
  qualityTier: "excellent",
138
- checkAvailability: () => !!process.env.OPENAI_API_KEY
129
+ checkAvailability: () => hasOpenAIApiKey()
139
130
  },
140
131
  {
141
132
  name: "gemini_analyze_code",
@@ -145,7 +136,7 @@ export class ToolRouter {
145
136
  costTier: "low",
146
137
  speedTier: "fast",
147
138
  qualityTier: "good",
148
- checkAvailability: () => !!process.env.GOOGLE_API_KEY
139
+ checkAvailability: () => hasGeminiApiKey()
149
140
  }
150
141
  ],
151
142
  [ToolCategory.BRAINSTORM]: [
@@ -157,7 +148,7 @@ export class ToolRouter {
157
148
  costTier: "medium",
158
149
  speedTier: "medium",
159
150
  qualityTier: "excellent",
160
- checkAvailability: () => !!process.env.GROK_API_KEY
151
+ checkAvailability: () => hasGrokApiKey()
161
152
  },
162
153
  {
163
154
  name: "openai_brainstorm",
@@ -167,7 +158,7 @@ export class ToolRouter {
167
158
  costTier: "high",
168
159
  speedTier: "medium",
169
160
  qualityTier: "excellent",
170
- checkAvailability: () => !!process.env.OPENAI_API_KEY
161
+ checkAvailability: () => hasOpenAIApiKey()
171
162
  },
172
163
  {
173
164
  name: "gemini_brainstorm",
@@ -177,7 +168,7 @@ export class ToolRouter {
177
168
  costTier: "low",
178
169
  speedTier: "fast",
179
170
  qualityTier: "good",
180
- checkAvailability: () => !!process.env.GOOGLE_API_KEY
171
+ checkAvailability: () => hasGeminiApiKey()
181
172
  }
182
173
  ],
183
174
  [ToolCategory.SEARCH]: [
@@ -189,7 +180,7 @@ export class ToolRouter {
189
180
  costTier: "medium",
190
181
  speedTier: "fast",
191
182
  qualityTier: "excellent",
192
- checkAvailability: () => !!process.env.PERPLEXITY_API_KEY
183
+ checkAvailability: () => hasPerplexityApiKey()
193
184
  },
194
185
  {
195
186
  name: "perplexity_research",
@@ -199,39 +190,49 @@ export class ToolRouter {
199
190
  costTier: "medium",
200
191
  speedTier: "medium",
201
192
  qualityTier: "excellent",
202
- checkAvailability: () => !!process.env.PERPLEXITY_API_KEY
193
+ checkAvailability: () => hasPerplexityApiKey()
194
+ },
195
+ {
196
+ name: "grok_search",
197
+ provider: ToolProvider.GROK,
198
+ categories: [ToolCategory.SEARCH],
199
+ priority: 3,
200
+ costTier: "medium",
201
+ speedTier: "fast",
202
+ qualityTier: "good",
203
+ checkAvailability: () => hasGrokApiKey()
203
204
  }
204
205
  ],
205
206
  [ToolCategory.ANALYSIS]: [
206
207
  {
207
- name: "gpt5_mini_analyze",
208
+ name: "openai_code_review",
208
209
  provider: ToolProvider.OPENAI,
209
210
  categories: [ToolCategory.ANALYSIS],
210
211
  priority: 1,
211
212
  costTier: "medium",
212
213
  speedTier: "fast",
213
214
  qualityTier: "excellent",
214
- checkAvailability: () => !!process.env.OPENAI_API_KEY
215
+ checkAvailability: () => hasOpenAIApiKey()
215
216
  },
216
217
  {
217
- name: "openai_compare",
218
- provider: ToolProvider.OPENAI,
218
+ name: "gemini_analyze_text",
219
+ provider: ToolProvider.GEMINI,
219
220
  categories: [ToolCategory.ANALYSIS],
220
221
  priority: 2,
221
- costTier: "medium",
222
- speedTier: "medium",
223
- qualityTier: "excellent",
224
- checkAvailability: () => !!process.env.OPENAI_API_KEY
222
+ costTier: "low",
223
+ speedTier: "fast",
224
+ qualityTier: "good",
225
+ checkAvailability: () => hasGeminiApiKey()
225
226
  },
226
227
  {
227
- name: "gemini_analyze_text",
228
+ name: "gemini_analyze_code",
228
229
  provider: ToolProvider.GEMINI,
229
- categories: [ToolCategory.ANALYSIS],
230
+ categories: [ToolCategory.ANALYSIS, ToolCategory.CODE],
230
231
  priority: 3,
231
232
  costTier: "low",
232
233
  speedTier: "fast",
233
234
  qualityTier: "good",
234
- checkAvailability: () => !!process.env.GOOGLE_API_KEY
235
+ checkAvailability: () => hasGeminiApiKey()
235
236
  }
236
237
  ],
237
238
  [ToolCategory.DEBUG]: [
@@ -243,17 +244,17 @@ export class ToolRouter {
243
244
  costTier: "medium",
244
245
  speedTier: "fast",
245
246
  qualityTier: "excellent",
246
- checkAvailability: () => !!process.env.GROK_API_KEY
247
+ checkAvailability: () => hasGrokApiKey()
247
248
  },
248
249
  {
249
- name: "gpt5_code",
250
+ name: "openai_code_review",
250
251
  provider: ToolProvider.OPENAI,
251
252
  categories: [ToolCategory.DEBUG, ToolCategory.CODE],
252
253
  priority: 2,
253
254
  costTier: "medium",
254
255
  speedTier: "fast",
255
256
  qualityTier: "excellent",
256
- checkAvailability: () => !!process.env.OPENAI_API_KEY
257
+ checkAvailability: () => hasOpenAIApiKey()
257
258
  }
258
259
  ]
259
260
  };
@@ -10,6 +10,50 @@ import { fileURLToPath } from 'url';
10
10
  const __filename = fileURLToPath(import.meta.url);
11
11
  const __dirname = path.dirname(__filename);
12
12
  config({ path: path.resolve(__dirname, '../../../.env') });
13
+ // =============================================================================
14
+ // OPENROUTER GATEWAY MODE
15
+ // =============================================================================
16
+ // When enabled, routes OpenAI/Gemini/Grok through OpenRouter with single API key
17
+ // Kimi/Qwen always use OpenRouter (native), Perplexity always uses direct API
18
+ const USE_OPENROUTER_GATEWAY = process.env.USE_OPENROUTER_GATEWAY === 'true';
19
+ /**
20
+ * Get OpenRouter model name - adds provider prefix
21
+ * Returns null if model should skip gateway (e.g., Perplexity)
22
+ */
23
+ function getOpenRouterModel(model) {
24
+ // Perplexity - NEVER use gateway
25
+ if (model.startsWith('sonar'))
26
+ return null;
27
+ // Already has prefix
28
+ if (model.includes('/'))
29
+ return model;
30
+ // Add provider prefix
31
+ if (model.startsWith('gpt-'))
32
+ return `openai/${model}`;
33
+ if (model.startsWith('gemini-'))
34
+ return `google/${model}`;
35
+ if (model.startsWith('grok-'))
36
+ return `x-ai/${model}`;
37
+ return model;
38
+ }
39
+ /**
40
+ * Check if a model should use OpenRouter gateway
41
+ * - Kimi/Qwen: Always (native OpenRouter)
42
+ * - Perplexity: Never (not on OpenRouter)
43
+ * - Others: Only if USE_OPENROUTER_GATEWAY=true
44
+ */
45
+ function shouldUseOpenRouterGateway(model) {
46
+ // Already OpenRouter format (qwen/, moonshotai/)
47
+ if (model.startsWith('qwen/') || model.startsWith('moonshotai/')) {
48
+ return true;
49
+ }
50
+ // Perplexity models never go through OpenRouter
51
+ if (model.startsWith('sonar')) {
52
+ return false;
53
+ }
54
+ // Everything else: check gateway flag
55
+ return USE_OPENROUTER_GATEWAY;
56
+ }
13
57
  // Provider configurations with their base URLs
14
58
  const PROVIDER_CONFIGS = {
15
59
  openai: {
@@ -31,7 +75,7 @@ const PROVIDER_CONFIGS = {
31
75
  gemini: {
32
76
  base: 'https://generativelanguage.googleapis.com/v1beta/',
33
77
  key: process.env.GOOGLE_API_KEY,
34
- models: ['gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.5-flash-lite']
78
+ models: ['gemini-3-pro-preview', 'gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.5-flash-lite']
35
79
  },
36
80
  openrouter: {
37
81
  base: 'https://openrouter.ai/api/v1',
@@ -78,23 +122,60 @@ export function getAvailableProviders() {
78
122
  * Unified AI query function
79
123
  */
80
124
  export async function queryAI(prompt, options) {
81
- const config = PROVIDER_CONFIGS[options.provider];
82
- if (!config) {
125
+ const providerConfig = PROVIDER_CONFIGS[options.provider];
126
+ if (!providerConfig) {
83
127
  throw new Error(`Unknown provider: ${options.provider}`);
84
128
  }
85
129
  if (!isProviderAvailable(options.provider)) {
86
130
  throw new Error(`Provider ${options.provider} is not configured. Please set the appropriate API key.`);
87
131
  }
88
- // Handle GPT-5 special case
89
- if (options.provider === 'gpt51' && 'special' in config && config.special) {
132
+ let model = options.model || providerConfig.models[0];
133
+ // Check if we should route through OpenRouter gateway
134
+ if (shouldUseOpenRouterGateway(model) && process.env.OPENROUTER_API_KEY) {
135
+ const openRouterModel = getOpenRouterModel(model);
136
+ // Skip gateway if model returns null (e.g., Perplexity)
137
+ if (!openRouterModel) {
138
+ // Fall through to direct API handling below
139
+ }
140
+ else {
141
+ console.error(`🔀 [OpenRouter Gateway] Routing ${model} → ${openRouterModel}`);
142
+ const client = new OpenAI({
143
+ apiKey: process.env.OPENROUTER_API_KEY,
144
+ baseURL: 'https://openrouter.ai/api/v1',
145
+ defaultHeaders: {
146
+ 'HTTP-Referer': 'https://tachibot-mcp.local',
147
+ 'X-Title': 'TachiBot MCP Server'
148
+ }
149
+ });
150
+ try {
151
+ const response = await client.chat.completions.create({
152
+ model: openRouterModel,
153
+ messages: [
154
+ ...(options.systemPrompt ? [{ role: 'system', content: options.systemPrompt }] : []),
155
+ { role: 'user', content: prompt }
156
+ ],
157
+ temperature: options.temperature ?? 0.7,
158
+ max_tokens: options.maxTokens ?? 2000,
159
+ stream: false
160
+ });
161
+ return response.choices[0]?.message?.content || 'No response generated';
162
+ }
163
+ catch (error) {
164
+ const errorMessage = error instanceof Error ? error.message : String(error);
165
+ console.error(`Error with OpenRouter gateway:`, errorMessage);
166
+ throw new Error(`OpenRouter gateway error: ${errorMessage}`);
167
+ }
168
+ } // Close else block for openRouterModel check
169
+ }
170
+ // Handle GPT-5 special case (direct API only)
171
+ if (options.provider === 'gpt51' && 'special' in providerConfig && providerConfig.special) {
90
172
  return await handleGPT5(prompt, options);
91
173
  }
92
- // Standard OpenAI-compatible handling
174
+ // Standard OpenAI-compatible handling (direct API)
93
175
  const client = new OpenAI({
94
- apiKey: config.key,
95
- baseURL: config.base
176
+ apiKey: providerConfig.key,
177
+ baseURL: providerConfig.base
96
178
  });
97
- const model = options.model || config.models[0];
98
179
  try {
99
180
  const response = await client.chat.completions.create({
100
181
  model,
@@ -73,6 +73,22 @@ export function registerWorkflowTools(server) {
73
73
  output += `## Step ${i + 1}: ${step.step}\n\n`;
74
74
  // Format the step output - keep it clean and readable
75
75
  let stepOutput = step.output;
76
+ // DEFENSIVE: Ensure stepOutput is a string (fix [object Object] issue)
77
+ if (stepOutput !== null && typeof stepOutput === 'object') {
78
+ // Handle FileReference objects - extract summary or stringify
79
+ if ('summary' in stepOutput && typeof stepOutput.summary === 'string') {
80
+ stepOutput = stepOutput.summary;
81
+ }
82
+ else if ('content' in stepOutput && typeof stepOutput.content === 'string') {
83
+ stepOutput = stepOutput.content;
84
+ }
85
+ else {
86
+ stepOutput = JSON.stringify(stepOutput, null, 2);
87
+ }
88
+ }
89
+ else if (stepOutput === undefined || stepOutput === null) {
90
+ stepOutput = '[No output]';
91
+ }
76
92
  // Truncate based on settings
77
93
  if (truncate && typeof stepOutput === 'string' && stepOutput.length > maxChars) {
78
94
  const approxTokens = Math.floor(stepOutput.length / 4);
@@ -16,7 +16,7 @@ function getAllKnownTools() {
16
16
  // Grok
17
17
  'grok_reason', 'grok_code', 'grok_debug', 'grok_architect', 'grok_brainstorm', 'grok_search',
18
18
  // OpenAI
19
- 'openai_compare', 'openai_brainstorm', 'openai_gpt5_reason', 'openai_code_review', 'openai_explain',
19
+ 'openai_brainstorm', 'openai_reason', 'openai_code_review', 'openai_explain',
20
20
  // Gemini
21
21
  'gemini_brainstorm', 'gemini_analyze_code', 'gemini_analyze_text',
22
22
  // Qwen