tachibot-mcp 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +260 -0
- package/CHANGELOG.md +54 -0
- package/CODE_OF_CONDUCT.md +56 -0
- package/CONTRIBUTING.md +54 -0
- package/Dockerfile +36 -0
- package/LICENSE +644 -0
- package/README.md +201 -0
- package/SECURITY.md +95 -0
- package/dist/personality/komaai-expressions.js +12 -0
- package/dist/profiles/balanced.json +33 -0
- package/dist/profiles/code_focus.json +33 -0
- package/dist/profiles/full.json +33 -0
- package/dist/profiles/minimal.json +33 -0
- package/dist/profiles/research_power.json +33 -0
- package/dist/scripts/build-profiles.js +46 -0
- package/dist/src/application/services/focus/FocusModeRegistry.js +46 -0
- package/dist/src/application/services/focus/FocusTool.service.js +109 -0
- package/dist/src/application/services/focus/ModeRegistry.js +46 -0
- package/dist/src/application/services/focus/modes/focus-deep.mode.js +27 -0
- package/dist/src/application/services/focus/modes/status.mode.js +50 -0
- package/dist/src/application/services/focus/modes/tachibot-status.mode.js +50 -0
- package/dist/src/collaborative-orchestrator.js +391 -0
- package/dist/src/config/model-constants.js +188 -0
- package/dist/src/config/model-defaults.js +57 -0
- package/dist/src/config/model-preferences.js +382 -0
- package/dist/src/config/timeout-config.js +130 -0
- package/dist/src/config.js +173 -0
- package/dist/src/domain/interfaces/IFocusMode.js +5 -0
- package/dist/src/domain/interfaces/IProvider.js +6 -0
- package/dist/src/domain/interfaces/ITool.js +5 -0
- package/dist/src/focus-deep.js +245 -0
- package/dist/src/infrastructure/ascii/art/robots.ascii.js +16 -0
- package/dist/src/mcp-client.js +90 -0
- package/dist/src/memory/index.js +17 -0
- package/dist/src/memory/memory-config.js +135 -0
- package/dist/src/memory/memory-interface.js +174 -0
- package/dist/src/memory/memory-manager.js +383 -0
- package/dist/src/memory/providers/devlog-provider.js +385 -0
- package/dist/src/memory/providers/hybrid-provider.js +399 -0
- package/dist/src/memory/providers/local-provider.js +388 -0
- package/dist/src/memory/providers/mem0-provider.js +337 -0
- package/dist/src/modes/architect.js +477 -0
- package/dist/src/modes/auditor.js +362 -0
- package/dist/src/modes/challenger.js +841 -0
- package/dist/src/modes/code-reviewer.js +382 -0
- package/dist/src/modes/commit-guardian.js +424 -0
- package/dist/src/modes/documentation-writer.js +572 -0
- package/dist/src/modes/scout.js +587 -0
- package/dist/src/modes/shared/helpers/challenger-helpers.js +454 -0
- package/dist/src/modes/shared/helpers/index.js +17 -0
- package/dist/src/modes/shared/helpers/scout-helpers.js +270 -0
- package/dist/src/modes/shared/helpers/verifier-helpers.js +332 -0
- package/dist/src/modes/test-architect.js +767 -0
- package/dist/src/modes/verifier.js +378 -0
- package/dist/src/monitoring/performance-monitor.js +435 -0
- package/dist/src/optimization/batch-executor.js +121 -0
- package/dist/src/optimization/context-pruner.js +196 -0
- package/dist/src/optimization/cost-monitor.js +338 -0
- package/dist/src/optimization/index.js +65 -0
- package/dist/src/optimization/model-router.js +264 -0
- package/dist/src/optimization/result-cache.js +114 -0
- package/dist/src/optimization/token-optimizer.js +257 -0
- package/dist/src/optimization/token-tracker.js +118 -0
- package/dist/src/orchestrator-instructions.js +128 -0
- package/dist/src/orchestrator-lite.js +139 -0
- package/dist/src/orchestrator.js +191 -0
- package/dist/src/orchestrators/collaborative/interfaces/IToolExecutionEngine.js +1 -0
- package/dist/src/orchestrators/collaborative/interfaces/IToolExecutionStrategy.js +5 -0
- package/dist/src/orchestrators/collaborative/interfaces/IVisualizationRenderer.js +1 -0
- package/dist/src/orchestrators/collaborative/registries/ModelProviderRegistry.js +95 -0
- package/dist/src/orchestrators/collaborative/registries/ToolAdapterRegistry.js +64 -0
- package/dist/src/orchestrators/collaborative/services/tool-execution/ToolExecutionService.js +502 -0
- package/dist/src/orchestrators/collaborative/services/visualization/VisualizationService.js +206 -0
- package/dist/src/orchestrators/collaborative/types/session-types.js +5 -0
- package/dist/src/profiles/balanced.js +37 -0
- package/dist/src/profiles/code_focus.js +37 -0
- package/dist/src/profiles/debug_intensive.js +59 -0
- package/dist/src/profiles/full.js +37 -0
- package/dist/src/profiles/minimal.js +37 -0
- package/dist/src/profiles/research_code.js +59 -0
- package/dist/src/profiles/research_power.js +37 -0
- package/dist/src/profiles/types.js +5 -0
- package/dist/src/profiles/workflow_builder.js +53 -0
- package/dist/src/prompt-engineer-lite.js +78 -0
- package/dist/src/prompt-engineer.js +399 -0
- package/dist/src/reasoning-chain.js +508 -0
- package/dist/src/sequential-thinking.js +291 -0
- package/dist/src/server-diagnostic.js +74 -0
- package/dist/src/server-raw.js +158 -0
- package/dist/src/server-simple.js +58 -0
- package/dist/src/server.js +514 -0
- package/dist/src/session/session-logger.js +617 -0
- package/dist/src/session/session-manager.js +571 -0
- package/dist/src/session/session-tools.js +400 -0
- package/dist/src/tools/advanced-modes.js +200 -0
- package/dist/src/tools/claude-integration.js +356 -0
- package/dist/src/tools/consolidated/ai-router.js +174 -0
- package/dist/src/tools/consolidated/ai-tool.js +48 -0
- package/dist/src/tools/consolidated/brainstorm-tool.js +87 -0
- package/dist/src/tools/consolidated/environment-detector.js +80 -0
- package/dist/src/tools/consolidated/index.js +50 -0
- package/dist/src/tools/consolidated/search-tool.js +110 -0
- package/dist/src/tools/consolidated/workflow-tool.js +238 -0
- package/dist/src/tools/gemini-tools.js +329 -0
- package/dist/src/tools/grok-enhanced.js +376 -0
- package/dist/src/tools/grok-tools.js +299 -0
- package/dist/src/tools/lmstudio-tools.js +223 -0
- package/dist/src/tools/openai-tools.js +498 -0
- package/dist/src/tools/openrouter-tools.js +317 -0
- package/dist/src/tools/optimized-wrapper.js +204 -0
- package/dist/src/tools/perplexity-tools.js +294 -0
- package/dist/src/tools/pingpong-tool.js +343 -0
- package/dist/src/tools/qwen-wrapper.js +74 -0
- package/dist/src/tools/tool-router.js +444 -0
- package/dist/src/tools/unified-ai-provider.js +260 -0
- package/dist/src/tools/workflow-runner.js +425 -0
- package/dist/src/tools/workflow-validator-tool.js +107 -0
- package/dist/src/types.js +23 -0
- package/dist/src/utils/input-validator.js +130 -0
- package/dist/src/utils/model-router.js +91 -0
- package/dist/src/utils/progress-stream.js +255 -0
- package/dist/src/utils/provider-router.js +88 -0
- package/dist/src/utils/smart-api-client.js +146 -0
- package/dist/src/utils/table-builder.js +218 -0
- package/dist/src/utils/timestamp-formatter.js +134 -0
- package/dist/src/utils/tool-compressor.js +257 -0
- package/dist/src/utils/tool-config.js +201 -0
- package/dist/src/validators/dependency-graph-validator.js +147 -0
- package/dist/src/validators/interpolation-validator.js +222 -0
- package/dist/src/validators/output-usage-validator.js +151 -0
- package/dist/src/validators/syntax-validator.js +102 -0
- package/dist/src/validators/tool-registry-validator.js +123 -0
- package/dist/src/validators/tool-types.js +97 -0
- package/dist/src/validators/types.js +8 -0
- package/dist/src/validators/workflow-validator.js +134 -0
- package/dist/src/visualizer-lite.js +42 -0
- package/dist/src/visualizer.js +179 -0
- package/dist/src/workflows/circuit-breaker.js +199 -0
- package/dist/src/workflows/custom-workflows.js +451 -0
- package/dist/src/workflows/engine/AutoSynthesizer.js +97 -0
- package/dist/src/workflows/engine/StepParameterResolver.js +74 -0
- package/dist/src/workflows/engine/VariableInterpolator.js +123 -0
- package/dist/src/workflows/engine/WorkflowDiscovery.js +125 -0
- package/dist/src/workflows/engine/WorkflowExecutionEngine.js +485 -0
- package/dist/src/workflows/engine/WorkflowExecutor.js +113 -0
- package/dist/src/workflows/engine/WorkflowFileManager.js +244 -0
- package/dist/src/workflows/engine/WorkflowHelpers.js +114 -0
- package/dist/src/workflows/engine/WorkflowOutputFormatter.js +83 -0
- package/dist/src/workflows/engine/events/WorkflowEventBus.js +132 -0
- package/dist/src/workflows/engine/events/interfaces/IEventBus.js +5 -0
- package/dist/src/workflows/engine/handlers/ErrorRecoveryHandler.js +162 -0
- package/dist/src/workflows/engine/handlers/PromptEnhancementHandler.js +115 -0
- package/dist/src/workflows/engine/handlers/SessionPersistenceHandler.js +167 -0
- package/dist/src/workflows/engine/handlers/StepExecutionHandler.js +231 -0
- package/dist/src/workflows/engine/handlers/ToolInvocationHandler.js +46 -0
- package/dist/src/workflows/engine/interfaces/IAutoSynthesizer.js +5 -0
- package/dist/src/workflows/engine/interfaces/IStepParameterResolver.js +5 -0
- package/dist/src/workflows/engine/interfaces/IVariableInterpolator.js +5 -0
- package/dist/src/workflows/engine/interfaces/IWorkflowDiscovery.js +4 -0
- package/dist/src/workflows/engine/interfaces/IWorkflowFileManager.js +5 -0
- package/dist/src/workflows/engine/interfaces/IWorkflowOutputFormatter.js +5 -0
- package/dist/src/workflows/engine/state/WorkflowStateMachine.js +194 -0
- package/dist/src/workflows/engine/state/interfaces/IStateMachine.js +17 -0
- package/dist/src/workflows/fallback-strategies.js +373 -0
- package/dist/src/workflows/message-queue.js +455 -0
- package/dist/src/workflows/model-router.js +189 -0
- package/dist/src/workflows/orchestrator-examples.js +174 -0
- package/dist/src/workflows/orchestrator-integration.js +200 -0
- package/dist/src/workflows/self-healing.js +524 -0
- package/dist/src/workflows/tool-mapper.js +407 -0
- package/dist/src/workflows/tool-orchestrator.js +796 -0
- package/dist/src/workflows/workflow-engine.js +573 -0
- package/dist/src/workflows/workflow-parser.js +283 -0
- package/dist/src/workflows/workflow-types.js +95 -0
- package/dist/src/workflows.js +568 -0
- package/dist/test-workflow-file-output.js +93 -0
- package/docs/API_KEYS.md +570 -0
- package/docs/CLAUDE_CODE_SETUP.md +181 -0
- package/docs/CLAUDE_DESKTOP_MANUAL.md +127 -0
- package/docs/CONFIGURATION.md +745 -0
- package/docs/FOCUS_MODES.md +240 -0
- package/docs/INSTALLATION_BOTH.md +145 -0
- package/docs/TERMS.md +352 -0
- package/docs/TOOLS_REFERENCE.md +1622 -0
- package/docs/TOOL_PARAMETERS.md +496 -0
- package/docs/TOOL_PROFILES.md +236 -0
- package/docs/WORKFLOWS.md +987 -0
- package/docs/WORKFLOW_OUTPUT.md +198 -0
- package/docs/WORKFLOW_PROGRESS_TRACKING.md +305 -0
- package/docs/workflows/design-brainstorm.md +335 -0
- package/package.json +97 -0
- package/profiles/balanced.json +37 -0
- package/profiles/code_focus.json +37 -0
- package/profiles/debug_intensive.json +34 -0
- package/profiles/full.json +37 -0
- package/profiles/minimal.json +37 -0
- package/profiles/research_power.json +37 -0
- package/profiles/workflow_builder.json +37 -0
- package/smithery.yaml +66 -0
- package/start.sh +8 -0
- package/tools.config.json +81 -0
- package/tsconfig.json +18 -0
- package/workflows/accessibility-code-audit.yaml +92 -0
- package/workflows/code-architecture-review.yaml +202 -0
- package/workflows/code-review.yaml +142 -0
- package/workflows/core/iterative-problem-solver.yaml +283 -0
- package/workflows/creative-brainstorm-yaml.yaml +215 -0
- package/workflows/pingpong.yaml +141 -0
- package/workflows/system/README.md +412 -0
- package/workflows/system/challenger.yaml +175 -0
- package/workflows/system/scout.yaml +164 -0
- package/workflows/system/verifier.yaml +133 -0
- package/workflows/ultra-creative-brainstorm.yaml +318 -0
- package/workflows/ux-research-flow.yaml +92 -0
package/dist/src/tools/openai-tools.js
@@ -0,0 +1,498 @@
/**
 * OpenAI Tools Implementation
 * Provides GPT-5, GPT-5-mini, and GPT-5-nano model capabilities
 */
import { z } from "zod";
import { config } from "dotenv";
import * as path from 'path';
import { fileURLToPath } from 'url';
import { validateToolInput } from "../utils/input-validator.js";
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
config({ path: path.resolve(__dirname, '../../../.env') });
// OpenAI API configuration
const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
const OPENAI_API_URL = "https://api.openai.com/v1/chat/completions";
// Available OpenAI models (GPT-5 family only)
export var OpenAIModel;
(function (OpenAIModel) {
    OpenAIModel["GPT5"] = "gpt-5";
    OpenAIModel["GPT5_MINI"] = "gpt-5-mini";
    OpenAIModel["GPT5_NANO"] = "gpt-5-nano";
})(OpenAIModel || (OpenAIModel = {}));
/**
 * Call OpenAI API with model fallback support
 * Automatically detects GPT-5 models and uses correct endpoint + format
 */
export async function callOpenAI(messages, model = OpenAIModel.GPT5_MINI, temperature = 1, maxTokens = 16384, // Increased default for comprehensive responses
requireConfirmation = false, skipValidation = false) {
    console.error(`🔍 TRACE: callOpenAI called with model: ${model}`);
    if (!OPENAI_API_KEY) {
        console.error(`🔍 TRACE: No API key found`);
        return `[OpenAI API key not configured. Add OPENAI_API_KEY to .env file]`;
    }
    // Validate and sanitize message content (skip for internal workflow calls)
    const validatedMessages = messages.map((msg) => {
        if (skipValidation) {
            return msg; // Skip validation for internal workflow calls
        }
        const validation = validateToolInput(msg.content);
        if (!validation.valid) {
            throw new Error(validation.error || "Invalid message content");
        }
        return { ...msg, content: validation.sanitized };
    });
    // Model fallback chain - GPT-5 models have no fallbacks to test actual availability
    const modelFallbacks = {
        [OpenAIModel.GPT5]: [], // No fallback - test actual GPT-5
        [OpenAIModel.GPT5_MINI]: [], // No fallback - test actual GPT-5-mini
        [OpenAIModel.GPT5_NANO]: [] // No fallback - test actual GPT-5-nano
    };
    const modelsToTry = [model, ...(modelFallbacks[model] || [])];
    console.error(`🔍 TRACE: Models to try: ${modelsToTry.join(', ')}`);
    let lastError = '';
    for (const currentModel of modelsToTry) {
        console.error(`🔍 TRACE: Trying model: ${currentModel}`);
        try {
            // Detect if this is a GPT-5 model (uses /v1/responses endpoint)
            const isGPT5 = currentModel.startsWith('gpt-5');
            const endpoint = isGPT5
                ? "https://api.openai.com/v1/responses"
                : OPENAI_API_URL;
            // For GPT-5: convert messages to input string
            const input = isGPT5
                ? validatedMessages.map(m => m.role === 'system' ? `System: ${m.content}` : m.content).join('\n\n')
                : undefined;
            const requestBody = isGPT5 ? {
                model: currentModel,
                input: input,
                reasoning: {
                    effort: "minimal" // minimal/low/medium/high
                },
                text: {
                    verbosity: "medium" // silent/minimal/concise/balanced/medium/detailed/exhaustive
                }
            } : {
                model: currentModel,
                messages: validatedMessages,
                temperature,
                max_tokens: maxTokens,
                stream: false
            };
            console.error(`🔍 TRACE: Using ${isGPT5 ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
            const response = await fetch(endpoint, {
                method: "POST",
                headers: {
                    "Authorization": `Bearer ${OPENAI_API_KEY}`,
                    "Content-Type": "application/json"
                },
                body: JSON.stringify(requestBody)
            });
            if (!response.ok) {
                const error = await response.text();
                lastError = `${currentModel}: ${response.statusText} - ${error}`;
                console.error(`🔍 TRACE: ${currentModel} failed - Status: ${response.status}, Error: ${error}`);
                // Check if it's a model not found error
                if (response.status === 404 || error.includes('model') || error.includes('not found')) {
                    console.error(`🔍 TRACE: Model ${currentModel} not available, trying fallback...`);
                    continue; // Try next model
                }
                throw new Error(lastError);
            }
            const data = await response.json();
            // Parse response based on endpoint type
            let result;
            if (isGPT5) {
                // GPT-5 /v1/responses format: output array with message objects
                const messageOutput = data.output?.find((o) => o.type === 'message');
                const textContent = messageOutput?.content?.find((c) => c.type === 'output_text');
                result = textContent?.text || "No response from OpenAI";
            }
            else {
                // GPT-4 /v1/chat/completions format
                result = data.choices?.[0]?.message?.content || "No response from OpenAI";
            }
            console.error(`🔍 TRACE: ${currentModel} SUCCESS - Response length: ${result.length}`);
            return result;
        }
        catch (error) {
            lastError = `${currentModel}: ${error instanceof Error ? error.message : String(error)}`;
            console.error(`🔍 TRACE: ${currentModel} EXCEPTION - ${lastError}`);
            continue; // Try next model
        }
    }
    console.error(`🔍 TRACE: ALL MODELS FAILED - Last error: ${lastError}`);
    return `[GPT-5 model "${model}" not available. Error: ${lastError}]`;
}
/**
 * Call OpenAI API with custom parameters for specific models
 * Automatically detects GPT-5 models and uses correct endpoint + format
 */
async function callOpenAIWithCustomParams(messages, model, temperature = 0.8, maxTokens = 16384, // Increased for detailed brainstorming
reasoningEffort = "low", skipValidation = false) {
    console.error(`🔍 TRACE: callOpenAIWithCustomParams called with model: ${model}, reasoning_effort: ${reasoningEffort}`);
    if (!OPENAI_API_KEY) {
        console.error(`🔍 TRACE: No API key found`);
        return `[OpenAI API key not configured. Add OPENAI_API_KEY to .env file]`;
    }
    // Validate and sanitize message content (skip for internal workflow calls)
    const validatedMessages = messages.map((msg) => {
        if (skipValidation) {
            return msg; // Skip validation for internal workflow calls
        }
        const validation = validateToolInput(msg.content);
        if (!validation.valid) {
            throw new Error(validation.error || "Invalid message content");
        }
        return { ...msg, content: validation.sanitized };
    });
    try {
        const isGPT5 = model.startsWith('gpt-5');
        const endpoint = isGPT5
            ? "https://api.openai.com/v1/responses"
            : OPENAI_API_URL;
        // For GPT-5: convert messages to input string
        const input = isGPT5
            ? validatedMessages.map(m => m.role === 'system' ? `System: ${m.content}` : m.content).join('\n\n')
            : undefined;
        const requestBody = isGPT5 ? {
            model: model,
            input: input,
            reasoning: {
                effort: reasoningEffort // minimal/low/medium/high
            },
            text: {
                verbosity: "medium"
            }
        } : {
            model: model,
            messages: validatedMessages,
            temperature,
            max_tokens: maxTokens,
            stream: false
        };
        console.error(`🔍 TRACE: Using ${isGPT5 ? '/v1/responses' : '/v1/chat/completions'} endpoint`);
        if (isGPT5) {
            console.error(`🔍 TRACE: GPT-5 params: reasoning_effort=${reasoningEffort}`);
        }
        else {
            console.error(`🔍 TRACE: GPT-4 params: max_tokens=${maxTokens}, temperature=${temperature}`);
        }
        const response = await fetch(endpoint, {
            method: "POST",
            headers: {
                "Authorization": `Bearer ${OPENAI_API_KEY}`,
                "Content-Type": "application/json"
            },
            body: JSON.stringify(requestBody)
        });
        if (!response.ok) {
            const error = await response.text();
            console.error(`🔍 TRACE: ${model} failed - Status: ${response.status}, Error: ${error}`);
            return `[${model} failed: ${response.status} - ${error}]`;
        }
        const data = await response.json();
        // Parse response based on endpoint type
        let result;
        if (isGPT5) {
            // GPT-5 /v1/responses format
            const messageOutput = data.output?.find((o) => o.type === 'message');
            const textContent = messageOutput?.content?.find((c) => c.type === 'output_text');
            result = textContent?.text || "No response from OpenAI";
        }
        else {
            // GPT-4 /v1/chat/completions format
            result = data.choices?.[0]?.message?.content || "No response from OpenAI";
        }
        console.error(`🔍 TRACE: ${model} SUCCESS - Response length: ${result.length}`);
        return result;
    }
    catch (error) {
        const errorMsg = `${model}: ${error instanceof Error ? error.message : String(error)}`;
        console.error(`🔍 TRACE: ${model} EXCEPTION - ${errorMsg}`);
        return `[${model} error: ${errorMsg}]`;
    }
}
/**
 * GPT-5 Reasoning Tool - Most advanced reasoning with confirmation
 */
export const gpt5ReasonTool = {
    name: "gpt5_reason",
    description: "Advanced reasoning using GPT-5",
    parameters: z.object({
        query: z.string(),
        context: z.string().optional(),
        mode: z.enum(["mathematical", "scientific", "logical", "analytical"]).optional().default("analytical"),
        confirmUsage: z.boolean().optional().default(false)
    }),
    execute: async (args, { log }) => {
        // Check if user confirmed GPT-5 usage
        if (!args.confirmUsage) {
            return `⚠️ GPT-5 Usage Confirmation Required\n\nGPT-5 is the most advanced model but also the most expensive.\nTo proceed with GPT-5, please set confirmUsage: true\n\nAlternatively, use 'gpt5_mini_reason' for cost-efficient reasoning (no confirmation needed).`;
        }
        const modePrompts = {
            mathematical: "Focus on mathematical proofs, calculations, and formal logic",
            scientific: "Apply scientific method and empirical reasoning",
            logical: "Use formal logic and systematic deduction",
            analytical: "Break down complex problems into components"
        };
        const messages = [
            {
                role: "system",
                content: `You are GPT-5, the most advanced reasoning model.\n${modePrompts[args.mode || 'analytical']}.\nProvide step-by-step reasoning with clear explanations.\n${args.context ? `Context: ${args.context}` : ''}`
            },
            {
                role: "user",
                content: args.query
            }
        ];
        // Use GPT-5; callOpenAI has fallback to 5-mini and 4o if unavailable
        return await callOpenAI(messages, OpenAIModel.GPT5, 0.7, 4000);
    }
};
/**
 * GPT-5-mini Reasoning Tool - Cost-efficient reasoning without confirmation
 */
export const gpt5MiniReasonTool = {
    name: "gpt5_mini_reason",
    description: "Cost-efficient reasoning using GPT-5-mini",
    parameters: z.object({
        query: z.string(),
        context: z.string().optional(),
        mode: z.enum(["mathematical", "scientific", "logical", "analytical"]).optional().default("analytical")
    }),
    execute: async (args, { log }) => {
        const modePrompts = {
            mathematical: "Focus on mathematical proofs, calculations, and formal logic",
            scientific: "Apply scientific method and empirical reasoning",
            logical: "Use formal logic and systematic deduction",
            analytical: "Break down complex problems into components"
        };
        const messages = [
            {
                role: "system",
                content: `You are GPT-5-mini, optimized for efficient reasoning.\n${modePrompts[args.mode || 'analytical']}.\nProvide clear, step-by-step reasoning.\n${args.context ? `Context: ${args.context}` : ''}`
            },
            {
                role: "user",
                content: args.query
            }
        ];
        // Use GPT-5-mini directly; fallback chain will handle unavailability
        return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 3000);
    }
};
export const openaiGpt5ReasonTool = {
    name: "openai_gpt5_reason",
    description: "Mathematical reasoning using GPT-5-mini",
    parameters: z.object({
        query: z.string(),
        context: z.string().optional(),
        mode: z.enum(["mathematical", "scientific", "logical", "analytical"]).optional().default("analytical")
    }),
    execute: async (args, { log }) => {
        const modePrompts = {
            mathematical: "Focus on mathematical proofs, calculations, and formal logic",
            scientific: "Apply scientific method and empirical reasoning",
            logical: "Use formal logic and systematic deduction",
            analytical: "Break down complex problems into components"
        };
        const messages = [
            {
                role: "system",
                content: `You are an expert reasoner using advanced analytical capabilities.
${modePrompts[args.mode || 'analytical']}.
Provide step-by-step reasoning with clear explanations.
${args.context ? `Context: ${args.context}` : ''}`
            },
            {
                role: "user",
                content: args.query
            }
        ];
        // Use GPT-5-mini for reasoning
        return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 4000);
    }
};
/**
 * OpenAI Compare Tool
 * Multi-option comparison and consensus building using GPT-5-mini
 */
export const openaiCompareTool = {
    name: "openai_compare",
    description: "Multi-model consensus",
    parameters: z.object({
        topic: z.string(),
        options: z.array(z.string()),
        criteria: z.string().optional(),
        includeRecommendation: z.boolean().optional().default(true)
    }),
    execute: async (args, { log }) => {
        const optionsList = args.options.map((opt, i) => `${i + 1}. ${opt}`).join('\n');
        const messages = [
            {
                role: "system",
                content: `You are an expert at comparative analysis and decision-making.
Compare the given options systematically.
${args.criteria ? `Criteria: ${args.criteria}` : 'Consider: pros, cons, trade-offs, and suitability'}
${args.includeRecommendation ? 'Provide a clear recommendation with justification.' : ''}`
            },
            {
                role: "user",
                content: `Topic: ${args.topic}\n\nOptions:\n${optionsList}`
            }
        ];
        return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 3000);
    }
};
/**
 * OpenAI Brainstorm Tool
 * Creative ideation and brainstorming
 */
export const openAIBrainstormTool = {
    name: "openai_brainstorm",
    description: "Creative brainstorming",
    parameters: z.object({
        problem: z.string(),
        constraints: z.string().optional(),
        quantity: z.number().optional(),
        style: z.enum(["innovative", "practical", "wild", "systematic"]).optional(),
        model: z.enum(["gpt-5", "gpt-5-mini", "gpt-5-nano"]).optional(),
        reasoning_effort: z.enum(["minimal", "low", "medium", "high"]).optional(),
        verbosity: z.enum(["silent", "minimal", "concise", "balanced", "detailed", "exhaustive"]).optional(),
        max_tokens: z.number().optional()
    }),
    execute: async (args, options = {}) => {
        const { problem, constraints, quantity = 5, style = "innovative", model = "gpt-5-mini", reasoning_effort = "low", verbosity = "balanced", max_tokens = 4000 } = args;
        console.error('🚀 TOOL CALLED: openai_brainstorm');
        console.error('📥 ARGS RECEIVED:', JSON.stringify(args, null, 2));
        console.error('📥 OPTIONS RECEIVED:', JSON.stringify(options, null, 2));
        const stylePrompts = {
            innovative: "Focus on novel, cutting-edge solutions",
            practical: "Emphasize feasible, implementable ideas",
            wild: "Think outside the box with unconventional approaches",
            systematic: "Generate methodical, well-structured solutions"
        };
        const messages = [
            {
                role: "system",
                content: `You are a creative problem-solver and ideation expert.
Generate ${quantity} distinct ideas.
Style: ${stylePrompts[style]}
${constraints ? `Constraints: ${constraints}` : ''}
Format: Number each idea and provide a brief explanation.`
            },
            {
                role: "user",
                content: `Brainstorm solutions for: ${problem}`
            }
        ];
        // Use specified model with proper parameters
        const maxTokens = max_tokens;
        const reasoningEffort = reasoning_effort;
        console.error(`🔍 DEBUG: Using model: ${model}, reasoning_effort: ${reasoningEffort}, max_tokens: ${maxTokens}`);
        // Convert string model to OpenAIModel enum
        const modelEnum = model;
        console.error(`🔍 CALLING: callOpenAIWithCustomParams with ${modelEnum}, skipValidation: ${options.skipValidation || false}`);
        // Use temperature=1 (default) for GPT-5, 0.8 for others
        const temperature = model.startsWith('gpt-5') ? 1.0 : 0.8;
        const result = await callOpenAIWithCustomParams(messages, modelEnum, temperature, maxTokens, reasoningEffort, options.skipValidation || false);
        console.error('🔍 DEBUG: Got result from callOpenAI:', result.substring(0, 100));
        console.error('✅ TOOL COMPLETE: openai_brainstorm');
        return result;
    }
};
/**
 * OpenAI Code Review Tool
 * Comprehensive code review
 */
export const openaiCodeReviewTool = {
    name: "openai_code_review",
    description: "Code review",
    parameters: z.object({
        code: z.string(),
        language: z.string().optional(),
        focusAreas: z.array(z.enum(["security", "performance", "readability", "bugs", "best-practices"])).optional()
    }),
    execute: async (args, { log }) => {
        const focusText = args.focusAreas
            ? `Focus especially on: ${args.focusAreas.join(', ')}`
            : "Review all aspects: security, performance, readability, bugs, and best practices";
        const messages = [
            {
                role: "system",
                content: `You are an expert code reviewer.
Provide a thorough code review with specific, actionable feedback.
${focusText}
${args.language ? `Language: ${args.language}` : ''}
Format: Use sections for different aspects, be specific about line numbers or functions.`
            },
            {
                role: "user",
                content: `Review this code:\n\`\`\`${args.language || ''}\n${args.code}\n\`\`\``
            }
        ];
        return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 4000);
    }
};
/**
 * OpenAI Explain Tool
 * Clear explanations for complex topics
 */
export const openaiExplainTool = {
    name: "openai_explain",
    description: "Explain concepts",
    parameters: z.object({
        topic: z.string(),
        level: z.enum(["beginner", "intermediate", "expert"]).optional().default("intermediate"),
        style: z.enum(["technical", "simple", "analogy", "visual"]).optional().default("simple")
    }),
    execute: async (args, { log }) => {
        const levelPrompts = {
            beginner: "Explain for someone with no prior knowledge",
            intermediate: "Explain for someone with basic understanding",
            expert: "Provide detailed, technical explanation"
        };
        const stylePrompts = {
            technical: "Use precise technical terminology",
            simple: "Use simple, everyday language",
            analogy: "Use analogies and metaphors",
            visual: "Describe with visual concepts and diagrams"
        };
        const messages = [
            {
                role: "system",
                content: `You are an expert educator.
${levelPrompts[args.level || 'intermediate']}.
${stylePrompts[args.style || 'simple']}.
Make the explanation clear, engaging, and memorable.`
            },
            {
                role: "user",
                content: `Explain: ${args.topic}`
            }
        ];
        return await callOpenAI(messages, OpenAIModel.GPT5_MINI, 0.7, 2500);
    }
};
/**
 * Check if OpenAI is available
 */
export function isOpenAIAvailable() {
    return !!OPENAI_API_KEY;
}
/**
 * Get all OpenAI tools
 */
export function getAllOpenAITools() {
    if (!isOpenAIAvailable()) {
        return [];
    }
    return [
        openaiGpt5ReasonTool, // GPT-5-mini reasoning
        openaiCompareTool, // GPT-5-mini comparison
        openAIBrainstormTool, // GPT-5-mini/GPT-5 brainstorming
        openaiCodeReviewTool, // GPT-5-mini code review
        openaiExplainTool // GPT-5-mini explanations
    ];
}
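For orientation only, here is a minimal sketch of how the tool objects added in this file might be exercised directly, outside the MCP server. It is not part of the published package: the file name try-openai-tools.mjs, the relative import path, and the stubbed { log: console.error } context object are assumptions, and it presumes OPENAI_API_KEY is set in the environment.

// try-openai-tools.mjs — hypothetical harness, not shipped with tachibot-mcp
import { getAllOpenAITools, isOpenAIAvailable, openaiExplainTool } from "./dist/src/tools/openai-tools.js";

if (!isOpenAIAvailable()) {
    console.error("OPENAI_API_KEY is not set; getAllOpenAITools() would return [].");
} else {
    console.error(`Registering ${getAllOpenAITools().length} OpenAI tools`);
    // Each tool is a plain object; the server normally validates args against
    // the tool's zod `parameters` schema before calling execute(args, context).
    const answer = await openaiExplainTool.execute(
        { topic: "the Node.js event loop", level: "beginner", style: "analogy" },
        { log: console.error } // stand-in for the server-provided logging context
    );
    console.log(answer);
}

Note that execute destructures { log } from its second argument in most of these tools, so some context object has to be supplied even when logging is not needed.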