tachibot-mcp 2.0.2 → 2.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +28 -0
  2. package/dist/src/config/model-constants.js +55 -28
  3. package/dist/src/config/model-defaults.js +14 -14
  4. package/dist/src/tools/openai-tools.js +210 -118
  5. package/dist/src/tools/unified-ai-provider.js +11 -12
  6. package/dist/src/workflows/tool-mapper.js +20 -24
  7. package/package.json +1 -1
  8. package/tools.config.json +1 -1
  9. package/workflows/core/iterative-problem-solver.yaml +2 -2
  10. package/workflows/system/scout.yaml +1 -1
  11. package/workflows/ultra-creative-brainstorm.yaml +2 -2
  12. package/dist/personality/komaai-expressions.js +0 -12
  13. package/dist/profiles/balanced.json +0 -33
  14. package/dist/profiles/code_focus.json +0 -33
  15. package/dist/profiles/full.json +0 -33
  16. package/dist/profiles/minimal.json +0 -33
  17. package/dist/profiles/research_power.json +0 -33
  18. package/dist/src/application/services/focus/ModeRegistry.js +0 -46
  19. package/dist/src/application/services/focus/modes/status.mode.js +0 -50
  20. package/dist/src/profiles/debug_intensive.js +0 -59
  21. package/dist/src/profiles/research_code.js +0 -59
  22. package/dist/src/profiles/workflow_builder.js +0 -53
  23. package/dist/src/tools/consolidated/ai-router.js +0 -174
  24. package/dist/src/tools/consolidated/ai-tool.js +0 -48
  25. package/dist/src/tools/consolidated/brainstorm-tool.js +0 -87
  26. package/dist/src/tools/consolidated/environment-detector.js +0 -80
  27. package/dist/src/tools/consolidated/index.js +0 -50
  28. package/dist/src/tools/consolidated/search-tool.js +0 -110
  29. package/dist/src/tools/consolidated/workflow-tool.js +0 -238
  30. package/dist/src/tools/pingpong-tool.js +0 -343
  31. package/dist/src/workflows/orchestrator-integration.js +0 -200
  32. package/dist/src/workflows/workflow-engine.js +0 -573
  33. package/dist/src/workflows/workflow-parser.js +0 -283
  34. package/dist/test-workflow-file-output.js +0 -93
package/CHANGELOG.md CHANGED
@@ -5,6 +5,34 @@ All notable changes to TachiBot MCP will be documented in this file.
5
5
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6
6
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
7
 
8
+ ## [2.0.3] - 2025-11-18
9
+
10
+ ### Changed
11
+ - Updated OpenAI models from GPT-5 to GPT-5.1 series
12
+ - Updated default model to `gpt-5.1-codex-mini` for better code generation
13
+ - Added GPT-5.1 reasoning effort levels (none, low, medium, high)
14
+ - Changed default active profile from `research_power` to `full`
15
+ - Improved model configuration in workflows and tool mapper
16
+ - Updated model constants and defaults throughout codebase
17
+
18
+ ### Fixed
19
+ - Model references in iterative-problem-solver workflow
20
+ - Model references in scout workflow
21
+ - Model references in ultra-creative-brainstorm workflow
22
+
23
+ ## [2.0.2] - 2025-11-15
24
+
25
+ ### Fixed
26
+ - OpenAI GPT-5 API integration
27
+ - License correction from MIT to AGPL-3.0 in package.json
28
+
29
+ ## [2.0.1] - 2025-11-10
30
+
31
+ ### Changed
32
+ - Updated README with comprehensive API key documentation
33
+ - Added missing GEMINI_API_KEY and OPENROUTER_API_KEY to installation examples
34
+ - Improved documentation clarity
35
+
8
36
  ## [2.0.0] - 2025-10-15
9
37
 
10
38
  ### Added
@@ -2,16 +2,23 @@
2
2
  * Centralized Model Names and Constants
3
3
  * Use these constants instead of hardcoded strings in workflows and tools
4
4
  */
5
- // OpenAI GPT-5 Models (August 2025)
6
- export const GPT5_MODELS = {
7
- NANO: "gpt-5-nano", // Fastest, most cost-efficient ($0.05/$0.40 per 1M tokens)
8
- MINI: "gpt-5-mini", // Balanced performance ($0.25/$2 per 1M tokens)
9
- FULL: "gpt-5", // Most advanced ($1.25/$10 per 1M tokens)
5
+ // OpenAI GPT-5.1 Models (November 2025)
6
+ export const GPT51_MODELS = {
7
+ FULL: "gpt-5.1", // Full reasoning model ($1.25/$10 per 1M tokens)
8
+ CODEX_MINI: "gpt-5.1-codex-mini", // Coding optimized, cost-efficient ($0.25/$2 per 1M tokens) - DEFAULT
9
+ CODEX: "gpt-5.1-codex", // Advanced coding ($1.25/$10 per 1M tokens)
10
10
  };
11
- // OpenAI GPT-4 Models
11
+ // GPT-5.1 Reasoning Effort Levels
12
+ export const GPT51_REASONING = {
13
+ NONE: "none", // No extra reasoning (fastest, cheapest)
14
+ LOW: "low", // Light reasoning
15
+ MEDIUM: "medium", // Balanced reasoning (default)
16
+ HIGH: "high", // Maximum reasoning (slowest, most thorough)
17
+ };
18
+ // OpenAI GPT-4 Models (Legacy - mapped to GPT-5.1)
12
19
  export const GPT4_MODELS = {
13
- O_MINI: "gpt-5-mini", // Cost-efficient
14
- O: "gpt-5", // Current best
20
+ O_MINI: "gpt-5.1-codex-mini", // Cost-efficient
21
+ O: "gpt-5.1", // Current best
15
22
  _1_MINI: "gpt-4.1-mini", // Best value with 1M context
16
23
  };
17
24
  // Google Gemini Models (2025)
@@ -41,7 +48,7 @@ export const KIMI_MODELS = {
41
48
  };
42
49
  // All models combined for validation
43
50
  export const ALL_MODELS = {
44
- ...GPT5_MODELS,
51
+ ...GPT51_MODELS,
45
52
  ...GPT4_MODELS,
46
53
  ...GEMINI_MODELS,
47
54
  ...PERPLEXITY_MODELS,
@@ -57,21 +64,36 @@ export const DEFAULT_WORKFLOW_SETTINGS = {
57
64
  };
58
65
  // Tool-specific defaults for ALL tools
59
66
  export const TOOL_DEFAULTS = {
60
- // OpenAI GPT-5 tools
61
- gpt5_nano: {
62
- model: GPT5_MODELS.NANO,
63
- maxTokens: 1000,
64
- temperature: 1.0, // GPT-5 requires temperature=1
65
- },
66
- gpt5_mini: {
67
- model: GPT5_MODELS.MINI,
67
+ // OpenAI GPT-5.1 tools
68
+ openai_gpt5_reason: {
69
+ model: GPT51_MODELS.FULL,
70
+ reasoning_effort: GPT51_REASONING.HIGH,
71
+ maxTokens: 4000,
72
+ temperature: 0.7,
73
+ },
74
+ openai_brainstorm: {
75
+ model: GPT51_MODELS.CODEX_MINI,
76
+ reasoning_effort: GPT51_REASONING.MEDIUM,
68
77
  maxTokens: 2000,
69
- temperature: 1.0,
78
+ temperature: 0.9,
70
79
  },
71
- gpt5: {
72
- model: GPT5_MODELS.FULL,
73
- maxTokens: 4000,
74
- temperature: 1.0,
80
+ openai_compare: {
81
+ model: GPT51_MODELS.CODEX_MINI,
82
+ reasoning_effort: GPT51_REASONING.LOW,
83
+ maxTokens: 2000,
84
+ temperature: 0.7,
85
+ },
86
+ openai_code_review: {
87
+ model: GPT51_MODELS.CODEX_MINI,
88
+ reasoning_effort: GPT51_REASONING.MEDIUM,
89
+ maxTokens: 2000,
90
+ temperature: 0.3,
91
+ },
92
+ openai_explain: {
93
+ model: GPT51_MODELS.CODEX_MINI,
94
+ reasoning_effort: GPT51_REASONING.LOW,
95
+ maxTokens: 1500,
96
+ temperature: 0.7,
75
97
  },
76
98
  // Gemini tools
77
99
  gemini_query: {
@@ -159,30 +181,35 @@ export const TOOL_DEFAULTS = {
159
181
  },
160
182
  // Meta tools (think, focus, code_reviewer, etc.)
161
183
  think: {
162
- model: GPT5_MODELS.MINI,
184
+ model: GPT51_MODELS.FULL,
185
+ reasoning_effort: GPT51_REASONING.HIGH,
163
186
  maxTokens: 500,
164
187
  temperature: 0.7,
165
188
  },
166
189
  focus: {
167
- model: GPT5_MODELS.MINI,
190
+ model: GPT51_MODELS.CODEX_MINI,
191
+ reasoning_effort: GPT51_REASONING.LOW,
168
192
  maxTokens: 2000,
169
193
  temperature: 0.8,
170
194
  },
171
195
  code_reviewer: {
172
- model: GPT5_MODELS.MINI,
196
+ model: GPT51_MODELS.CODEX_MINI,
197
+ reasoning_effort: GPT51_REASONING.MEDIUM,
173
198
  maxTokens: 2000,
174
199
  temperature: 0.5,
175
200
  },
176
201
  test_architect: {
177
- model: GPT5_MODELS.MINI,
202
+ model: GPT51_MODELS.CODEX_MINI,
203
+ reasoning_effort: GPT51_REASONING.MEDIUM,
178
204
  maxTokens: 2000,
179
205
  temperature: 0.6,
180
206
  },
181
207
  documentation_writer: {
182
- model: GPT5_MODELS.MINI,
208
+ model: GPT51_MODELS.CODEX_MINI,
209
+ reasoning_effort: GPT51_REASONING.LOW,
183
210
  maxTokens: 2000,
184
211
  temperature: 0.7,
185
212
  },
186
213
  };
187
214
  // Default tool to use in workflows if not specified
188
- export const DEFAULT_WORKFLOW_TOOL = "gpt5_mini";
215
+ export const DEFAULT_WORKFLOW_TOOL = "openai_brainstorm";
@@ -10,48 +10,48 @@
10
10
  * Get Scout model configuration
11
11
  *
12
12
  * Defaults:
13
- * - quick_scout: Flash + gpt-5-mini (speed + cost efficient)
14
- * - research_scout: Pro + gpt-5-mini (quality + cost balance)
13
+ * - quick_scout: Flash + gpt-5.1-codex-mini (speed + cost efficient)
14
+ * - research_scout: Pro + gpt-5.1-codex-mini (quality + cost balance)
15
15
  */
16
16
  export function getScoutModels() {
17
17
  const quick = process.env.SCOUT_QUICK_MODELS?.split(',').map(m => m.trim()) ||
18
- ['qwen/qwen3-coder-plus', 'gemini-2.5-flash', 'gpt-5-mini'];
18
+ ['qwen/qwen3-coder-plus', 'gemini-2.5-flash', 'gpt-5.1-codex-mini'];
19
19
  const research = process.env.SCOUT_RESEARCH_MODELS?.split(',').map(m => m.trim()) ||
20
- ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5-mini'];
20
+ ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5.1-codex-mini'];
21
21
  return { quick, research };
22
22
  }
23
23
  /**
24
24
  * Get Challenger model configuration
25
25
  *
26
- * Defaults: Pro + gpt-5-mini (quality for critical analysis, cost efficient)
26
+ * Defaults: Pro + gpt-5.1-codex-mini (quality for critical analysis, cost efficient)
27
27
  */
28
28
  export function getChallengerModels() {
29
29
  return process.env.CHALLENGER_MODELS?.split(',').map(m => m.trim()) ||
30
- ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5-mini'];
30
+ ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5.1-codex-mini'];
31
31
  }
32
32
  /**
33
33
  * Get Verifier model configuration
34
34
  *
35
35
  * Defaults:
36
- * - quick_verify: Flash + gpt-5-mini (fast checks, cost efficient)
37
- * - standard modes: Pro + gpt-5-mini (quality + cost balance)
38
- * - deep_verify: Pro + gpt-5 (maximum quality for critical verification)
36
+ * - quick_verify: Flash + gpt-5.1-codex-mini (fast checks, cost efficient)
37
+ * - standard modes: Pro + gpt-5.1-codex-mini (quality + cost balance)
38
+ * - deep_verify: Pro + gpt-5.1 (maximum quality for critical verification)
39
39
  */
40
40
  export function getVerifierModels() {
41
41
  const quick = process.env.VERIFIER_QUICK_MODELS?.split(',').map(m => m.trim()) ||
42
- ['qwen/qwen3-coder-plus', 'gemini-2.5-flash', 'gpt-5-mini'];
42
+ ['qwen/qwen3-coder-plus', 'gemini-2.5-flash', 'gpt-5.1-codex-mini'];
43
43
  const deep = process.env.VERIFIER_DEEP_MODELS?.split(',').map(m => m.trim()) ||
44
- ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5'];
44
+ ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5.1'];
45
45
  const standard = process.env.VERIFIER_STANDARD_MODELS?.split(',').map(m => m.trim()) ||
46
- ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5-mini'];
46
+ ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5.1-codex-mini'];
47
47
  return { quick, deep, standard };
48
48
  }
49
49
  /**
50
50
  * Get default fallback models (used when variant has no specific config)
51
51
  *
52
- * Default: Pro + gpt-5-mini (balanced quality and cost)
52
+ * Default: Pro + gpt-5.1-codex-mini (balanced quality and cost)
53
53
  */
54
54
  export function getDefaultModels() {
55
55
  return process.env.DEFAULT_MODELS?.split(',').map(m => m.trim()) ||
56
- ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5-mini'];
56
+ ['qwen/qwen3-coder-plus', 'gemini-2.5-pro', 'gpt-5.1-codex-mini'];
57
57
  }