@hailer/mcp 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (163)
  1. package/.claude/commands/tool-builder.md +37 -0
  2. package/.claude/commands/ws-pull.md +44 -0
  3. package/.claude/settings.json +8 -0
  4. package/.claude/settings.local.json +49 -0
  5. package/.claude/skills/activity-api/SKILL.md +96 -0
  6. package/.claude/skills/activity-api/references/activity-endpoints.md +845 -0
  7. package/.claude/skills/add-app-member-skill/SKILL.md +977 -0
  8. package/.claude/skills/agent-building/SKILL.md +243 -0
  9. package/.claude/skills/agent-building/references/architecture-patterns.md +446 -0
  10. package/.claude/skills/agent-building/references/code-examples.md +587 -0
  11. package/.claude/skills/agent-building/references/implementation-guide.md +619 -0
  12. package/.claude/skills/app-api/SKILL.md +219 -0
  13. package/.claude/skills/app-api/references/app-endpoints.md +759 -0
  14. package/.claude/skills/building-hailer-apps-skill/SKILL.md +548 -0
  15. package/.claude/skills/create-app-skill/SKILL.md +1101 -0
  16. package/.claude/skills/create-insight-skill/SKILL.md +1317 -0
  17. package/.claude/skills/get-insight-data-skill/SKILL.md +1053 -0
  18. package/.claude/skills/hailer-api/SKILL.md +283 -0
  19. package/.claude/skills/hailer-api/references/activities.md +620 -0
  20. package/.claude/skills/hailer-api/references/authentication.md +216 -0
  21. package/.claude/skills/hailer-api/references/datasets.md +437 -0
  22. package/.claude/skills/hailer-api/references/files.md +301 -0
  23. package/.claude/skills/hailer-api/references/insights.md +469 -0
  24. package/.claude/skills/hailer-api/references/workflows.md +720 -0
  25. package/.claude/skills/hailer-api/references/workspaces-users.md +445 -0
  26. package/.claude/skills/insight-api/SKILL.md +185 -0
  27. package/.claude/skills/insight-api/references/insight-endpoints.md +514 -0
  28. package/.claude/skills/install-workflow-skill/SKILL.md +1056 -0
  29. package/.claude/skills/list-apps-skill/SKILL.md +1010 -0
  30. package/.claude/skills/list-workflows-minimal-skill/SKILL.md +992 -0
  31. package/.claude/skills/local-first-skill/SKILL.md +570 -0
  32. package/.claude/skills/mcp-tools/SKILL.md +419 -0
  33. package/.claude/skills/mcp-tools/references/api-endpoints.md +499 -0
  34. package/.claude/skills/mcp-tools/references/data-structures.md +554 -0
  35. package/.claude/skills/mcp-tools/references/implementation-patterns.md +717 -0
  36. package/.claude/skills/preview-insight-skill/SKILL.md +1290 -0
  37. package/.claude/skills/publish-hailer-app-skill/SKILL.md +453 -0
  38. package/.claude/skills/remove-app-member-skill/SKILL.md +671 -0
  39. package/.claude/skills/remove-app-skill/SKILL.md +985 -0
  40. package/.claude/skills/remove-insight-skill/SKILL.md +1011 -0
  41. package/.claude/skills/remove-workflow-skill/SKILL.md +920 -0
  42. package/.claude/skills/scaffold-hailer-app-skill/SKILL.md +1034 -0
  43. package/.claude/skills/skill-testing/README.md +137 -0
  44. package/.claude/skills/skill-testing/SKILL.md +348 -0
  45. package/.claude/skills/skill-testing/references/test-patterns.md +705 -0
  46. package/.claude/skills/skill-testing/references/testing-guide.md +603 -0
  47. package/.claude/skills/skill-testing/references/validation-checklist.md +537 -0
  48. package/.claude/skills/tool-builder/SKILL.md +328 -0
  49. package/.claude/skills/update-app-skill/SKILL.md +970 -0
  50. package/.claude/skills/update-workflow-field-skill/SKILL.md +1098 -0
  51. package/.env.example +81 -0
  52. package/.mcp.json +13 -0
  53. package/README.md +297 -0
  54. package/dist/app.d.ts +4 -0
  55. package/dist/app.js +74 -0
  56. package/dist/cli.d.ts +3 -0
  57. package/dist/cli.js +5 -0
  58. package/dist/client/adaptive-documentation-bot.d.ts +108 -0
  59. package/dist/client/adaptive-documentation-bot.js +475 -0
  60. package/dist/client/adaptive-documentation-types.d.ts +66 -0
  61. package/dist/client/adaptive-documentation-types.js +9 -0
  62. package/dist/client/agent-activity-bot.d.ts +51 -0
  63. package/dist/client/agent-activity-bot.js +166 -0
  64. package/dist/client/agent-tracker.d.ts +499 -0
  65. package/dist/client/agent-tracker.js +659 -0
  66. package/dist/client/description-updater.d.ts +56 -0
  67. package/dist/client/description-updater.js +259 -0
  68. package/dist/client/log-parser.d.ts +72 -0
  69. package/dist/client/log-parser.js +387 -0
  70. package/dist/client/mcp-client.d.ts +50 -0
  71. package/dist/client/mcp-client.js +532 -0
  72. package/dist/client/message-processor.d.ts +35 -0
  73. package/dist/client/message-processor.js +352 -0
  74. package/dist/client/multi-bot-manager.d.ts +24 -0
  75. package/dist/client/multi-bot-manager.js +74 -0
  76. package/dist/client/providers/anthropic-provider.d.ts +19 -0
  77. package/dist/client/providers/anthropic-provider.js +631 -0
  78. package/dist/client/providers/llm-provider.d.ts +47 -0
  79. package/dist/client/providers/llm-provider.js +367 -0
  80. package/dist/client/providers/openai-provider.d.ts +23 -0
  81. package/dist/client/providers/openai-provider.js +621 -0
  82. package/dist/client/simple-llm-caller.d.ts +19 -0
  83. package/dist/client/simple-llm-caller.js +100 -0
  84. package/dist/client/skill-generator.d.ts +81 -0
  85. package/dist/client/skill-generator.js +386 -0
  86. package/dist/client/test-adaptive-bot.d.ts +9 -0
  87. package/dist/client/test-adaptive-bot.js +82 -0
  88. package/dist/client/token-pricing.d.ts +38 -0
  89. package/dist/client/token-pricing.js +127 -0
  90. package/dist/client/token-tracker.d.ts +232 -0
  91. package/dist/client/token-tracker.js +457 -0
  92. package/dist/client/token-usage-bot.d.ts +53 -0
  93. package/dist/client/token-usage-bot.js +153 -0
  94. package/dist/client/tool-executor.d.ts +69 -0
  95. package/dist/client/tool-executor.js +159 -0
  96. package/dist/client/tool-schema-loader.d.ts +60 -0
  97. package/dist/client/tool-schema-loader.js +178 -0
  98. package/dist/client/types.d.ts +69 -0
  99. package/dist/client/types.js +7 -0
  100. package/dist/config.d.ts +162 -0
  101. package/dist/config.js +296 -0
  102. package/dist/core.d.ts +26 -0
  103. package/dist/core.js +147 -0
  104. package/dist/lib/context-manager.d.ts +111 -0
  105. package/dist/lib/context-manager.js +431 -0
  106. package/dist/lib/logger.d.ts +74 -0
  107. package/dist/lib/logger.js +277 -0
  108. package/dist/lib/materialize.d.ts +3 -0
  109. package/dist/lib/materialize.js +101 -0
  110. package/dist/lib/normalizedName.d.ts +7 -0
  111. package/dist/lib/normalizedName.js +48 -0
  112. package/dist/lib/prompt-length-manager.d.ts +81 -0
  113. package/dist/lib/prompt-length-manager.js +457 -0
  114. package/dist/lib/terminal-prompt.d.ts +9 -0
  115. package/dist/lib/terminal-prompt.js +108 -0
  116. package/dist/mcp/UserContextCache.d.ts +56 -0
  117. package/dist/mcp/UserContextCache.js +163 -0
  118. package/dist/mcp/auth.d.ts +2 -0
  119. package/dist/mcp/auth.js +29 -0
  120. package/dist/mcp/hailer-clients.d.ts +42 -0
  121. package/dist/mcp/hailer-clients.js +246 -0
  122. package/dist/mcp/signal-handler.d.ts +45 -0
  123. package/dist/mcp/signal-handler.js +317 -0
  124. package/dist/mcp/tool-registry.d.ts +100 -0
  125. package/dist/mcp/tool-registry.js +306 -0
  126. package/dist/mcp/tools/activity.d.ts +15 -0
  127. package/dist/mcp/tools/activity.js +955 -0
  128. package/dist/mcp/tools/app.d.ts +20 -0
  129. package/dist/mcp/tools/app.js +1488 -0
  130. package/dist/mcp/tools/discussion.d.ts +19 -0
  131. package/dist/mcp/tools/discussion.js +950 -0
  132. package/dist/mcp/tools/file.d.ts +15 -0
  133. package/dist/mcp/tools/file.js +119 -0
  134. package/dist/mcp/tools/insight.d.ts +17 -0
  135. package/dist/mcp/tools/insight.js +806 -0
  136. package/dist/mcp/tools/skill.d.ts +10 -0
  137. package/dist/mcp/tools/skill.js +279 -0
  138. package/dist/mcp/tools/user.d.ts +10 -0
  139. package/dist/mcp/tools/user.js +108 -0
  140. package/dist/mcp/tools/workflow-template.d.ts +19 -0
  141. package/dist/mcp/tools/workflow-template.js +822 -0
  142. package/dist/mcp/tools/workflow.d.ts +18 -0
  143. package/dist/mcp/tools/workflow.js +1362 -0
  144. package/dist/mcp/utils/api-errors.d.ts +45 -0
  145. package/dist/mcp/utils/api-errors.js +160 -0
  146. package/dist/mcp/utils/data-transformers.d.ts +102 -0
  147. package/dist/mcp/utils/data-transformers.js +194 -0
  148. package/dist/mcp/utils/file-upload.d.ts +33 -0
  149. package/dist/mcp/utils/file-upload.js +148 -0
  150. package/dist/mcp/utils/hailer-api-client.d.ts +120 -0
  151. package/dist/mcp/utils/hailer-api-client.js +323 -0
  152. package/dist/mcp/utils/index.d.ts +13 -0
  153. package/dist/mcp/utils/index.js +39 -0
  154. package/dist/mcp/utils/logger.d.ts +42 -0
  155. package/dist/mcp/utils/logger.js +103 -0
  156. package/dist/mcp/utils/types.d.ts +286 -0
  157. package/dist/mcp/utils/types.js +7 -0
  158. package/dist/mcp/workspace-cache.d.ts +42 -0
  159. package/dist/mcp/workspace-cache.js +97 -0
  160. package/dist/mcp-server.d.ts +42 -0
  161. package/dist/mcp-server.js +280 -0
  162. package/package.json +56 -0
  163. package/tsconfig.json +23 -0
@@ -0,0 +1,621 @@
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.OpenAIProvider = void 0;
7
+ const openai_1 = __importDefault(require("openai"));
8
+ const llm_provider_1 = require("./llm-provider");
9
+ const context_manager_1 = require("../../lib/context-manager");
10
+ const token_pricing_1 = require("../token-pricing");
11
+ const tool_schema_loader_1 = require("../tool-schema-loader");
12
+ const tool_executor_1 = require("../tool-executor");
13
+ const tool_registry_1 = require("../../mcp/tool-registry");
14
// OpenAI-backed chat provider: drives GPT models against the Hailer MCP tool
// server, with on-demand tool-schema loading and automatic context summarization.
class OpenAIProvider extends llm_provider_1.LlmProvider {
    // OpenAI SDK client, created from config.apiKey in the constructor.
    client;
    // Shared context manager used for token counting and auto-summarization.
    contextManager;
    // Fetches the tool index and per-tool schemas from the MCP server.
    toolSchemaLoader = new tool_schema_loader_1.ToolSchemaLoader();
    // NOTE(review): not referenced anywhere in this file's visible code — confirm before removing.
    toolExecutor = new tool_executor_1.ToolExecutor();
    /**
     * @param config Provider configuration; `apiKey` is used both for the
     *     OpenAI client and for the context manager's summarization calls.
     */
    constructor(config) {
        super(config);
        this.client = new openai_1.default({
            apiKey: config.apiKey,
        });
        // Summarization kicks in automatically once the context exceeds the
        // safe limit (25% safety margin, at most 10 summarization chunks).
        this.contextManager = (0, context_manager_1.getContextManager)({
            openaiApiKey: config.apiKey,
            safetyMarginPercent: 25,
            enableAutoSummarization: true,
            maxSummarizationChunks: 10,
        });
    }
31
+ async generateConfirmationMessage(userMessage) {
32
+ if (!this.isEnabled()) {
33
+ return `🤖 Processing your request with ${this.name}...`;
34
+ }
35
+ try {
36
+ const confirmationPrompt = this.getConfirmationPrompt(userMessage);
37
+ const response = await this.client.chat.completions.create({
38
+ model: this.config.model || "gpt-4o",
39
+ max_tokens: 100,
40
+ temperature: 0.7,
41
+ messages: [
42
+ {
43
+ role: "user",
44
+ content: confirmationPrompt,
45
+ },
46
+ ],
47
+ });
48
+ const confirmationText = response.choices[0]?.message?.content?.trim() ||
49
+ `🤖 Processing your request with ${this.name}...`;
50
+ return confirmationText;
51
+ }
52
+ catch (error) {
53
+ this.logError(error, "generateConfirmationMessage");
54
+ return `🤖 Processing your request with ${this.name}...`;
55
+ }
56
+ }
57
+ async processMessage(userMessage, mcpServerUrl, botMcpApiKey, botEmail) {
58
+ if (!this.isEnabled()) {
59
+ this.logger.error("OpenAI provider is not enabled or missing API key");
60
+ return {
61
+ success: false,
62
+ error: "OpenAI provider is not enabled or missing API key",
63
+ };
64
+ }
65
+ try {
66
+ const systemPrompt = await this.createSystemPrompt({
67
+ userMessage,
68
+ mcpServerUrl,
69
+ mcpServerApiKey: botMcpApiKey,
70
+ botEmail,
71
+ });
72
+ const startTime = Date.now();
73
+ // Load tool index with automatic filtering
74
+ // Chat bot only gets READ + WRITE tools (no PLAYGROUND tools)
75
+ const allowedGroups = [tool_registry_1.ToolGroup.READ, tool_registry_1.ToolGroup.WRITE];
76
+ const toolIndex = await this.toolSchemaLoader.loadToolIndex({
77
+ mcpServerUrl,
78
+ mcpServerApiKey: botMcpApiKey,
79
+ allowedGroups
80
+ });
81
+ if (!toolIndex || toolIndex.length === 0) {
82
+ this.logger.warn("No MCP tools available");
83
+ return await this.handleDirectResponse(this.removeMentions(userMessage), systemPrompt);
84
+ }
85
+ // Convert to OpenAI tool format - minimal stubs initially (on-demand schema loading)
86
+ const minimalTools = this.toolSchemaLoader.toMinimalToolDefinitions(toolIndex);
87
+ // Track which tools have been loaded with full schemas
88
+ const loadedToolSchemas = new Map();
89
+ this.logger.info("Connected to MCP server (on-demand mode)", {
90
+ toolsForChatBot: toolIndex.length,
91
+ allowedGroups: allowedGroups.join(', '),
92
+ initialTokensSaved: "~28,000 tokens (95% reduction)"
93
+ });
94
+ // Initial prompt is simple and unlikely to need truncation for OpenAI
95
+ const finalSystemPrompt = systemPrompt;
96
+ const finalUserContent = this.removeMentions(userMessage);
97
+ // Check token count before initial API call (with minimal tools)
98
+ const initialMessages = [
99
+ { role: "system", content: finalSystemPrompt },
100
+ { role: "user", content: finalUserContent }
101
+ ];
102
+ // Convert minimal tools to OpenAI format
103
+ const openaiTools = await this.convertToolIndexToOpenAIFormat(mcpServerUrl, botMcpApiKey, minimalTools);
104
+ const initialTokenCount = this.contextManager.countTokens(finalSystemPrompt, initialMessages, openaiTools, 'openai');
105
+ this.logger.info("Initial prompt token count", {
106
+ totalTokens: initialTokenCount.totalTokens,
107
+ systemPromptTokens: initialTokenCount.systemPromptTokens,
108
+ messagesTokens: initialTokenCount.messagesTokens,
109
+ toolsTokens: initialTokenCount.toolsTokens,
110
+ safeLimit: initialTokenCount.limit.safeTokens,
111
+ exceedsLimit: initialTokenCount.exceedsLimit,
112
+ });
113
+ if (initialTokenCount.exceedsLimit) {
114
+ this.logger.error("Initial prompt exceeds token limit", {
115
+ totalTokens: initialTokenCount.totalTokens,
116
+ safeLimit: initialTokenCount.limit.safeTokens,
117
+ });
118
+ return {
119
+ success: false,
120
+ error: `Initial prompt too long: ${initialTokenCount.totalTokens} tokens exceeds safe limit of ${initialTokenCount.limit.safeTokens} tokens`,
121
+ };
122
+ }
123
+ // Initial request to OpenAI with tools
124
+ let response = await this.client.chat.completions.create({
125
+ model: this.config.model || "gpt-4o",
126
+ max_tokens: this.config.maxTokens || 2000,
127
+ temperature: this.config.temperature || 0.7,
128
+ messages: [
129
+ {
130
+ role: "system",
131
+ content: finalSystemPrompt,
132
+ },
133
+ {
134
+ role: "user",
135
+ content: finalUserContent,
136
+ },
137
+ ],
138
+ tools: openaiTools,
139
+ tool_choice: "auto",
140
+ });
141
+ const toolCalls = [];
142
+ // No conversation history - process each message independently
143
+ const conversationMessages = [
144
+ { role: "system", content: finalSystemPrompt },
145
+ { role: "user", content: finalUserContent },
146
+ ];
147
+ // Track token usage across all API calls
148
+ let totalInputTokens = 0;
149
+ let totalOutputTokens = 0;
150
+ // Accumulate tokens from initial response
151
+ if (response.usage) {
152
+ totalInputTokens += response.usage.prompt_tokens || 0;
153
+ totalOutputTokens += response.usage.completion_tokens || 0;
154
+ }
155
+ // Handle tool use requests
156
+ let maxIterations = 25; // Prevent infinite loops
157
+ while (maxIterations > 0 && response.choices[0]?.message?.tool_calls) {
158
+ maxIterations--;
159
+ if (maxIterations === 0) {
160
+ this.logger.warn("Max iteration limit reached, generating final response with current tool results", {
161
+ botId: userMessage.mentionedOrDirectMessagedBotId,
162
+ totalIterations: 25,
163
+ });
164
+ break; // Exit the loop but continue to generate response
165
+ }
166
+ const assistantMessage = response.choices[0].message;
167
+ const toolCalls_list = assistantMessage.tool_calls;
168
+ if (!toolCalls_list)
169
+ break;
170
+ // Add assistant's response to conversation
171
+ conversationMessages.push({
172
+ role: "assistant",
173
+ content: assistantMessage.content || null,
174
+ tool_calls: toolCalls_list,
175
+ });
176
+ // Build optimized tool set: full schemas for loaded tools, minimal stubs for others
177
+ const optimizedTools = this.buildOptimizedTools(minimalTools, loadedToolSchemas);
178
+ const toolResults = [];
179
+ const schemasJustLoaded = [];
180
+ // First pass: Check if any tools need schema loading
181
+ for (const toolCall of toolCalls_list) {
182
+ const funcCall = toolCall.function;
183
+ const toolName = funcCall?.name;
184
+ if (!loadedToolSchemas.has(toolName)) {
185
+ this.logger.info("Tool schema needed - loading before execution", { toolName });
186
+ const fullSchema = await this.toolSchemaLoader.loadToolSchema(mcpServerUrl, botMcpApiKey, toolName);
187
+ loadedToolSchemas.set(toolName, fullSchema);
188
+ schemasJustLoaded.push(toolName);
189
+ this.logger.debug("Tool schema loaded and will be sent to GPT", {
190
+ toolName,
191
+ totalLoaded: loadedToolSchemas.size
192
+ });
193
+ }
194
+ }
195
+ // If we just loaded schemas, don't execute yet - let GPT see them first
196
+ if (schemasJustLoaded.length > 0) {
197
+ this.logger.info("Loaded new tool schemas - giving GPT another iteration to see parameters", {
198
+ schemasLoaded: schemasJustLoaded,
199
+ tokensAdded: schemasJustLoaded.length * 200
200
+ });
201
+ // IMPORTANT: OpenAI expects tool results for every tool call
202
+ // Add stub results telling GPT to retry with full schemas
203
+ const stubResults = toolCalls_list.map(toolCall => ({
204
+ role: "tool",
205
+ tool_call_id: toolCall.id,
206
+ content: `Schema loaded. Full parameter details are now available. Please retry this tool call with the correct parameters based on the schema.`
207
+ }));
208
+ conversationMessages.push(...stubResults);
209
+ // Continue to next iteration - GPT will now see full schemas in optimizedTools
210
+ continue;
211
+ }
212
+ // Second pass: Execute tools (schemas are already loaded)
213
+ for (const toolCall of toolCalls_list) {
214
+ const toolCallStart = Date.now();
215
+ try {
216
+ const funcCall = toolCall.function;
217
+ this.logToolCall(funcCall?.name, JSON.parse(funcCall?.arguments || "{}"));
218
+ const args = JSON.parse(funcCall?.arguments || "{}");
219
+ const result = await this.callMcpTool(mcpServerUrl, botMcpApiKey, {
220
+ name: funcCall?.name,
221
+ arguments: args,
222
+ });
223
+ const toolCallDuration = Date.now() - toolCallStart;
224
+ toolCalls.push({
225
+ toolName: funcCall?.name,
226
+ args: args,
227
+ result: result?.content,
228
+ duration: toolCallDuration,
229
+ });
230
+ // Note: Individual tool truncation removed - now handled at prompt level
231
+ const resultContent = JSON.stringify(result?.content || {});
232
+ toolResults.push({
233
+ role: "tool",
234
+ tool_call_id: toolCall.id,
235
+ content: resultContent,
236
+ });
237
+ this.logToolCall(funcCall?.name, args, toolCallDuration);
238
+ // Log detailed tool call to activity logger
239
+ this.agentTracker.logToolCall({
240
+ toolName: funcCall?.name,
241
+ provider: "OpenAI",
242
+ request: {
243
+ arguments: args,
244
+ endpoint: mcpServerUrl,
245
+ method: "POST",
246
+ },
247
+ response: {
248
+ success: true,
249
+ data: result?.content,
250
+ },
251
+ duration: toolCallDuration,
252
+ botId: userMessage.mentionedOrDirectMessagedBotId,
253
+ discussionId: userMessage.discussionId,
254
+ workspaceId: userMessage.workspaceId,
255
+ });
256
+ }
257
+ catch (error) {
258
+ const funcCall = toolCall.function;
259
+ const toolCallDuration = Date.now() - toolCallStart;
260
+ this.logger.toolError(funcCall?.name, this.name, error, toolCallDuration);
261
+ toolCalls.push({
262
+ toolName: funcCall?.name,
263
+ args: JSON.parse(funcCall?.arguments || "{}"),
264
+ error: error instanceof Error ? error.message : String(error),
265
+ });
266
+ toolResults.push({
267
+ role: "tool",
268
+ tool_call_id: toolCall.id,
269
+ content: `Error: ${error instanceof Error ? error.message : String(error)}`,
270
+ });
271
+ // Log failed tool call to activity logger
272
+ this.agentTracker.logToolCall({
273
+ toolName: funcCall?.name,
274
+ provider: "OpenAI",
275
+ request: {
276
+ arguments: JSON.parse(funcCall?.arguments || "{}"),
277
+ endpoint: mcpServerUrl,
278
+ method: "POST",
279
+ },
280
+ response: {
281
+ success: false,
282
+ error: error instanceof Error ? error.message : String(error),
283
+ },
284
+ duration: toolCallDuration,
285
+ botId: userMessage.mentionedOrDirectMessagedBotId,
286
+ discussionId: userMessage.discussionId,
287
+ workspaceId: userMessage.workspaceId,
288
+ });
289
+ }
290
+ }
291
+ // Add tool results to conversation
292
+ if (toolResults.length > 0) {
293
+ conversationMessages.push(...toolResults);
294
+ // Check token count before API call with tool results
295
+ const tokenCount = this.contextManager.countTokens(finalSystemPrompt, conversationMessages, optimizedTools, 'openai');
296
+ this.logger.info("Token count after tool results", {
297
+ totalTokens: tokenCount.totalTokens,
298
+ messagesTokens: tokenCount.messagesTokens,
299
+ safeLimit: tokenCount.limit.safeTokens,
300
+ exceedsLimit: tokenCount.exceedsLimit,
301
+ messageCount: conversationMessages.length,
302
+ });
303
+ // Check if approaching limit (80%)
304
+ if (this.contextManager.approachingLimit(tokenCount.totalTokens, 'openai')) {
305
+ this.logger.warn("Approaching token limit", {
306
+ totalTokens: tokenCount.totalTokens,
307
+ safeLimit: tokenCount.limit.safeTokens,
308
+ percentUsed: Math.round((tokenCount.totalTokens / tokenCount.limit.safeTokens) * 100),
309
+ });
310
+ }
311
+ // If exceeds limit, summarize context
312
+ let messagesToSend = conversationMessages;
313
+ if (tokenCount.exceedsLimit) {
314
+ this.logger.warn("Context exceeds token limit, starting summarization", {
315
+ totalTokens: tokenCount.totalTokens,
316
+ safeLimit: tokenCount.limit.safeTokens,
317
+ messageCount: conversationMessages.length,
318
+ });
319
+ try {
320
+ // Extract system prompt from messages for summarization
321
+ const messagesWithoutSystem = conversationMessages.filter(m => m.role !== 'system');
322
+ const systemMsg = conversationMessages.find(m => m.role === 'system');
323
+ const summarizationResult = await this.contextManager.summarizeContext(systemMsg?.content || finalSystemPrompt, messagesWithoutSystem, 'openai', finalUserContent);
324
+ this.logger.info("Context summarization successful", {
325
+ originalTokens: summarizationResult.originalTokens,
326
+ summarizedTokens: summarizationResult.summarizedTokens,
327
+ reductionPercent: summarizationResult.reductionPercent,
328
+ chunksProcessed: summarizationResult.chunksProcessed,
329
+ originalMessages: conversationMessages.length,
330
+ summarizedMessages: summarizationResult.summarizedMessages.length,
331
+ });
332
+ // Replace conversation messages with summarized version
333
+ // Keep system prompt, first user message, add summarized results
334
+ messagesToSend = [
335
+ { role: "system", content: finalSystemPrompt },
336
+ conversationMessages.find(m => m.role === 'user'),
337
+ ...summarizationResult.summarizedMessages,
338
+ ];
339
+ // Verify summarized context is within limits
340
+ const summarizedTokenCount = this.contextManager.countTokens(finalSystemPrompt, messagesToSend, optimizedTools, 'openai');
341
+ if (summarizedTokenCount.exceedsLimit) {
342
+ this.logger.error("Summarized context still exceeds limit", {
343
+ summarizedTokens: summarizedTokenCount.totalTokens,
344
+ safeLimit: summarizedTokenCount.limit.safeTokens,
345
+ });
346
+ return {
347
+ success: false,
348
+ error: `Context too large even after summarization: ${summarizedTokenCount.totalTokens} tokens exceeds safe limit of ${summarizedTokenCount.limit.safeTokens} tokens`,
349
+ };
350
+ }
351
+ }
352
+ catch (error) {
353
+ this.logger.error("Summarization failed", error, {
354
+ originalTokens: tokenCount.totalTokens,
355
+ safeLimit: tokenCount.limit.safeTokens,
356
+ });
357
+ return {
358
+ success: false,
359
+ error: `Context too large and summarization failed: ${error instanceof Error ? error.message : String(error)}`,
360
+ };
361
+ }
362
+ }
363
+ // Get OpenAI's response to the tool results (potentially summarized)
364
+ response = await this.client.chat.completions.create({
365
+ model: this.config.model || "gpt-4o",
366
+ max_tokens: this.config.maxTokens || 2000,
367
+ temperature: this.config.temperature || 0.7,
368
+ messages: messagesToSend,
369
+ tools: optimizedTools,
370
+ tool_choice: "auto",
371
+ });
372
+ // Accumulate tokens from this API call
373
+ if (response.usage) {
374
+ totalInputTokens += response.usage.prompt_tokens || 0;
375
+ totalOutputTokens += response.usage.completion_tokens || 0;
376
+ }
377
+ }
378
+ }
379
+ // If we hit the iteration limit, force a final response without tool calls
380
+ if (maxIterations === 0 && response.choices[0]?.message?.tool_calls) {
381
+ this.logger.info("Forcing final response without tool calls after hitting iteration limit", {
382
+ botId: userMessage.mentionedOrDirectMessagedBotId,
383
+ });
384
+ // Add final instruction
385
+ const finalMessages = [
386
+ ...conversationMessages,
387
+ { role: 'user', content: "Please provide a summary response based on the information gathered from the tools above. Do not call any more tools." }
388
+ ];
389
+ // Check token count before final API call
390
+ const finalTokenCount = this.contextManager.countTokens(finalSystemPrompt, finalMessages, [], // No tools in final call
391
+ 'openai');
392
+ this.logger.info("Final forced response token count", {
393
+ totalTokens: finalTokenCount.totalTokens,
394
+ safeLimit: finalTokenCount.limit.safeTokens,
395
+ exceedsLimit: finalTokenCount.exceedsLimit,
396
+ });
397
+ let finalMessagesToSend = finalMessages;
398
+ if (finalTokenCount.exceedsLimit) {
399
+ this.logger.warn("Final context exceeds limit, summarizing", {
400
+ totalTokens: finalTokenCount.totalTokens,
401
+ safeLimit: finalTokenCount.limit.safeTokens,
402
+ });
403
+ try {
404
+ // Extract messages without system for summarization
405
+ const messagesWithoutSystem = conversationMessages.filter(m => m.role !== 'system');
406
+ const systemMsg = conversationMessages.find(m => m.role === 'system');
407
+ const summarizationResult = await this.contextManager.summarizeContext(systemMsg?.content || finalSystemPrompt, messagesWithoutSystem, 'openai', finalUserContent);
408
+ finalMessagesToSend = [
409
+ { role: "system", content: finalSystemPrompt },
410
+ conversationMessages.find(m => m.role === 'user'),
411
+ ...summarizationResult.summarizedMessages,
412
+ { role: 'user', content: "Please provide a summary response based on the information gathered from the tools above. Do not call any more tools." }
413
+ ];
414
+ this.logger.info("Final context summarized", {
415
+ originalTokens: summarizationResult.originalTokens,
416
+ summarizedTokens: summarizationResult.summarizedTokens,
417
+ reductionPercent: summarizationResult.reductionPercent,
418
+ });
419
+ }
420
+ catch (error) {
421
+ this.logger.error("Final summarization failed", error);
422
+ // Continue with truncated messages as fallback
423
+ }
424
+ }
425
+ // Make final call without tools
426
+ response = await this.client.chat.completions.create({
427
+ model: this.config.model || "gpt-4o",
428
+ max_tokens: this.config.maxTokens || 2000,
429
+ temperature: this.config.temperature || 0.7,
430
+ messages: finalMessagesToSend,
431
+ // No tools - force text response only
432
+ });
433
+ // Accumulate tokens from final API call
434
+ if (response.usage) {
435
+ totalInputTokens += response.usage.prompt_tokens || 0;
436
+ totalOutputTokens += response.usage.completion_tokens || 0;
437
+ }
438
+ }
439
+ // No cleanup needed for HTTP-based MCP calls
440
+ const duration = Date.now() - startTime;
441
+ const responseText = response.choices[0]?.message?.content?.trim() ||
442
+ "Task completed successfully.";
443
+ // Calculate total cost (OpenAI doesn't have cache tokens)
444
+ const model = this.config.model || "gpt-4o";
445
+ const cost = (0, token_pricing_1.calculateTokenCost)({
446
+ input_tokens: totalInputTokens,
447
+ output_tokens: totalOutputTokens,
448
+ }, model);
449
+ this.logger.info("Processed request", {
450
+ duration,
451
+ toolCallCount: toolCalls.length,
452
+ maxIterationsReached: maxIterations === 0,
453
+ tokens: {
454
+ input: totalInputTokens,
455
+ output: totalOutputTokens,
456
+ total: totalInputTokens + totalOutputTokens,
457
+ cost: cost,
458
+ }
459
+ });
460
+ return {
461
+ success: true,
462
+ toolCalls,
463
+ response: responseText,
464
+ tokens: {
465
+ input: totalInputTokens,
466
+ output: totalOutputTokens,
467
+ total: totalInputTokens + totalOutputTokens,
468
+ cost: cost,
469
+ },
470
+ };
471
+ }
472
+ catch (error) {
473
+ this.logError(error, "processMessage");
474
+ return {
475
+ success: false,
476
+ error: `OpenAI processing failed: ${error.message || error}`,
477
+ };
478
+ }
479
+ }
480
+ /**
481
+ * Convert tool index to OpenAI tool format
482
+ * Loads full schemas for all tools (for now - could optimize with on-demand loading later)
483
+ */
484
+ async convertToolIndexToOpenAIFormat(mcpServerUrl, mcpServerApiKey, toolIndex) {
485
+ const tools = [];
486
+ for (const tool of toolIndex) {
487
+ try {
488
+ // Load full schema for each tool
489
+ const schema = await this.toolSchemaLoader.loadToolSchema(mcpServerUrl, mcpServerApiKey, tool.name);
490
+ // Convert to OpenAI format
491
+ tools.push({
492
+ type: "function",
493
+ function: {
494
+ name: schema.name,
495
+ description: schema.description,
496
+ parameters: schema.inputSchema || schema.input_schema,
497
+ },
498
+ });
499
+ }
500
+ catch (error) {
501
+ this.logger.error(`Failed to load schema for tool ${tool.name}`, error);
502
+ // Skip this tool and continue
503
+ }
504
+ }
505
+ return tools;
506
+ }
507
+ /**
508
+ * Build optimized tools list: full schemas for loaded tools, minimal stubs for others
509
+ */
510
+ buildOptimizedTools(minimalTools, loadedToolSchemas) {
511
+ const optimizedTools = [];
512
+ for (const tool of minimalTools) {
513
+ if (loadedToolSchemas.has(tool.name)) {
514
+ // Use full schema from loaded schemas
515
+ const fullSchema = loadedToolSchemas.get(tool.name);
516
+ optimizedTools.push({
517
+ type: "function",
518
+ function: {
519
+ name: fullSchema.name,
520
+ description: fullSchema.description,
521
+ parameters: fullSchema.input_schema || fullSchema.inputSchema,
522
+ },
523
+ });
524
+ }
525
+ else {
526
+ // Use minimal stub (empty parameters)
527
+ optimizedTools.push({
528
+ type: "function",
529
+ function: {
530
+ name: tool.name,
531
+ description: tool.description,
532
+ parameters: {
533
+ type: "object",
534
+ properties: {},
535
+ required: [],
536
+ },
537
+ },
538
+ });
539
+ }
540
+ }
541
+ return optimizedTools;
542
+ }
543
+ async callMcpTool(mcpServerUrl, mcpServerApiKey, request) {
544
+ const url = `${mcpServerUrl}${mcpServerUrl.includes("?") ? "&" : "?"}apiKey=${mcpServerApiKey}`;
545
+ const response = await fetch(url, {
546
+ method: "POST",
547
+ headers: {
548
+ "Content-Type": "application/json",
549
+ Accept: "application/json, text/event-stream",
550
+ },
551
+ body: JSON.stringify({
552
+ jsonrpc: "2.0",
553
+ id: Math.random().toString(36).substring(2),
554
+ method: "tools/call",
555
+ params: {
556
+ name: request.name,
557
+ arguments: request.arguments || {},
558
+ },
559
+ }),
560
+ });
561
+ if (!response.ok) {
562
+ throw new Error(`MCP tool call failed with ${response.status}: ${response.statusText}`);
563
+ }
564
+ // Parse SSE response format
565
+ const responseText = await response.text();
566
+ const lines = responseText.split("\n");
567
+ let jsonData = null;
568
+ for (const line of lines) {
569
+ if (line.startsWith("data: ")) {
570
+ try {
571
+ jsonData = JSON.parse(line.substring(6));
572
+ break;
573
+ }
574
+ catch (e) {
575
+ // Skip non-JSON lines
576
+ }
577
+ }
578
+ }
579
+ if (!jsonData) {
580
+ throw new Error("Failed to parse MCP tool response");
581
+ }
582
+ if (jsonData.error) {
583
+ throw new Error(`MCP tool error: ${jsonData.error.message || jsonData.error}`);
584
+ }
585
+ return jsonData.result;
586
+ }
587
+ async handleDirectResponse(userMessage, systemPrompt) {
588
+ try {
589
+ const response = await this.client.chat.completions.create({
590
+ model: this.config.model || "gpt-4o",
591
+ max_tokens: this.config.maxTokens || 2000,
592
+ temperature: this.config.temperature || 0.7,
593
+ messages: [
594
+ {
595
+ role: "system",
596
+ content: systemPrompt,
597
+ },
598
+ {
599
+ role: "user",
600
+ content: userMessage,
601
+ },
602
+ ],
603
+ });
604
+ const responseText = response.choices[0]?.message?.content?.trim() ||
605
+ "No response generated";
606
+ return {
607
+ success: true,
608
+ response: responseText,
609
+ toolCalls: [],
610
+ };
611
+ }
612
+ catch (error) {
613
+ return {
614
+ success: false,
615
+ error: `Direct response failed: ${error.message || error}`,
616
+ };
617
+ }
618
+ }
619
+ }
620
+ exports.OpenAIProvider = OpenAIProvider;
621
+ //# sourceMappingURL=openai-provider.js.map
@@ -0,0 +1,19 @@
1
/**
 * Simple LLM Caller for Adaptive Documentation Bot
 *
 * Provides basic LLM API calls without the full MCP provider infrastructure.
 * Used for analyzing errors and generating improvements.
 */
export declare class SimpleLLMCaller {
    private apiKey;
    private model;
    private provider;
    /**
     * @param provider - Which backend to call: 'anthropic' or 'openai'.
     * @param apiKey - API key for the chosen provider.
     * @param model - Optional model override; a provider default is used when omitted.
     */
    constructor(provider: 'anthropic' | 'openai', apiKey: string, model?: string);
    /**
     * Simple LLM call for text generation.
     *
     * @param prompt - User-level prompt text.
     * @param systemPrompt - Optional system prompt sent alongside the user prompt.
     * @returns The generated text from the selected provider.
     */
    generate(prompt: string, systemPrompt?: string): Promise<string>;
    private callAnthropic;
    private callOpenAI;
}
19
+ //# sourceMappingURL=simple-llm-caller.d.ts.map