@hailer/mcp 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/tool-builder.md +37 -0
- package/.claude/commands/ws-pull.md +44 -0
- package/.claude/settings.json +8 -0
- package/.claude/settings.local.json +49 -0
- package/.claude/skills/activity-api/SKILL.md +96 -0
- package/.claude/skills/activity-api/references/activity-endpoints.md +845 -0
- package/.claude/skills/add-app-member-skill/SKILL.md +977 -0
- package/.claude/skills/agent-building/SKILL.md +243 -0
- package/.claude/skills/agent-building/references/architecture-patterns.md +446 -0
- package/.claude/skills/agent-building/references/code-examples.md +587 -0
- package/.claude/skills/agent-building/references/implementation-guide.md +619 -0
- package/.claude/skills/app-api/SKILL.md +219 -0
- package/.claude/skills/app-api/references/app-endpoints.md +759 -0
- package/.claude/skills/building-hailer-apps-skill/SKILL.md +548 -0
- package/.claude/skills/create-app-skill/SKILL.md +1101 -0
- package/.claude/skills/create-insight-skill/SKILL.md +1317 -0
- package/.claude/skills/get-insight-data-skill/SKILL.md +1053 -0
- package/.claude/skills/hailer-api/SKILL.md +283 -0
- package/.claude/skills/hailer-api/references/activities.md +620 -0
- package/.claude/skills/hailer-api/references/authentication.md +216 -0
- package/.claude/skills/hailer-api/references/datasets.md +437 -0
- package/.claude/skills/hailer-api/references/files.md +301 -0
- package/.claude/skills/hailer-api/references/insights.md +469 -0
- package/.claude/skills/hailer-api/references/workflows.md +720 -0
- package/.claude/skills/hailer-api/references/workspaces-users.md +445 -0
- package/.claude/skills/insight-api/SKILL.md +185 -0
- package/.claude/skills/insight-api/references/insight-endpoints.md +514 -0
- package/.claude/skills/install-workflow-skill/SKILL.md +1056 -0
- package/.claude/skills/list-apps-skill/SKILL.md +1010 -0
- package/.claude/skills/list-workflows-minimal-skill/SKILL.md +992 -0
- package/.claude/skills/local-first-skill/SKILL.md +570 -0
- package/.claude/skills/mcp-tools/SKILL.md +419 -0
- package/.claude/skills/mcp-tools/references/api-endpoints.md +499 -0
- package/.claude/skills/mcp-tools/references/data-structures.md +554 -0
- package/.claude/skills/mcp-tools/references/implementation-patterns.md +717 -0
- package/.claude/skills/preview-insight-skill/SKILL.md +1290 -0
- package/.claude/skills/publish-hailer-app-skill/SKILL.md +453 -0
- package/.claude/skills/remove-app-member-skill/SKILL.md +671 -0
- package/.claude/skills/remove-app-skill/SKILL.md +985 -0
- package/.claude/skills/remove-insight-skill/SKILL.md +1011 -0
- package/.claude/skills/remove-workflow-skill/SKILL.md +920 -0
- package/.claude/skills/scaffold-hailer-app-skill/SKILL.md +1034 -0
- package/.claude/skills/skill-testing/README.md +137 -0
- package/.claude/skills/skill-testing/SKILL.md +348 -0
- package/.claude/skills/skill-testing/references/test-patterns.md +705 -0
- package/.claude/skills/skill-testing/references/testing-guide.md +603 -0
- package/.claude/skills/skill-testing/references/validation-checklist.md +537 -0
- package/.claude/skills/tool-builder/SKILL.md +328 -0
- package/.claude/skills/update-app-skill/SKILL.md +970 -0
- package/.claude/skills/update-workflow-field-skill/SKILL.md +1098 -0
- package/.env.example +81 -0
- package/.mcp.json +13 -0
- package/README.md +297 -0
- package/dist/app.d.ts +4 -0
- package/dist/app.js +74 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.js +5 -0
- package/dist/client/adaptive-documentation-bot.d.ts +108 -0
- package/dist/client/adaptive-documentation-bot.js +475 -0
- package/dist/client/adaptive-documentation-types.d.ts +66 -0
- package/dist/client/adaptive-documentation-types.js +9 -0
- package/dist/client/agent-activity-bot.d.ts +51 -0
- package/dist/client/agent-activity-bot.js +166 -0
- package/dist/client/agent-tracker.d.ts +499 -0
- package/dist/client/agent-tracker.js +659 -0
- package/dist/client/description-updater.d.ts +56 -0
- package/dist/client/description-updater.js +259 -0
- package/dist/client/log-parser.d.ts +72 -0
- package/dist/client/log-parser.js +387 -0
- package/dist/client/mcp-client.d.ts +50 -0
- package/dist/client/mcp-client.js +532 -0
- package/dist/client/message-processor.d.ts +35 -0
- package/dist/client/message-processor.js +352 -0
- package/dist/client/multi-bot-manager.d.ts +24 -0
- package/dist/client/multi-bot-manager.js +74 -0
- package/dist/client/providers/anthropic-provider.d.ts +19 -0
- package/dist/client/providers/anthropic-provider.js +631 -0
- package/dist/client/providers/llm-provider.d.ts +47 -0
- package/dist/client/providers/llm-provider.js +367 -0
- package/dist/client/providers/openai-provider.d.ts +23 -0
- package/dist/client/providers/openai-provider.js +621 -0
- package/dist/client/simple-llm-caller.d.ts +19 -0
- package/dist/client/simple-llm-caller.js +100 -0
- package/dist/client/skill-generator.d.ts +81 -0
- package/dist/client/skill-generator.js +386 -0
- package/dist/client/test-adaptive-bot.d.ts +9 -0
- package/dist/client/test-adaptive-bot.js +82 -0
- package/dist/client/token-pricing.d.ts +38 -0
- package/dist/client/token-pricing.js +127 -0
- package/dist/client/token-tracker.d.ts +232 -0
- package/dist/client/token-tracker.js +457 -0
- package/dist/client/token-usage-bot.d.ts +53 -0
- package/dist/client/token-usage-bot.js +153 -0
- package/dist/client/tool-executor.d.ts +69 -0
- package/dist/client/tool-executor.js +159 -0
- package/dist/client/tool-schema-loader.d.ts +60 -0
- package/dist/client/tool-schema-loader.js +178 -0
- package/dist/client/types.d.ts +69 -0
- package/dist/client/types.js +7 -0
- package/dist/config.d.ts +162 -0
- package/dist/config.js +296 -0
- package/dist/core.d.ts +26 -0
- package/dist/core.js +147 -0
- package/dist/lib/context-manager.d.ts +111 -0
- package/dist/lib/context-manager.js +431 -0
- package/dist/lib/logger.d.ts +74 -0
- package/dist/lib/logger.js +277 -0
- package/dist/lib/materialize.d.ts +3 -0
- package/dist/lib/materialize.js +101 -0
- package/dist/lib/normalizedName.d.ts +7 -0
- package/dist/lib/normalizedName.js +48 -0
- package/dist/lib/prompt-length-manager.d.ts +81 -0
- package/dist/lib/prompt-length-manager.js +457 -0
- package/dist/lib/terminal-prompt.d.ts +9 -0
- package/dist/lib/terminal-prompt.js +108 -0
- package/dist/mcp/UserContextCache.d.ts +56 -0
- package/dist/mcp/UserContextCache.js +163 -0
- package/dist/mcp/auth.d.ts +2 -0
- package/dist/mcp/auth.js +29 -0
- package/dist/mcp/hailer-clients.d.ts +42 -0
- package/dist/mcp/hailer-clients.js +246 -0
- package/dist/mcp/signal-handler.d.ts +45 -0
- package/dist/mcp/signal-handler.js +317 -0
- package/dist/mcp/tool-registry.d.ts +100 -0
- package/dist/mcp/tool-registry.js +306 -0
- package/dist/mcp/tools/activity.d.ts +15 -0
- package/dist/mcp/tools/activity.js +955 -0
- package/dist/mcp/tools/app.d.ts +20 -0
- package/dist/mcp/tools/app.js +1488 -0
- package/dist/mcp/tools/discussion.d.ts +19 -0
- package/dist/mcp/tools/discussion.js +950 -0
- package/dist/mcp/tools/file.d.ts +15 -0
- package/dist/mcp/tools/file.js +119 -0
- package/dist/mcp/tools/insight.d.ts +17 -0
- package/dist/mcp/tools/insight.js +806 -0
- package/dist/mcp/tools/skill.d.ts +10 -0
- package/dist/mcp/tools/skill.js +279 -0
- package/dist/mcp/tools/user.d.ts +10 -0
- package/dist/mcp/tools/user.js +108 -0
- package/dist/mcp/tools/workflow-template.d.ts +19 -0
- package/dist/mcp/tools/workflow-template.js +822 -0
- package/dist/mcp/tools/workflow.d.ts +18 -0
- package/dist/mcp/tools/workflow.js +1362 -0
- package/dist/mcp/utils/api-errors.d.ts +45 -0
- package/dist/mcp/utils/api-errors.js +160 -0
- package/dist/mcp/utils/data-transformers.d.ts +102 -0
- package/dist/mcp/utils/data-transformers.js +194 -0
- package/dist/mcp/utils/file-upload.d.ts +33 -0
- package/dist/mcp/utils/file-upload.js +148 -0
- package/dist/mcp/utils/hailer-api-client.d.ts +120 -0
- package/dist/mcp/utils/hailer-api-client.js +323 -0
- package/dist/mcp/utils/index.d.ts +13 -0
- package/dist/mcp/utils/index.js +39 -0
- package/dist/mcp/utils/logger.d.ts +42 -0
- package/dist/mcp/utils/logger.js +103 -0
- package/dist/mcp/utils/types.d.ts +286 -0
- package/dist/mcp/utils/types.js +7 -0
- package/dist/mcp/workspace-cache.d.ts +42 -0
- package/dist/mcp/workspace-cache.js +97 -0
- package/dist/mcp-server.d.ts +42 -0
- package/dist/mcp-server.js +280 -0
- package/package.json +56 -0
- package/tsconfig.json +23 -0
|
@@ -0,0 +1,631 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.AnthropicProvider = void 0;
|
|
7
|
+
const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
|
|
8
|
+
const llm_provider_1 = require("./llm-provider");
|
|
9
|
+
const context_manager_1 = require("../../lib/context-manager");
|
|
10
|
+
const token_pricing_1 = require("../token-pricing");
|
|
11
|
+
const tool_registry_1 = require("../../mcp/tool-registry");
|
|
12
|
+
const tool_schema_loader_1 = require("../tool-schema-loader");
|
|
13
|
+
const tool_executor_1 = require("../tool-executor");
|
|
14
|
+
class AnthropicProvider extends llm_provider_1.LlmProvider {
|
|
15
|
+
client;
|
|
16
|
+
contextManager;
|
|
17
|
+
toolSchemaLoader = new tool_schema_loader_1.ToolSchemaLoader();
|
|
18
|
+
toolExecutor = new tool_executor_1.ToolExecutor();
|
|
19
|
+
// Deprecated - kept for backwards compatibility during migration
|
|
20
|
+
toolSchemaCache = new Map();
|
|
21
|
+
constructor(config) {
|
|
22
|
+
super(config);
|
|
23
|
+
this.client = new sdk_1.default({
|
|
24
|
+
apiKey: config.apiKey,
|
|
25
|
+
});
|
|
26
|
+
this.contextManager = (0, context_manager_1.getContextManager)({
|
|
27
|
+
anthropicApiKey: config.apiKey,
|
|
28
|
+
safetyMarginPercent: 25,
|
|
29
|
+
enableAutoSummarization: true,
|
|
30
|
+
maxSummarizationChunks: 10,
|
|
31
|
+
});
|
|
32
|
+
}
|
|
33
|
+
async generateConfirmationMessage(userMessage) {
|
|
34
|
+
if (!this.isEnabled()) {
|
|
35
|
+
return `🤖 Processing your request with ${this.name}...`;
|
|
36
|
+
}
|
|
37
|
+
try {
|
|
38
|
+
const confirmationPrompt = this.getConfirmationPrompt(userMessage);
|
|
39
|
+
const response = await this.client.messages.create({
|
|
40
|
+
model: this.config.model || "claude-sonnet-4-20250514",
|
|
41
|
+
max_tokens: 100,
|
|
42
|
+
temperature: 0.7,
|
|
43
|
+
messages: [
|
|
44
|
+
{
|
|
45
|
+
role: "user",
|
|
46
|
+
content: confirmationPrompt,
|
|
47
|
+
},
|
|
48
|
+
],
|
|
49
|
+
});
|
|
50
|
+
const confirmationText = response.content
|
|
51
|
+
.filter((content) => content.type === "text")
|
|
52
|
+
.map((content) => content.text)
|
|
53
|
+
.join("\n")
|
|
54
|
+
.trim() ||
|
|
55
|
+
`🤖 Processing your request with ${this.name}...`;
|
|
56
|
+
return confirmationText;
|
|
57
|
+
}
|
|
58
|
+
catch (error) {
|
|
59
|
+
this.logError(error, "generateConfirmationMessage");
|
|
60
|
+
return `🤖 Processing your request with ${this.name}...`;
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
/**
 * Process one user message end-to-end with the Anthropic Messages API,
 * running an agentic loop: Claude is given minimal tool stubs, full tool
 * schemas are fetched on demand from the MCP server, requested tools are
 * executed, and the loop repeats until Claude stops calling tools or the
 * iteration cap (25) is hit — at which point a final, tool-free response
 * is forced. Token usage (including prompt-cache tokens) is accumulated
 * across every API call and priced at the end.
 *
 * No conversation history is kept between invocations: each message is
 * processed independently.
 *
 * @param userMessage  Incoming chat message object; fields read here include
 *                     mentionedOrDirectMessagedBotId, discussionId and
 *                     workspaceId (used for tracker logging).
 * @param mcpServerUrl  Base URL of the MCP server used for tool calls.
 * @param botMcpApiKey  API key forwarded to the MCP server.
 * @param botEmail      Bot identity forwarded into the system prompt.
 * @returns On success: { success: true, toolCalls, response, tokens }.
 *          On failure (disabled provider, context too large, API error):
 *          { success: false, error }.
 */
async processMessage(userMessage, mcpServerUrl, botMcpApiKey, botEmail) {
    if (!this.isEnabled()) {
        this.logger.error("Anthropic provider is not enabled or missing API key");
        return {
            success: false,
            error: "Anthropic provider is not enabled or missing API key",
        };
    }
    try {
        const systemPrompt = await this.createSystemPrompt({
            userMessage,
            mcpServerUrl,
            mcpServerApiKey: botMcpApiKey,
            botEmail,
        });
        const startTime = Date.now();
        // Load lightweight tool index with automatic filtering
        // Chat bot only gets READ + WRITE tools (no PLAYGROUND tools)
        const allowedGroups = [tool_registry_1.ToolGroup.READ, tool_registry_1.ToolGroup.WRITE];
        const toolIndex = await this.toolSchemaLoader.loadToolIndex({
            mcpServerUrl,
            mcpServerApiKey: botMcpApiKey,
            allowedGroups
        });
        // Convert to minimal tool stubs (full schemas loaded on-demand)
        const minimalTools = this.toolSchemaLoader.toMinimalToolDefinitions(toolIndex);
        // Track which tools have been loaded with full schemas
        // (key: tool name, value: full Anthropic tool definition).
        const loadedToolSchemas = new Map();
        this.logger.info("Connected to MCP server (on-demand mode)", {
            toolsForChatBot: toolIndex.length,
            allowedGroups: allowedGroups.join(', '),
            initialTokensSaved: "~28,000 tokens (95% reduction)"
        });
        // Initial prompt is simple and unlikely to need truncation, let Claude 4 handle it
        const finalSystemPrompt = systemPrompt;
        const finalUserContent = this.removeMentions(userMessage);
        // Check token count before initial API call (with minimal tools)
        const initialMessages = [{ role: "user", content: finalUserContent }];
        const initialTokenCount = this.contextManager.countTokens(finalSystemPrompt, initialMessages, minimalTools, 'anthropic');
        this.logger.info("Initial prompt token count (on-demand mode)", {
            totalTokens: initialTokenCount.totalTokens,
            systemPromptTokens: initialTokenCount.systemPromptTokens,
            messagesTokens: initialTokenCount.messagesTokens,
            toolsTokens: initialTokenCount.toolsTokens,
            safeLimit: initialTokenCount.limit.safeTokens,
            exceedsLimit: initialTokenCount.exceedsLimit,
            note: "Using minimal tool stubs - full schemas loaded on-demand"
        });
        // Hard failure: even the first request would not fit the context window.
        if (initialTokenCount.exceedsLimit) {
            this.logger.error("Initial prompt exceeds token limit", {
                totalTokens: initialTokenCount.totalTokens,
                safeLimit: initialTokenCount.limit.safeTokens,
            });
            return {
                success: false,
                error: `Initial prompt too long: ${initialTokenCount.totalTokens} tokens exceeds safe limit of ${initialTokenCount.limit.safeTokens} tokens`,
            };
        }
        // First API call: system prompt is marked ephemeral so Anthropic can
        // prompt-cache it across the iterations below.
        let response = await this.client.messages.create({
            model: this.config.model || "claude-sonnet-4-20250514",
            max_tokens: this.config.maxTokens || 2000,
            temperature: this.config.temperature || 0.7,
            system: [
                {
                    type: "text",
                    text: finalSystemPrompt,
                    cache_control: { type: "ephemeral" }
                }
            ],
            messages: [
                {
                    role: "user",
                    content: finalUserContent,
                },
            ],
            tools: minimalTools,
        });
        const toolCalls = [];
        // No conversation history - process each message independently
        const currentUserContent = this.removeMentions(userMessage);
        const conversationMessages = [{
                role: "user",
                content: currentUserContent,
            }];
        // Track token usage across all API calls
        let totalInputTokens = 0;
        let totalOutputTokens = 0;
        let totalCacheCreation = 0;
        let totalCacheRead = 0;
        // Accumulate tokens from initial response
        if (response.usage) {
            totalInputTokens += response.usage.input_tokens;
            totalOutputTokens += response.usage.output_tokens;
            totalCacheCreation += response.usage.cache_creation_input_tokens || 0;
            totalCacheRead += response.usage.cache_read_input_tokens || 0;
        }
        // Agentic loop: continue while Claude keeps requesting tools,
        // up to 25 iterations (schema-loading iterations count too).
        let maxIterations = 25;
        while (maxIterations > 0 &&
            response.content.some((content) => content.type === "tool_use")) {
            maxIterations--;
            if (maxIterations === 0) {
                this.logger.warn("Max iteration limit reached, generating final response with current tool results", {
                    botId: userMessage.mentionedOrDirectMessagedBotId,
                    totalIterations: 25,
                });
                break; // Exit the loop but continue to generate response
            }
            // Add assistant's response to conversation
            conversationMessages.push({
                role: "assistant",
                content: response.content,
            });
            // Build optimized tool set: full schemas for used tools, minimal stubs for others
            const optimizedTools = minimalTools.map((tool, index) => {
                let toolDef = tool;
                if (loadedToolSchemas.has(tool.name)) {
                    toolDef = loadedToolSchemas.get(tool.name);
                }
                // Add cache control to last tool in array for maximum caching
                if (index === minimalTools.length - 1) {
                    toolDef = { ...toolDef, cache_control: { type: "ephemeral" } };
                }
                return toolDef;
            });
            const toolResults = [];
            const schemasJustLoaded = [];
            // First pass: Check if any tools need schema loading
            for (const content of response.content) {
                if (content.type === "tool_use") {
                    if (!loadedToolSchemas.has(content.name)) {
                        this.logger.info("Tool schema needed - loading before execution", { toolName: content.name });
                        const fullSchema = await this.fetchMcpToolSchema(mcpServerUrl, botMcpApiKey, content.name);
                        loadedToolSchemas.set(content.name, fullSchema);
                        schemasJustLoaded.push(content.name);
                        this.logger.debug("Tool schema loaded and will be sent to Claude", {
                            toolName: content.name,
                            totalLoaded: loadedToolSchemas.size
                        });
                    }
                }
            }
            // If we just loaded schemas, don't execute yet - let Claude see them first
            if (schemasJustLoaded.length > 0) {
                this.logger.info("Loaded new tool schemas - giving Claude another iteration to see parameters", {
                    schemasLoaded: schemasJustLoaded,
                    // NOTE(review): 200 tokens/schema appears to be an estimate — confirm
                    tokensAdded: schemasJustLoaded.length * 200
                });
                // IMPORTANT: Anthropic requires tool_result for every tool_use
                // Add stub results telling Claude to retry with full schemas
                const stubResults = response.content
                    .filter(content => content.type === "tool_use")
                    .map(content => ({
                    type: "tool_result",
                    tool_use_id: content.id,
                    content: `Schema loaded. Full parameter details are now available. Please retry this tool call with the correct parameters based on the schema.`
                }));
                conversationMessages.push({
                    role: "user",
                    content: stubResults,
                });
                // Continue to next iteration - Claude will now see full schemas in optimizedTools
                continue;
            }
            // Second pass: Execute tools (schemas are already loaded)
            for (const content of response.content) {
                if (content.type === "tool_use") {
                    const toolCallStart = Date.now();
                    try {
                        this.logToolCall(content.name, content.input);
                        const result = await this.callMcpTool(mcpServerUrl, botMcpApiKey, {
                            name: content.name,
                            arguments: content.input ?? undefined,
                        });
                        const toolCallDuration = Date.now() - toolCallStart;
                        toolCalls.push({
                            toolName: content.name,
                            args: content.input,
                            result: result?.content,
                            duration: toolCallDuration,
                        });
                        // Note: Individual tool truncation removed - now handled at prompt level
                        const resultContent = JSON.stringify(result?.content || {});
                        toolResults.push({
                            type: "tool_result",
                            tool_use_id: content.id,
                            content: resultContent,
                        });
                        // Second logToolCall includes the measured duration.
                        this.logToolCall(content.name, content.input, toolCallDuration);
                        // Log detailed tool call to activity logger
                        this.agentTracker.logToolCall({
                            toolName: content.name,
                            provider: "Anthropic",
                            request: {
                                arguments: content.input,
                                endpoint: mcpServerUrl,
                                method: "POST",
                            },
                            response: {
                                success: true,
                                data: result?.content,
                            },
                            duration: toolCallDuration,
                            botId: userMessage.mentionedOrDirectMessagedBotId,
                            discussionId: userMessage.discussionId,
                            workspaceId: userMessage.workspaceId,
                        });
                    }
                    catch (error) {
                        const toolCallDuration = Date.now() - toolCallStart;
                        this.logger.toolError(content.name, this.name, error, toolCallDuration);
                        toolCalls.push({
                            toolName: content.name,
                            args: content.input,
                            error: error instanceof Error ? error.message : String(error),
                        });
                        // Tool failures are surfaced to Claude as is_error results so it
                        // can recover or explain, rather than aborting the whole request.
                        toolResults.push({
                            type: "tool_result",
                            tool_use_id: content.id,
                            content: `Error: ${error instanceof Error ? error.message : String(error)}`,
                            is_error: true,
                        });
                        // Log failed tool call to activity logger
                        this.agentTracker.logToolCall({
                            toolName: content.name,
                            provider: "Anthropic",
                            request: {
                                arguments: content.input,
                                endpoint: mcpServerUrl,
                                method: "POST",
                            },
                            response: {
                                success: false,
                                error: error instanceof Error ? error.message : String(error),
                            },
                            duration: toolCallDuration,
                            botId: userMessage.mentionedOrDirectMessagedBotId,
                            discussionId: userMessage.discussionId,
                            workspaceId: userMessage.workspaceId,
                        });
                    }
                }
            }
            if (toolResults.length > 0) {
                conversationMessages.push({
                    role: "user",
                    content: toolResults,
                });
                // Check token count before API call with tool results
                const tokenCount = this.contextManager.countTokens(finalSystemPrompt, conversationMessages, optimizedTools, 'anthropic');
                this.logger.info("Token count after tool results (on-demand mode)", {
                    totalTokens: tokenCount.totalTokens,
                    messagesTokens: tokenCount.messagesTokens,
                    toolsTokens: tokenCount.toolsTokens,
                    safeLimit: tokenCount.limit.safeTokens,
                    exceedsLimit: tokenCount.exceedsLimit,
                    messageCount: conversationMessages.length,
                    loadedSchemas: loadedToolSchemas.size,
                    note: `${loadedToolSchemas.size} tools with full schemas, ${minimalTools.length - loadedToolSchemas.size} minimal stubs`
                });
                // Check if approaching limit (80%)
                if (this.contextManager.approachingLimit(tokenCount.totalTokens, 'anthropic')) {
                    this.logger.warn("Approaching token limit", {
                        totalTokens: tokenCount.totalTokens,
                        safeLimit: tokenCount.limit.safeTokens,
                        percentUsed: Math.round((tokenCount.totalTokens / tokenCount.limit.safeTokens) * 100),
                    });
                }
                // If exceeds limit, summarize context
                let messagesToSend = conversationMessages;
                if (tokenCount.exceedsLimit) {
                    this.logger.warn("Context exceeds token limit, starting summarization", {
                        totalTokens: tokenCount.totalTokens,
                        safeLimit: tokenCount.limit.safeTokens,
                        messageCount: conversationMessages.length,
                    });
                    try {
                        const summarizationResult = await this.contextManager.summarizeContext(finalSystemPrompt, conversationMessages, 'anthropic', finalUserContent);
                        this.logger.info("Context summarization successful", {
                            originalTokens: summarizationResult.originalTokens,
                            summarizedTokens: summarizationResult.summarizedTokens,
                            reductionPercent: summarizationResult.reductionPercent,
                            chunksProcessed: summarizationResult.chunksProcessed,
                            originalMessages: conversationMessages.length,
                            summarizedMessages: summarizationResult.summarizedMessages.length,
                        });
                        // Replace conversation messages with summarized version
                        // Keep first user message, add summarized results
                        messagesToSend = [
                            conversationMessages[0], // Original user query
                            ...summarizationResult.summarizedMessages,
                        ];
                        // Verify summarized context is within limits
                        const summarizedTokenCount = this.contextManager.countTokens(finalSystemPrompt, messagesToSend, optimizedTools, 'anthropic');
                        if (summarizedTokenCount.exceedsLimit) {
                            this.logger.error("Summarized context still exceeds limit", {
                                summarizedTokens: summarizedTokenCount.totalTokens,
                                safeLimit: summarizedTokenCount.limit.safeTokens,
                            });
                            return {
                                success: false,
                                error: `Context too large even after summarization: ${summarizedTokenCount.totalTokens} tokens exceeds safe limit of ${summarizedTokenCount.limit.safeTokens} tokens`,
                            };
                        }
                    }
                    catch (error) {
                        this.logger.error("Summarization failed", error, {
                            originalTokens: tokenCount.totalTokens,
                            safeLimit: tokenCount.limit.safeTokens,
                        });
                        return {
                            success: false,
                            error: `Context too large and summarization failed: ${error instanceof Error ? error.message : String(error)}`,
                        };
                    }
                }
                // Make API call with potentially summarized messages and optimized tools
                response = await this.client.messages.create({
                    model: this.config.model || "claude-sonnet-4-20250514",
                    max_tokens: this.config.maxTokens || 2000,
                    temperature: this.config.temperature || 0.7,
                    system: [
                        {
                            type: "text",
                            text: finalSystemPrompt,
                            cache_control: { type: "ephemeral" }
                        }
                    ],
                    messages: messagesToSend,
                    tools: optimizedTools,
                });
                // Accumulate tokens from this API call
                if (response.usage) {
                    totalInputTokens += response.usage.input_tokens;
                    totalOutputTokens += response.usage.output_tokens;
                    totalCacheCreation += response.usage.cache_creation_input_tokens || 0;
                    totalCacheRead += response.usage.cache_read_input_tokens || 0;
                }
            }
        }
        // If we hit the iteration limit, force a final response without tool calls
        if (maxIterations === 0 && response.content.some((content) => content.type === "tool_use")) {
            this.logger.info("Forcing final response without tool calls after hitting iteration limit", {
                botId: userMessage.mentionedOrDirectMessagedBotId,
            });
            // Add final instruction (centralized truncation will handle limits)
            const finalMessages = [...conversationMessages, {
                    role: "user",
                    content: "Please provide a summary response based on the information gathered from the tools above. Do not call any more tools."
                }];
            // Check token count before final API call
            const finalTokenCount = this.contextManager.countTokens(finalSystemPrompt, finalMessages, [], // No tools in final call
            'anthropic');
            this.logger.info("Final forced response token count", {
                totalTokens: finalTokenCount.totalTokens,
                safeLimit: finalTokenCount.limit.safeTokens,
                exceedsLimit: finalTokenCount.exceedsLimit,
            });
            let finalMessagesToSend = finalMessages;
            if (finalTokenCount.exceedsLimit) {
                this.logger.warn("Final context exceeds limit, summarizing", {
                    totalTokens: finalTokenCount.totalTokens,
                    safeLimit: finalTokenCount.limit.safeTokens,
                });
                try {
                    const summarizationResult = await this.contextManager.summarizeContext(finalSystemPrompt, conversationMessages, // Summarize conversation before final instruction
                    'anthropic', finalUserContent);
                    finalMessagesToSend = [
                        conversationMessages[0], // Original user query
                        ...summarizationResult.summarizedMessages,
                        {
                            role: "user",
                            content: "Please provide a summary response based on the information gathered from the tools above. Do not call any more tools."
                        }
                    ];
                    this.logger.info("Final context summarized", {
                        originalTokens: summarizationResult.originalTokens,
                        summarizedTokens: summarizationResult.summarizedTokens,
                        reductionPercent: summarizationResult.reductionPercent,
                    });
                }
                catch (error) {
                    this.logger.error("Final summarization failed", error);
                    // Continue with truncated messages as fallback
                }
            }
            // Make final call without tools
            response = await this.client.messages.create({
                model: this.config.model || "claude-sonnet-4-20250514",
                max_tokens: this.config.maxTokens || 2000,
                temperature: this.config.temperature || 0.7,
                system: [
                    {
                        type: "text",
                        text: finalSystemPrompt,
                        cache_control: { type: "ephemeral" }
                    }
                ],
                messages: finalMessagesToSend,
                // No tools - force text response only
            });
            // Accumulate tokens from final API call
            if (response.usage) {
                totalInputTokens += response.usage.input_tokens;
                totalOutputTokens += response.usage.output_tokens;
                totalCacheCreation += response.usage.cache_creation_input_tokens || 0;
                totalCacheRead += response.usage.cache_read_input_tokens || 0;
            }
        }
        // No cleanup needed for HTTP-based MCP calls
        const duration = Date.now() - startTime;
        // Extract the final text from the last response (joins multiple text blocks).
        const responseText = response.content
            .filter((content) => content.type === "text")
            .map((content) => content.text)
            .join("\n")
            .trim();
        // Calculate total cost
        const model = this.config.model || "claude-sonnet-4-20250514";
        const cost = (0, token_pricing_1.calculateTokenCost)({
            input_tokens: totalInputTokens,
            output_tokens: totalOutputTokens,
            cache_creation_input_tokens: totalCacheCreation,
            cache_read_input_tokens: totalCacheRead,
        }, model);
        this.logger.info("Processed request", {
            duration,
            toolCallCount: toolCalls.length,
            maxIterationsReached: maxIterations === 0,
            tokens: {
                input: totalInputTokens,
                output: totalOutputTokens,
                total: totalInputTokens + totalOutputTokens,
                cacheCreation: totalCacheCreation,
                cacheRead: totalCacheRead,
                cost: cost,
            }
        });
        return {
            success: true,
            toolCalls,
            response: responseText || "Task completed successfully.",
            tokens: {
                input: totalInputTokens,
                output: totalOutputTokens,
                total: totalInputTokens + totalOutputTokens,
                cacheCreation: totalCacheCreation,
                cacheRead: totalCacheRead,
                cost: cost,
            },
        };
    }
    catch (error) {
        this.logError(error, "processMessage");
        return {
            success: false,
            error: `Anthropic processing failed: ${error.message || error}`,
        };
    }
}
|
|
521
|
+
/**
|
|
522
|
+
* Fetch specific tool schema on-demand
|
|
523
|
+
* Token-efficient: ~690 tokens per tool vs loading all tools upfront
|
|
524
|
+
*/
|
|
525
|
+
async fetchMcpToolSchema(mcpServerUrl, mcpServerApiKey, toolName) {
|
|
526
|
+
// Check cache first
|
|
527
|
+
const cacheKey = `${mcpServerUrl}:${toolName}`;
|
|
528
|
+
if (this.toolSchemaCache.has(cacheKey)) {
|
|
529
|
+
this.logger.debug('Using cached tool schema', { toolName });
|
|
530
|
+
return this.toolSchemaCache.get(cacheKey);
|
|
531
|
+
}
|
|
532
|
+
const url = `${mcpServerUrl}${mcpServerUrl.includes("?") ? "&" : "?"}apiKey=${mcpServerApiKey}`;
|
|
533
|
+
const response = await fetch(url, {
|
|
534
|
+
method: "POST",
|
|
535
|
+
headers: {
|
|
536
|
+
"Content-Type": "application/json",
|
|
537
|
+
Accept: "application/json, text/event-stream",
|
|
538
|
+
},
|
|
539
|
+
body: JSON.stringify({
|
|
540
|
+
jsonrpc: "2.0",
|
|
541
|
+
id: Math.random().toString(36).substring(2),
|
|
542
|
+
method: "tools/get_schema",
|
|
543
|
+
params: { name: toolName },
|
|
544
|
+
}),
|
|
545
|
+
});
|
|
546
|
+
if (!response.ok) {
|
|
547
|
+
throw new Error(`MCP server responded with ${response.status}: ${response.statusText}`);
|
|
548
|
+
}
|
|
549
|
+
// Parse SSE response format
|
|
550
|
+
const responseText = await response.text();
|
|
551
|
+
const lines = responseText.split("\n");
|
|
552
|
+
let jsonData = null;
|
|
553
|
+
for (const line of lines) {
|
|
554
|
+
if (line.startsWith("data: ")) {
|
|
555
|
+
try {
|
|
556
|
+
jsonData = JSON.parse(line.substring(6));
|
|
557
|
+
break;
|
|
558
|
+
}
|
|
559
|
+
catch (e) {
|
|
560
|
+
// Skip non-JSON lines
|
|
561
|
+
}
|
|
562
|
+
}
|
|
563
|
+
}
|
|
564
|
+
if (!jsonData) {
|
|
565
|
+
throw new Error("Failed to parse MCP server response");
|
|
566
|
+
}
|
|
567
|
+
if (jsonData.error) {
|
|
568
|
+
throw new Error(`MCP tool schema error: ${jsonData.error.message || jsonData.error}`);
|
|
569
|
+
}
|
|
570
|
+
// Convert MCP tool format to Anthropic tool format
|
|
571
|
+
const toolSchema = {
|
|
572
|
+
name: jsonData.result.name,
|
|
573
|
+
description: jsonData.result.description || `Tool: ${jsonData.result.name}`,
|
|
574
|
+
input_schema: jsonData.result.inputSchema || {
|
|
575
|
+
type: "object",
|
|
576
|
+
properties: {},
|
|
577
|
+
required: [],
|
|
578
|
+
},
|
|
579
|
+
};
|
|
580
|
+
// Cache the schema
|
|
581
|
+
this.toolSchemaCache.set(cacheKey, toolSchema);
|
|
582
|
+
this.logger.debug('Cached tool schema', { toolName });
|
|
583
|
+
return toolSchema;
|
|
584
|
+
}
|
|
585
|
+
async callMcpTool(mcpServerUrl, mcpServerApiKey, request) {
|
|
586
|
+
const url = `${mcpServerUrl}${mcpServerUrl.includes("?") ? "&" : "?"}apiKey=${mcpServerApiKey}`;
|
|
587
|
+
const response = await fetch(url, {
|
|
588
|
+
method: "POST",
|
|
589
|
+
headers: {
|
|
590
|
+
"Content-Type": "application/json",
|
|
591
|
+
Accept: "application/json, text/event-stream",
|
|
592
|
+
},
|
|
593
|
+
body: JSON.stringify({
|
|
594
|
+
jsonrpc: "2.0",
|
|
595
|
+
id: Math.random().toString(36).substring(2),
|
|
596
|
+
method: "tools/call",
|
|
597
|
+
params: {
|
|
598
|
+
name: request.name,
|
|
599
|
+
arguments: request.arguments || {},
|
|
600
|
+
},
|
|
601
|
+
}),
|
|
602
|
+
});
|
|
603
|
+
if (!response.ok) {
|
|
604
|
+
throw new Error(`MCP tool call failed with ${response.status}: ${response.statusText}`);
|
|
605
|
+
}
|
|
606
|
+
// Parse SSE response format
|
|
607
|
+
const responseText = await response.text();
|
|
608
|
+
const lines = responseText.split("\n");
|
|
609
|
+
let jsonData = null;
|
|
610
|
+
for (const line of lines) {
|
|
611
|
+
if (line.startsWith("data: ")) {
|
|
612
|
+
try {
|
|
613
|
+
jsonData = JSON.parse(line.substring(6));
|
|
614
|
+
break;
|
|
615
|
+
}
|
|
616
|
+
catch (e) {
|
|
617
|
+
// Skip non-JSON lines
|
|
618
|
+
}
|
|
619
|
+
}
|
|
620
|
+
}
|
|
621
|
+
if (!jsonData) {
|
|
622
|
+
throw new Error("Failed to parse MCP tool response");
|
|
623
|
+
}
|
|
624
|
+
if (jsonData.error) {
|
|
625
|
+
throw new Error(`MCP tool error: ${jsonData.error.message || jsonData.error}`);
|
|
626
|
+
}
|
|
627
|
+
return jsonData.result;
|
|
628
|
+
}
|
|
629
|
+
}
|
|
630
|
+
exports.AnthropicProvider = AnthropicProvider;
|
|
631
|
+
//# sourceMappingURL=anthropic-provider.js.map
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { LlmProviderConfig, ChatMessage, McpResponse } from "../types";
|
|
2
|
+
import { Logger } from "../../lib/logger";
|
|
3
|
+
import { AgentTracker } from "../agent-tracker";
|
|
4
|
+
import { PromptLengthManager } from "../../lib/prompt-length-manager";
|
|
5
|
+
/**
 * Abstract base class for LLM providers.
 *
 * A concrete provider (e.g. the AnthropicProvider exported alongside this
 * declaration) turns an incoming chat message into an LLM conversation,
 * invokes MCP tools over HTTP, and returns the final response with token
 * accounting. Subclasses must implement `processMessage`,
 * `generateConfirmationMessage`, and `callMcpTool`.
 */
export declare abstract class LlmProvider {
    /** Human-readable provider name, fixed by the concrete subclass. */
    readonly name: string;
    /** Provider type discriminator, fixed by the concrete subclass. */
    readonly type: string;
    /** Provider configuration — model, maxTokens, temperature, etc. */
    protected config: LlmProviderConfig;
    /** Structured logger used for info/debug/error reporting. */
    protected logger: Logger;
    /** Tracks agent activity across requests. */
    protected agentTracker: AgentTracker;
    /** Manages prompt size (truncation/summarization) before API calls. */
    protected promptLengthManager: PromptLengthManager;
    constructor(config: LlmProviderConfig);
    /** Whether this provider is enabled per its configuration. */
    isEnabled(): boolean;
    /**
     * Process one chat message end-to-end: build the system prompt, run the
     * LLM/tool-call loop against the MCP server, and return the final result.
     *
     * @param message      The incoming chat message to process.
     * @param mcpServerUrl Base URL of the MCP server used for tool calls.
     * @param botMcpApiKey API key authenticating tool calls to the MCP server.
     * @param botEmail     The bot's own email/identity.
     */
    abstract processMessage(message: ChatMessage, mcpServerUrl: string, botMcpApiKey: string, botEmail: string): Promise<McpResponse>;
    /**
     * Generate a personalized confirmation message for the user
     */
    abstract generateConfirmationMessage(userMessage: ChatMessage): Promise<string>;
    /**
     * Get the confirmation prompt template
     */
    protected getConfirmationPrompt(userMessage: ChatMessage): string;
    /**
     * Fetch additional context for the bot to better understand the current situation
     */
    protected fetchContextualInformation(mcpServerUrl: string, mcpServerApiKey: string, message: ChatMessage): Promise<{
        workspaceInfo: string;
        recentMessages: string;
    }>;
    /**
     * Execute a single MCP tool call. Implemented by the concrete provider
     * (the Anthropic implementation posts a JSON-RPC "tools/call" over HTTP).
     */
    protected abstract callMcpTool(mcpServerUrl: string, mcpServerApiKey: string, request: {
        name: string;
        arguments: any;
    }): Promise<any>;
    /** Log a tool invocation with its arguments and optional duration (ms). */
    protected logToolCall(toolName: string, args: any, duration?: number): void;
    /** Log an error with a short context label identifying the call site. */
    protected logError(error: any, context: string): void;
    /**
     * Build the system prompt for a request from the user message and the
     * MCP server/bot identity details.
     */
    protected createSystemPrompt(options: {
        userMessage: ChatMessage;
        mcpServerUrl: string;
        mcpServerApiKey: string;
        botEmail: string;
    }): Promise<string>;
    private buildWorkspacesPrompt;
    private fetchAgentSpecificPrompt;
    private buildSystemPrompt;
    /** Strip @-mention markup from the message text before processing. */
    protected removeMentions(message: ChatMessage): string;
}
|
|
47
|
+
//# sourceMappingURL=llm-provider.d.ts.map
|