@hailer/mcp 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (163)
  1. package/.claude/commands/tool-builder.md +37 -0
  2. package/.claude/commands/ws-pull.md +44 -0
  3. package/.claude/settings.json +8 -0
  4. package/.claude/settings.local.json +49 -0
  5. package/.claude/skills/activity-api/SKILL.md +96 -0
  6. package/.claude/skills/activity-api/references/activity-endpoints.md +845 -0
  7. package/.claude/skills/add-app-member-skill/SKILL.md +977 -0
  8. package/.claude/skills/agent-building/SKILL.md +243 -0
  9. package/.claude/skills/agent-building/references/architecture-patterns.md +446 -0
  10. package/.claude/skills/agent-building/references/code-examples.md +587 -0
  11. package/.claude/skills/agent-building/references/implementation-guide.md +619 -0
  12. package/.claude/skills/app-api/SKILL.md +219 -0
  13. package/.claude/skills/app-api/references/app-endpoints.md +759 -0
  14. package/.claude/skills/building-hailer-apps-skill/SKILL.md +548 -0
  15. package/.claude/skills/create-app-skill/SKILL.md +1101 -0
  16. package/.claude/skills/create-insight-skill/SKILL.md +1317 -0
  17. package/.claude/skills/get-insight-data-skill/SKILL.md +1053 -0
  18. package/.claude/skills/hailer-api/SKILL.md +283 -0
  19. package/.claude/skills/hailer-api/references/activities.md +620 -0
  20. package/.claude/skills/hailer-api/references/authentication.md +216 -0
  21. package/.claude/skills/hailer-api/references/datasets.md +437 -0
  22. package/.claude/skills/hailer-api/references/files.md +301 -0
  23. package/.claude/skills/hailer-api/references/insights.md +469 -0
  24. package/.claude/skills/hailer-api/references/workflows.md +720 -0
  25. package/.claude/skills/hailer-api/references/workspaces-users.md +445 -0
  26. package/.claude/skills/insight-api/SKILL.md +185 -0
  27. package/.claude/skills/insight-api/references/insight-endpoints.md +514 -0
  28. package/.claude/skills/install-workflow-skill/SKILL.md +1056 -0
  29. package/.claude/skills/list-apps-skill/SKILL.md +1010 -0
  30. package/.claude/skills/list-workflows-minimal-skill/SKILL.md +992 -0
  31. package/.claude/skills/local-first-skill/SKILL.md +570 -0
  32. package/.claude/skills/mcp-tools/SKILL.md +419 -0
  33. package/.claude/skills/mcp-tools/references/api-endpoints.md +499 -0
  34. package/.claude/skills/mcp-tools/references/data-structures.md +554 -0
  35. package/.claude/skills/mcp-tools/references/implementation-patterns.md +717 -0
  36. package/.claude/skills/preview-insight-skill/SKILL.md +1290 -0
  37. package/.claude/skills/publish-hailer-app-skill/SKILL.md +453 -0
  38. package/.claude/skills/remove-app-member-skill/SKILL.md +671 -0
  39. package/.claude/skills/remove-app-skill/SKILL.md +985 -0
  40. package/.claude/skills/remove-insight-skill/SKILL.md +1011 -0
  41. package/.claude/skills/remove-workflow-skill/SKILL.md +920 -0
  42. package/.claude/skills/scaffold-hailer-app-skill/SKILL.md +1034 -0
  43. package/.claude/skills/skill-testing/README.md +137 -0
  44. package/.claude/skills/skill-testing/SKILL.md +348 -0
  45. package/.claude/skills/skill-testing/references/test-patterns.md +705 -0
  46. package/.claude/skills/skill-testing/references/testing-guide.md +603 -0
  47. package/.claude/skills/skill-testing/references/validation-checklist.md +537 -0
  48. package/.claude/skills/tool-builder/SKILL.md +328 -0
  49. package/.claude/skills/update-app-skill/SKILL.md +970 -0
  50. package/.claude/skills/update-workflow-field-skill/SKILL.md +1098 -0
  51. package/.env.example +81 -0
  52. package/.mcp.json +13 -0
  53. package/README.md +297 -0
  54. package/dist/app.d.ts +4 -0
  55. package/dist/app.js +74 -0
  56. package/dist/cli.d.ts +3 -0
  57. package/dist/cli.js +5 -0
  58. package/dist/client/adaptive-documentation-bot.d.ts +108 -0
  59. package/dist/client/adaptive-documentation-bot.js +475 -0
  60. package/dist/client/adaptive-documentation-types.d.ts +66 -0
  61. package/dist/client/adaptive-documentation-types.js +9 -0
  62. package/dist/client/agent-activity-bot.d.ts +51 -0
  63. package/dist/client/agent-activity-bot.js +166 -0
  64. package/dist/client/agent-tracker.d.ts +499 -0
  65. package/dist/client/agent-tracker.js +659 -0
  66. package/dist/client/description-updater.d.ts +56 -0
  67. package/dist/client/description-updater.js +259 -0
  68. package/dist/client/log-parser.d.ts +72 -0
  69. package/dist/client/log-parser.js +387 -0
  70. package/dist/client/mcp-client.d.ts +50 -0
  71. package/dist/client/mcp-client.js +532 -0
  72. package/dist/client/message-processor.d.ts +35 -0
  73. package/dist/client/message-processor.js +352 -0
  74. package/dist/client/multi-bot-manager.d.ts +24 -0
  75. package/dist/client/multi-bot-manager.js +74 -0
  76. package/dist/client/providers/anthropic-provider.d.ts +19 -0
  77. package/dist/client/providers/anthropic-provider.js +631 -0
  78. package/dist/client/providers/llm-provider.d.ts +47 -0
  79. package/dist/client/providers/llm-provider.js +367 -0
  80. package/dist/client/providers/openai-provider.d.ts +23 -0
  81. package/dist/client/providers/openai-provider.js +621 -0
  82. package/dist/client/simple-llm-caller.d.ts +19 -0
  83. package/dist/client/simple-llm-caller.js +100 -0
  84. package/dist/client/skill-generator.d.ts +81 -0
  85. package/dist/client/skill-generator.js +386 -0
  86. package/dist/client/test-adaptive-bot.d.ts +9 -0
  87. package/dist/client/test-adaptive-bot.js +82 -0
  88. package/dist/client/token-pricing.d.ts +38 -0
  89. package/dist/client/token-pricing.js +127 -0
  90. package/dist/client/token-tracker.d.ts +232 -0
  91. package/dist/client/token-tracker.js +457 -0
  92. package/dist/client/token-usage-bot.d.ts +53 -0
  93. package/dist/client/token-usage-bot.js +153 -0
  94. package/dist/client/tool-executor.d.ts +69 -0
  95. package/dist/client/tool-executor.js +159 -0
  96. package/dist/client/tool-schema-loader.d.ts +60 -0
  97. package/dist/client/tool-schema-loader.js +178 -0
  98. package/dist/client/types.d.ts +69 -0
  99. package/dist/client/types.js +7 -0
  100. package/dist/config.d.ts +162 -0
  101. package/dist/config.js +296 -0
  102. package/dist/core.d.ts +26 -0
  103. package/dist/core.js +147 -0
  104. package/dist/lib/context-manager.d.ts +111 -0
  105. package/dist/lib/context-manager.js +431 -0
  106. package/dist/lib/logger.d.ts +74 -0
  107. package/dist/lib/logger.js +277 -0
  108. package/dist/lib/materialize.d.ts +3 -0
  109. package/dist/lib/materialize.js +101 -0
  110. package/dist/lib/normalizedName.d.ts +7 -0
  111. package/dist/lib/normalizedName.js +48 -0
  112. package/dist/lib/prompt-length-manager.d.ts +81 -0
  113. package/dist/lib/prompt-length-manager.js +457 -0
  114. package/dist/lib/terminal-prompt.d.ts +9 -0
  115. package/dist/lib/terminal-prompt.js +108 -0
  116. package/dist/mcp/UserContextCache.d.ts +56 -0
  117. package/dist/mcp/UserContextCache.js +163 -0
  118. package/dist/mcp/auth.d.ts +2 -0
  119. package/dist/mcp/auth.js +29 -0
  120. package/dist/mcp/hailer-clients.d.ts +42 -0
  121. package/dist/mcp/hailer-clients.js +246 -0
  122. package/dist/mcp/signal-handler.d.ts +45 -0
  123. package/dist/mcp/signal-handler.js +317 -0
  124. package/dist/mcp/tool-registry.d.ts +100 -0
  125. package/dist/mcp/tool-registry.js +306 -0
  126. package/dist/mcp/tools/activity.d.ts +15 -0
  127. package/dist/mcp/tools/activity.js +955 -0
  128. package/dist/mcp/tools/app.d.ts +20 -0
  129. package/dist/mcp/tools/app.js +1488 -0
  130. package/dist/mcp/tools/discussion.d.ts +19 -0
  131. package/dist/mcp/tools/discussion.js +950 -0
  132. package/dist/mcp/tools/file.d.ts +15 -0
  133. package/dist/mcp/tools/file.js +119 -0
  134. package/dist/mcp/tools/insight.d.ts +17 -0
  135. package/dist/mcp/tools/insight.js +806 -0
  136. package/dist/mcp/tools/skill.d.ts +10 -0
  137. package/dist/mcp/tools/skill.js +279 -0
  138. package/dist/mcp/tools/user.d.ts +10 -0
  139. package/dist/mcp/tools/user.js +108 -0
  140. package/dist/mcp/tools/workflow-template.d.ts +19 -0
  141. package/dist/mcp/tools/workflow-template.js +822 -0
  142. package/dist/mcp/tools/workflow.d.ts +18 -0
  143. package/dist/mcp/tools/workflow.js +1362 -0
  144. package/dist/mcp/utils/api-errors.d.ts +45 -0
  145. package/dist/mcp/utils/api-errors.js +160 -0
  146. package/dist/mcp/utils/data-transformers.d.ts +102 -0
  147. package/dist/mcp/utils/data-transformers.js +194 -0
  148. package/dist/mcp/utils/file-upload.d.ts +33 -0
  149. package/dist/mcp/utils/file-upload.js +148 -0
  150. package/dist/mcp/utils/hailer-api-client.d.ts +120 -0
  151. package/dist/mcp/utils/hailer-api-client.js +323 -0
  152. package/dist/mcp/utils/index.d.ts +13 -0
  153. package/dist/mcp/utils/index.js +39 -0
  154. package/dist/mcp/utils/logger.d.ts +42 -0
  155. package/dist/mcp/utils/logger.js +103 -0
  156. package/dist/mcp/utils/types.d.ts +286 -0
  157. package/dist/mcp/utils/types.js +7 -0
  158. package/dist/mcp/workspace-cache.d.ts +42 -0
  159. package/dist/mcp/workspace-cache.js +97 -0
  160. package/dist/mcp-server.d.ts +42 -0
  161. package/dist/mcp-server.js +280 -0
  162. package/package.json +56 -0
  163. package/tsconfig.json +23 -0
@@ -0,0 +1,431 @@
1
+ "use strict";
2
+ /**
3
+ * Context Manager
4
+ *
5
+ * Handles token counting and automatic summarization to prevent "prompt too long" errors.
6
+ * Uses character-based token estimation and provider-specific limits with safety margins.
7
+ */
8
+ var __importDefault = (this && this.__importDefault) || function (mod) {
9
+ return (mod && mod.__esModule) ? mod : { "default": mod };
10
+ };
11
+ Object.defineProperty(exports, "__esModule", { value: true });
12
+ exports.ContextManager = void 0;
13
+ exports.getContextManager = getContextManager;
14
+ const logger_1 = require("./logger");
15
+ const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
16
+ const openai_1 = __importDefault(require("openai"));
17
+ // ================================================================================
18
+ // CONTEXT MANAGER
19
+ // ================================================================================
20
+ class ContextManager {
21
+ logger;
22
+ config;
23
+ // Provider limits (max context window)
24
+ PROVIDER_LIMITS = {
25
+ anthropic: 200_000, // Claude Sonnet 4
26
+ openai: 128_000, // GPT-4o
27
+ };
28
+ constructor(config = {}) {
29
+ this.config = {
30
+ safetyMarginPercent: 25,
31
+ enableAutoSummarization: true,
32
+ maxSummarizationChunks: 10,
33
+ ...config,
34
+ };
35
+ this.logger = (0, logger_1.createLogger)({ component: "ContextManager" });
36
+ this.logger.info("ContextManager initialized", {
37
+ safetyMargin: `${this.config.safetyMarginPercent}%`,
38
+ autoSummarization: this.config.enableAutoSummarization,
39
+ maxChunks: this.config.maxSummarizationChunks,
40
+ });
41
+ }
42
+ // ================================================================================
43
+ // TOKEN COUNTING
44
+ // ================================================================================
45
+ /**
46
+ * Estimate tokens from text using character-based approximation
47
+ * Formula: 1 token ≈ 4 characters
48
+ */
49
+ estimateTokens(text) {
50
+ return Math.ceil(text.length / 4);
51
+ }
52
+ /**
53
+ * Calculate token limits for a provider with safety margin
54
+ */
55
+ getTokenLimits(provider) {
56
+ const maxTokens = this.PROVIDER_LIMITS[provider];
57
+ const safetyMargin = this.config.safetyMarginPercent / 100;
58
+ const safeTokens = Math.floor(maxTokens * (1 - safetyMargin));
59
+ return { maxTokens, safeTokens, safetyMargin };
60
+ }
61
+ /**
62
+ * Count tokens for system prompt
63
+ */
64
+ countSystemPromptTokens(systemPrompt) {
65
+ if (typeof systemPrompt === 'string') {
66
+ return this.estimateTokens(systemPrompt);
67
+ }
68
+ // Handle array or complex system prompt formats
69
+ const text = JSON.stringify(systemPrompt);
70
+ return this.estimateTokens(text);
71
+ }
72
+ /**
73
+ * Count tokens for messages array
74
+ */
75
+ countMessagesTokens(messages) {
76
+ let total = 0;
77
+ for (const message of messages) {
78
+ // Handle different message formats
79
+ if (typeof message.content === 'string') {
80
+ total += this.estimateTokens(message.content);
81
+ }
82
+ else if (Array.isArray(message.content)) {
83
+ // Anthropic format with content blocks
84
+ for (const block of message.content) {
85
+ if (block.type === 'text' && block.text) {
86
+ total += this.estimateTokens(block.text);
87
+ }
88
+ else if (block.type === 'tool_result' && block.content) {
89
+ total += this.estimateTokens(typeof block.content === 'string' ? block.content : JSON.stringify(block.content));
90
+ }
91
+ else {
92
+ // Other content types (tool_use, etc.)
93
+ total += this.estimateTokens(JSON.stringify(block));
94
+ }
95
+ }
96
+ }
97
+ else if (message.content) {
98
+ // Fallback: stringify the content
99
+ total += this.estimateTokens(JSON.stringify(message.content));
100
+ }
101
+ // Add overhead for role and metadata (~10 tokens per message)
102
+ total += 10;
103
+ }
104
+ return total;
105
+ }
106
+ /**
107
+ * Count tokens for tools definitions
108
+ */
109
+ countToolsTokens(tools) {
110
+ if (!tools || tools.length === 0) {
111
+ return 0;
112
+ }
113
+ // Estimate tools definition size
114
+ const toolsText = JSON.stringify(tools);
115
+ return this.estimateTokens(toolsText);
116
+ }
117
+ /**
118
+ * Count total tokens for a complete prompt
119
+ */
120
+ countTokens(systemPrompt, messages, tools, provider) {
121
+ const limits = this.getTokenLimits(provider);
122
+ const systemPromptTokens = this.countSystemPromptTokens(systemPrompt);
123
+ const messagesTokens = this.countMessagesTokens(messages);
124
+ const toolsTokens = this.countToolsTokens(tools);
125
+ const totalTokens = systemPromptTokens + messagesTokens + toolsTokens;
126
+ const exceedsLimit = totalTokens > limits.safeTokens;
127
+ return {
128
+ totalTokens,
129
+ systemPromptTokens,
130
+ messagesTokens,
131
+ toolsTokens,
132
+ exceedsLimit,
133
+ provider,
134
+ limit: limits,
135
+ };
136
+ }
137
+ // ================================================================================
138
+ // MESSAGE SPLITTING
139
+ // ================================================================================
140
+ /**
141
+ * Split messages into logical groups based on tool call patterns
142
+ * Groups together: assistant message with tool_use -> tool results
143
+ */
144
+ splitIntoToolResultGroups(messages, provider) {
145
+ const groups = [];
146
+ let currentGroup = [];
147
+ let startIndex = 0;
148
+ for (let i = 0; i < messages.length; i++) {
149
+ const message = messages[i];
150
+ currentGroup.push(message);
151
+ // Check if this completes a tool call cycle
152
+ const isToolResult = provider === 'anthropic'
153
+ ? (Array.isArray(message.content) && message.content.some((c) => c.type === 'tool_result'))
154
+ : message.role === 'tool';
155
+ if (isToolResult) {
156
+ // End current group
157
+ const tokens = this.countMessagesTokens(currentGroup);
158
+ groups.push({
159
+ messages: [...currentGroup],
160
+ tokens,
161
+ startIndex,
162
+ endIndex: i,
163
+ });
164
+ currentGroup = [];
165
+ startIndex = i + 1;
166
+ }
167
+ }
168
+ // Add any remaining messages as final group
169
+ if (currentGroup.length > 0) {
170
+ const tokens = this.countMessagesTokens(currentGroup);
171
+ groups.push({
172
+ messages: currentGroup,
173
+ tokens,
174
+ startIndex,
175
+ endIndex: messages.length - 1,
176
+ });
177
+ }
178
+ return groups;
179
+ }
180
+ /**
181
+ * Split tool result groups into chunks that fit within safe limits
182
+ */
183
+ splitIntoChunks(groups, systemPromptTokens, userQueryTokens, provider) {
184
+ const limits = this.getTokenLimits(provider);
185
+ const baseTokens = systemPromptTokens + userQueryTokens + 1000; // +1000 for overhead and instructions
186
+ const availableTokens = limits.safeTokens - baseTokens;
187
+ const chunks = [];
188
+ let currentChunk = [];
189
+ let currentChunkTokens = 0;
190
+ for (const group of groups) {
191
+ if (currentChunkTokens + group.tokens > availableTokens && currentChunk.length > 0) {
192
+ // Current chunk is full, start new chunk
193
+ chunks.push([...currentChunk]);
194
+ currentChunk = [group];
195
+ currentChunkTokens = group.tokens;
196
+ }
197
+ else {
198
+ // Add to current chunk
199
+ currentChunk.push(group);
200
+ currentChunkTokens += group.tokens;
201
+ }
202
+ }
203
+ // Add final chunk
204
+ if (currentChunk.length > 0) {
205
+ chunks.push(currentChunk);
206
+ }
207
+ return chunks;
208
+ }
209
+ // ================================================================================
210
+ // SUMMARIZATION
211
+ // ================================================================================
212
+ /**
213
+ * Summarize a chunk of tool results using the LLM
214
+ */
215
+ async summarizeChunk(systemPrompt, userQuery, toolResultGroups, provider, chunkIndex, totalChunks) {
216
+ // Flatten groups into messages
217
+ const messages = toolResultGroups.flatMap(group => group.messages);
218
+ const summarizationInstruction = `You are analyzing tool call results to condense them while preserving all relevant information.
219
+
220
+ Context: This is chunk ${chunkIndex + 1} of ${totalChunks} being processed.
221
+
222
+ Instructions:
223
+ 1. Review the tool calls and their results below
224
+ 2. Extract and preserve ALL relevant information, data, and insights
225
+ 3. Remove redundant or unnecessary details
226
+ 4. Maintain the logical flow and relationships between data
227
+ 5. Keep important identifiers (IDs, names, dates, etc.)
228
+ 6. Provide a concise summary that captures everything important
229
+
230
+ Output your condensed analysis focusing on the key information needed to answer the user's question.`;
231
+ try {
232
+ if (provider === 'anthropic') {
233
+ return await this.summarizeWithAnthropic(systemPrompt, userQuery, messages, summarizationInstruction);
234
+ }
235
+ else {
236
+ return await this.summarizeWithOpenAI(systemPrompt, userQuery, messages, summarizationInstruction);
237
+ }
238
+ }
239
+ catch (error) {
240
+ this.logger.error("Failed to summarize chunk", error, {
241
+ chunkIndex,
242
+ totalChunks,
243
+ provider,
244
+ });
245
+ // Fallback: return truncated messages
246
+ const fallback = messages
247
+ .map(m => JSON.stringify(m).substring(0, 1000))
248
+ .join('\n')
249
+ .substring(0, 10000);
250
+ return `[Summarization failed, showing truncated results]\n${fallback}`;
251
+ }
252
+ }
253
+ /**
254
+ * Summarize using Anthropic
255
+ */
256
+ async summarizeWithAnthropic(systemPrompt, userQuery, messages, instruction) {
257
+ if (!this.config.anthropicApiKey) {
258
+ throw new Error("Anthropic API key not configured");
259
+ }
260
+ const client = new sdk_1.default({ apiKey: this.config.anthropicApiKey });
261
+ // Build prompt for summarization
262
+ const summarizationMessages = [
263
+ {
264
+ role: "user",
265
+ content: userQuery,
266
+ },
267
+ ...messages,
268
+ {
269
+ role: "user",
270
+ content: instruction,
271
+ },
272
+ ];
273
+ const response = await client.messages.create({
274
+ model: "claude-sonnet-4-20250514",
275
+ max_tokens: 4000,
276
+ temperature: 0.3, // Lower temperature for more focused summarization
277
+ system: systemPrompt,
278
+ messages: summarizationMessages,
279
+ });
280
+ const text = response.content
281
+ .filter(content => content.type === "text")
282
+ .map(content => content.text)
283
+ .join("\n");
284
+ return text;
285
+ }
286
+ /**
287
+ * Summarize using OpenAI
288
+ */
289
+ async summarizeWithOpenAI(systemPrompt, userQuery, messages, instruction) {
290
+ if (!this.config.openaiApiKey) {
291
+ throw new Error("OpenAI API key not configured");
292
+ }
293
+ const client = new openai_1.default({ apiKey: this.config.openaiApiKey });
294
+ // Build prompt for summarization
295
+ const summarizationMessages = [
296
+ { role: "system", content: systemPrompt },
297
+ { role: "user", content: userQuery },
298
+ ...messages,
299
+ { role: "user", content: instruction },
300
+ ];
301
+ const response = await client.chat.completions.create({
302
+ model: "gpt-4o",
303
+ max_tokens: 4000,
304
+ temperature: 0.3,
305
+ messages: summarizationMessages,
306
+ });
307
+ return response.choices[0]?.message?.content || "[No summary generated]";
308
+ }
309
+ /**
310
+ * Main summarization orchestration
311
+ */
312
+ async summarizeContext(systemPrompt, messages, provider, userQuery) {
313
+ if (!this.config.enableAutoSummarization) {
314
+ throw new Error("Auto-summarization is disabled");
315
+ }
316
+ const startTime = Date.now();
317
+ this.logger.info("Starting context summarization", {
318
+ provider,
319
+ messageCount: messages.length,
320
+ originalTokens: this.countMessagesTokens(messages),
321
+ });
322
+ // Extract user query from first user message if not provided
323
+ if (!userQuery) {
324
+ const firstUserMessage = messages.find(m => m.role === 'user');
325
+ userQuery = firstUserMessage?.content || "Process the tool results";
326
+ }
327
+ const systemPromptTokens = this.countSystemPromptTokens(systemPrompt);
328
+ const userQueryTokens = this.estimateTokens(userQuery || "");
329
+ // Split messages into logical groups
330
+ const groups = this.splitIntoToolResultGroups(messages, provider);
331
+ this.logger.info("Split messages into tool result groups", {
332
+ groupCount: groups.length,
333
+ groups: groups.map(g => ({
334
+ messageCount: g.messages.length,
335
+ tokens: g.tokens,
336
+ range: `${g.startIndex}-${g.endIndex}`,
337
+ })),
338
+ });
339
+ // Split groups into chunks that fit within limits
340
+ const chunks = this.splitIntoChunks(groups, systemPromptTokens, userQueryTokens, provider);
341
+ if (chunks.length > this.config.maxSummarizationChunks) {
342
+ this.logger.warn("Too many chunks required", {
343
+ chunksNeeded: chunks.length,
344
+ maxAllowed: this.config.maxSummarizationChunks,
345
+ });
346
+ throw new Error(`Context too large: requires ${chunks.length} chunks but max is ${this.config.maxSummarizationChunks}`);
347
+ }
348
+ this.logger.info("Split groups into chunks for summarization", {
349
+ chunkCount: chunks.length,
350
+ chunks: chunks.map((chunk, i) => ({
351
+ index: i,
352
+ groupCount: chunk.length,
353
+ tokens: chunk.reduce((sum, g) => sum + g.tokens, 0),
354
+ })),
355
+ });
356
+ // Summarize chunks in parallel
357
+ const summarizationPromises = chunks.map((chunk, index) => this.summarizeChunk(systemPrompt, userQuery, chunk, provider, index, chunks.length));
358
+ const summaries = await Promise.all(summarizationPromises);
359
+ // Build summarized messages
360
+ const summarizedMessages = summaries.map((summary, index) => {
361
+ if (provider === 'anthropic') {
362
+ return {
363
+ role: "user",
364
+ content: `[Summarized tool results ${index + 1}/${summaries.length}]\n${summary}`,
365
+ };
366
+ }
367
+ else {
368
+ return {
369
+ role: "assistant",
370
+ content: `[Summarized tool results ${index + 1}/${summaries.length}]\n${summary}`,
371
+ };
372
+ }
373
+ });
374
+ const originalTokens = this.countMessagesTokens(messages);
375
+ const summarizedTokens = this.countMessagesTokens(summarizedMessages);
376
+ const reductionPercent = Math.round(((originalTokens - summarizedTokens) / originalTokens) * 100);
377
+ const duration = Date.now() - startTime;
378
+ this.logger.info("Context summarization complete", {
379
+ duration,
380
+ chunksProcessed: chunks.length,
381
+ originalTokens,
382
+ summarizedTokens,
383
+ reductionPercent: `${reductionPercent}%`,
384
+ originalMessages: messages.length,
385
+ summarizedMessages: summarizedMessages.length,
386
+ });
387
+ return {
388
+ summarizedMessages,
389
+ originalTokens,
390
+ summarizedTokens,
391
+ chunksProcessed: chunks.length,
392
+ reductionPercent,
393
+ };
394
+ }
395
+ // ================================================================================
396
+ // HELPER METHODS
397
+ // ================================================================================
398
+ /**
399
+ * Check if token count exceeds safe limit
400
+ */
401
+ exceedsLimit(tokenCount, provider) {
402
+ const limits = this.getTokenLimits(provider);
403
+ return tokenCount > limits.safeTokens;
404
+ }
405
+ /**
406
+ * Get warning threshold (80% of safe limit)
407
+ */
408
+ approachingLimit(tokenCount, provider) {
409
+ const limits = this.getTokenLimits(provider);
410
+ return tokenCount > limits.safeTokens * 0.8;
411
+ }
412
+ /**
413
+ * Get provider limits info
414
+ */
415
+ getProviderLimits(provider) {
416
+ return this.getTokenLimits(provider);
417
+ }
418
+ }
419
+ exports.ContextManager = ContextManager;
420
// ================================================================================
// EXPORTS
// ================================================================================
// Lazily-created module-level singleton instance.
let cachedContextManager = null;
/**
 * Return the shared ContextManager, constructing it on first use.
 * Note: `config` only takes effect on the very first call; later calls
 * return the existing instance and ignore any config argument.
 */
function getContextManager(config) {
    if (cachedContextManager === null) {
        cachedContextManager = new ContextManager(config);
    }
    return cachedContextManager;
}
//# sourceMappingURL=context-manager.js.map
@@ -0,0 +1,74 @@
1
/**
 * Unified Logger for Hailer MCP Server
 * Based on backend logger pattern
 * - Production: OpenTelemetry to Victoria Logs
 * - Development: Console output only
 * - Proper dependency injection pattern
 */
/**
 * Numeric log severity levels, ordered from least (DEBUG) to most (ERROR)
 * severe. Presumably used as a minimum threshold via Logger.setLevel() /
 * shouldLog() — confirm against logger.js.
 */
export declare enum LogLevel {
    DEBUG = 0,
    INFO = 1,
    WARN = 2,
    ERROR = 3
}
/**
 * Structured context attached to log records. All named fields are optional,
 * and the index signature allows arbitrary additional key/value pairs.
 */
export interface LogContext {
    operation?: string;
    endpoint?: string;
    activityId?: string;
    workflowId?: string;
    discussionId?: string;
    userId?: string;
    duration?: number;    // duration in some time unit — presumably ms; confirm in logger.js
    provider?: string;
    toolName?: string;
    requestId?: string;
    botId?: string;
    workspaceId?: string;
    component?: string;
    [key: string]: any;   // free-form extras merged into the log record
}
/**
 * String tags categorizing a log record by subsystem or event type
 * (system events, API calls, tool calls, MCP activity, auth, websocket,
 * or errors).
 */
export declare enum LogTag {
    SYSTEM = "system",
    API_CALL = "api_call",
    TOOL_CALL = "tool_call",
    MCP_ACTIVITY = "mcp_activity",
    AUTH = "auth",
    WEBSOCKET = "websocket",
    ERROR = "error"
}
/**
 * Structured logger for the MCP server. Per the header note: console output
 * in development, OpenTelemetry emission in production. Prefer constructing
 * via createLogger() rather than `new Logger(...)` directly.
 */
export declare class Logger {
    private otelLogger?;
    private level;
    private context;
    private isProduction;
    constructor(context?: LogContext, level?: LogLevel);
    private initializeOtelLogger;
    private shouldLog;
    private generateRequestId;
    private logToOtel;
    private log;
    private getEmoji;
    /** Severity-level logging; `tag` categorizes the record (see LogTag). */
    debug(message: string, context?: LogContext, tag?: LogTag): void;
    info(message: string, context?: LogContext, tag?: LogTag): void;
    warn(message: string, context?: LogContext, tag?: LogTag): void;
    /** Accepts any thrown value (`unknown`), not just Error instances. */
    error(message: string, error?: Error | unknown, context?: LogContext, tag?: LogTag): void;
    /** Lifecycle helpers for outbound API calls (start / success / failure). */
    apiCall(operation: string, endpoint: string, params?: any, context?: LogContext): void;
    apiSuccess(operation: string, endpoint: string, duration?: number, context?: LogContext): void;
    apiError(operation: string, endpoint: string, error: Error | unknown, duration?: number, context?: LogContext): void;
    /** Lifecycle helpers for MCP tool invocations (start / success / failure). */
    toolCall(toolName: string, provider: string, args?: any, context?: LogContext): void;
    toolSuccess(toolName: string, provider: string, duration?: number, context?: LogContext): void;
    toolError(toolName: string, provider: string, error: Error | unknown, duration?: number, context?: LogContext): void;
    mcpActivity(activity: string, context?: LogContext): void;
    /** Records a fallback away from the websocket path, with the reason. */
    socketFallback(operation: string, reason: string, context?: LogContext): void;
    auth(message: string, context?: LogContext): void;
    authError(message: string, error?: Error | unknown, context?: LogContext): void;
    /** Returns a Logger carrying additionalContext — presumably merged over this logger's context; confirm in logger.js. */
    child(additionalContext: LogContext): Logger;
    setLevel(level: LogLevel): void;
}
/**
 * Logger factory function - replaces the old singleton pattern
 * Use this to create loggers with specific context
 */
export declare function createLogger(context?: LogContext): Logger;
/** Returns a shared default Logger — presumably created with no preset context; confirm in logger.js. */
export declare function getDefaultLogger(): Logger;
//# sourceMappingURL=logger.d.ts.map