fraude-code 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127) hide show
  1. package/README.md +68 -0
  2. package/dist/index.js +179297 -0
  3. package/package.json +88 -0
  4. package/src/agent/agent.ts +475 -0
  5. package/src/agent/contextManager.ts +141 -0
  6. package/src/agent/index.ts +14 -0
  7. package/src/agent/pendingChanges.ts +270 -0
  8. package/src/agent/prompts/AskPrompt.txt +10 -0
  9. package/src/agent/prompts/FastPrompt.txt +40 -0
  10. package/src/agent/prompts/PlannerPrompt.txt +51 -0
  11. package/src/agent/prompts/ReviewerPrompt.txt +57 -0
  12. package/src/agent/prompts/WorkerPrompt.txt +33 -0
  13. package/src/agent/subagents/askAgent.ts +37 -0
  14. package/src/agent/subagents/extractionAgent.ts +123 -0
  15. package/src/agent/subagents/fastAgent.ts +45 -0
  16. package/src/agent/subagents/managerAgent.ts +36 -0
  17. package/src/agent/subagents/relationAgent.ts +76 -0
  18. package/src/agent/subagents/researchSubAgent.ts +79 -0
  19. package/src/agent/subagents/reviewerSubAgent.ts +42 -0
  20. package/src/agent/subagents/workerSubAgent.ts +42 -0
  21. package/src/agent/tools/bashTool.ts +94 -0
  22. package/src/agent/tools/descriptions/bash.txt +47 -0
  23. package/src/agent/tools/descriptions/edit.txt +7 -0
  24. package/src/agent/tools/descriptions/glob.txt +4 -0
  25. package/src/agent/tools/descriptions/grep.txt +8 -0
  26. package/src/agent/tools/descriptions/lsp.txt +20 -0
  27. package/src/agent/tools/descriptions/plan.txt +3 -0
  28. package/src/agent/tools/descriptions/read.txt +9 -0
  29. package/src/agent/tools/descriptions/todo.txt +12 -0
  30. package/src/agent/tools/descriptions/write.txt +8 -0
  31. package/src/agent/tools/editTool.ts +44 -0
  32. package/src/agent/tools/globTool.ts +59 -0
  33. package/src/agent/tools/grepTool.ts +343 -0
  34. package/src/agent/tools/lspTool.ts +429 -0
  35. package/src/agent/tools/planTool.ts +118 -0
  36. package/src/agent/tools/readTool.ts +78 -0
  37. package/src/agent/tools/rememberTool.ts +91 -0
  38. package/src/agent/tools/testRunnerTool.ts +77 -0
  39. package/src/agent/tools/testTool.ts +44 -0
  40. package/src/agent/tools/todoTool.ts +224 -0
  41. package/src/agent/tools/writeTool.ts +33 -0
  42. package/src/commands/COMMANDS.ts +38 -0
  43. package/src/commands/cerebras/auth.ts +27 -0
  44. package/src/commands/cerebras/index.ts +31 -0
  45. package/src/commands/forget.ts +29 -0
  46. package/src/commands/google/auth.ts +24 -0
  47. package/src/commands/google/index.ts +31 -0
  48. package/src/commands/groq/add_model.ts +60 -0
  49. package/src/commands/groq/auth.ts +24 -0
  50. package/src/commands/groq/index.ts +33 -0
  51. package/src/commands/index.ts +65 -0
  52. package/src/commands/knowledge.ts +92 -0
  53. package/src/commands/log.ts +32 -0
  54. package/src/commands/mistral/auth.ts +27 -0
  55. package/src/commands/mistral/index.ts +31 -0
  56. package/src/commands/model/index.ts +145 -0
  57. package/src/commands/models/index.ts +16 -0
  58. package/src/commands/ollama/index.ts +29 -0
  59. package/src/commands/openrouter/add_model.ts +64 -0
  60. package/src/commands/openrouter/auth.ts +24 -0
  61. package/src/commands/openrouter/index.ts +33 -0
  62. package/src/commands/remember.ts +48 -0
  63. package/src/commands/serve.ts +31 -0
  64. package/src/commands/session/index.ts +21 -0
  65. package/src/commands/usage.ts +15 -0
  66. package/src/commands/visualize.ts +773 -0
  67. package/src/components/App.tsx +55 -0
  68. package/src/components/IntroComponent.tsx +70 -0
  69. package/src/components/LoaderComponent.tsx +68 -0
  70. package/src/components/OutputRenderer.tsx +88 -0
  71. package/src/components/SettingsRenderer.tsx +23 -0
  72. package/src/components/input/CommandSuggestions.tsx +41 -0
  73. package/src/components/input/FileSuggestions.tsx +61 -0
  74. package/src/components/input/InputBox.tsx +371 -0
  75. package/src/components/output/CheckpointView.tsx +13 -0
  76. package/src/components/output/CommandView.tsx +13 -0
  77. package/src/components/output/CommentView.tsx +12 -0
  78. package/src/components/output/ConfirmationView.tsx +179 -0
  79. package/src/components/output/ContextUsage.tsx +62 -0
  80. package/src/components/output/DiffView.tsx +202 -0
  81. package/src/components/output/ErrorView.tsx +14 -0
  82. package/src/components/output/InteractiveServerView.tsx +69 -0
  83. package/src/components/output/KnowledgeView.tsx +220 -0
  84. package/src/components/output/MarkdownView.tsx +15 -0
  85. package/src/components/output/ModelSelectView.tsx +71 -0
  86. package/src/components/output/ReasoningView.tsx +21 -0
  87. package/src/components/output/ToolCallView.tsx +45 -0
  88. package/src/components/settings/ModelList.tsx +250 -0
  89. package/src/components/settings/TokenUsage.tsx +274 -0
  90. package/src/config/schema.ts +19 -0
  91. package/src/config/settings.ts +229 -0
  92. package/src/index.tsx +100 -0
  93. package/src/parsers/tree-sitter-python.wasm +0 -0
  94. package/src/providers/providers.ts +71 -0
  95. package/src/services/PluginLoader.ts +123 -0
  96. package/src/services/cerebras.ts +69 -0
  97. package/src/services/embeddingService.ts +229 -0
  98. package/src/services/google.ts +65 -0
  99. package/src/services/graphSerializer.ts +248 -0
  100. package/src/services/groq.ts +23 -0
  101. package/src/services/knowledgeOrchestrator.ts +286 -0
  102. package/src/services/mistral.ts +79 -0
  103. package/src/services/ollama.ts +109 -0
  104. package/src/services/openrouter.ts +23 -0
  105. package/src/services/symbolExtractor.ts +277 -0
  106. package/src/store/useFraudeStore.ts +123 -0
  107. package/src/store/useSettingsStore.ts +38 -0
  108. package/src/theme.ts +26 -0
  109. package/src/types/Agent.ts +147 -0
  110. package/src/types/CommandDefinition.ts +8 -0
  111. package/src/types/Model.ts +94 -0
  112. package/src/types/OutputItem.ts +24 -0
  113. package/src/types/PluginContext.ts +55 -0
  114. package/src/types/TokenUsage.ts +5 -0
  115. package/src/types/assets.d.ts +4 -0
  116. package/src/utils/agentCognition.ts +1152 -0
  117. package/src/utils/fileSuggestions.ts +111 -0
  118. package/src/utils/index.ts +17 -0
  119. package/src/utils/initFraude.ts +8 -0
  120. package/src/utils/logger.ts +24 -0
  121. package/src/utils/lspClient.ts +1415 -0
  122. package/src/utils/paths.ts +24 -0
  123. package/src/utils/queryHandler.ts +227 -0
  124. package/src/utils/router.ts +278 -0
  125. package/src/utils/streamHandler.ts +132 -0
  126. package/src/utils/treeSitterQueries.ts +125 -0
  127. package/tsconfig.json +33 -0
package/package.json ADDED
@@ -0,0 +1,88 @@
1
+ {
2
+ "name": "fraude-code",
3
+ "version": "0.1.0",
4
+ "description": "FraudeCode - An AI coding agent powered by Graph + Vector memory",
5
+ "author": "Matthew Branning",
6
+ "license": "MIT",
7
+ "repository": {
8
+ "type": "git",
9
+ "url": "git+https://github.com/mbranni03/FraudeCode.git"
10
+ },
11
+ "keywords": [
12
+ "ai",
13
+ "agent",
14
+ "cli",
15
+ "coding",
16
+ "bun",
17
+ "graph",
18
+ "vector",
19
+ "memory"
20
+ ],
21
+ "module": "index.ts",
22
+ "type": "module",
23
+ "private": false,
24
+ "bin": {
25
+ "fraude": "./dist/index.js"
26
+ },
27
+ "files": [
28
+ "dist",
29
+ "src",
30
+ "tsconfig.json",
31
+ "README.md"
32
+ ],
33
+ "devDependencies": {
34
+ "@types/bun": "latest",
35
+ "@types/diff": "^8.0.0",
36
+ "@types/react": "^19.2.7"
37
+ },
38
+ "peerDependencies": {
39
+ "typescript": "^5"
40
+ },
41
+ "dependencies": {
42
+ "@ai-sdk/cerebras": "^2.0.20",
43
+ "@ai-sdk/google": "^3.0.13",
44
+ "@ai-sdk/groq": "^3.0.7",
45
+ "@ai-sdk/mistral": "^3.0.12",
46
+ "@huggingface/transformers": "^3.8.1",
47
+ "@inkjs/ui": "^2.0.0",
48
+ "@inkkit/ink-markdown": "^1.0.0",
49
+ "@lancedb/lancedb": "^0.23.0",
50
+ "@openrouter/ai-sdk-provider": "^1.5.4",
51
+ "@qdrant/js-client-rest": "^1.16.2",
52
+ "@tavily/ai-sdk": "^0.4.1",
53
+ "@types/react-dom": "^19.2.3",
54
+ "@vscode/ripgrep": "^1.17.0",
55
+ "@xenova/transformers": "^2.17.2",
56
+ "ai": "^6.0.33",
57
+ "diff": "^8.0.2",
58
+ "fast-xml-parser": "^5.3.4",
59
+ "ignore": "^7.0.5",
60
+ "ink": "^6.6.0",
61
+ "ink-big-text": "^2.0.0",
62
+ "ink-gradient": "^3.0.0",
63
+ "kuzu": "^0.11.3",
64
+ "neo4j-driver": "^6.0.1",
65
+ "ollama-ai-provider-v2": "^3.0.1",
66
+ "pyodide": "^0.29.3",
67
+ "react": "^19.2.4",
68
+ "react-devtools-core": "^7.0.1",
69
+ "react-dom": "^19.2.4",
70
+ "vscode-jsonrpc": "^8.2.1",
71
+ "vscode-languageserver-protocol": "^3.17.5",
72
+ "vscode-languageserver-types": "^3.17.5",
73
+ "web-tree-sitter": "^0.25.10",
74
+ "zod": "^4.3.5",
75
+ "zustand": "^5.0.9"
76
+ },
77
+ "scripts": {
78
+ "dev": "bun run src/index.tsx",
79
+ "build": "bun run scripts/build.ts",
80
+ "start": "bun ./dist/index.js",
81
+ "type-check": "tsc --noEmit",
82
+ "prepublishOnly": "bun run build",
83
+ "get-log": "bun scripts/get-log.ts"
84
+ },
85
+ "trustedDependencies": [
86
+ "kuzu"
87
+ ]
88
+ }
@@ -0,0 +1,475 @@
1
+ import { generateText, streamText, stepCountIs, type ModelMessage } from "ai";
2
+ import path from "path";
3
+ import fs from "fs";
4
+ import { getModel } from "@/providers/providers";
5
+ import type {
6
+ AgentConfig,
7
+ AgentResponse,
8
+ ToolCallInfo,
9
+ StepInfo,
10
+ } from "@/types/Agent";
11
+ import log from "@/utils/logger";
12
+ import useFraudeStore from "@/store/useFraudeStore";
13
+ import { incrementModelUsage } from "@/config/settings";
14
+ import type { TokenUsage } from "@/types/TokenUsage";
15
+ import { handleStreamChunk } from "@/utils/streamHandler";
16
+ import ContextManager from "@/agent/contextManager";
17
+
18
+ // ============================================================================
19
+ // Helper to extract usage from SDK response
20
+ // ============================================================================
21
+
22
+ function extractUsage(usage: unknown): TokenUsage {
23
+ if (usage && typeof usage === "object") {
24
+ const u = usage as Record<string, number | undefined>;
25
+ const promptTokens = u.promptTokens ?? u.inputTokens ?? 0;
26
+ const completionTokens = u.completionTokens ?? u.outputTokens ?? 0;
27
+ const totalTokens = u.totalTokens ?? promptTokens + completionTokens;
28
+
29
+ return {
30
+ promptTokens,
31
+ completionTokens,
32
+ totalTokens,
33
+ };
34
+ }
35
+ return { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
36
+ }
37
+
38
+ function resolveFileReferences(input: string): string {
39
+ // Replace @path/to/file with absolute path
40
+ return input.replace(/@([^\s]+)/g, (match, ref) => {
41
+ try {
42
+ // Remove trailing punctuation that shouldn't be part of the path
43
+ const matchPunct = ref.match(/([.,;?!:]+)$/);
44
+ let possiblePath = ref;
45
+ let suffix = "";
46
+
47
+ if (matchPunct) {
48
+ suffix = matchPunct[1];
49
+ possiblePath = ref.substring(0, ref.length - suffix.length);
50
+ }
51
+
52
+ const fullPath = path.resolve(process.cwd(), possiblePath);
53
+ if (fs.existsSync(fullPath)) {
54
+ return fullPath + suffix;
55
+ }
56
+
57
+ // Fallback: check raw ref in case punctuation is part of filename
58
+ const fullPathOriginal = path.resolve(process.cwd(), ref);
59
+ if (fs.existsSync(fullPathOriginal)) {
60
+ return fullPathOriginal;
61
+ }
62
+ } catch {
63
+ // ignore
64
+ }
65
+ return match;
66
+ });
67
+ }
68
+
69
+ async function experimental_repairToolCall(failed: any) {
70
+ const lower = failed.toolCall.toolName.toLowerCase();
71
+ if (lower !== failed.toolCall.toolName && failed.tools?.[lower]) {
72
+ return {
73
+ ...failed.toolCall,
74
+ toolName: lower,
75
+ };
76
+ }
77
+ return {
78
+ ...failed.toolCall,
79
+ input: JSON.stringify({
80
+ tool: failed.toolCall.toolName,
81
+ error: failed.error.message,
82
+ }),
83
+ toolName: "invalid",
84
+ };
85
+ }
86
+
87
+ // ============================================================================
88
+ // Rate Limit Handling
89
+ // ============================================================================
90
+
91
// Retry pacing for rate-limited requests: providers enforce tokens-per-minute
// windows, so we wait a full minute before retrying.
const RATE_LIMIT_RETRY_DELAY_MS = 60000; // 60 seconds - wait for TPM limit to reset
// Maximum automatic retries per model before prompting the user to switch.
const MAX_RATE_LIMIT_RETRIES = 3;
93
+
94
+ /**
95
+ * Check if an error is a rate limit error (429 or TPM limit)
96
+ */
97
+ function isRateLimitError(error: unknown): boolean {
98
+ if (error instanceof Error) {
99
+ const message = error.message.toLowerCase();
100
+ // Check for 429 status or TPM limit messages
101
+ if (
102
+ message.includes("429") ||
103
+ message.includes("rate limit") ||
104
+ message.includes("tokens per minute") ||
105
+ message.includes("tpm") ||
106
+ message.includes("request too large") ||
107
+ message.includes("too many requests")
108
+ ) {
109
+ return true;
110
+ }
111
+ }
112
+ // Check if error has a status property (API response errors)
113
+ if (error && typeof error === "object" && "status" in error) {
114
+ return (error as { status: number }).status === 429;
115
+ }
116
+ return false;
117
+ }
118
+
119
+ /**
120
+ * Sleep for a specified number of milliseconds
121
+ */
122
+ function sleep(ms: number): Promise<void> {
123
+ return new Promise((resolve) => setTimeout(resolve, ms));
124
+ }
125
+
126
/**
 * Execute a function with rate-limit retry logic.
 *
 * On a rate-limit error (see isRateLimitError) the call is retried up to
 * `retries` times, sleeping RATE_LIMIT_RETRY_DELAY_MS between attempts so the
 * provider's per-minute token window can reset. When retries are exhausted
 * and `onModelChange` is provided, the user is prompted (via the store) to
 * pick an alternative model; choosing one swaps the underlying call and
 * restarts the retry budget. Non-rate-limit errors are rethrown immediately.
 *
 * @param fn - The function to execute (should use currentModel from getModel callback)
 * @param currentModelName - The current model name for error reporting
 * @param onModelChange - Callback when user selects a new model (should update internal state and return new fn)
 * @param retries - Maximum automatic retries per model before prompting
 * @throws The last error when retries are exhausted, or a cancellation Error
 *         when the user declines to pick an alternative model.
 */
async function withRateLimitRetry<T>(
  fn: () => Promise<T>,
  currentModelName: string,
  onModelChange?: (newModelName: string) => () => Promise<T>,
  retries = MAX_RATE_LIMIT_RETRIES,
): Promise<T> {
  let lastError: unknown;
  // currentFn/modelName are rebound when the user selects a new model.
  let currentFn = fn;
  let modelName = currentModelName;

  for (let attempt = 0; attempt <= retries; attempt++) {
    try {
      return await currentFn();
    } catch (error) {
      lastError = error;

      // Transient rate limit with retry budget left: wait out the TPM window.
      if (isRateLimitError(error) && attempt < retries) {
        const waitTime = RATE_LIMIT_RETRY_DELAY_MS;
        log(
          `Rate limit hit. Waiting ${waitTime / 1000} seconds before retry ${attempt + 1}/${retries}...`,
        );
        // Surface the wait in the UI status line while we sleep.
        useFraudeStore.setState({
          statusText: `Rate limited - waiting ${waitTime / 1000}s (retry ${attempt + 1}/${retries})`,
        });
        await sleep(waitTime);
        continue;
      }

      // If it's a rate limit error and we've exhausted retries, offer model selection
      if (isRateLimitError(error) && onModelChange) {
        const errorMessage =
          error instanceof Error ? error.message : String(error);
        log(
          `Rate limit retries exhausted. Prompting user for model selection...`,
        );

        // Blocks until the user picks a model or cancels in the UI.
        const selectedModel = await useFraudeStore
          .getState()
          .requestModelSelection(modelName, errorMessage);

        if (selectedModel) {
          log(`User selected alternative model: ${selectedModel}`);
          // Get new function with updated model
          currentFn = onModelChange(selectedModel);
          modelName = selectedModel;
          // Reset retries for the new model
          attempt = -1; // Will become 0 after continue
          continue;
        } else {
          // User cancelled
          throw new Error(
            `Request cancelled by user after rate limit on model: ${modelName}`,
          );
        }
      }

      // Non-rate-limit errors (or no onModelChange fallback): fail fast.
      throw error;
    }
  }

  // Unreachable in practice (every path above returns, continues, or throws),
  // but keeps the compiler satisfied and preserves the last seen error.
  throw lastError;
}
196
+
197
+ // ============================================================================
198
+ // Provider Options Builder
199
+ // ============================================================================
200
+
201
+ /**
202
+ * Build provider options object for generateText/streamText calls.
203
+ * Currently supports OpenAI-specific options like reasoningEffort.
204
+ */
205
+ function buildProviderOptions(
206
+ config: Partial<AgentConfig>,
207
+ ): Record<string, Record<string, string | number | boolean>> | undefined {
208
+ const openaiOptions: Record<string, string | number | boolean> = {};
209
+
210
+ // Add reasoning effort if specified
211
+ if (config.reasoningEffort) {
212
+ openaiOptions.reasoningEffort = config.reasoningEffort;
213
+ }
214
+
215
+ // Only return providerOptions if we have something to set
216
+ if (Object.keys(openaiOptions).length > 0) {
217
+ return {
218
+ openai: openaiOptions,
219
+ };
220
+ }
221
+
222
+ return undefined;
223
+ }
224
+
225
+ // ============================================================================
226
+ // Agent Class
227
+ // ============================================================================
228
+
229
+ /**
230
+ * A provider-agnostic Agent class that provides a unified interface for
231
+ * interacting with various LLM providers (Groq, Ollama, OpenRouter, etc.)
232
+ */
233
export default class Agent {
  private config: AgentConfig;
  // Resolved AI SDK model instance; its concrete type varies by provider.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  private model: any;
  // Raw model identifier string, kept for usage accounting and display.
  private rawModel: string;
  // Non-null only when this agent was configured with its own private
  // context (used for subagents that must not pollute the global session).
  private isolatedContextManager: ContextManager | null = null;

  constructor(config: AgentConfig) {
    // Defaults are spread first so the caller's config wins on conflict.
    this.config = {
      temperature: 0.7,
      maxTokens: 4096,
      maxSteps: 5,
      autoExecuteTools: true,
      ...config,
    };
    this.model = getModel(config.model);
    this.rawModel = config.model;

    // Create isolated context manager if requested (for subagents)
    if (config.useIsolatedContext) {
      this.isolatedContextManager = new ContextManager();
    }
  }

  /**
   * Get the appropriate context manager for this agent.
   * Returns isolated context manager if configured, otherwise the global one.
   */
  private getContextManager(): ContextManager {
    return (
      this.isolatedContextManager ?? useFraudeStore.getState().contextManager
    );
  }

  // ==========================================================================
  // Core Methods
  // ==========================================================================

  /**
   * Send a message and get a complete (non-streaming) response.
   *
   * The input has "@file" references expanded to absolute paths, is appended
   * to the context manager's history, and the full history is sent to the
   * model. Usage is recorded against the current model after the call.
   *
   * @param input - User message; may contain "@path" file references.
   * @param options - Per-call overrides merged over the agent's config.
   */
  async chat(
    input: string,
    options?: Partial<AgentConfig>,
  ): Promise<AgentResponse> {
    const contextManager = this.getContextManager();
    const processedInput = resolveFileReferences(input);
    // addContext both appends the user message and returns the full history.
    const messages = [...(await contextManager.addContext(processedInput))];
    const mergedConfig = { ...this.config, ...options };

    const result = await generateText({
      model: this.model as Parameters<typeof generateText>[0]["model"],
      messages,
      system: mergedConfig.systemPrompt,
      temperature: mergedConfig.temperature,
      maxOutputTokens: mergedConfig.maxTokens,
      tools: mergedConfig.tools,
      providerOptions: buildProviderOptions(mergedConfig),
      // maxSteps caps the tool-call loop; omitted when not configured.
      stopWhen: mergedConfig.maxSteps
        ? stepCountIs(mergedConfig.maxSteps)
        : undefined,
      experimental_repairToolCall,
    });

    const response = this.mapResponse(result);
    await incrementModelUsage(this.rawModel, response.usage);
    return response;
  }

  /**
   * Stream a response in real-time.
   *
   * Same setup as chat(), but chunks are forwarded through the stream
   * handler as they arrive; the returned promise resolves with the final
   * aggregated response once the stream is fully consumed.
   */
  async stream(
    input: string,
    options?: Partial<AgentConfig>,
  ): Promise<AgentResponse> {
    const contextManager = this.getContextManager();
    const processedInput = resolveFileReferences(input);
    const messages = [...(await contextManager.addContext(processedInput))];
    const mergedConfig = { ...this.config, ...options };

    const result = streamText({
      model: this.model as Parameters<typeof streamText>[0]["model"],
      messages,
      system: mergedConfig.systemPrompt,
      temperature: mergedConfig.temperature,
      maxOutputTokens: mergedConfig.maxTokens,
      tools: mergedConfig.tools,
      abortSignal: mergedConfig.abortSignal,
      providerOptions: buildProviderOptions(mergedConfig),
      stopWhen: mergedConfig.maxSteps
        ? stepCountIs(mergedConfig.maxSteps)
        : undefined,
      experimental_repairToolCall,
      onError: (error) => {
        log(`Stream error: ${JSON.stringify(error, null, 2)}`);
      },
    });

    return this.buildStreamingResponse(result, mergedConfig.abortSignal);
  }

  // ==========================================================================
  // Configuration
  // ==========================================================================

  /** Return the raw model identifier this agent is currently using. */
  getModel(): string {
    return this.rawModel;
  }

  /**
   * Switch to a different model (used internally by rate limit retry logic)
   */
  setModel(modelName: string): void {
    this.config.model = modelName;
    this.rawModel = modelName;
    this.model = getModel(modelName);
  }

  // ==========================================================================
  // Private Helpers
  // ==========================================================================

  /**
   * Convert a generateText result into our AgentResponse shape, mapping each
   * SDK step and flattening all tool calls into a single list.
   */
  private mapResponse(
    result: Awaited<ReturnType<typeof generateText>>,
  ): AgentResponse {
    const steps: StepInfo[] =
      result.steps?.map((step, index) =>
        this.mapStepInfo({ ...step, stepNumber: index + 1 }),
      ) ?? [];
    const toolCalls: ToolCallInfo[] = [];

    // Aggregate all tool calls/results from steps
    for (const step of steps) {
      toolCalls.push(...step.toolCalls);
    }

    return {
      text: result.text,
      usage: extractUsage(result.usage),
      finishReason: result.finishReason ?? "unknown",
      steps,
      toolCalls,
      raw: result,
    };
  }

  /**
   * Map one SDK step's event list into a StepInfo: tool-call events are
   * paired with their tool-result by toolCallId, and assistant text is both
   * recorded as an action and appended to the context manager.
   */
  private mapStepInfo(step: Record<string, unknown>): StepInfo {
    const actions = [];

    // Keyed by toolCallId so a tool-result can find its originating call.
    const toolMap = new Map<string, ToolCallInfo>();

    for (const event of step.content as any[]) {
      if (event.type === "tool-call") {
        toolMap.set(event.toolCallId, {
          toolName: event.toolName,
          args: event.input,
        });
      } else if (event.type === "tool-result") {
        const toolCall = toolMap.get(event.toolCallId);
        if (toolCall) {
          toolCall.result = event.output;
          actions.push({
            role: "tool",
            content: JSON.stringify(toolCall, null, 2),
          });
        }
      } else if (event.text) {
        actions.push({ role: "assistant", content: event.text });
        // NOTE(review): addContext is async but not awaited here — looks like
        // an intentional fire-and-forget append; confirm callers do not rely
        // on the history being updated before this method returns.
        this.getContextManager().addContext({
          role: "assistant",
          content: event.text,
        });
      }
    }

    // NOTE(review): also fire-and-forget (addSessionActions is async).
    this.getContextManager().addSessionActions(actions);

    return {
      stepNumber: (step.stepNumber as number) ?? 0,
      actions,
      toolCalls: Array.from(toolMap.values()),
      finishReason: (step.finishReason as string) ?? "unknown",
    };
  }

  /**
   * Drive a streamText result to completion.
   *
   * Consuming fullStream is what actually executes the request: each chunk is
   * forwarded to handleStreamChunk (UI updates) and its usage is recorded.
   * Aborting raises an AbortError mid-stream. Once the stream ends, the
   * resolved text/usage/steps are mapped into an AgentResponse.
   */
  private async buildStreamingResponse(
    result: ReturnType<typeof streamText>,
    abortSignal?: AbortSignal,
  ): Promise<AgentResponse> {
    log("Starting stream consumption...");

    // Fresh action log for this streaming turn.
    this.getContextManager().clearSessionActions();

    try {
      for await (const chunk of result.fullStream) {
        if (abortSignal?.aborted) {
          log("Stream consumption aborted by user");
          const error = new Error("Aborted");
          error.name = "AbortError";
          throw error;
        }

        const usage: TokenUsage = handleStreamChunk(
          chunk as Record<string, unknown>,
        );
        await incrementModelUsage(this.rawModel, usage);
      }
      log("Stream consumption completed.");
    } catch (error) {
      log(`Error during stream consumption: ${error}`);
      if (error instanceof Error) {
        log(`Error details: ${error.message}\n${error.stack}`);
      }
      throw error;
    }
    // These promises resolve only after the stream is fully consumed above.
    const text = await result.text;
    const usage = await result.usage;
    const finishReason = await result.finishReason;
    const steps = await result.steps;

    const mappedSteps: StepInfo[] =
      steps?.map((step, index) =>
        this.mapStepInfo({ ...step, stepNumber: index + 1 }),
      ) ?? [];
    const toolCalls: ToolCallInfo[] = [];

    for (const step of mappedSteps) {
      toolCalls.push(...step.toolCalls);
    }

    return {
      text,
      usage: extractUsage(usage),
      finishReason: finishReason ?? "unknown",
      steps: mappedSteps,
      toolCalls,
      raw: result,
    };
  }
}
@@ -0,0 +1,141 @@
1
+ import type { ModelMessage, StepResult, ToolSet } from "ai";
2
+ import { getKnowledgeOrchestrator } from "@/services/knowledgeOrchestrator";
3
+ import type KnowledgeOrchestrator from "@/services/knowledgeOrchestrator";
4
+ import AgentCognition from "@/utils/agentCognition";
5
+ import log from "@/utils/logger";
6
+
7
+ class ContextManager {
8
+ // private longTermSummary: string = "";
9
+ private history: ModelMessage[] = [];
10
+ private primingContext: string = "";
11
+ private currentQueryContext: string = "";
12
+ // private hasPrimed: boolean = false;
13
+ private cognition: AgentCognition;
14
+ private orchestrator: KnowledgeOrchestrator;
15
+ private sessionActions: { role: string; content: string }[] = [];
16
+
17
+ constructor(initialContext: ModelMessage[] = []) {
18
+ this.history = initialContext;
19
+ this.cognition = AgentCognition.getInstance();
20
+ this.orchestrator = getKnowledgeOrchestrator();
21
+ }
22
+
23
+ getContext(): ModelMessage[] {
24
+ if (this.currentQueryContext) {
25
+ // const primingMessage: ModelMessage = {
26
+ // role: "system",
27
+ // content: this.primingContext,
28
+ // };
29
+ const queryMessage: ModelMessage = {
30
+ role: "system",
31
+ content: this.currentQueryContext,
32
+ };
33
+ return [queryMessage, ...this.history]; //include primingContext if want to add previous session summaries
34
+ }
35
+ return this.history;
36
+ }
37
+
38
+ clearContext() {
39
+ this.history = [];
40
+ // this.longTermSummary = "";
41
+ this.primingContext = "";
42
+ this.currentQueryContext = "";
43
+ // this.hasPrimed = false;
44
+ this.clearSessionActions();
45
+ }
46
+
47
+ clearSessionActions() {
48
+ this.sessionActions = [];
49
+ }
50
+
51
+ async addSessionActions(actions: { role: string; content: string }[]) {
52
+ this.sessionActions.push(...actions);
53
+ return this.sessionActions;
54
+ }
55
+
56
+ async addContext(query: string | ModelMessage | ModelMessage[]) {
57
+ if (typeof query === "string") {
58
+ this.history.push({ role: "user", content: query });
59
+ } else if (Array.isArray(query)) {
60
+ this.history.push(...query);
61
+ } else {
62
+ this.history.push(query);
63
+ }
64
+ return this.history;
65
+ }
66
+
67
+ // Prime context with project knowledge (once per session)
68
+ // async primeWithKnowledge(): Promise<void> {
69
+ // if (this.hasPrimed) return;
70
+ // try {
71
+ // this.primingContext = await this.orchestrator.getPrimingContext();
72
+ // this.hasPrimed = true;
73
+ // } catch (e) {
74
+ // // Fail silently - priming is optional
75
+ // this.primingContext = "";
76
+ // }
77
+ // }
78
+
79
+ // Inject query-specific context using the orchestrator's formatted output
80
+ async injectQueryContext(query: string): Promise<void> {
81
+ try {
82
+ const context = await this.orchestrator.getContextForQuery(query);
83
+ if (context) {
84
+ this.currentQueryContext = context;
85
+ }
86
+ } catch (e) {
87
+ // Fail silently - context injection is optional
88
+ this.currentQueryContext = "";
89
+ }
90
+ }
91
+
92
+ // Persist session learnings before closing
93
+ async persistSession(): Promise<void> {
94
+ log("Persisting session learnings");
95
+ try {
96
+ await this.cognition.init();
97
+ // Extract and store facts from session
98
+ const facts = await this.cognition.extractFromSession(
99
+ this.sessionActions,
100
+ );
101
+ for (const fact of facts) {
102
+ await this.cognition.addFact(fact);
103
+ }
104
+ // Store session summary
105
+ // const summary = await this.cognition.summarizeSession(
106
+ // this.sessionActions,
107
+ // );
108
+ // if (summary) {
109
+ // await this.cognition.addFact({
110
+ // type: "summary",
111
+ // content: summary,
112
+ // confidence: 0.8,
113
+ // });
114
+ // }
115
+ } catch (e) {
116
+ // Fail silently - persistence is optional
117
+ log("Failed to persist session: " + e);
118
+ }
119
+ }
120
+
121
+ estimateContextTokens() {
122
+ return this.getContext().reduce(
123
+ (total, message) => total + this.estimateMessageTokens(message),
124
+ 0,
125
+ ); // + this.estimateMessageTokens(this.longTermSummary)
126
+ }
127
+
128
+ estimateMessageTokens(message: ModelMessage | string) {
129
+ const text =
130
+ typeof message === "string" ? message : (message.content as string);
131
+ if (/[\u4E00-\u9FFF]/.test(text)) {
132
+ return Math.ceil(text.length / 2); // CJK safety
133
+ }
134
+ if (/[\p{Emoji}]/u.test(text)) {
135
+ return Math.ceil(text.length); // worst-case
136
+ }
137
+ return Math.ceil(text.length / 4);
138
+ }
139
+ }
140
+
141
+ export default ContextManager;