deepagentsdk 0.9.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +159 -0
- package/package.json +95 -0
- package/src/agent.ts +1230 -0
- package/src/backends/composite.ts +273 -0
- package/src/backends/filesystem.ts +692 -0
- package/src/backends/index.ts +22 -0
- package/src/backends/local-sandbox.ts +175 -0
- package/src/backends/persistent.ts +593 -0
- package/src/backends/sandbox.ts +510 -0
- package/src/backends/state.ts +244 -0
- package/src/backends/utils.ts +287 -0
- package/src/checkpointer/file-saver.ts +98 -0
- package/src/checkpointer/index.ts +5 -0
- package/src/checkpointer/kv-saver.ts +82 -0
- package/src/checkpointer/memory-saver.ts +82 -0
- package/src/checkpointer/types.ts +125 -0
- package/src/cli/components/ApiKeyInput.tsx +300 -0
- package/src/cli/components/FilePreview.tsx +237 -0
- package/src/cli/components/Input.tsx +277 -0
- package/src/cli/components/Message.tsx +93 -0
- package/src/cli/components/ModelSelection.tsx +338 -0
- package/src/cli/components/SlashMenu.tsx +101 -0
- package/src/cli/components/StatusBar.tsx +89 -0
- package/src/cli/components/Subagent.tsx +91 -0
- package/src/cli/components/TodoList.tsx +133 -0
- package/src/cli/components/ToolApproval.tsx +70 -0
- package/src/cli/components/ToolCall.tsx +144 -0
- package/src/cli/components/ToolCallSummary.tsx +175 -0
- package/src/cli/components/Welcome.tsx +75 -0
- package/src/cli/components/index.ts +24 -0
- package/src/cli/hooks/index.ts +12 -0
- package/src/cli/hooks/useAgent.ts +933 -0
- package/src/cli/index.tsx +1066 -0
- package/src/cli/theme.ts +205 -0
- package/src/cli/utils/model-list.ts +365 -0
- package/src/constants/errors.ts +29 -0
- package/src/constants/limits.ts +195 -0
- package/src/index.ts +176 -0
- package/src/middleware/agent-memory.ts +330 -0
- package/src/prompts.ts +196 -0
- package/src/skills/index.ts +2 -0
- package/src/skills/load.ts +191 -0
- package/src/skills/types.ts +53 -0
- package/src/tools/execute.ts +167 -0
- package/src/tools/filesystem.ts +418 -0
- package/src/tools/index.ts +39 -0
- package/src/tools/subagent.ts +443 -0
- package/src/tools/todos.ts +101 -0
- package/src/tools/web.ts +567 -0
- package/src/types/backend.ts +177 -0
- package/src/types/core.ts +220 -0
- package/src/types/events.ts +429 -0
- package/src/types/index.ts +94 -0
- package/src/types/structured-output.ts +43 -0
- package/src/types/subagent.ts +96 -0
- package/src/types.ts +22 -0
- package/src/utils/approval.ts +213 -0
- package/src/utils/events.ts +416 -0
- package/src/utils/eviction.ts +181 -0
- package/src/utils/index.ts +34 -0
- package/src/utils/model-parser.ts +38 -0
- package/src/utils/patch-tool-calls.ts +233 -0
- package/src/utils/project-detection.ts +32 -0
- package/src/utils/summarization.ts +254 -0
|
@@ -0,0 +1,254 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Conversation summarization utility.
|
|
3
|
+
*
|
|
4
|
+
* Automatically summarizes older messages when approaching token limits
|
|
5
|
+
* to prevent context overflow while preserving important context.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { generateText, type LanguageModel } from "ai";
|
|
9
|
+
import type { ModelMessage } from "../types.js";
|
|
10
|
+
import { estimateTokens } from "./eviction.js";
|
|
11
|
+
import {
|
|
12
|
+
DEFAULT_SUMMARIZATION_THRESHOLD as CENTRALIZED_THRESHOLD,
|
|
13
|
+
DEFAULT_KEEP_MESSAGES as CENTRALIZED_KEEP,
|
|
14
|
+
} from "../constants/limits.js";
|
|
15
|
+
|
|
16
|
+
/**
 * Default token threshold before triggering summarization.
 * 170k tokens is a safe threshold for most models.
 *
 * The value itself is centralized in constants/limits.ts (imported above as
 * CENTRALIZED_THRESHOLD); this re-export keeps the historical name available
 * from this module for existing callers.
 */
export const DEFAULT_SUMMARIZATION_THRESHOLD = CENTRALIZED_THRESHOLD;
|
|
21
|
+
|
|
22
|
+
/**
 * Default number of recent messages to keep intact during summarization.
 *
 * Centralized in constants/limits.ts (imported above as CENTRALIZED_KEEP);
 * re-exported here so this module remains the public access point.
 */
export const DEFAULT_KEEP_MESSAGES = CENTRALIZED_KEEP;
|
|
26
|
+
|
|
27
|
+
/**
|
|
28
|
+
* Options for summarization.
|
|
29
|
+
*/
|
|
30
|
+
export interface SummarizationOptions {
|
|
31
|
+
/** Model to use for summarization (AI SDK LanguageModel instance) */
|
|
32
|
+
model: LanguageModel;
|
|
33
|
+
/** Token threshold to trigger summarization (default: 170000) */
|
|
34
|
+
tokenThreshold?: number;
|
|
35
|
+
/** Number of recent messages to keep intact (default: 6) */
|
|
36
|
+
keepMessages?: number;
|
|
37
|
+
/** Generation options to pass through */
|
|
38
|
+
generationOptions?: any;
|
|
39
|
+
/** Advanced options to pass through */
|
|
40
|
+
advancedOptions?: any;
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
/**
 * Result of a summarization check performed by `summarizeIfNeeded`.
 */
export interface SummarizationResult {
  /** Whether summarization was actually performed. */
  summarized: boolean;
  /**
   * The processed messages: the original array when no summarization was
   * needed, or a summary message followed by the retained recent messages.
   */
  messages: ModelMessage[];
  /** Estimated token count before processing. */
  tokensBefore?: number;
  /** Estimated token count after processing (set only when summarized). */
  tokensAfter?: number;
}
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Estimate total tokens in a messages array.
|
|
59
|
+
*/
|
|
60
|
+
export function estimateMessagesTokens(messages: ModelMessage[]): number {
|
|
61
|
+
let total = 0;
|
|
62
|
+
|
|
63
|
+
for (const message of messages) {
|
|
64
|
+
if (typeof message.content === "string") {
|
|
65
|
+
total += estimateTokens(message.content);
|
|
66
|
+
} else if (Array.isArray(message.content)) {
|
|
67
|
+
for (const part of message.content) {
|
|
68
|
+
if (typeof part === "object" && part !== null && "text" in part) {
|
|
69
|
+
total += estimateTokens(String(part.text));
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
}
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
return total;
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
/**
|
|
79
|
+
* Extract text content from a message.
|
|
80
|
+
*/
|
|
81
|
+
function getMessageText(message: ModelMessage): string {
|
|
82
|
+
if (typeof message.content === "string") {
|
|
83
|
+
return message.content;
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
if (Array.isArray(message.content)) {
|
|
87
|
+
return message.content
|
|
88
|
+
.map((part) => {
|
|
89
|
+
if (typeof part === "object" && part !== null && "text" in part) {
|
|
90
|
+
return String(part.text);
|
|
91
|
+
}
|
|
92
|
+
if (typeof part === "object" && part !== null && "type" in part) {
|
|
93
|
+
if (part.type === "tool-call") {
|
|
94
|
+
return `[Tool call: ${(part as { toolName?: string }).toolName || "unknown"}]`;
|
|
95
|
+
}
|
|
96
|
+
if (part.type === "tool-result") {
|
|
97
|
+
return `[Tool result]`;
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
return "";
|
|
101
|
+
})
|
|
102
|
+
.filter(Boolean)
|
|
103
|
+
.join("\n");
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
return "";
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
/**
|
|
110
|
+
* Format messages for summarization prompt.
|
|
111
|
+
*/
|
|
112
|
+
function formatMessagesForSummary(messages: ModelMessage[]): string {
|
|
113
|
+
return messages
|
|
114
|
+
.map((msg) => {
|
|
115
|
+
const role = msg.role === "user" ? "User" : msg.role === "assistant" ? "Assistant" : "System";
|
|
116
|
+
const text = getMessageText(msg);
|
|
117
|
+
return `${role}: ${text}`;
|
|
118
|
+
})
|
|
119
|
+
.join("\n\n");
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
/**
|
|
123
|
+
* Generate a summary of conversation messages.
|
|
124
|
+
*/
|
|
125
|
+
async function generateSummary(
|
|
126
|
+
messages: ModelMessage[],
|
|
127
|
+
model: LanguageModel,
|
|
128
|
+
generationOptions?: any,
|
|
129
|
+
advancedOptions?: any
|
|
130
|
+
): Promise<string> {
|
|
131
|
+
const conversationText = formatMessagesForSummary(messages);
|
|
132
|
+
|
|
133
|
+
const generateTextOptions: any = {
|
|
134
|
+
model,
|
|
135
|
+
system: `You are a conversation summarizer. Your task is to create a concise but comprehensive summary of the conversation that preserves:
|
|
136
|
+
1. Key decisions and conclusions
|
|
137
|
+
2. Important context and background information
|
|
138
|
+
3. Any tasks or todos mentioned
|
|
139
|
+
4. Technical details that may be referenced later
|
|
140
|
+
5. The overall flow and progression of the conversation
|
|
141
|
+
|
|
142
|
+
Keep the summary focused and avoid redundancy. The summary should allow someone to understand the conversation context without reading the full history.`,
|
|
143
|
+
prompt: `Please summarize the following conversation:\n\n${conversationText}`,
|
|
144
|
+
};
|
|
145
|
+
|
|
146
|
+
// Add passthrough options
|
|
147
|
+
if (generationOptions) {
|
|
148
|
+
Object.assign(generateTextOptions, generationOptions);
|
|
149
|
+
}
|
|
150
|
+
if (advancedOptions) {
|
|
151
|
+
Object.assign(generateTextOptions, advancedOptions);
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
const result = await generateText(generateTextOptions);
|
|
155
|
+
return result.text;
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
/**
|
|
159
|
+
* Summarize older messages when approaching token limits.
|
|
160
|
+
*
|
|
161
|
+
* This function checks if the total tokens in the messages exceed the threshold.
|
|
162
|
+
* If so, it summarizes older messages while keeping recent ones intact.
|
|
163
|
+
*
|
|
164
|
+
* @param messages - Array of conversation messages
|
|
165
|
+
* @param options - Summarization options
|
|
166
|
+
* @returns Processed messages with optional summary
|
|
167
|
+
*
|
|
168
|
+
* @example
|
|
169
|
+
* ```typescript
|
|
170
|
+
* import { anthropic } from '@ai-sdk/anthropic';
|
|
171
|
+
*
|
|
172
|
+
* const result = await summarizeIfNeeded(messages, {
|
|
173
|
+
* model: anthropic('claude-haiku-4-5-20251001'),
|
|
174
|
+
* tokenThreshold: 170000,
|
|
175
|
+
* keepMessages: 6,
|
|
176
|
+
* });
|
|
177
|
+
*
|
|
178
|
+
* if (result.summarized) {
|
|
179
|
+
* console.log(`Reduced from ${result.tokensBefore} to ${result.tokensAfter} tokens`);
|
|
180
|
+
* }
|
|
181
|
+
* ```
|
|
182
|
+
*/
|
|
183
|
+
export async function summarizeIfNeeded(
|
|
184
|
+
messages: ModelMessage[],
|
|
185
|
+
options: SummarizationOptions
|
|
186
|
+
): Promise<SummarizationResult> {
|
|
187
|
+
const {
|
|
188
|
+
model,
|
|
189
|
+
tokenThreshold = DEFAULT_SUMMARIZATION_THRESHOLD,
|
|
190
|
+
keepMessages = DEFAULT_KEEP_MESSAGES,
|
|
191
|
+
} = options;
|
|
192
|
+
|
|
193
|
+
// Estimate current token count
|
|
194
|
+
const tokensBefore = estimateMessagesTokens(messages);
|
|
195
|
+
|
|
196
|
+
// Check if summarization is needed
|
|
197
|
+
if (tokensBefore < tokenThreshold) {
|
|
198
|
+
return {
|
|
199
|
+
summarized: false,
|
|
200
|
+
messages,
|
|
201
|
+
tokensBefore,
|
|
202
|
+
};
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
// Not enough messages to summarize
|
|
206
|
+
if (messages.length <= keepMessages) {
|
|
207
|
+
return {
|
|
208
|
+
summarized: false,
|
|
209
|
+
messages,
|
|
210
|
+
tokensBefore,
|
|
211
|
+
};
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
// Split messages: older ones to summarize, recent ones to keep
|
|
215
|
+
const messagesToSummarize = messages.slice(0, -keepMessages);
|
|
216
|
+
const messagesToKeep = messages.slice(-keepMessages);
|
|
217
|
+
|
|
218
|
+
// Generate summary
|
|
219
|
+
const summary = await generateSummary(
|
|
220
|
+
messagesToSummarize,
|
|
221
|
+
model,
|
|
222
|
+
options.generationOptions,
|
|
223
|
+
options.advancedOptions
|
|
224
|
+
);
|
|
225
|
+
|
|
226
|
+
// Create summary message
|
|
227
|
+
const summaryMessage: ModelMessage = {
|
|
228
|
+
role: "system",
|
|
229
|
+
content: `[Previous conversation summary]\n${summary}\n\n[End of summary - recent messages follow]`,
|
|
230
|
+
} as ModelMessage;
|
|
231
|
+
|
|
232
|
+
// Combine summary with recent messages
|
|
233
|
+
const newMessages = [summaryMessage, ...messagesToKeep];
|
|
234
|
+
const tokensAfter = estimateMessagesTokens(newMessages);
|
|
235
|
+
|
|
236
|
+
return {
|
|
237
|
+
summarized: true,
|
|
238
|
+
messages: newMessages,
|
|
239
|
+
tokensBefore,
|
|
240
|
+
tokensAfter,
|
|
241
|
+
};
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
/**
|
|
245
|
+
* Check if messages need summarization without performing it.
|
|
246
|
+
*/
|
|
247
|
+
export function needsSummarization(
|
|
248
|
+
messages: ModelMessage[],
|
|
249
|
+
tokenThreshold: number = DEFAULT_SUMMARIZATION_THRESHOLD
|
|
250
|
+
): boolean {
|
|
251
|
+
const tokens = estimateMessagesTokens(messages);
|
|
252
|
+
return tokens >= tokenThreshold;
|
|
253
|
+
}
|
|
254
|
+
|