@cuylabs/agent-core 0.4.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/README.md +57 -8
  2. package/dist/builder-RcTZuYnO.d.ts +34 -0
  3. package/dist/capabilities/index.d.ts +97 -0
  4. package/dist/capabilities/index.js +46 -0
  5. package/dist/chunk-6TDTQJ4P.js +116 -0
  6. package/dist/chunk-7MUFEN4K.js +559 -0
  7. package/dist/chunk-BDBZ3SLK.js +745 -0
  8. package/dist/chunk-DWYX7ASF.js +26 -0
  9. package/dist/chunk-FG4MD5MU.js +54 -0
  10. package/dist/chunk-IMGQOTU2.js +2019 -0
  11. package/dist/chunk-IVUJDISU.js +556 -0
  12. package/dist/chunk-LRHOS4ZN.js +584 -0
  13. package/dist/chunk-OTUGSCED.js +691 -0
  14. package/dist/chunk-P6YF7USR.js +182 -0
  15. package/dist/chunk-QAQADS4X.js +258 -0
  16. package/dist/chunk-QWFMX226.js +879 -0
  17. package/dist/{chunk-6VKLWNRE.js → chunk-SDSBEQXG.js} +1 -132
  18. package/dist/chunk-VBWWUHWI.js +724 -0
  19. package/dist/chunk-VEKUXUVF.js +41 -0
  20. package/dist/chunk-X635CM2F.js +305 -0
  21. package/dist/chunk-YUUJK53A.js +91 -0
  22. package/dist/chunk-ZXAKHMWH.js +283 -0
  23. package/dist/config-D2xeGEHK.d.ts +52 -0
  24. package/dist/context/index.d.ts +259 -0
  25. package/dist/context/index.js +26 -0
  26. package/dist/identifiers-BLUxFqV_.d.ts +12 -0
  27. package/dist/index-p0kOsVsE.d.ts +1067 -0
  28. package/dist/index-tmhaADz5.d.ts +198 -0
  29. package/dist/index.d.ts +210 -5736
  30. package/dist/index.js +2126 -7766
  31. package/dist/mcp/index.d.ts +26 -0
  32. package/dist/mcp/index.js +14 -0
  33. package/dist/messages-BYWGn8TY.d.ts +110 -0
  34. package/dist/middleware/index.d.ts +7 -0
  35. package/dist/middleware/index.js +12 -0
  36. package/dist/models/index.d.ts +33 -0
  37. package/dist/models/index.js +12 -0
  38. package/dist/network-D76DS5ot.d.ts +5 -0
  39. package/dist/prompt/index.d.ts +224 -0
  40. package/dist/prompt/index.js +45 -0
  41. package/dist/reasoning/index.d.ts +71 -0
  42. package/dist/reasoning/index.js +47 -0
  43. package/dist/registry-CuRWWtcT.d.ts +164 -0
  44. package/dist/resolver-DOfZ-xuk.d.ts +254 -0
  45. package/dist/runner-C7aMP_x3.d.ts +596 -0
  46. package/dist/runtime/index.d.ts +357 -0
  47. package/dist/runtime/index.js +64 -0
  48. package/dist/session-manager-Uawm2Le7.d.ts +274 -0
  49. package/dist/skill/index.d.ts +103 -0
  50. package/dist/skill/index.js +39 -0
  51. package/dist/storage/index.d.ts +167 -0
  52. package/dist/storage/index.js +50 -0
  53. package/dist/sub-agent/index.d.ts +14 -0
  54. package/dist/sub-agent/index.js +15 -0
  55. package/dist/tool/index.d.ts +173 -1
  56. package/dist/tool/index.js +12 -3
  57. package/dist/tool-DYp6-cC3.d.ts +239 -0
  58. package/dist/tool-pFAnJc5Y.d.ts +419 -0
  59. package/dist/tracker-DClqYqTj.d.ts +96 -0
  60. package/dist/tracking/index.d.ts +109 -0
  61. package/dist/tracking/index.js +20 -0
  62. package/dist/types-CQaXbRsS.d.ts +47 -0
  63. package/dist/types-MM1JoX5T.d.ts +810 -0
  64. package/dist/types-VQgymC1N.d.ts +156 -0
  65. package/package.json +89 -5
  66. package/dist/index-BlSTfS-W.d.ts +0 -470
@@ -0,0 +1,283 @@
1
// src/mcp/modules.ts

// Lazily-loaded optional dependencies. Each module is imported on first
// use and cached for the lifetime of the process; a failed import leaves
// the cache unset so a later call can retry (e.g. after installation).
var mcpModule;
var stdioModule;

/**
 * Import an optional dependency, converting a failed import into a
 * friendly "install it with ..." error.
 *
 * The underlying import failure is attached as `cause` so callers can
 * distinguish a missing package from a broken install — the original
 * code discarded it entirely.
 */
async function importOptionalModule(specifier, message) {
  try {
    return await import(specifier);
  } catch (error) {
    // Object.assign avoids relying on the ES2022 Error `cause` option
    // while still exposing the original failure for debugging.
    throw Object.assign(new Error(message), { cause: error });
  }
}

/** Resolve (and cache) the optional `@ai-sdk/mcp` module. */
async function getMcpSdkModule() {
  if (!mcpModule) {
    mcpModule = await importOptionalModule(
      "@ai-sdk/mcp",
      "MCP support requires @ai-sdk/mcp. Install it with: npm install @ai-sdk/mcp"
    );
  }
  return mcpModule;
}

/** Resolve (and cache) the optional MCP stdio transport module. */
async function getStdioClientModule() {
  if (!stdioModule) {
    stdioModule = await importOptionalModule(
      "@modelcontextprotocol/sdk/client/stdio.js",
      "Stdio MCP transport requires @modelcontextprotocol/sdk. Install it with: npm install @modelcontextprotocol/sdk"
    );
  }
  return stdioModule;
}
28
+
29
// src/mcp/manager.ts

/**
 * Default MCP (Model Context Protocol) connection manager.
 *
 * Owns a set of named server connections built from a config map,
 * exposes their tools under namespaced keys ("server__tool"), and
 * aggregates resources/prompts across all connected servers.
 */
var DefaultMCPManager = class {
  // Server-name -> server-config map supplied by the caller.
  config;
  // serverName -> { client, config, status }; client is undefined for
  // disabled or failed connections.
  connections = /* @__PURE__ */ new Map();
  // Merged tool map cached across getTools() calls; invalidated by
  // connect() and close().
  toolCache;
  constructor(config) {
    this.config = config;
  }
  /**
   * Connect to every server in the config, in parallel.
   *
   * Servers with `enabled: false` are recorded as "disabled" and
   * skipped. A failing server is recorded with status "error" and
   * never rejects the overall call.
   *
   * @returns Map of server name -> final connection status
   */
  async connect() {
    const results = /* @__PURE__ */ new Map();
    const connectPromises = Object.entries(this.config).map(
      async ([name, serverConfig]) => {
        if (serverConfig.enabled === false) {
          const status = { status: "disabled" };
          this.connections.set(name, {
            client: void 0,
            config: serverConfig,
            status
          });
          results.set(name, status);
          return;
        }
        // Interim status; overwritten on success or failure below.
        results.set(name, { status: "connecting" });
        try {
          const { client, toolCount } = await this.createClient(name, serverConfig);
          const status = { status: "connected", toolCount };
          this.connections.set(name, { client, config: serverConfig, status });
          results.set(name, status);
        } catch (error) {
          const errorMessage = error instanceof Error ? error.message : String(error);
          const status = {
            status: "error",
            error: errorMessage
          };
          results.set(name, status);
          this.connections.set(name, {
            client: void 0,
            config: serverConfig,
            status
          });
        }
      }
    );
    await Promise.all(connectPromises);
    // Connections changed, so any previously merged tool map is stale.
    this.toolCache = void 0;
    return results;
  }
  /**
   * Merge the tools of all connected servers into one map keyed by
   * "serverName__toolName". The result is cached until the next
   * connect() / close().
   */
  async getTools() {
    if (this.toolCache) {
      return this.toolCache;
    }
    const allTools = {};
    const toolPromises = Array.from(this.connections.entries()).map(
      async ([serverName, connection]) => {
        if (connection.status.status !== "connected") {
          return;
        }
        try {
          const tools = await connection.client.tools();
          for (const [toolName, tool] of Object.entries(tools)) {
            // Namespace tool names to avoid collisions between servers.
            allTools[`${serverName}__${toolName}`] = tool;
          }
        } catch (error) {
          // Best-effort: one misbehaving server must not hide the rest.
          console.warn(`[mcp] Failed to get tools from ${serverName}:`, error);
        }
      }
    );
    await Promise.all(toolPromises);
    this.toolCache = allTools;
    return allTools;
  }
  /** Status for one server; "disconnected" when the name is unknown. */
  getStatus(serverName) {
    const connection = this.connections.get(serverName);
    return connection?.status ?? { status: "disconnected" };
  }
  /** Snapshot of every tracked server's status. */
  getAllStatus() {
    const status = /* @__PURE__ */ new Map();
    for (const [name, connection] of this.connections) {
      status.set(name, connection.status);
    }
    return status;
  }
  /**
   * List resources across all connected servers. Servers that fail
   * (or do not support resources) are silently skipped.
   */
  async listResources() {
    const resources = [];
    const promises = Array.from(this.connections.entries()).map(
      async ([serverName, connection]) => {
        if (connection.status.status !== "connected") {
          return;
        }
        try {
          const result = await connection.client.listResources();
          for (const resource of result.resources) {
            resources.push({
              uri: resource.uri,
              name: resource.name,
              description: resource.description,
              mimeType: resource.mimeType,
              server: serverName
            });
          }
        } catch {
          // Best-effort: skip servers without resource support.
        }
      }
    );
    await Promise.all(promises);
    return resources;
  }
  /**
   * Read a resource by URI, trying each connected server in turn and
   * returning the first successful read.
   *
   * @throws Error when no connected server can serve the URI
   */
  async readResource(uri) {
    for (const [, connection] of this.connections) {
      if (connection.status.status !== "connected") {
        continue;
      }
      try {
        return await connection.client.readResource({ uri });
      } catch {
        // Try the next server.
      }
    }
    throw new Error(`Resource not found: ${uri}`);
  }
  /**
   * List prompts across all connected servers (experimental MCP API).
   * Servers that fail are silently skipped.
   */
  async listPrompts() {
    const prompts = [];
    const promises = Array.from(this.connections.entries()).map(
      async ([serverName, connection]) => {
        if (connection.status.status !== "connected") {
          return;
        }
        try {
          const result = await connection.client.experimental_listPrompts();
          for (const prompt of result.prompts) {
            prompts.push({
              name: prompt.name,
              description: prompt.description,
              arguments: prompt.arguments,
              server: serverName
            });
          }
        } catch {
          // Best-effort: skip servers without prompt support.
        }
      }
    );
    await Promise.all(promises);
    return prompts;
  }
  /**
   * Fetch one prompt from a specific server (experimental MCP API).
   *
   * @throws Error when the server is unknown or not connected
   */
  async getPrompt(serverName, promptName, args) {
    const connection = this.connections.get(serverName);
    if (!connection || connection.status.status !== "connected") {
      throw new Error(`Server not connected: ${serverName}`);
    }
    return await connection.client.experimental_getPrompt({
      name: promptName,
      arguments: args
    });
  }
  /** Close every client (ignoring close errors) and reset all state. */
  async close() {
    const closePromises = Array.from(this.connections.values()).map(
      async (connection) => {
        if (connection.client) {
          try {
            await connection.client.close();
          } catch {
            // Ignore errors from already-dead transports.
          }
        }
      }
    );
    await Promise.all(closePromises);
    this.connections.clear();
    this.toolCache = void 0;
  }
  /** True when at least one server is currently connected. */
  isConnected() {
    for (const connection of this.connections.values()) {
      if (connection.status.status === "connected") {
        return true;
      }
    }
    return false;
  }
  /**
   * Create and probe a client for one server config.
   *
   * Fixes over the previous implementation:
   * - the timeout timer is cleared in all outcomes, so a successful
   *   connect no longer keeps the event loop alive for up to
   *   `timeout` ms;
   * - when the initial tools() probe fails or times out, the freshly
   *   created client is closed before rethrowing, so its transport
   *   (and, for stdio, a spawned child process) is not leaked.
   */
  async createClient(name, config) {
    const { createMCPClient } = await getMcpSdkModule();
    const timeout = config.timeout ?? 3e4;
    const withTimeout = (promise, ms) => {
      let timer;
      const timedOut = new Promise((_, reject) => {
        timer = setTimeout(
          () => reject(new Error(`MCP connection timeout after ${ms}ms`)),
          ms
        );
      });
      // Clear the timer whether the race is won or lost.
      return Promise.race([promise, timedOut]).finally(() => clearTimeout(timer));
    };
    // Probe the client's tools; on failure, release the client first.
    const probeTools = async (mcpClient) => {
      try {
        return await withTimeout(mcpClient.tools(), timeout);
      } catch (error) {
        try {
          await mcpClient.close();
        } catch {
          // Ignore close failures; the original error matters more.
        }
        throw error;
      }
    };
    if (config.transport === "stdio") {
      const { StdioClientTransport } = await getStdioClientModule();
      const transport = new StdioClientTransport({
        command: config.command,
        args: config.args,
        env: config.env,
        cwd: config.cwd
      });
      const client2 = await createMCPClient({
        transport,
        name: config.name ?? name
      });
      const tools2 = await probeTools(client2);
      return { client: client2, toolCount: Object.keys(tools2).length };
    }
    const client = await createMCPClient({
      transport: {
        type: config.transport,
        url: config.url,
        headers: config.headers
      },
      name: config.name ?? name
    });
    const tools = await probeTools(client);
    return { client, toolCount: Object.keys(tools).length };
  }
};
246
+
247
// src/mcp/factories.ts

/**
 * Create an MCP manager for the given server-configuration map.
 */
function createMCPManager(config) {
  const manager = new DefaultMCPManager(config);
  return manager;
}
251
/**
 * Identity helper: gives server configs a typed definition site
 * without altering the value.
 */
function defineServer(config) {
  return config;
}

/**
 * Build a stdio-transport server config. Extra options (timeout,
 * env, etc.) are merged onto the base config.
 */
function stdioServer(command, args, options) {
  const base = { transport: "stdio", command, args };
  return Object.assign(base, options);
}

/**
 * Build an HTTP-transport server config.
 */
function httpServer(url, options) {
  return Object.assign({ transport: "http", url }, options);
}

/**
 * Build an SSE-transport server config.
 */
function sseServer(url, options) {
  return Object.assign({ transport: "sse", url }, options);
}
276
+
277
// Public surface of this chunk: the manager factory plus the
// per-transport server-config builders.
export {
  createMCPManager,
  defineServer,
  stdioServer,
  httpServer,
  sseServer
};
@@ -0,0 +1,52 @@
1
import { LanguageModel } from 'ai';
import { ProviderOptions } from '@ai-sdk/provider-utils';
import { R as ReasoningLevel, a as ReasoningConfig } from './types-CQaXbRsS.js';

/**
 * Reasoning Configuration & Option Builders
 *
 * Orchestrates capability detection and provider-specific option
 * building to produce ready-to-use `providerOptions` for the
 * Vercel AI SDK.
 *
 * Two flavours of every function are provided:
 * - **Async** (`getReasoningConfig`, `buildReasoningOptions`) —
 *   uses the full capability resolver (network + cache).
 * - **Sync** (`getReasoningConfigSync`, `buildReasoningOptionsSync`) —
 *   uses fast pattern-matching only (no network).
 */

/**
 * Get the reasoning configuration for a model.
 *
 * Uses the full capability resolver (including network lookups)
 * for the most accurate result.
 */
declare function getReasoningConfig(model: LanguageModel): Promise<ReasoningConfig>;
/**
 * Synchronous reasoning config using pattern-matching only.
 *
 * Faster but less accurate than {@link getReasoningConfig}.
 * Good for hot paths where async is impractical.
 */
declare function getReasoningConfigSync(model: LanguageModel): ReasoningConfig;
/**
 * Build `providerOptions` for a reasoning level (async).
 *
 * Returns `undefined` when reasoning is off or unsupported.
 */
declare function buildReasoningOptions(model: LanguageModel, level: ReasoningLevel): Promise<ProviderOptions | undefined>;
/**
 * Build `providerOptions` for a reasoning level (sync / pattern-only).
 */
declare function buildReasoningOptionsSync(model: LanguageModel, level: ReasoningLevel): ProviderOptions | undefined;
/**
 * Check whether a model supports reasoning (async, full resolver).
 */
declare function supportsReasoning(model: LanguageModel): Promise<boolean>;
/**
 * Synchronous check using pattern-matching only.
 */
declare function supportsReasoningSync(model: LanguageModel): boolean;

// Single-letter aliases are bundler-generated (tsup/rollup chunk
// mangling); the package's public entry points re-export these under
// their real names.
export { buildReasoningOptionsSync as a, buildReasoningOptions as b, getReasoningConfigSync as c, supportsReasoningSync as d, getReasoningConfig as g, supportsReasoning as s };
@@ -0,0 +1,259 @@
1
import { M as Message } from '../messages-BYWGn8TY.js';
import { ModelMessage, LanguageModel } from 'ai';

/**
 * Token Estimation Utilities
 *
 * Provides lightweight heuristic-based token counting for messages
 * and conversations. Uses the chars/4 approximation — simple but
 * effective for context-window planning decisions.
 *
 * These are *estimates*, not exact counts. For billing or hard limits,
 * use the provider's native tokeniser instead.
 */

/**
 * Estimate token count for a plain string.
 *
 * Uses the widely-accepted `chars / 4` heuristic.
 *
 * @param text - Text to estimate
 * @returns Estimated token count (always ≥ 1 for non-empty input)
 */
declare function estimateTokens(text: string): number;
/**
 * Estimate token count for a single message.
 *
 * Handles:
 * - Plain string content
 * - Multi-part / multimodal arrays (text parts + images)
 *
 * @param message - A `Message` (internal) or `ModelMessage` (AI SDK)
 * @returns Estimated token count
 */
declare function estimateMessageTokens(message: Message | ModelMessage): number;
/**
 * Estimate total tokens for an entire conversation.
 *
 * Adds a small per-message overhead (≈ 4 tokens) for message
 * framing (`role`, delimiters, etc.).
 *
 * @param messages - Array of messages to estimate
 * @returns Estimated total token count
 */
declare function estimateConversationTokens(messages: (Message | ModelMessage)[]): number;

/**
 * Context Overflow Detection & Pruning
 *
 * Detects when a conversation is approaching the model's context-window
 * limit and prunes old / redundant content to stay within bounds.
 *
 * Two pruning strategies are available:
 * 1. **Tool-result pruning** — replaces large, stale tool outputs with
 *    compact placeholders (lightweight, no model call).
 * 2. **Conversation cutting** — identifies a safe cut-point and removes
 *    older messages entirely (optionally summarised by the manager).
 */

/**
 * Context limits configuration.
 *
 * All values are in *estimated* tokens (see {@link estimateTokens}).
 */
interface ContextLimits {
    /** Maximum context window size in tokens */
    contextWindow: number;
    /** Reserve tokens for output generation */
    reserveTokens: number;
    /** Protect this many recent tokens from pruning */
    protectedTokens: number;
    /** Minimum tokens to trigger pruning (avoid pruning tiny contexts) */
    pruneMinimum: number;
}
/**
 * Default context limits.
 * Based on typical 128 k context-window models.
 */
declare const DEFAULT_CONTEXT_LIMITS: ContextLimits;
/**
 * Check whether the context is overflowing.
 *
 * @param tokens - Current estimated token count
 * @param limits - Context limits (defaults to 128 k window)
 * @returns `true` if context exceeds the safe threshold
 */
declare function isContextOverflowing(tokens: number, limits?: ContextLimits): boolean;
/**
 * Check whether pruning should be triggered.
 *
 * Returns `false` when the conversation is still small enough that
 * pruning would be wasteful (below {@link ContextLimits.pruneMinimum}).
 */
declare function shouldPruneContext(tokens: number, limits?: ContextLimits): boolean;
/**
 * Result of a pruning operation.
 */
interface PruneResult {
    /** Messages after pruning (may include a summary message) */
    messages: Message[];
    /** Number of messages removed */
    removedCount: number;
    /** Estimated tokens removed */
    tokensRemoved: number;
    /** Whether summarisation was used */
    summarized: boolean;
    /** The summary content, if generated */
    summary?: string;
}
/**
 * Find a safe index at which the conversation can be "cut".
 *
 * Rules:
 * - Never cut in the middle of a tool-call sequence
 * - Prefer cutting after assistant / user messages
 * - Keep at least `protectedTokens` tokens at the end
 *
 * @param messages - Full message array
 * @param protectedTokens - Tokens to preserve at the end
 * @returns Cut index (exclusive — remove messages *before* this index).
 *          Returns `0` when no safe cut-point exists.
 */
declare function findCutPoint(messages: Message[], protectedTokens?: number): number;
/**
 * Prune old, large tool results from the conversation.
 *
 * Prune strategy:
 * - Walks backwards through messages
 * - Protects recent outputs (within `protectedTokens`)
 * - Skips protected tools (e.g. "skill")
 * - Stamps pruned messages with a `compactedAt` timestamp
 *
 * @param messages - Messages to prune
 * @param protectedTokens - Don't prune tool results in the last N tokens
 * @param options - Extra tool names to protect
 * @returns New message array with large tool outputs replaced
 */
declare function pruneToolResults(messages: Message[], protectedTokens?: number, options?: {
    /** Additional tools to protect from pruning */
    protectedTools?: string[];
}): Message[];

/**
 * Context Summarisation
 *
 * When tool-result pruning alone isn't enough to stay within the
 * context window, the manager can *cut* the conversation and replace
 * the removed portion with an LLM-generated summary.
 *
 * This module provides the summary-generation logic and the
 * top-level `pruneContext()` orchestration function.
 */

/**
 * Options for summary generation.
 */
interface SummarizationOptions {
    /** Model used to generate the summary */
    model: LanguageModel;
    /** Max tokens for the summary output */
    maxTokens?: number;
    /** Custom summarisation system prompt */
    customPrompt?: string;
}
/**
 * Options for {@link pruneContext}.
 */
interface PruneContextOptions {
    /** Model for summarisation (omit to skip summary generation) */
    model?: LanguageModel;
    /** Context limits to enforce */
    limits?: ContextLimits;
    /** Custom summarisation prompt */
    summaryPrompt?: string;
}
/**
 * Generate a natural-language summary of a message sequence.
 *
 * @param messages - Messages to summarise
 * @param options - Model & prompt configuration
 * @returns Summary text
 */
declare function generateSummary(messages: Message[], options: SummarizationOptions): Promise<string>;
/**
 * Prune a conversation to fit within context-window limits.
 *
 * Strategy (in order):
 * 1. Prune old tool outputs (lightweight, no model call)
 * 2. If still overflowing, find a safe cut-point and remove
 *    older messages — optionally generating an LLM summary.
 *
 * @param messages - Current message array
 * @param options - Limits, model, and prompt overrides
 * @returns A {@link PruneResult} with the trimmed messages
 */
declare function pruneContext(messages: Message[], options?: PruneContextOptions): Promise<PruneResult>;

/**
 * Context Manager
 *
 * Stateful wrapper around the context-management primitives.
 * Tracks per-session context limits and provides a single entry
 * point for token estimation, overflow detection, and pruning.
 */

/**
 * Per-session context manager.
 *
 * Holds the active context-window limits and provides convenience
 * methods for checking, reporting, and pruning context.
 *
 * @example
 * ```typescript
 * const ctx = new ContextManager({ limits: { contextWindow: 200_000 } });
 *
 * if (ctx.shouldPrune(messages)) {
 *   const result = await ctx.prune(messages);
 *   console.log(`Removed ${result.removedCount} messages`);
 * }
 * ```
 */
declare class ContextManager {
    private limits;
    private model?;
    private summaryPrompt?;
    constructor(options?: {
        limits?: Partial<ContextLimits>;
        model?: LanguageModel;
        summaryPrompt?: string;
    });
    /** Get a copy of the current context limits. */
    getLimits(): ContextLimits;
    /** Update context limits (e.g. when switching models). */
    setLimits(limits: Partial<ContextLimits>): void;
    /** Set the model used for summarisation. */
    setModel(model: LanguageModel): void;
    /** Estimate total tokens for a message array. */
    estimateTokens(messages: (Message | ModelMessage)[]): number;
    /** Check whether the context is overflowing. */
    isOverflowing(messages: (Message | ModelMessage)[]): boolean;
    /** Check whether pruning should be triggered. */
    shouldPrune(messages: (Message | ModelMessage)[]): boolean;
    /** Prune context to fit within limits. */
    prune(messages: Message[]): Promise<PruneResult>;
    /**
     * Get a snapshot of token statistics.
     *
     * Useful for dashboards, logging, or deciding whether to prune.
     */
    getStats(messages: (Message | ModelMessage)[]): {
        tokens: number;
        limit: number;
        available: number;
        utilizationPercent: number;
        isOverflowing: boolean;
        shouldPrune: boolean;
    };
}

export { type ContextLimits, ContextManager, DEFAULT_CONTEXT_LIMITS, type PruneContextOptions, type PruneResult, type SummarizationOptions, estimateConversationTokens, estimateMessageTokens, estimateTokens, findCutPoint, generateSummary, isContextOverflowing, pruneContext, pruneToolResults, shouldPruneContext };
@@ -0,0 +1,26 @@
1
// Barrel module: re-exports the context-management API implemented in
// the shared chunk so consumers can `import from '.../context'`.
import {
  ContextManager,
  DEFAULT_CONTEXT_LIMITS,
  estimateConversationTokens,
  estimateMessageTokens,
  estimateTokens,
  findCutPoint,
  generateSummary,
  isContextOverflowing,
  pruneContext,
  pruneToolResults,
  shouldPruneContext
} from "../chunk-QAQADS4X.js";
export {
  ContextManager,
  DEFAULT_CONTEXT_LIMITS,
  estimateConversationTokens,
  estimateMessageTokens,
  estimateTokens,
  findCutPoint,
  generateSummary,
  isContextOverflowing,
  pruneContext,
  pruneToolResults,
  shouldPruneContext
};
@@ -0,0 +1,12 @@
1
import { LanguageModel } from 'ai';

/**
 * Extract a model ID string from a LanguageModel instance.
 */
declare function getModelId(model: LanguageModel): string;
/**
 * Extract a provider identifier from a LanguageModel instance.
 */
declare function getProviderId(model: LanguageModel): string | undefined;

// Single-letter aliases are bundler-generated; the public entry points
// re-export these under their real names.
export { getProviderId as a, getModelId as g };