@hashgraphonline/conversational-agent 0.1.208 → 0.1.209
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/dist/cjs/conversational-agent.d.ts +67 -8
- package/dist/cjs/index.cjs +1 -1
- package/dist/cjs/index.cjs.map +1 -1
- package/dist/cjs/index.d.ts +1 -0
- package/dist/cjs/langchain-agent.d.ts +8 -0
- package/dist/cjs/memory/SmartMemoryManager.d.ts +58 -21
- package/dist/cjs/memory/index.d.ts +1 -1
- package/dist/esm/index.js +8 -0
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/index12.js +124 -46
- package/dist/esm/index12.js.map +1 -1
- package/dist/esm/index13.js +178 -13
- package/dist/esm/index13.js.map +1 -1
- package/dist/esm/index14.js +604 -100
- package/dist/esm/index14.js.map +1 -1
- package/dist/esm/index15.js +464 -9
- package/dist/esm/index15.js.map +1 -1
- package/dist/esm/index16.js +44 -172
- package/dist/esm/index16.js.map +1 -1
- package/dist/esm/index17.js +11 -156
- package/dist/esm/index17.js.map +1 -1
- package/dist/esm/index18.js +106 -191
- package/dist/esm/index18.js.map +1 -1
- package/dist/esm/index19.js +9 -660
- package/dist/esm/index19.js.map +1 -1
- package/dist/esm/index2.js +22 -13
- package/dist/esm/index2.js.map +1 -1
- package/dist/esm/index20.js +150 -206
- package/dist/esm/index20.js.map +1 -1
- package/dist/esm/index21.js +140 -166
- package/dist/esm/index21.js.map +1 -1
- package/dist/esm/index22.js +47 -105
- package/dist/esm/index22.js.map +1 -1
- package/dist/esm/index23.js +24 -89
- package/dist/esm/index23.js.map +1 -1
- package/dist/esm/index24.js +83 -56
- package/dist/esm/index24.js.map +1 -1
- package/dist/esm/index25.js +236 -32
- package/dist/esm/index25.js.map +1 -1
- package/dist/esm/index5.js +1 -1
- package/dist/esm/index6.js +295 -17
- package/dist/esm/index6.js.map +1 -1
- package/dist/esm/index8.js +82 -8
- package/dist/esm/index8.js.map +1 -1
- package/dist/types/conversational-agent.d.ts +67 -8
- package/dist/types/index.d.ts +1 -0
- package/dist/types/langchain-agent.d.ts +8 -0
- package/dist/types/memory/SmartMemoryManager.d.ts +58 -21
- package/dist/types/memory/index.d.ts +1 -1
- package/package.json +3 -3
- package/src/context/ReferenceContextManager.ts +9 -4
- package/src/context/ReferenceResponseProcessor.ts +3 -4
- package/src/conversational-agent.ts +379 -31
- package/src/index.ts +2 -0
- package/src/langchain/ContentAwareAgentExecutor.ts +0 -1
- package/src/langchain-agent.ts +94 -11
- package/src/mcp/ContentProcessor.ts +13 -3
- package/src/mcp/adapters/langchain.ts +1 -9
- package/src/memory/ContentStorage.ts +3 -51
- package/src/memory/MemoryWindow.ts +4 -16
- package/src/memory/ReferenceIdGenerator.ts +0 -4
- package/src/memory/SmartMemoryManager.ts +400 -33
- package/src/memory/TokenCounter.ts +12 -16
- package/src/memory/index.ts +1 -1
- package/src/plugins/hcs-10/HCS10Plugin.ts +44 -14
- package/src/services/ContentStoreManager.ts +0 -3
- package/src/types/content-reference.ts +8 -8
- package/src/types/index.ts +0 -1
package/dist/cjs/index.d.ts
CHANGED
@@ -13,3 +13,4 @@ export * from 'hedera-agent-kit';
 export type { IStateManager } from '@hashgraphonline/standards-agent-kit';
 export type { MCPServerConfig, MCPConnectionStatus, MCPToolInfo } from './mcp/types';
 export { MCPServers, createMCPConfig, validateServerConfig } from './mcp/helpers';
+export * from './memory';
package/dist/cjs/langchain-agent.d.ts
CHANGED
@@ -16,6 +16,14 @@ export declare class LangChainAgent extends BaseAgent {
     private createExecutor;
     private handleError;
     private initializeMCP;
+    /**
+     * Connect to MCP servers asynchronously after agent boot
+     */
+    connectMCPServers(): Promise<void>;
+    /**
+     * Connect to a single MCP server
+     */
+    private connectServer;
     /**
      * Check if a string is valid JSON
      */
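
The new connectMCPServers() method lets MCP servers be attached after the agent has booted instead of during initialization. A minimal TypeScript sketch of how a caller might use it; the diff does not show how a LangChainAgent is constructed, so the already-initialized agent parameter is an assumption:

import { LangChainAgent } from '@hashgraphonline/conversational-agent';

// Illustrative only: defer MCP connections until after boot.
// connectMCPServers() is the new public method in this release;
// the surrounding boot flow is assumed, not taken from the package.
async function attachMcpServers(agent: LangChainAgent): Promise<void> {
  await agent.connectMCPServers();
}
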
package/dist/cjs/memory/SmartMemoryManager.d.ts
CHANGED
@@ -1,5 +1,32 @@
 import { BaseMessage } from '@langchain/core/messages';
+import { ContentStorage } from './ContentStorage';
 
+/**
+ * Entity association for storing blockchain entity contexts
+ */
+export interface EntityAssociation {
+    /** The blockchain entity ID (e.g., tokenId, accountId, topicId) */
+    entityId: string;
+    /** User-provided or derived friendly name */
+    entityName: string;
+    /** Type of entity (token, account, topic, schedule, etc.) */
+    entityType: string;
+    /** When the entity was created/associated */
+    createdAt: Date;
+    /** Transaction ID that created this entity */
+    transactionId?: string;
+}
+/**
+ * Options for resolving entity references
+ */
+export interface EntityResolutionOptions {
+    /** Filter by specific entity type */
+    entityType?: string;
+    /** Maximum number of results to return */
+    limit?: number;
+    /** Whether to use fuzzy matching for natural language queries */
+    fuzzyMatch?: boolean;
+}
 /**
  * Configuration for SmartMemoryManager
  */
@@ -106,14 +133,14 @@ export declare class SmartMemoryManager {
      * Get statistics about the content storage
      * @returns Storage usage statistics
      */
-    getStorageStats():
+    getStorageStats(): ReturnType<ContentStorage['getStorageStats']>;
     /**
      * Get combined statistics for both active memory and storage
      * @returns Combined memory and storage statistics
      */
     getOverallStats(): {
         activeMemory: MemoryStats;
-        storage:
+        storage: ReturnType<ContentStorage['getStorageStats']>;
         totalMessagesManaged: number;
         activeMemoryUtilization: number;
         storageUtilization: number;
@@ -154,19 +181,14 @@ export declare class SmartMemoryManager {
      */
     exportState(): {
         config: Required<SmartMemoryConfig>;
-        activeMessages: {
-            content:
-            type:
-        }
+        activeMessages: Array<{
+            content: unknown;
+            type: string;
+        }>;
         systemPrompt: string;
         memoryStats: MemoryStats;
-        storageStats:
-        storedMessages:
-            content: import('@langchain/core/messages').MessageContent;
-            type: import('@langchain/core/messages').MessageType;
-            storedAt: string;
-            id: string;
-        }[];
+        storageStats: ReturnType<ContentStorage['getStorageStats']>;
+        storedMessages: ReturnType<ContentStorage['exportMessages']>;
     };
     /**
      * Get a summary of conversation context for external use
@@ -180,20 +202,35 @@ export declare class SmartMemoryManager {
         recentMessages: BaseMessage[];
         memoryUtilization: number;
         hasStoredHistory: boolean;
-
-
-        storageStats: import('./ContentStorage').StorageStats;
-        activeMessageCount: number;
-        systemPrompt: string;
-        recentMessages: BaseMessage[];
-        memoryUtilization: number;
-        hasStoredHistory: boolean;
+        recentStoredMessages?: BaseMessage[];
+        storageStats?: ReturnType<ContentStorage['getStorageStats']>;
     };
     /**
      * Perform maintenance operations
      * Optimizes storage and cleans up resources
      */
     performMaintenance(): void;
+    /**
+     * Store an entity association for later resolution
+     * @param entityId - The blockchain entity ID
+     * @param entityName - User-provided or derived friendly name
+     * @param entityType - Type of entity (token, account, topic, etc.)
+     * @param transactionId - Optional transaction ID that created this entity
+     */
+    storeEntityAssociation(entityId: string, entityName: string, entityType: string, transactionId?: string): void;
+    /**
+     * Resolve entity references from natural language queries
+     * @param query - Search query (entity name or natural language reference)
+     * @param options - Resolution options for filtering and fuzzy matching
+     * @returns Array of matching entity associations
+     */
+    resolveEntityReference(query: string, options?: EntityResolutionOptions): EntityAssociation[];
+    /**
+     * Get all entity associations, optionally filtered by type
+     * @param entityType - Optional filter by entity type
+     * @returns Array of entity associations
+     */
+    getEntityAssociations(entityType?: string): EntityAssociation[];
     /**
      * Clean up resources and dispose of components
      */
package/dist/cjs/memory/index.d.ts
CHANGED
@@ -2,6 +2,6 @@ export { TokenCounter } from './TokenCounter';
 export { MemoryWindow } from './MemoryWindow';
 export { ContentStorage } from './ContentStorage';
 export { SmartMemoryManager } from './SmartMemoryManager';
-export type { SmartMemoryConfig, SearchOptions, MemoryStats } from './SmartMemoryManager';
+export type { SmartMemoryConfig, SearchOptions, MemoryStats, EntityAssociation, EntityResolutionOptions } from './SmartMemoryManager';
 export type { AddMessageResult } from './MemoryWindow';
 export type { StorageStats } from './ContentStorage';
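
Together with the new export * from './memory' in index.d.ts, these declarations make the entity-association API of SmartMemoryManager part of the package's public types. A hedged sketch of how it might be used; the SmartMemoryManager constructor is not shown in this diff, so the no-argument construction and the example IDs are assumptions:

import {
  SmartMemoryManager,
  type EntityAssociation,
  type EntityResolutionOptions,
} from '@hashgraphonline/conversational-agent';

const memory = new SmartMemoryManager();

// Record an entity created during the conversation under a friendly name.
memory.storeEntityAssociation('0.0.123456', 'demo token', 'token', '0.0.1001@1700000000.000000001');

// Later, resolve a natural-language reference back to the entity ID.
const options: EntityResolutionOptions = { entityType: 'token', fuzzyMatch: true, limit: 1 };
const matches: EntityAssociation[] = memory.resolveEntityReference('the demo token I created', options);
console.log(matches[0]?.entityId);

// Or list every stored association of a given type.
const tokens: EntityAssociation[] = memory.getEntityAssociations('token');
console.log(tokens.length);
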
package/dist/esm/index.js
CHANGED
@@ -9,8 +9,13 @@ import { createAgent } from "./index9.js";
 import { LangChainProvider } from "./index10.js";
 export * from "hedera-agent-kit";
 import { MCPServers, createMCPConfig, validateServerConfig } from "./index11.js";
+import { TokenCounter } from "./index12.js";
+import { MemoryWindow } from "./index13.js";
+import { ContentStorage } from "./index14.js";
+import { SmartMemoryManager } from "./index15.js";
 export {
   BaseAgent,
+  ContentStorage,
   ConversationalAgent,
   HCS10Plugin,
   HCS2Plugin,
@@ -19,7 +24,10 @@ export {
   LangChainAgent,
   LangChainProvider,
   MCPServers,
+  MemoryWindow,
   HCS10Plugin2 as OpenConvAIPlugin,
+  SmartMemoryManager,
+  TokenCounter,
   createAgent,
   createMCPConfig,
   validateServerConfig
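
The practical effect of this change is that the memory utilities become part of the package's public entry point. A minimal sketch of the new import surface, using only names that appear in the export list above:

import {
  ContentStorage,
  MemoryWindow,
  SmartMemoryManager,
  TokenCounter,
} from '@hashgraphonline/conversational-agent';
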
package/dist/esm/index.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.js","sources":[],"sourcesContent":[],"names":[],"mappings":"
+{"version":3,"file":"index.js","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;;;;;;;;"}
package/dist/esm/index12.js
CHANGED
@@ -1,49 +1,127 @@
-import {
-…
+import { encoding_for_model } from "tiktoken";
+const _TokenCounter = class _TokenCounter {
+  constructor(modelName = "gpt-4o") {
+    this.modelName = modelName;
+    try {
+      this.encoding = encoding_for_model(modelName);
+    } catch {
+      console.warn(`Model ${modelName} not found, falling back to gpt-4o encoding`);
+      this.encoding = encoding_for_model("gpt-4o");
+      this.modelName = "gpt-4o";
+    }
+  }
+  /**
+   * Count tokens in raw text content
+   * @param text - The text to count tokens for
+   * @returns Number of tokens
+   */
+  countTokens(text) {
+    if (!text || text.trim() === "") {
+      return 0;
+    }
+    try {
+      const tokens = this.encoding.encode(text);
+      return tokens.length;
+    } catch (error) {
+      console.warn("Error counting tokens, falling back to word-based estimation:", error);
+      return Math.ceil(text.split(/\s+/).length * 1.3);
+    }
+  }
+  /**
+   * Count tokens for a single chat message including role overhead
+   * @param message - The message to count tokens for
+   * @returns Number of tokens including message formatting overhead
+   */
+  countMessageTokens(message) {
+    const contentTokens = this.countTokens(String(message.content ?? ""));
+    const roleTokens = this.countTokens(this.getMessageRole(message));
+    return contentTokens + roleTokens + _TokenCounter.MESSAGE_OVERHEAD + _TokenCounter.ROLE_OVERHEAD;
+  }
+  /**
+   * Count tokens for multiple messages
+   * @param messages - Array of messages to count
+   * @returns Total token count for all messages
+   */
+  countMessagesTokens(messages) {
+    if (!messages || messages.length === 0) {
+      return 0;
+    }
+    let total = 0;
+    for (const message of messages) {
+      total += this.countMessageTokens(message);
+    }
+    return total;
+  }
+  /**
+   * Estimate tokens for system prompt
+   * System prompts have slightly different overhead in chat completions
+   * @param systemPrompt - The system prompt text
+   * @returns Estimated token count
+   */
+  estimateSystemPromptTokens(systemPrompt) {
+    if (!systemPrompt || systemPrompt.trim() === "") {
+      return 0;
+    }
+    const contentTokens = this.countTokens(systemPrompt);
+    const roleTokens = this.countTokens("system");
+    return contentTokens + roleTokens + _TokenCounter.MESSAGE_OVERHEAD + _TokenCounter.ROLE_OVERHEAD;
+  }
+  /**
+   * Get total context size estimate including system prompt and messages
+   * @param systemPrompt - System prompt text
+   * @param messages - Conversation messages
+   * @returns Total estimated token count
+   */
+  estimateContextSize(systemPrompt, messages) {
+    const systemTokens = this.estimateSystemPromptTokens(systemPrompt);
+    const messageTokens = this.countMessagesTokens(messages);
+    const completionOverhead = 10;
+    return systemTokens + messageTokens + completionOverhead;
+  }
+  /**
+   * Get the role string for a message
+   * @param message - The message to get the role for
+   * @returns Role string ('user', 'assistant', 'system', etc.)
+   */
+  getMessageRole(message) {
+    const messageType = message._getType();
+    switch (messageType) {
+      case "human":
+        return "user";
+      case "ai":
+        return "assistant";
+      case "system":
+        return "system";
+      case "function":
+        return "function";
+      case "tool":
+        return "tool";
+      default:
+        return "user";
+    }
+  }
+  /**
+   * Get the model name being used for token counting
+   * @returns The tiktoken model name
+   */
+  getModelName() {
+    return this.modelName;
+  }
+  /**
+   * Clean up encoding resources
+   */
+  dispose() {
+    try {
+      this.encoding.free();
+    } catch {
+      console.warn("Error disposing encoding");
+    }
+  }
+};
+_TokenCounter.MESSAGE_OVERHEAD = 3;
+_TokenCounter.ROLE_OVERHEAD = 1;
+let TokenCounter = _TokenCounter;
 export {
-…
+  TokenCounter
 };
 //# sourceMappingURL=index12.js.map
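
index12.js now carries the compiled TokenCounter, a tiktoken-based counter that falls back to the gpt-4o encoding for unknown models and to a word-count estimate when encoding fails. A short usage sketch based on the code above; the message contents are illustrative:

import { AIMessage, HumanMessage } from '@langchain/core/messages';
import { TokenCounter } from '@hashgraphonline/conversational-agent';

const counter = new TokenCounter();

// Raw text and full chat-context estimates.
const textTokens = counter.countTokens('Transfer 5 HBAR to 0.0.1234');
const history = [
  new HumanMessage('Create a token called DemoCoin'),
  new AIMessage('Created token 0.0.555555'),
];
const contextTokens = counter.estimateContextSize('You are a Hedera assistant.', history);
console.log(counter.getModelName(), textTokens, contextTokens);

// Release the underlying tiktoken encoding when finished.
counter.dispose();
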
package/dist/esm/index12.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index12.js","sources":["../../src/
+
{"version":3,"file":"index12.js","sources":["../../src/memory/TokenCounter.ts"],"sourcesContent":["import { encoding_for_model } from 'tiktoken';\nimport type { TiktokenModel } from 'tiktoken';\nimport type { BaseMessage } from '@langchain/core/messages';\n\n/**\n * Token counter utility for OpenAI models using tiktoken encoding\n * Provides accurate token counting for text content and chat messages\n */\nexport class TokenCounter {\n private encoding: ReturnType<typeof encoding_for_model>;\n private modelName: TiktokenModel;\n\n private static readonly MESSAGE_OVERHEAD = 3;\n private static readonly ROLE_OVERHEAD = 1;\n\n constructor(modelName: TiktokenModel = 'gpt-4o') {\n this.modelName = modelName;\n try {\n this.encoding = encoding_for_model(modelName);\n } catch {\n console.warn(`Model ${modelName} not found, falling back to gpt-4o encoding`);\n this.encoding = encoding_for_model('gpt-4o');\n this.modelName = 'gpt-4o';\n }\n }\n\n /**\n * Count tokens in raw text content\n * @param text - The text to count tokens for\n * @returns Number of tokens\n */\n countTokens(text: string): number {\n if (!text || text.trim() === '') {\n return 0;\n }\n\n try {\n const tokens = this.encoding.encode(text);\n return tokens.length;\n } catch (error) {\n console.warn('Error counting tokens, falling back to word-based estimation:', error);\n return Math.ceil(text.split(/\\s+/).length * 1.3);\n }\n }\n\n /**\n * Count tokens for a single chat message including role overhead\n * @param message - The message to count tokens for\n * @returns Number of tokens including message formatting overhead\n */\n countMessageTokens(message: BaseMessage): number {\n const contentTokens = this.countTokens(String(message.content ?? ''));\n const roleTokens = this.countTokens(this.getMessageRole(message));\n \n return contentTokens + roleTokens + TokenCounter.MESSAGE_OVERHEAD + TokenCounter.ROLE_OVERHEAD;\n }\n\n /**\n * Count tokens for multiple messages\n * @param messages - Array of messages to count\n * @returns Total token count for all messages\n */\n countMessagesTokens(messages: BaseMessage[]): number {\n if (!messages || messages.length === 0) {\n return 0;\n }\n\n let total = 0;\n for (const message of messages) {\n total += this.countMessageTokens(message);\n }\n return total;\n }\n\n /**\n * Estimate tokens for system prompt\n * System prompts have slightly different overhead in chat completions\n * @param systemPrompt - The system prompt text\n * @returns Estimated token count\n */\n estimateSystemPromptTokens(systemPrompt: string): number {\n if (!systemPrompt || systemPrompt.trim() === '') {\n return 0;\n }\n\n const contentTokens = this.countTokens(systemPrompt);\n const roleTokens = this.countTokens('system');\n \n return contentTokens + roleTokens + TokenCounter.MESSAGE_OVERHEAD + TokenCounter.ROLE_OVERHEAD;\n }\n\n /**\n * Get total context size estimate including system prompt and messages\n * @param systemPrompt - System prompt text\n * @param messages - Conversation messages\n * @returns Total estimated token count\n */\n estimateContextSize(systemPrompt: string, messages: BaseMessage[]): number {\n const systemTokens = this.estimateSystemPromptTokens(systemPrompt);\n const messageTokens = this.countMessagesTokens(messages);\n \n const completionOverhead = 10;\n \n return systemTokens + messageTokens + completionOverhead;\n }\n\n /**\n * Get the role string for a message\n * @param message - The message to get the role for\n * @returns Role string ('user', 'assistant', 'system', etc.)\n */\n 
private getMessageRole(message: BaseMessage): string {\n const messageType = message._getType();\n switch (messageType) {\n case 'human':\n return 'user';\n case 'ai':\n return 'assistant';\n case 'system':\n return 'system';\n case 'function':\n return 'function';\n case 'tool':\n return 'tool';\n default:\n return 'user';\n }\n }\n\n /**\n * Get the model name being used for token counting\n * @returns The tiktoken model name\n */\n getModelName(): string {\n return this.modelName;\n }\n\n /**\n * Clean up encoding resources\n */\n dispose(): void {\n try {\n this.encoding.free();\n } catch {\n console.warn('Error disposing encoding');\n }\n }\n}"],"names":[],"mappings":";AAQO,MAAM,gBAAN,MAAM,cAAa;AAAA,EAOxB,YAAY,YAA2B,UAAU;AAC/C,SAAK,YAAY;AACjB,QAAI;AACF,WAAK,WAAW,mBAAmB,SAAS;AAAA,IAC9C,QAAQ;AACN,cAAQ,KAAK,SAAS,SAAS,6CAA6C;AAC5E,WAAK,WAAW,mBAAmB,QAAQ;AAC3C,WAAK,YAAY;AAAA,IACnB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,YAAY,MAAsB;AAChC,QAAI,CAAC,QAAQ,KAAK,KAAA,MAAW,IAAI;AAC/B,aAAO;AAAA,IACT;AAEA,QAAI;AACF,YAAM,SAAS,KAAK,SAAS,OAAO,IAAI;AACxC,aAAO,OAAO;AAAA,IAChB,SAAS,OAAO;AACd,cAAQ,KAAK,iEAAiE,KAAK;AACnF,aAAO,KAAK,KAAK,KAAK,MAAM,KAAK,EAAE,SAAS,GAAG;AAAA,IACjD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,mBAAmB,SAA8B;AAC/C,UAAM,gBAAgB,KAAK,YAAY,OAAO,QAAQ,WAAW,EAAE,CAAC;AACpE,UAAM,aAAa,KAAK,YAAY,KAAK,eAAe,OAAO,CAAC;AAEhE,WAAO,gBAAgB,aAAa,cAAa,mBAAmB,cAAa;AAAA,EACnF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,oBAAoB,UAAiC;AACnD,QAAI,CAAC,YAAY,SAAS,WAAW,GAAG;AACtC,aAAO;AAAA,IACT;AAEA,QAAI,QAAQ;AACZ,eAAW,WAAW,UAAU;AAC9B,eAAS,KAAK,mBAAmB,OAAO;AAAA,IAC1C;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,2BAA2B,cAA8B;AACvD,QAAI,CAAC,gBAAgB,aAAa,KAAA,MAAW,IAAI;AAC/C,aAAO;AAAA,IACT;AAEA,UAAM,gBAAgB,KAAK,YAAY,YAAY;AACnD,UAAM,aAAa,KAAK,YAAY,QAAQ;AAE5C,WAAO,gBAAgB,aAAa,cAAa,mBAAmB,cAAa;AAAA,EACnF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,oBAAoB,cAAsB,UAAiC;AACzE,UAAM,eAAe,KAAK,2BAA2B,YAAY;AACjE,UAAM,gBAAgB,KAAK,oBAAoB,QAAQ;AAEvD,UAAM,qBAAqB;AAE3B,WAAO,eAAe,gBAAgB;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,eAAe,SAA8B;AACnD,UAAM,cAAc,QAAQ,SAAA;AAC5B,YAAQ,aAAA;AAAA,MACN,KAAK;AACH,eAAO;AAAA,MACT,KAAK;AACH,eAAO;AAAA,MACT,KAAK;AACH,eAAO;AAAA,MACT,KAAK;AACH,eAAO;AAAA,MACT,KAAK;AACH,eAAO;AAAA,MACT;AACE,eAAO;AAAA,IAAA;AAAA,EAEb;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,eAAuB;AACrB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,UAAgB;AACd,QAAI;AACF,WAAK,SAAS,KAAA;AAAA,IAChB,QAAQ;AACN,cAAQ,KAAK,0BAA0B;AAAA,IACzC;AAAA,EACF;AACF;AAvIE,cAAwB,mBAAmB;AAC3C,cAAwB,gBAAgB;AALnC,IAAM,eAAN;"}
package/dist/esm/index13.js
CHANGED
@@ -1,16 +1,181 @@
-…
+import { TokenCounter } from "./index12.js";
+const _MemoryWindow = class _MemoryWindow {
+  constructor(maxTokens = _MemoryWindow.DEFAULT_MAX_TOKENS, reserveTokens = _MemoryWindow.DEFAULT_RESERVE_TOKENS, tokenCounter) {
+    this.messages = [];
+    this.systemPrompt = "";
+    this.systemPromptTokens = 0;
+    if (reserveTokens >= maxTokens) {
+      throw new Error("Reserve tokens must be less than max tokens");
+    }
+    this.maxTokens = maxTokens;
+    this.reserveTokens = reserveTokens;
+    this.tokenCounter = tokenCounter || new TokenCounter();
+  }
+  /**
+   * Add a message to the memory window, pruning old messages if necessary
+   * @param message - The message to add
+   * @returns Result of the add operation including any pruned messages
+   */
+  addMessage(message) {
+    this.tokenCounter.countMessageTokens(message);
+    this.messages.push(message);
+    const currentTokens = this.getCurrentTokenCount();
+    const availableTokens = this.maxTokens - this.reserveTokens;
+    let prunedMessages = [];
+    if (currentTokens > availableTokens) {
+      this.messages.pop();
+      prunedMessages = this.pruneToFit();
+      this.messages.push(message);
+    }
+    return {
+      added: true,
+      prunedMessages,
+      currentTokenCount: this.getCurrentTokenCount(),
+      remainingCapacity: this.getRemainingTokenCapacity()
+    };
+  }
+  /**
+   * Prune old messages to fit within token limits
+   * Removes messages in pairs to maintain conversational flow
+   * @returns Array of pruned messages
+   */
+  pruneToFit() {
+    const prunedMessages = [];
+    const targetTokens = this.maxTokens - this.reserveTokens;
+    while (this.getCurrentTokenCount() > targetTokens && this.messages.length > 0) {
+      const batchSize = Math.min(_MemoryWindow.PRUNING_BATCH_SIZE, this.messages.length);
+      for (let i = 0; i < batchSize; i++) {
+        const prunedMessage = this.messages.shift();
+        if (prunedMessage) {
+          prunedMessages.push(prunedMessage);
+        }
+      }
+      if (prunedMessages.length > 1e3) {
+        console.warn("MemoryWindow: Excessive pruning detected, stopping to prevent infinite loop");
+        break;
+      }
+    }
+    return prunedMessages;
+  }
+  /**
+   * Get current token count including system prompt and messages
+   * @returns Current token count
+   */
+  getCurrentTokenCount() {
+    const messageTokens = this.tokenCounter.countMessagesTokens(this.messages);
+    return this.systemPromptTokens + messageTokens;
+  }
+  /**
+   * Get remaining token capacity before hitting the reserve limit
+   * @returns Remaining tokens that can be used
+   */
+  getRemainingTokenCapacity() {
+    return Math.max(0, this.maxTokens - this.getCurrentTokenCount());
+  }
+  /**
+   * Check if a message can be added without exceeding limits
+   * @param message - The message to check
+   * @returns True if message can be added within reserve limits
+   */
+  canAddMessage(message) {
+    const messageTokens = this.tokenCounter.countMessageTokens(message);
+    const currentTokens = this.getCurrentTokenCount();
+    const wouldExceedReserve = currentTokens + messageTokens > this.maxTokens - this.reserveTokens;
+    if (messageTokens > this.maxTokens) {
+      return false;
+    }
+    return !wouldExceedReserve || this.messages.length > 0;
+  }
+  /**
+   * Get all messages in the memory window
+   * @returns Copy of current messages array
+   */
+  getMessages() {
+    return [...this.messages];
+  }
+  /**
+   * Clear all messages from the memory window
+   */
+  clear() {
+    this.messages = [];
+  }
+  /**
+   * Set the system prompt and update token calculations
+   * @param systemPrompt - The system prompt text
+   */
+  setSystemPrompt(systemPrompt) {
+    this.systemPrompt = systemPrompt;
+    this.systemPromptTokens = this.tokenCounter.estimateSystemPromptTokens(systemPrompt);
+  }
+  /**
+   * Get the current system prompt
+   * @returns Current system prompt
+   */
+  getSystemPrompt() {
+    return this.systemPrompt;
+  }
+  /**
+   * Get current configuration
+   * @returns Memory window configuration
+   */
+  getConfig() {
+    return {
+      maxTokens: this.maxTokens,
+      reserveTokens: this.reserveTokens,
+      currentTokens: this.getCurrentTokenCount(),
+      messageCount: this.messages.length,
+      systemPromptTokens: this.systemPromptTokens
+    };
+  }
+  /**
+   * Update token limits
+   * @param maxTokens - New maximum token limit
+   * @param reserveTokens - New reserve token amount
+   */
+  updateLimits(maxTokens, reserveTokens) {
+    if (reserveTokens !== void 0 && reserveTokens >= maxTokens) {
+      throw new Error("Reserve tokens must be less than max tokens");
+    }
+    this.maxTokens = maxTokens;
+    if (reserveTokens !== void 0) {
+      this.reserveTokens = reserveTokens;
+    }
+    if (this.getCurrentTokenCount() > this.maxTokens - this.reserveTokens) {
+      this.pruneToFit();
+    }
+  }
+  /**
+   * Get statistics about the memory window
+   * @returns Memory usage statistics
+   */
+  getStats() {
+    const currentTokens = this.getCurrentTokenCount();
+    const capacity = this.maxTokens;
+    const usagePercentage = currentTokens / capacity * 100;
+    return {
+      totalMessages: this.messages.length,
+      currentTokens,
+      maxTokens: capacity,
+      reserveTokens: this.reserveTokens,
+      systemPromptTokens: this.systemPromptTokens,
+      usagePercentage: Math.round(usagePercentage * 100) / 100,
+      remainingCapacity: this.getRemainingTokenCapacity(),
+      canAcceptMore: this.getRemainingTokenCapacity() > this.reserveTokens
+    };
+  }
+  /**
+   * Clean up resources
+   */
+  dispose() {
+    this.clear();
+    this.tokenCounter.dispose();
+  }
+};
+_MemoryWindow.DEFAULT_MAX_TOKENS = 8e3;
+_MemoryWindow.DEFAULT_RESERVE_TOKENS = 1e3;
+_MemoryWindow.PRUNING_BATCH_SIZE = 2;
+let MemoryWindow = _MemoryWindow;
 export {
-…
+  MemoryWindow
 };
 //# sourceMappingURL=index13.js.map
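
index13.js previously held the system-message helper (see the old source map below) and now carries the compiled MemoryWindow, a token-budgeted message buffer that prunes the oldest messages in pairs once the configured reserve would be exceeded. A short sketch based on the code above; the small limits are chosen only to make pruning easy to observe (the defaults are 8000 max / 1000 reserve):

import { AIMessage, HumanMessage } from '@langchain/core/messages';
import { MemoryWindow } from '@hashgraphonline/conversational-agent';

const memoryWindow = new MemoryWindow(500, 100);
memoryWindow.setSystemPrompt('You are a Hedera assistant.');

memoryWindow.addMessage(new HumanMessage('What is my HBAR balance?'));
const result = memoryWindow.addMessage(new AIMessage('Your balance is 42 HBAR.'));

// addMessage reports anything pruned to stay under maxTokens - reserveTokens.
console.log(result.currentTokenCount, result.prunedMessages.length);
console.log(memoryWindow.getStats().usagePercentage);

memoryWindow.dispose();
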
package/dist/esm/index13.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"index13.js","sources":["../../src/config/system-message.ts"],"sourcesContent":["export const getSystemMessage = (accountId: string): string => `You are a helpful assistant managing Hashgraph Online HCS-10 connections, messages, HCS-2 registries, and content inscription.\n\nYou have access to tools for:\n- HCS-10: registering agents, finding registered agents, initiating connections, listing active connections, sending messages over connections, and checking for new messages\n- HCS-2: creating registries, registering entries, updating entries, deleting entries, migrating registries, and querying registry contents\n- Inscription: inscribing content from URLs, files, or buffers, creating Hashinal NFTs, and retrieving inscriptions\n\n*** IMPORTANT CONTEXT ***\nYou are currently operating as agent: ${accountId} on the Hashgraph Online network\nWhen users ask about \"my profile\", \"my account\", \"my connections\", etc., use this account ID: ${accountId}\n\nRemember the connection numbers when listing connections, as users might refer to them.`"],"names":[],"mappings":"AAAO,MAAM,mBAAmB,CAAC,cAA8B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,wCAQvB,SAAS;AAAA,gGAC+C,SAAS;AAAA;AAAA;"}
+
{"version":3,"file":"index13.js","sources":["../../src/memory/MemoryWindow.ts"],"sourcesContent":["import type { BaseMessage } from '@langchain/core/messages';\nimport { TokenCounter } from './TokenCounter';\n\n/**\n * Result of adding a message to the memory window\n */\nexport interface AddMessageResult {\n /** Whether the message was successfully added */\n added: boolean;\n /** Messages that were pruned to make room */\n prunedMessages: BaseMessage[];\n /** Current token count after operation */\n currentTokenCount: number;\n /** Remaining token capacity */\n remainingCapacity: number;\n}\n\n/**\n * Memory window that manages conversation history with token-based size limits\n * Automatically prunes old messages to stay within token limits while preserving conversational context\n */\nexport class MemoryWindow {\n private messages: BaseMessage[] = [];\n private maxTokens: number;\n private reserveTokens: number;\n private tokenCounter: TokenCounter;\n private systemPrompt: string = '';\n private systemPromptTokens: number = 0;\n\n public static readonly DEFAULT_MAX_TOKENS = 8000;\n public static readonly DEFAULT_RESERVE_TOKENS = 1000;\n public static readonly PRUNING_BATCH_SIZE = 2;\n\n constructor(\n maxTokens: number = MemoryWindow.DEFAULT_MAX_TOKENS,\n reserveTokens: number = MemoryWindow.DEFAULT_RESERVE_TOKENS,\n tokenCounter?: TokenCounter\n ) {\n if (reserveTokens >= maxTokens) {\n throw new Error('Reserve tokens must be less than max tokens');\n }\n\n this.maxTokens = maxTokens;\n this.reserveTokens = reserveTokens;\n this.tokenCounter = tokenCounter || new TokenCounter();\n }\n\n /**\n * Add a message to the memory window, pruning old messages if necessary\n * @param message - The message to add\n * @returns Result of the add operation including any pruned messages\n */\n addMessage(message: BaseMessage): AddMessageResult {\n this.tokenCounter.countMessageTokens(message);\n \n this.messages.push(message);\n \n const currentTokens = this.getCurrentTokenCount();\n const availableTokens = this.maxTokens - this.reserveTokens;\n \n let prunedMessages: BaseMessage[] = [];\n \n if (currentTokens > availableTokens) {\n this.messages.pop();\n \n prunedMessages = this.pruneToFit();\n \n this.messages.push(message);\n }\n\n return {\n added: true,\n prunedMessages,\n currentTokenCount: this.getCurrentTokenCount(),\n remainingCapacity: this.getRemainingTokenCapacity()\n };\n }\n\n /**\n * Prune old messages to fit within token limits\n * Removes messages in pairs to maintain conversational flow\n * @returns Array of pruned messages\n */\n pruneToFit(): BaseMessage[] {\n const prunedMessages: BaseMessage[] = [];\n const targetTokens = this.maxTokens - this.reserveTokens;\n \n while (this.getCurrentTokenCount() > targetTokens && this.messages.length > 0) {\n const batchSize = Math.min(MemoryWindow.PRUNING_BATCH_SIZE, this.messages.length);\n \n for (let i = 0; i < batchSize; i++) {\n const prunedMessage = this.messages.shift();\n if (prunedMessage) {\n prunedMessages.push(prunedMessage);\n }\n }\n \n if (prunedMessages.length > 1000) {\n console.warn('MemoryWindow: Excessive pruning detected, stopping to prevent infinite loop');\n break;\n }\n }\n\n return prunedMessages;\n }\n\n /**\n * Get current token count including system prompt and messages\n * @returns Current token count\n */\n getCurrentTokenCount(): number {\n const messageTokens = this.tokenCounter.countMessagesTokens(this.messages);\n return this.systemPromptTokens + messageTokens;\n }\n\n /**\n * Get remaining token capacity 
before hitting the reserve limit\n * @returns Remaining tokens that can be used\n */\n getRemainingTokenCapacity(): number {\n return Math.max(0, this.maxTokens - this.getCurrentTokenCount());\n }\n\n /**\n * Check if a message can be added without exceeding limits\n * @param message - The message to check\n * @returns True if message can be added within reserve limits\n */\n canAddMessage(message: BaseMessage): boolean {\n const messageTokens = this.tokenCounter.countMessageTokens(message);\n const currentTokens = this.getCurrentTokenCount();\n const wouldExceedReserve = (currentTokens + messageTokens) > (this.maxTokens - this.reserveTokens);\n \n if (messageTokens > this.maxTokens) {\n return false;\n }\n \n return !wouldExceedReserve || this.messages.length > 0;\n }\n\n /**\n * Get all messages in the memory window\n * @returns Copy of current messages array\n */\n getMessages(): BaseMessage[] {\n return [...this.messages];\n }\n\n /**\n * Clear all messages from the memory window\n */\n clear(): void {\n this.messages = [];\n }\n\n /**\n * Set the system prompt and update token calculations\n * @param systemPrompt - The system prompt text\n */\n setSystemPrompt(systemPrompt: string): void {\n this.systemPrompt = systemPrompt;\n this.systemPromptTokens = this.tokenCounter.estimateSystemPromptTokens(systemPrompt);\n }\n\n /**\n * Get the current system prompt\n * @returns Current system prompt\n */\n getSystemPrompt(): string {\n return this.systemPrompt;\n }\n\n /**\n * Get current configuration\n * @returns Memory window configuration\n */\n getConfig() {\n return {\n maxTokens: this.maxTokens,\n reserveTokens: this.reserveTokens,\n currentTokens: this.getCurrentTokenCount(),\n messageCount: this.messages.length,\n systemPromptTokens: this.systemPromptTokens\n };\n }\n\n /**\n * Update token limits\n * @param maxTokens - New maximum token limit\n * @param reserveTokens - New reserve token amount\n */\n updateLimits(maxTokens: number, reserveTokens?: number): void {\n if (reserveTokens !== undefined && reserveTokens >= maxTokens) {\n throw new Error('Reserve tokens must be less than max tokens');\n }\n\n this.maxTokens = maxTokens;\n if (reserveTokens !== undefined) {\n this.reserveTokens = reserveTokens;\n }\n\n if (this.getCurrentTokenCount() > (this.maxTokens - this.reserveTokens)) {\n this.pruneToFit();\n }\n }\n\n /**\n * Get statistics about the memory window\n * @returns Memory usage statistics\n */\n getStats() {\n const currentTokens = this.getCurrentTokenCount();\n const capacity = this.maxTokens;\n const usagePercentage = (currentTokens / capacity) * 100;\n \n return {\n totalMessages: this.messages.length,\n currentTokens,\n maxTokens: capacity,\n reserveTokens: this.reserveTokens,\n systemPromptTokens: this.systemPromptTokens,\n usagePercentage: Math.round(usagePercentage * 100) / 100,\n remainingCapacity: this.getRemainingTokenCapacity(),\n canAcceptMore: this.getRemainingTokenCapacity() > this.reserveTokens\n };\n }\n\n /**\n * Clean up resources\n */\n dispose(): void {\n this.clear();\n this.tokenCounter.dispose();\n 
}\n}"],"names":[],"mappings":";AAqBO,MAAM,gBAAN,MAAM,cAAa;AAAA,EAYxB,YACE,YAAoB,cAAa,oBACjC,gBAAwB,cAAa,wBACrC,cACA;AAfF,SAAQ,WAA0B,CAAA;AAIlC,SAAQ,eAAuB;AAC/B,SAAQ,qBAA6B;AAWnC,QAAI,iBAAiB,WAAW;AAC9B,YAAM,IAAI,MAAM,6CAA6C;AAAA,IAC/D;AAEA,SAAK,YAAY;AACjB,SAAK,gBAAgB;AACrB,SAAK,eAAe,gBAAgB,IAAI,aAAA;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,WAAW,SAAwC;AACjD,SAAK,aAAa,mBAAmB,OAAO;AAE5C,SAAK,SAAS,KAAK,OAAO;AAE1B,UAAM,gBAAgB,KAAK,qBAAA;AAC3B,UAAM,kBAAkB,KAAK,YAAY,KAAK;AAE9C,QAAI,iBAAgC,CAAA;AAEpC,QAAI,gBAAgB,iBAAiB;AACnC,WAAK,SAAS,IAAA;AAEd,uBAAiB,KAAK,WAAA;AAEtB,WAAK,SAAS,KAAK,OAAO;AAAA,IAC5B;AAEA,WAAO;AAAA,MACL,OAAO;AAAA,MACP;AAAA,MACA,mBAAmB,KAAK,qBAAA;AAAA,MACxB,mBAAmB,KAAK,0BAAA;AAAA,IAA0B;AAAA,EAEtD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,aAA4B;AAC1B,UAAM,iBAAgC,CAAA;AACtC,UAAM,eAAe,KAAK,YAAY,KAAK;AAE3C,WAAO,KAAK,yBAAyB,gBAAgB,KAAK,SAAS,SAAS,GAAG;AAC7E,YAAM,YAAY,KAAK,IAAI,cAAa,oBAAoB,KAAK,SAAS,MAAM;AAEhF,eAAS,IAAI,GAAG,IAAI,WAAW,KAAK;AAClC,cAAM,gBAAgB,KAAK,SAAS,MAAA;AACpC,YAAI,eAAe;AACjB,yBAAe,KAAK,aAAa;AAAA,QACnC;AAAA,MACF;AAEA,UAAI,eAAe,SAAS,KAAM;AAChC,gBAAQ,KAAK,6EAA6E;AAC1F;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,uBAA+B;AAC7B,UAAM,gBAAgB,KAAK,aAAa,oBAAoB,KAAK,QAAQ;AACzE,WAAO,KAAK,qBAAqB;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,4BAAoC;AAClC,WAAO,KAAK,IAAI,GAAG,KAAK,YAAY,KAAK,sBAAsB;AAAA,EACjE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,cAAc,SAA+B;AAC3C,UAAM,gBAAgB,KAAK,aAAa,mBAAmB,OAAO;AAClE,UAAM,gBAAgB,KAAK,qBAAA;AAC3B,UAAM,qBAAsB,gBAAgB,gBAAkB,KAAK,YAAY,KAAK;AAEpF,QAAI,gBAAgB,KAAK,WAAW;AAClC,aAAO;AAAA,IACT;AAEA,WAAO,CAAC,sBAAsB,KAAK,SAAS,SAAS;AAAA,EACvD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,cAA6B;AAC3B,WAAO,CAAC,GAAG,KAAK,QAAQ;AAAA,EAC1B;AAAA;AAAA;AAAA;AAAA,EAKA,QAAc;AACZ,SAAK,WAAW,CAAA;AAAA,EAClB;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,gBAAgB,cAA4B;AAC1C,SAAK,eAAe;AACpB,SAAK,qBAAqB,KAAK,aAAa,2BAA2B,YAAY;AAAA,EACrF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,kBAA0B;AACxB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,YAAY;AACV,WAAO;AAAA,MACL,WAAW,KAAK;AAAA,MAChB,eAAe,KAAK;AAAA,MACpB,eAAe,KAAK,qBAAA;AAAA,MACpB,cAAc,KAAK,SAAS;AAAA,MAC5B,oBAAoB,KAAK;AAAA,IAAA;AAAA,EAE7B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,aAAa,WAAmB,eAA8B;AAC5D,QAAI,kBAAkB,UAAa,iBAAiB,WAAW;AAC7D,YAAM,IAAI,MAAM,6CAA6C;AAAA,IAC/D;AAEA,SAAK,YAAY;AACjB,QAAI,kBAAkB,QAAW;AAC/B,WAAK,gBAAgB;AAAA,IACvB;AAEA,QAAI,KAAK,qBAAA,IAA0B,KAAK,YAAY,KAAK,eAAgB;AACvE,WAAK,WAAA;AAAA,IACP;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,WAAW;AACT,UAAM,gBAAgB,KAAK,qBAAA;AAC3B,UAAM,WAAW,KAAK;AACtB,UAAM,kBAAmB,gBAAgB,WAAY;AAErD,WAAO;AAAA,MACL,eAAe,KAAK,SAAS;AAAA,MAC7B;AAAA,MACA,WAAW;AAAA,MACX,eAAe,KAAK;AAAA,MACpB,oBAAoB,KAAK;AAAA,MACzB,iBAAiB,KAAK,MAAM,kBAAkB,GAAG,IAAI;AAAA,MACrD,mBAAmB,KAAK,0BAAA;AAAA,MACxB,eAAe,KAAK,0BAAA,IAA8B,KAAK;AAAA,IAAA;AAAA,EAE3D;AAAA;AAAA;AAAA;AAAA,EAKA,UAAgB;AACd,SAAK,MAAA;AACL,SAAK,aAAa,QAAA;AAAA,EACpB;AACF;AA7ME,cAAuB,qBAAqB;AAC5C,cAAuB,yBAAyB;AAChD,cAAuB,qBAAqB;AAVvC,IAAM,eAAN;"}