@vybestack/llxprt-code-core 0.1.23 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/README.md +21 -17
- package/dist/src/adapters/IStreamAdapter.d.ts +3 -3
- package/dist/src/auth/oauth-errors.d.ts +173 -0
- package/dist/src/auth/oauth-errors.js +461 -0
- package/dist/src/auth/oauth-errors.js.map +1 -0
- package/dist/src/auth/precedence.d.ts +1 -5
- package/dist/src/auth/precedence.js +28 -48
- package/dist/src/auth/precedence.js.map +1 -1
- package/dist/src/auth/token-store.js +2 -2
- package/dist/src/auth/token-store.js.map +1 -1
- package/dist/src/auth/types.d.ts +4 -4
- package/dist/src/code_assist/codeAssist.js +19 -6
- package/dist/src/code_assist/codeAssist.js.map +1 -1
- package/dist/src/code_assist/oauth2.d.ts +7 -0
- package/dist/src/code_assist/oauth2.js +82 -32
- package/dist/src/code_assist/oauth2.js.map +1 -1
- package/dist/src/code_assist/server.js +15 -4
- package/dist/src/code_assist/server.js.map +1 -1
- package/dist/src/code_assist/setup.js +9 -0
- package/dist/src/code_assist/setup.js.map +1 -1
- package/dist/src/config/index.d.ts +7 -0
- package/dist/src/config/index.js +8 -0
- package/dist/src/config/index.js.map +1 -0
- package/dist/src/core/client.d.ts +15 -20
- package/dist/src/core/client.js +98 -124
- package/dist/src/core/client.js.map +1 -1
- package/dist/src/core/compression-config.d.ts +10 -0
- package/dist/src/core/compression-config.js +17 -0
- package/dist/src/core/compression-config.js.map +1 -0
- package/dist/src/core/coreToolScheduler.js +50 -15
- package/dist/src/core/coreToolScheduler.js.map +1 -1
- package/dist/src/core/geminiChat.d.ts +68 -9
- package/dist/src/core/geminiChat.js +940 -405
- package/dist/src/core/geminiChat.js.map +1 -1
- package/dist/src/core/nonInteractiveToolExecutor.js +70 -19
- package/dist/src/core/nonInteractiveToolExecutor.js.map +1 -1
- package/dist/src/core/prompts.js +35 -25
- package/dist/src/core/prompts.js.map +1 -1
- package/dist/src/core/turn.d.ts +1 -0
- package/dist/src/core/turn.js +8 -6
- package/dist/src/core/turn.js.map +1 -1
- package/dist/src/ide/ide-client.d.ts +1 -1
- package/dist/src/ide/ide-client.js +12 -6
- package/dist/src/ide/ide-client.js.map +1 -1
- package/dist/src/index.d.ts +4 -2
- package/dist/src/index.js +5 -2
- package/dist/src/index.js.map +1 -1
- package/dist/src/prompt-config/TemplateEngine.js +17 -0
- package/dist/src/prompt-config/TemplateEngine.js.map +1 -1
- package/dist/src/prompt-config/defaults/core-defaults.js +39 -32
- package/dist/src/prompt-config/defaults/core-defaults.js.map +1 -1
- package/dist/src/prompt-config/defaults/core.md +2 -0
- package/dist/src/prompt-config/defaults/provider-defaults.js +34 -27
- package/dist/src/prompt-config/defaults/provider-defaults.js.map +1 -1
- package/dist/src/prompt-config/defaults/providers/gemini/core.md +270 -0
- package/dist/src/prompt-config/defaults/providers/gemini/models/gemini-2.5-flash/core.md +12 -0
- package/dist/src/prompt-config/defaults/providers/gemini/models/gemini-2.5-flash/gemini-2-5-flash/core.md +12 -0
- package/dist/src/prompt-config/types.d.ts +2 -0
- package/dist/src/providers/BaseProvider.d.ts +39 -13
- package/dist/src/providers/BaseProvider.js +102 -28
- package/dist/src/providers/BaseProvider.js.map +1 -1
- package/dist/src/providers/IProvider.d.ts +17 -3
- package/dist/src/providers/LoggingProviderWrapper.d.ts +10 -3
- package/dist/src/providers/LoggingProviderWrapper.js +33 -27
- package/dist/src/providers/LoggingProviderWrapper.js.map +1 -1
- package/dist/src/providers/ProviderContentGenerator.d.ts +2 -2
- package/dist/src/providers/ProviderContentGenerator.js +9 -6
- package/dist/src/providers/ProviderContentGenerator.js.map +1 -1
- package/dist/src/providers/ProviderManager.d.ts +4 -0
- package/dist/src/providers/ProviderManager.js +6 -0
- package/dist/src/providers/ProviderManager.js.map +1 -1
- package/dist/src/providers/anthropic/AnthropicProvider.d.ts +34 -21
- package/dist/src/providers/anthropic/AnthropicProvider.js +505 -492
- package/dist/src/providers/anthropic/AnthropicProvider.js.map +1 -1
- package/dist/src/providers/gemini/GeminiProvider.d.ts +23 -9
- package/dist/src/providers/gemini/GeminiProvider.js +344 -515
- package/dist/src/providers/gemini/GeminiProvider.js.map +1 -1
- package/dist/src/providers/openai/ConversationCache.d.ts +3 -3
- package/dist/src/providers/openai/IChatGenerateParams.d.ts +9 -4
- package/dist/src/providers/openai/OpenAIProvider.d.ts +46 -96
- package/dist/src/providers/openai/OpenAIProvider.js +532 -1393
- package/dist/src/providers/openai/OpenAIProvider.js.map +1 -1
- package/dist/src/providers/openai/buildResponsesRequest.d.ts +3 -3
- package/dist/src/providers/openai/buildResponsesRequest.js +67 -37
- package/dist/src/providers/openai/buildResponsesRequest.js.map +1 -1
- package/dist/src/providers/openai/estimateRemoteTokens.d.ts +2 -2
- package/dist/src/providers/openai/estimateRemoteTokens.js +21 -8
- package/dist/src/providers/openai/estimateRemoteTokens.js.map +1 -1
- package/dist/src/providers/openai/parseResponsesStream.d.ts +6 -2
- package/dist/src/providers/openai/parseResponsesStream.js +99 -391
- package/dist/src/providers/openai/parseResponsesStream.js.map +1 -1
- package/dist/src/providers/openai/syntheticToolResponses.d.ts +5 -5
- package/dist/src/providers/openai/syntheticToolResponses.js +102 -91
- package/dist/src/providers/openai/syntheticToolResponses.js.map +1 -1
- package/dist/src/providers/openai-responses/OpenAIResponsesProvider.d.ts +89 -0
- package/dist/src/providers/openai-responses/OpenAIResponsesProvider.js +451 -0
- package/dist/src/providers/openai-responses/OpenAIResponsesProvider.js.map +1 -0
- package/dist/src/providers/openai-responses/index.d.ts +1 -0
- package/dist/src/providers/openai-responses/index.js +2 -0
- package/dist/src/providers/openai-responses/index.js.map +1 -0
- package/dist/src/providers/tokenizers/OpenAITokenizer.js +3 -3
- package/dist/src/providers/tokenizers/OpenAITokenizer.js.map +1 -1
- package/dist/src/providers/types.d.ts +1 -1
- package/dist/src/services/ClipboardService.d.ts +19 -0
- package/dist/src/services/ClipboardService.js +66 -0
- package/dist/src/services/ClipboardService.js.map +1 -0
- package/dist/src/services/history/ContentConverters.d.ts +43 -0
- package/dist/src/services/history/ContentConverters.js +325 -0
- package/dist/src/services/history/ContentConverters.js.map +1 -0
- package/dist/src/{providers/IMessage.d.ts → services/history/HistoryEvents.d.ts} +16 -22
- package/dist/src/{providers/IMessage.js → services/history/HistoryEvents.js} +1 -1
- package/dist/src/services/history/HistoryEvents.js.map +1 -0
- package/dist/src/services/history/HistoryService.d.ts +220 -0
- package/dist/src/services/history/HistoryService.js +673 -0
- package/dist/src/services/history/HistoryService.js.map +1 -0
- package/dist/src/services/history/IContent.d.ts +183 -0
- package/dist/src/services/history/IContent.js +104 -0
- package/dist/src/services/history/IContent.js.map +1 -0
- package/dist/src/services/index.d.ts +1 -0
- package/dist/src/services/index.js +1 -0
- package/dist/src/services/index.js.map +1 -1
- package/dist/src/telemetry/types.d.ts +16 -4
- package/dist/src/telemetry/types.js.map +1 -1
- package/dist/src/tools/IToolFormatter.d.ts +2 -2
- package/dist/src/tools/ToolFormatter.d.ts +42 -4
- package/dist/src/tools/ToolFormatter.js +159 -37
- package/dist/src/tools/ToolFormatter.js.map +1 -1
- package/dist/src/tools/doubleEscapeUtils.d.ts +57 -0
- package/dist/src/tools/doubleEscapeUtils.js +241 -0
- package/dist/src/tools/doubleEscapeUtils.js.map +1 -0
- package/dist/src/tools/read-file.d.ts +6 -1
- package/dist/src/tools/read-file.js +25 -11
- package/dist/src/tools/read-file.js.map +1 -1
- package/dist/src/tools/todo-schemas.d.ts +4 -4
- package/dist/src/tools/tools.js +13 -0
- package/dist/src/tools/tools.js.map +1 -1
- package/dist/src/tools/write-file.d.ts +6 -1
- package/dist/src/tools/write-file.js +48 -26
- package/dist/src/tools/write-file.js.map +1 -1
- package/dist/src/types/modelParams.d.ts +8 -0
- package/dist/src/utils/bfsFileSearch.js +2 -6
- package/dist/src/utils/bfsFileSearch.js.map +1 -1
- package/dist/src/utils/schemaValidator.js +16 -1
- package/dist/src/utils/schemaValidator.js.map +1 -1
- package/package.json +8 -7
- package/dist/src/providers/IMessage.js.map +0 -1
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.d.ts +0 -69
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.js +0 -577
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.js.map +0 -1
package/dist/src/services/history/HistoryService.js (new file)
@@ -0,0 +1,673 @@
/**
 * Copyright 2025 Vybestack LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { ContentValidation, } from './IContent.js';
import { EventEmitter } from 'events';
import { OpenAITokenizer } from '../../providers/tokenizers/OpenAITokenizer.js';
import { AnthropicTokenizer } from '../../providers/tokenizers/AnthropicTokenizer.js';
import { DebugLogger } from '../../debug/index.js';
import { randomUUID } from 'crypto';
/**
 * Service for managing conversation history in a provider-agnostic way.
 * All history is stored as IContent. Providers are responsible for converting
 * to/from their own formats.
 */
export class HistoryService extends EventEmitter {
    history = [];
    totalTokens = 0;
    tokenizerCache = new Map();
    tokenizerLock = Promise.resolve();
    logger = new DebugLogger('llxprt:history:service');
    // Compression state and queue
    isCompressing = false;
    pendingOperations = [];
    /**
     * Get or create tokenizer for a specific model
     */
    getTokenizerForModel(modelName) {
        if (this.tokenizerCache.has(modelName)) {
            return this.tokenizerCache.get(modelName);
        }
        let tokenizer;
        if (modelName.includes('claude') || modelName.includes('anthropic')) {
            tokenizer = new AnthropicTokenizer();
        }
        else if (modelName.includes('gpt') ||
            modelName.includes('openai') ||
            modelName.includes('o1') ||
            modelName.includes('o3')) {
            tokenizer = new OpenAITokenizer();
        }
        else {
            // Default to OpenAI tokenizer for Gemini and other models (tiktoken is pretty universal)
            tokenizer = new OpenAITokenizer();
        }
        this.tokenizerCache.set(modelName, tokenizer);
        return tokenizer;
    }
    /**
     * Generate a new normalized history tool ID.
     * Format: hist_tool_<uuid-v4>
     */
    generateHistoryId() {
        return `hist_tool_${randomUUID()}`;
    }
    /**
     * Get a callback suitable for passing into converters
     * which will generate normalized history IDs on demand.
     */
    getIdGeneratorCallback() {
        return () => this.generateHistoryId();
    }
    /**
     * Get the current total token count
     */
    getTotalTokens() {
        return this.totalTokens;
    }
    /**
     * Add content to the history
     * Note: We accept all content including empty responses for comprehensive history.
     * Filtering happens only when getting curated history.
     */
    add(content, modelName) {
        // If compression is active, queue this operation
        if (this.isCompressing) {
            this.logger.debug('Queueing add operation during compression', {
                speaker: content.speaker,
                blockTypes: content.blocks?.map((b) => b.type),
            });
            this.pendingOperations.push(() => {
                this.addInternal(content, modelName);
            });
            return;
        }
        // Otherwise, add immediately
        this.addInternal(content, modelName);
    }
    addInternal(content, modelName) {
        // Log content being added with any tool call/response IDs
        this.logger.debug('Adding content to history:', {
            speaker: content.speaker,
            blockTypes: content.blocks?.map((b) => b.type),
            toolCallIds: content.blocks
                ?.filter((b) => b.type === 'tool_call')
                .map((b) => b.id),
            toolResponseIds: content.blocks
                ?.filter((b) => b.type === 'tool_response')
                .map((b) => ({
                callId: b.callId,
                toolName: b.toolName,
            })),
            contentId: content.metadata?.id,
            modelName,
        });
        // Only do basic validation - must have valid speaker
        if (content.speaker && ['human', 'ai', 'tool'].includes(content.speaker)) {
            this.history.push(content);
            this.logger.debug('Content added successfully, history length:', this.history.length);
            // Update token count asynchronously but atomically
            this.updateTokenCount(content, modelName);
        }
        else {
            this.logger.debug('Content rejected - invalid speaker:', content.speaker);
        }
    }
    /**
     * Atomically update token count for new content
     */
    async updateTokenCount(content, modelName) {
        // Use a lock to prevent race conditions
        this.tokenizerLock = this.tokenizerLock.then(async () => {
            let contentTokens = 0;
            // First try to use usage data from the content metadata
            if (content.metadata?.usage) {
                contentTokens = content.metadata.usage.totalTokens;
            }
            else {
                // Fall back to tokenizer estimation
                // Default to gpt-4.1 tokenizer if no model name provided (most universal)
                const defaultModel = modelName || 'gpt-4.1';
                contentTokens = await this.estimateContentTokens(content, defaultModel);
            }
            // Atomically update the total
            this.totalTokens += contentTokens;
            // Emit event with updated count
            const eventData = {
                totalTokens: this.totalTokens,
                addedTokens: contentTokens,
                contentId: content.metadata?.id,
            };
            this.logger.debug('Emitting tokensUpdated:', eventData);
            this.emit('tokensUpdated', eventData);
        });
        return this.tokenizerLock;
    }
    /**
     * Estimate token count for content using tokenizer
     */
    async estimateContentTokens(content, modelName) {
        const tokenizer = this.getTokenizerForModel(modelName);
        let totalTokens = 0;
        for (const block of content.blocks) {
            let blockText = '';
            switch (block.type) {
                case 'text':
                    blockText = block.text;
                    break;
                case 'tool_call':
                    try {
                        blockText = JSON.stringify({
                            name: block.name,
                            parameters: block.parameters,
                        });
                    }
                    catch (error) {
                        // Handle circular references or other JSON.stringify errors
                        this.logger.debug('Error stringifying tool_call parameters, using fallback:', error);
                        // Fallback to just the tool name for token estimation
                        blockText = `tool_call: ${block.name}`;
                    }
                    break;
                case 'tool_response':
                    try {
                        blockText = JSON.stringify(block.result || block.error || '');
                    }
                    catch (error) {
                        // Handle circular references or other JSON.stringify errors
                        this.logger.debug('Error stringifying tool_response result/error, using fallback:', error);
                        // Fallback to just the tool name for token estimation
                        blockText = `tool_response: ${block.toolName || 'unknown'}`;
                    }
                    break;
                case 'thinking':
                    blockText = block.thought;
                    break;
                case 'code':
                    blockText = block.code;
                    break;
                case 'media':
                    // For media, just count the caption if any
                    blockText = block.caption || '';
                    break;
                default:
                    // Unknown block type, skip
                    break;
            }
            if (blockText) {
                try {
                    const blockTokens = await tokenizer.countTokens(blockText, modelName);
                    totalTokens += blockTokens;
                }
                catch (error) {
                    this.logger.debug('Error counting tokens for block, using fallback:', error);
                    totalTokens += this.simpleTokenEstimateForText(blockText);
                }
            }
        }
        return totalTokens;
    }
    /**
     * Simple token estimation for text
     */
    simpleTokenEstimateForText(text) {
        if (!text)
            return 0;
        const wordCount = text.split(/\s+/).length;
        const characterCount = text.length;
        return Math.round(Math.max(wordCount * 1.3, characterCount / 4));
    }
    /**
     * Add multiple contents to the history
     */
    addAll(contents, modelName) {
        for (const content of contents) {
            this.add(content, modelName);
        }
    }
    /**
     * Get all history
     */
    getAll() {
        return [...this.history];
    }
    /**
     * Clear all history
     */
    clear() {
        // If compression is active, queue this operation
        if (this.isCompressing) {
            this.logger.debug('Queueing clear operation during compression');
            this.pendingOperations.push(() => {
                this.clearInternal();
            });
            return;
        }
        // Otherwise, clear immediately
        this.clearInternal();
    }
    clearInternal() {
        this.logger.debug('Clearing history', {
            previousLength: this.history.length,
        });
        const previousTokens = this.totalTokens;
        this.history = [];
        this.totalTokens = 0;
        // Emit event with reset count
        this.emit('tokensUpdated', {
            totalTokens: 0,
            addedTokens: -previousTokens, // Negative to indicate removal
            contentId: null,
        });
    }
    /**
     * Get the last N messages from history
     */
    getRecent(count) {
        return this.history.slice(-count);
    }
    /**
     * Get curated history (only valid, meaningful content)
     * Matches the behavior of extractCuratedHistory in geminiChat.ts:
     * - Always includes user/human messages
     * - Always includes tool messages
     * - Only includes AI messages if they are valid (have content)
     */
    getCurated() {
        // Wait if compression is in progress
        if (this.isCompressing) {
            this.logger.debug('getCurated called during compression - returning snapshot');
        }
        // Build the curated list without modifying history
        const curated = [];
        let excludedCount = 0;
        for (const content of this.history) {
            if (content.speaker === 'human' || content.speaker === 'tool') {
                // Always include user and tool messages
                curated.push(content);
            }
            else if (content.speaker === 'ai') {
                // Only include AI messages if they have valid content
                if (ContentValidation.hasContent(content)) {
                    curated.push(content);
                }
                else {
                    excludedCount++;
                    this.logger.debug('Excluding AI content without valid content:', {
                        blocks: content.blocks?.map((b) => ({
                            type: b.type,
                            hasContent: b.type === 'text' ? !!b.text : true,
                        })),
                    });
                }
            }
        }
        this.logger.debug('Curated history summary:', {
            totalHistory: this.history.length,
            curatedCount: curated.length,
            excludedAiCount: excludedCount,
            toolCallsInCurated: curated.reduce((acc, c) => acc + c.blocks.filter((b) => b.type === 'tool_call').length, 0),
            toolResponsesInCurated: curated.reduce((acc, c) => acc + c.blocks.filter((b) => b.type === 'tool_response').length, 0),
            isCompressing: this.isCompressing,
        });
        return curated;
    }
    /**
     * Get comprehensive history (all content including invalid/empty)
     */
    getComprehensive() {
        return [...this.history];
    }
    /**
     * Remove the last content if it matches the provided content
     */
    removeLastIfMatches(content) {
        const last = this.history[this.history.length - 1];
        if (last === content) {
            this.history.pop();
            return true;
        }
        return false;
    }
    /**
     * Pop the last content from history
     */
    pop() {
        const removed = this.history.pop();
        if (removed) {
            // Recalculate tokens since we removed content
            // This is less efficient but ensures accuracy
            this.recalculateTokens();
        }
        return removed;
    }
    /**
     * Recalculate total tokens from scratch
     * Use this when removing content or when token counts might be stale
     */
    async recalculateTokens(defaultModel = 'gpt-4.1') {
        this.tokenizerLock = this.tokenizerLock.then(async () => {
            let newTotal = 0;
            for (const content of this.history) {
                if (content.metadata?.usage) {
                    newTotal += content.metadata.usage.totalTokens;
                }
                else {
                    // Use the model from content metadata, or fall back to provided default
                    const modelToUse = content.metadata?.model || defaultModel;
                    newTotal += await this.estimateContentTokens(content, modelToUse);
                }
            }
            const oldTotal = this.totalTokens;
            this.totalTokens = newTotal;
            // Emit event with updated count
            this.emit('tokensUpdated', {
                totalTokens: this.totalTokens,
                addedTokens: this.totalTokens - oldTotal,
                contentId: null,
            });
        });
        return this.tokenizerLock;
    }
    /**
     * Get the last user (human) content
     */
    getLastUserContent() {
        for (let i = this.history.length - 1; i >= 0; i--) {
            if (this.history[i].speaker === 'human') {
                return this.history[i];
            }
        }
        return undefined;
    }
    /**
     * Get the last AI content
     */
    getLastAIContent() {
        for (let i = this.history.length - 1; i >= 0; i--) {
            if (this.history[i].speaker === 'ai') {
                return this.history[i];
            }
        }
        return undefined;
    }
    /**
     * Record a complete turn (user input + AI response + optional tool interactions)
     */
    recordTurn(userInput, aiResponse, toolInteractions) {
        this.add(userInput);
        this.add(aiResponse);
        if (toolInteractions) {
            this.addAll(toolInteractions);
        }
    }
    /**
     * Get the number of messages in history
     */
    length() {
        return this.history.length;
    }
    /**
     * Check if history is empty
     */
    isEmpty() {
        return this.history.length === 0;
    }
    /**
     * Clone the history (deep copy)
     */
    clone() {
        return JSON.parse(JSON.stringify(this.history));
    }
    /**
     * Find unmatched tool calls (tool calls without responses)
     */
    findUnmatchedToolCalls() {
        // With atomic tool call/response implementation, orphans are impossible by design
        // Always return empty array since orphans cannot exist
        this.logger.debug('No unmatched tool calls - atomic implementation prevents orphans');
        return [];
    }
    /**
     * Validate and fix the history to ensure proper tool call/response pairing
     */
    validateAndFix() {
        // With atomic tool call/response implementation, the history is always valid by design
        // No fixing needed since orphans cannot exist
        this.logger.debug('History validation skipped - atomic implementation ensures validity');
    }
    /**
     * Get curated history with circular references removed for providers.
     * This ensures the history can be safely serialized and sent to providers.
     */
    getCuratedForProvider() {
        // Get the curated history
        const curated = this.getCurated();
        // Deep clone to avoid circular references in tool call parameters
        // We need a clean copy that can be serialized
        return this.deepCloneWithoutCircularRefs(curated);
    }
    /**
     * Deep clone content array, removing circular references
     */
    deepCloneWithoutCircularRefs(contents) {
        return contents.map((content) => {
            // Create a clean copy of the content
            const cloned = {
                speaker: content.speaker,
                blocks: content.blocks.map((block) => {
                    if (block.type === 'tool_call') {
                        const toolCall = block;
                        // For tool calls, sanitize the parameters to remove circular refs
                        return {
                            type: 'tool_call',
                            id: toolCall.id,
                            name: toolCall.name,
                            parameters: this.sanitizeParams(toolCall.parameters),
                        };
                    }
                    else if (block.type === 'tool_response') {
                        const toolResponse = block;
                        // For tool responses, sanitize the result to remove circular refs
                        return {
                            type: 'tool_response',
                            callId: toolResponse.callId,
                            toolName: toolResponse.toolName,
                            result: this.sanitizeParams(toolResponse.result),
                            error: toolResponse.error,
                        };
                    }
                    else {
                        // Other blocks should be safe to clone
                        try {
                            return JSON.parse(JSON.stringify(block));
                        }
                        catch {
                            // If any block fails, return minimal version
                            return { ...block };
                        }
                    }
                }),
                metadata: content.metadata ? { ...content.metadata } : {},
            };
            return cloned;
        });
    }
    /**
     * Sanitize parameters to remove circular references
     */
    sanitizeParams(params) {
        const seen = new WeakSet();
        const sanitize = (obj) => {
            // Handle primitives
            if (obj === null || typeof obj !== 'object') {
                return obj;
            }
            // Check for circular reference
            if (seen.has(obj)) {
                return { _circular: true };
            }
            seen.add(obj);
            // Handle arrays
            if (Array.isArray(obj)) {
                return obj.map((item) => sanitize(item));
            }
            // Handle objects
            const result = {};
            for (const [key, value] of Object.entries(obj)) {
                result[key] = sanitize(value);
            }
            return result;
        };
        try {
            return sanitize(params);
        }
        catch (error) {
            this.logger.debug('Error sanitizing params:', error);
            return {
                _note: 'Parameters contained circular references and were sanitized',
            };
        }
    }
    /**
     * Merge two histories, handling duplicates and conflicts
     */
    merge(other) {
        // Simple append for now - could be made smarter to detect duplicates
        this.addAll(other.getAll());
    }
    /**
     * Get history within a token limit (for context window management)
     */
    getWithinTokenLimit(maxTokens, countTokensFn) {
        const result = [];
        let totalTokens = 0;
        // Work backwards to keep most recent messages
        for (let i = this.history.length - 1; i >= 0; i--) {
            const content = this.history[i];
            const tokens = countTokensFn(content);
            if (totalTokens + tokens <= maxTokens) {
                result.unshift(content);
                totalTokens += tokens;
            }
            else {
                break;
            }
        }
        return result;
    }
    /**
     * Summarize older history to fit within token limits
     */
    async summarizeOldHistory(keepRecentCount, summarizeFn) {
        if (this.history.length <= keepRecentCount) {
            return;
        }
        const toSummarize = this.history.slice(0, -keepRecentCount);
        const toKeep = this.history.slice(-keepRecentCount);
        const summary = await summarizeFn(toSummarize);
        this.history = [summary, ...toKeep];
    }
    /**
     * Export history to JSON
     */
    toJSON() {
        return JSON.stringify(this.history, null, 2);
    }
    /**
     * Import history from JSON
     */
    static fromJSON(json) {
        const service = new HistoryService();
        const history = JSON.parse(json);
        service.addAll(history);
        return service;
    }
    /**
     * Mark compression as starting
     * This will cause add() operations to queue until compression completes
     */
    startCompression() {
        this.logger.debug('Starting compression - locking history');
        this.isCompressing = true;
    }
    /**
     * Mark compression as complete
     * This will flush all queued operations
     */
    endCompression() {
        this.logger.debug('Compression complete - unlocking history', {
            pendingCount: this.pendingOperations.length,
        });
        this.isCompressing = false;
        // Flush all pending operations
        const operations = this.pendingOperations;
        this.pendingOperations = [];
        for (const operation of operations) {
            operation();
        }
        this.logger.debug('Flushed pending operations', {
            count: operations.length,
        });
    }
    /**
     * Wait for all pending operations to complete
     * For synchronous operations, this is now a no-op but kept for API compatibility
     */
    async waitForPendingOperations() {
        // Since operations are now synchronous, nothing to wait for
        return Promise.resolve();
    }
    /**
     * Get conversation statistics
     */
    getStatistics() {
        let userMessages = 0;
        let aiMessages = 0;
        let toolCalls = 0;
        let toolResponses = 0;
        let totalTokens = 0;
        let hasTokens = false;
        for (const content of this.history) {
            if (content.speaker === 'human') {
                userMessages++;
            }
            else if (content.speaker === 'ai') {
                aiMessages++;
            }
            for (const block of content.blocks) {
                if (block.type === 'tool_call') {
                    toolCalls++;
                }
                else if (block.type === 'tool_response') {
                    toolResponses++;
                }
            }
            if (content.metadata?.usage) {
                totalTokens += content.metadata.usage.totalTokens;
                hasTokens = true;
            }
        }
        return {
            totalMessages: this.history.length,
            userMessages,
            aiMessages,
            toolCalls,
            toolResponses,
            totalTokens: hasTokens ? totalTokens : undefined,
        };
    }
}
//# sourceMappingURL=HistoryService.js.map
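For orientation, a minimal usage sketch of the new HistoryService follows. It is not part of the package; it is inferred only from the API visible in the hunk above, and the import path and the IContent object shapes used here are assumptions.

// Hypothetical usage sketch (not part of the published package).
// Import path is illustrative; the class lives at dist/src/services/history/HistoryService.js.
import { HistoryService } from '@vybestack/llxprt-code-core';

const history = new HistoryService();

// tokensUpdated fires after adds, clears, and recalculations with the running total.
history.on('tokensUpdated', ({ totalTokens, addedTokens }) => {
    console.log(`history now ~${totalTokens} tokens (+${addedTokens})`);
});

// Speakers are 'human' | 'ai' | 'tool'; content is stored as blocks
// (the text-block shape here is assumed from the block handling above).
history.add({ speaker: 'human', blocks: [{ type: 'text', text: 'List the repo files.' }] }, 'gpt-4.1');
history.add({ speaker: 'ai', blocks: [{ type: 'text', text: 'Running ls now.' }] }, 'gpt-4.1');

// Curated history drops empty AI turns and strips circular references,
// so it is what a provider request would be built from.
const curated = history.getCuratedForProvider();
console.log(curated.length, history.getTotalTokens(), history.getStatistics());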