@probelabs/probe 0.6.0-rc100
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +583 -0
- package/bin/.gitkeep +0 -0
- package/bin/probe +158 -0
- package/bin/probe-binary +0 -0
- package/build/agent/ProbeAgent.d.ts +199 -0
- package/build/agent/ProbeAgent.js +1486 -0
- package/build/agent/acp/README.md +347 -0
- package/build/agent/acp/connection.js +237 -0
- package/build/agent/acp/connection.test.js +311 -0
- package/build/agent/acp/examples/simple-client.js +212 -0
- package/build/agent/acp/examples/tool-lifecycle.js +230 -0
- package/build/agent/acp/final-test.js +173 -0
- package/build/agent/acp/index.js +5 -0
- package/build/agent/acp/integration.test.js +385 -0
- package/build/agent/acp/manual-test.js +410 -0
- package/build/agent/acp/protocol-test.js +190 -0
- package/build/agent/acp/server.js +448 -0
- package/build/agent/acp/server.test.js +371 -0
- package/build/agent/acp/test-runner.js +216 -0
- package/build/agent/acp/test-utils/README.md +315 -0
- package/build/agent/acp/test-utils/acp-tester.js +484 -0
- package/build/agent/acp/test-utils/mock-acp-client.js +434 -0
- package/build/agent/acp/tools.js +368 -0
- package/build/agent/acp/tools.test.js +334 -0
- package/build/agent/acp/types.js +218 -0
- package/build/agent/acp/types.test.js +327 -0
- package/build/agent/appTracer.js +360 -0
- package/build/agent/fileSpanExporter.js +169 -0
- package/build/agent/index.js +7426 -0
- package/build/agent/mcp/client.js +338 -0
- package/build/agent/mcp/config.js +313 -0
- package/build/agent/mcp/index.js +64 -0
- package/build/agent/mcp/xmlBridge.js +371 -0
- package/build/agent/mockProvider.js +53 -0
- package/build/agent/probeTool.js +257 -0
- package/build/agent/schemaUtils.js +1726 -0
- package/build/agent/simpleTelemetry.js +267 -0
- package/build/agent/telemetry.js +225 -0
- package/build/agent/tokenCounter.js +395 -0
- package/build/agent/tools.js +163 -0
- package/build/cli.js +49 -0
- package/build/delegate.js +267 -0
- package/build/directory-resolver.js +237 -0
- package/build/downloader.js +750 -0
- package/build/extract.js +149 -0
- package/build/index.js +70 -0
- package/build/mcp/index.js +514 -0
- package/build/mcp/index.ts +608 -0
- package/build/query.js +116 -0
- package/build/search.js +247 -0
- package/build/tools/common.js +410 -0
- package/build/tools/index.js +40 -0
- package/build/tools/langchain.js +88 -0
- package/build/tools/system-message.js +121 -0
- package/build/tools/vercel.js +271 -0
- package/build/utils/file-lister.js +193 -0
- package/build/utils.js +128 -0
- package/cjs/agent/ProbeAgent.cjs +5829 -0
- package/cjs/index.cjs +6217 -0
- package/cjs/package.json +3 -0
- package/index.d.ts +401 -0
- package/package.json +114 -0
- package/scripts/postinstall.js +172 -0
- package/src/agent/ProbeAgent.d.ts +199 -0
- package/src/agent/ProbeAgent.js +1486 -0
- package/src/agent/acp/README.md +347 -0
- package/src/agent/acp/connection.js +237 -0
- package/src/agent/acp/connection.test.js +311 -0
- package/src/agent/acp/examples/simple-client.js +212 -0
- package/src/agent/acp/examples/tool-lifecycle.js +230 -0
- package/src/agent/acp/final-test.js +173 -0
- package/src/agent/acp/index.js +5 -0
- package/src/agent/acp/integration.test.js +385 -0
- package/src/agent/acp/manual-test.js +410 -0
- package/src/agent/acp/protocol-test.js +190 -0
- package/src/agent/acp/server.js +448 -0
- package/src/agent/acp/server.test.js +371 -0
- package/src/agent/acp/test-runner.js +216 -0
- package/src/agent/acp/test-utils/README.md +315 -0
- package/src/agent/acp/test-utils/acp-tester.js +484 -0
- package/src/agent/acp/test-utils/mock-acp-client.js +434 -0
- package/src/agent/acp/tools.js +368 -0
- package/src/agent/acp/tools.test.js +334 -0
- package/src/agent/acp/types.js +218 -0
- package/src/agent/acp/types.test.js +327 -0
- package/src/agent/appTracer.js +360 -0
- package/src/agent/fileSpanExporter.js +169 -0
- package/src/agent/index.js +813 -0
- package/src/agent/mcp/client.js +338 -0
- package/src/agent/mcp/config.js +313 -0
- package/src/agent/mcp/index.js +64 -0
- package/src/agent/mcp/xmlBridge.js +371 -0
- package/src/agent/mockProvider.js +53 -0
- package/src/agent/probeTool.js +257 -0
- package/src/agent/schemaUtils.js +1726 -0
- package/src/agent/simpleTelemetry.js +267 -0
- package/src/agent/telemetry.js +225 -0
- package/src/agent/tokenCounter.js +395 -0
- package/src/agent/tools.js +163 -0
- package/src/cli.js +49 -0
- package/src/delegate.js +267 -0
- package/src/directory-resolver.js +237 -0
- package/src/downloader.js +750 -0
- package/src/extract.js +149 -0
- package/src/index.js +70 -0
- package/src/mcp/index.ts +608 -0
- package/src/query.js +116 -0
- package/src/search.js +247 -0
- package/src/tools/common.js +410 -0
- package/src/tools/index.js +40 -0
- package/src/tools/langchain.js +88 -0
- package/src/tools/system-message.js +121 -0
- package/src/tools/vercel.js +271 -0
- package/src/utils/file-lister.js +193 -0
- package/src/utils.js +128 -0
|
@@ -0,0 +1,395 @@
|
|
|
1
|
+
import { get_encoding } from 'tiktoken';
|
|
2
|
+
|
|
3
|
+
/**
 * TokenCounter tracks token usage for an agent session:
 * - per-call ("current") and accumulated ("total") prompt/completion tokens,
 * - provider cache tokens (Anthropic cache creation/read, OpenAI cached prompt),
 * - an estimated context-window size derived from the stored message history.
 *
 * Tokens are counted with tiktoken's cl100k_base encoding when available;
 * if the tokenizer cannot be initialized, a ~4-characters-per-token
 * approximation is used instead.
 */
export class TokenCounter {
  constructor() {
    // Initialize the tokenizer with cl100k_base encoding (works for both
    // Claude and GPT models). Fall back to approximation if tiktoken fails.
    try {
      this.tokenizer = get_encoding('cl100k_base');
    } catch (error) {
      console.error('Error initializing tokenizer:', error);
      // Fallback to a simple token counting method if tiktoken fails
      this.tokenizer = null;
    }

    // Context window tracking
    this.contextSize = 0; // Current size based on history
    this.history = [];    // Store message history for context calculation

    // Zero every usage counter in one place (previously this list of
    // assignments was duplicated across the constructor's try and catch
    // branches and again in clear()).
    this.#resetUsageCounters();

    this.debug = process.env.DEBUG === '1';
  }

  /**
   * Reset the per-call ("current") counters only.
   * Used at the start of each LLM call / conversation turn.
   */
  #resetCurrentCounters() {
    this.currentRequestTokens = 0;       // Prompt tokens for the current LLM call
    this.currentResponseTokens = 0;      // Completion tokens for the current LLM call
    this.currentCacheCreationTokens = 0; // Anthropic cache creation for current call
    this.currentCacheReadTokens = 0;     // Anthropic cache read for current call
    this.currentCachedPromptTokens = 0;  // OpenAI cached prompt for current call
  }

  /**
   * Reset every usage counter (session totals and per-call values).
   * Does NOT touch the tokenizer, history, or contextSize.
   */
  #resetUsageCounters() {
    this.requestTokens = 0;       // Total prompt tokens over session
    this.responseTokens = 0;      // Total completion tokens over session
    this.cacheCreationTokens = 0; // Total Anthropic cache creation tokens
    this.cacheReadTokens = 0;     // Total Anthropic cache read tokens
    this.cachedPromptTokens = 0;  // Total OpenAI cached prompt tokens
    this.#resetCurrentCounters();
  }

  /**
   * Count tokens in a string using tiktoken or the fallback method.
   * @param {string} text - The text to count tokens for
   * @returns {number} - The number of tokens
   */
  countTokens(text) {
    if (typeof text !== 'string') {
      text = String(text); // Ensure text is a string
    }

    if (this.tokenizer) {
      try {
        const tokens = this.tokenizer.encode(text);
        return tokens.length;
      } catch (error) {
        // Fallback to a simple approximation (1 token ≈ 4 characters)
        return Math.ceil(text.length / 4);
      }
    } else {
      // Fallback to a simple approximation (1 token ≈ 4 characters)
      return Math.ceil(text.length / 4);
    }
  }

  /**
   * Add to request token count (manual counting, less used now with recordUsage).
   * Accumulates into the session total and overwrites the current-call value.
   * @param {string|number} input - The text to count tokens for or the token count directly
   */
  addRequestTokens(input) {
    let tokenCount = 0;

    if (typeof input === 'number') {
      tokenCount = input;
    } else if (typeof input === 'string') {
      tokenCount = this.countTokens(input);
    } else {
      console.warn('[WARN] Invalid input type for addRequestTokens:', typeof input);
      return;
    }

    this.requestTokens += tokenCount;
    this.currentRequestTokens = tokenCount;

    if (this.debug) {
      console.log(`[DEBUG] (Manual) Added ${tokenCount} request tokens. Total: ${this.requestTokens}, Current: ${this.currentRequestTokens}`);
    }
  }

  /**
   * Add to response token count (manual counting, less used now with recordUsage).
   * Accumulates into the session total and overwrites the current-call value.
   * @param {string|number} input - The text to count tokens for or the token count directly
   */
  addResponseTokens(input) {
    let tokenCount = 0;

    if (typeof input === 'number') {
      tokenCount = input;
    } else if (typeof input === 'string') {
      tokenCount = this.countTokens(input);
    } else {
      console.warn('[WARN] Invalid input type for addResponseTokens:', typeof input);
      return;
    }

    this.responseTokens += tokenCount;
    this.currentResponseTokens = tokenCount;

    if (this.debug) {
      console.log(`[DEBUG] (Manual) Added ${tokenCount} response tokens. Total: ${this.responseTokens}, Current: ${this.currentResponseTokens}`);
    }
  }

  /**
   * Record token usage from the AI SDK's result for a single LLM call.
   * Resets the 'current' counters, then updates both current and total values.
   * @param {Object} usage - The usage object { promptTokens, completionTokens, totalTokens }
   * @param {Object} providerMetadata - Metadata possibly containing cache info
   */
  recordUsage(usage, providerMetadata) {
    if (!usage) {
      console.warn('[WARN] No usage information provided to recordUsage');
      return;
    }

    // Reset CURRENT counters for this specific API call
    this.#resetCurrentCounters();

    // Coerce usage values defensively; missing/invalid fields count as 0.
    const promptTokens = Number(usage.promptTokens) || 0;
    const completionTokens = Number(usage.completionTokens) || 0;

    // Update CURRENT tokens for this call
    this.currentRequestTokens = promptTokens;
    this.currentResponseTokens = completionTokens;

    // Update TOTAL tokens accumulated over the session
    this.requestTokens += promptTokens;
    this.responseTokens += completionTokens;

    // --- Process Provider Metadata for Cache Info ---
    if (providerMetadata?.anthropic) {
      const cacheCreation = Number(providerMetadata.anthropic.cacheCreationInputTokens) || 0;
      const cacheRead = Number(providerMetadata.anthropic.cacheReadInputTokens) || 0;

      this.currentCacheCreationTokens = cacheCreation;
      this.currentCacheReadTokens = cacheRead;

      this.cacheCreationTokens += cacheCreation;
      this.cacheReadTokens += cacheRead;

      if (this.debug) {
        console.log(`[DEBUG] Anthropic cache tokens (current): creation=${cacheCreation}, read=${cacheRead}`);
      }
    }

    if (providerMetadata?.openai) {
      const cachedPrompt = Number(providerMetadata.openai.cachedPromptTokens) || 0;

      this.currentCachedPromptTokens = cachedPrompt;
      this.cachedPromptTokens += cachedPrompt;

      if (this.debug) {
        console.log(`[DEBUG] OpenAI cached prompt tokens (current): ${cachedPrompt}`);
      }
    }

    if (this.debug) {
      console.log(
        `[DEBUG] Recorded usage: current(req=${this.currentRequestTokens}, resp=${this.currentResponseTokens}), total(req=${this.requestTokens}, resp=${this.responseTokens})`
      );
      console.log(`[DEBUG] Total cache tokens: Anthropic(create=${this.cacheCreationTokens}, read=${this.cacheReadTokens}), OpenAI(prompt=${this.cachedPromptTokens})`);
    }
  }

  /**
   * Calculate the current context window size based on provided messages or internal history.
   * @param {Array|null} messages - Optional messages array to use for calculation. If null, uses internal this.history.
   * @returns {number} - Total tokens estimated in the context window.
   */
  calculateContextSize(messages = null) {
    const msgsToCount = messages !== null ? messages : this.history;
    let totalTokens = 0;

    if (this.debug && messages === null) {
      console.log(`[DEBUG] Calculating context size from internal history (${this.history.length} messages)`);
    }

    for (const msg of msgsToCount) {
      let messageTokens = 0;
      // Add tokens for role overhead (approximate)
      messageTokens += 4;

      // Content tokens
      if (typeof msg.content === 'string') {
        messageTokens += this.countTokens(msg.content);
      } else if (Array.isArray(msg.content)) {
        // Handle array content (e.g., Vercel AI SDK tool usage format)
        for (const item of msg.content) {
          if (item.type === 'text' && typeof item.text === 'string') {
            messageTokens += this.countTokens(item.text);
          } else {
            // Estimate tokens for other non-text parts (tool calls/results)
            messageTokens += this.countTokens(JSON.stringify(item));
          }
        }
      } else if (msg.content) {
        // Fallback for other content types
        messageTokens += this.countTokens(JSON.stringify(msg.content));
      }

      // --- Add tokens for tool calls/results if present (Vercel SDK format) ---
      if (msg.toolCalls) {
        messageTokens += this.countTokens(JSON.stringify(msg.toolCalls));
        messageTokens += 5; // Approx overhead for tool_calls structure
      }
      // For 'tool' role messages (results)
      if (msg.role === 'tool' && msg.toolCallId) {
        messageTokens += this.countTokens(msg.toolCallId); // Add tokens for the ID
        messageTokens += 5; // Approx overhead for tool role structure
      }
      if (msg.toolCallResults) {
        messageTokens += this.countTokens(JSON.stringify(msg.toolCallResults));
        messageTokens += 5; // Approx overhead
      }

      totalTokens += messageTokens;
    }

    // Update the instance property *only* if calculating based on internal history
    if (messages === null) {
      this.contextSize = totalTokens;
      if (this.debug) {
        console.log(`[DEBUG] Updated internal context size: ${this.contextSize} tokens`);
      }
    }

    return totalTokens;
  }

  /**
   * Update internal history and recalculate internal context window size.
   * @param {Array} messages - New message history array.
   */
  updateHistory(messages) {
    // Ensure messages is an array
    if (!Array.isArray(messages)) {
      console.warn("[WARN] updateHistory called with non-array:", messages);
      this.history = [];
    } else {
      // Create a shallow copy to avoid external modifications
      this.history = [...messages];
    }
    // Recalculate context size based on the new internal history
    this.calculateContextSize(); // This updates this.contextSize
    if (this.debug) {
      console.log(`[DEBUG] History updated (${this.history.length} messages). Recalculated context size: ${this.contextSize}`);
    }
  }

  /**
   * Clear all counters and internal history. Reset context size.
   */
  clear() {
    this.#resetUsageCounters();

    // Clear history and context
    this.history = [];
    this.contextSize = 0; // Reset calculated context size

    if (this.debug) {
      console.log('[DEBUG] TokenCounter cleared: usage, history, and context size reset.');
    }
  }

  /**
   * Start a new conversation turn - reset CURRENT token counters.
   * Calculates context size based on history *before* the new turn.
   */
  startNewTurn() {
    this.#resetCurrentCounters();

    // Calculate context size based on current history *before* new messages are added
    this.calculateContextSize(); // Updates this.contextSize

    if (this.debug) {
      console.log('[DEBUG] TokenCounter: New turn started. Current counters reset.');
      console.log(`[DEBUG] Context size at start of turn: ${this.contextSize} tokens`);
    }
  }

  /**
   * Get the current token usage state including context size.
   * Recalculates context size from internal history before returning.
   * @returns {Object} - Object containing current turn, total session, and context window usage.
   */
  getTokenUsage() {
    // Always calculate context window size from internal history right before returning usage
    const currentContextSize = this.calculateContextSize(); // Recalculates and updates this.contextSize

    // Consolidate cache info for simpler reporting
    const currentCacheRead = this.currentCacheReadTokens + this.currentCachedPromptTokens;
    const currentCacheWrite = this.currentCacheCreationTokens;
    const totalCacheRead = this.cacheReadTokens + this.cachedPromptTokens;
    const totalCacheWrite = this.cacheCreationTokens;

    return {
      contextWindow: currentContextSize, // Use the freshly calculated value
      current: { // Usage for the *last* LLM call recorded
        request: this.currentRequestTokens,
        response: this.currentResponseTokens,
        total: this.currentRequestTokens + this.currentResponseTokens,
        cacheRead: currentCacheRead,
        cacheWrite: currentCacheWrite,
        cacheTotal: currentCacheRead + currentCacheWrite,
        // Keep detailed breakdown if needed
        anthropic: {
          cacheCreation: this.currentCacheCreationTokens,
          cacheRead: this.currentCacheReadTokens,
        },
        openai: {
          cachedPrompt: this.currentCachedPromptTokens
        }
      },
      total: { // Accumulated usage over the session
        request: this.requestTokens,
        response: this.responseTokens,
        total: this.requestTokens + this.responseTokens,
        cacheRead: totalCacheRead,
        cacheWrite: totalCacheWrite,
        cacheTotal: totalCacheRead + totalCacheWrite,
        // Keep detailed breakdown if needed
        anthropic: {
          cacheCreation: this.cacheCreationTokens,
          cacheRead: this.cacheReadTokens,
        },
        openai: {
          cachedPrompt: this.cachedPromptTokens
        }
      }
    };
  }

  /**
   * Get a summary of token usage for display.
   * @returns {Object} Usage summary
   */
  getUsageSummary() {
    const totalTokens = this.requestTokens + this.responseTokens;
    const cacheRead = this.cacheReadTokens + this.cachedPromptTokens;
    const cacheWrite = this.cacheCreationTokens;

    return {
      totalTokens,
      requestTokens: this.requestTokens,
      responseTokens: this.responseTokens,
      cacheRead,
      cacheWrite,
      contextSize: this.contextSize
    };
  }
}
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
// Tool definitions and XML parsing for the probe agent
|
|
2
|
+
import {
|
|
3
|
+
searchTool,
|
|
4
|
+
queryTool,
|
|
5
|
+
extractTool,
|
|
6
|
+
delegateTool,
|
|
7
|
+
DEFAULT_SYSTEM_MESSAGE,
|
|
8
|
+
attemptCompletionSchema,
|
|
9
|
+
attemptCompletionToolDefinition,
|
|
10
|
+
searchSchema,
|
|
11
|
+
querySchema,
|
|
12
|
+
extractSchema,
|
|
13
|
+
delegateSchema,
|
|
14
|
+
searchToolDefinition,
|
|
15
|
+
queryToolDefinition,
|
|
16
|
+
extractToolDefinition,
|
|
17
|
+
delegateToolDefinition,
|
|
18
|
+
parseXmlToolCall
|
|
19
|
+
} from '../index.js';
|
|
20
|
+
import { randomUUID } from 'crypto';
|
|
21
|
+
|
|
22
|
+
/**
 * Build the configured tool instances used by the agent.
 * @param {Object} configOptions - Options forwarded to every tool factory.
 * @returns {Object} Map of tool name to configured tool instance.
 */
export function createTools(configOptions) {
  // Instantiate each factory with the shared config, preserving the
  // search → query → extract → delegate order.
  const factories = { searchTool, queryTool, extractTool, delegateTool };
  const tools = {};
  for (const [name, factory] of Object.entries(factories)) {
    tools[name] = factory(configOptions);
  }
  return tools;
}
|
|
31
|
+
|
|
32
|
+
// Re-export the shared schemas, XML tool definitions, the default system
// message, and the XML tool-call parser from the package root, so agent
// code can import everything tool-related from this single module.
export {
  DEFAULT_SYSTEM_MESSAGE,
  searchSchema,
  querySchema,
  extractSchema,
  delegateSchema,
  attemptCompletionSchema,
  searchToolDefinition,
  queryToolDefinition,
  extractToolDefinition,
  delegateToolDefinition,
  attemptCompletionToolDefinition,
  parseXmlToolCall
};
|
|
47
|
+
|
|
48
|
+
/**
 * XML prompt definition for the `implement` tool, injected into the system
 * message so the model knows the tool's parameters and call syntax.
 * NOTE(review): the text mentions aider for autoCommits — the executor is
 * not visible from this file; confirm against the tool implementation.
 */
export const implementToolDefinition = `
## implement
Description: Implement a given task. Can modify files. Can be used ONLY if task explicitly stated that something requires modification or implementation.

Parameters:
- task: (required) The task description. Should be as detailed as possible, ideally pointing to exact files which needs be modified or created.
- autoCommits: (optional) Whether to enable auto-commits in aider. Default is false.

Usage Example:

<examples>

User: Can you implement a function to calculate Fibonacci numbers in main.js?
<implement>
<task>Implement a recursive function to calculate the nth Fibonacci number in main.js</task>
</implement>

User: Can you implement a function to calculate Fibonacci numbers in main.js with auto-commits?
<implement>
<task>Implement a recursive function to calculate the nth Fibonacci number in main.js</task>
<autoCommits>true</autoCommits>
</implement>

</examples>
`;
|
|
74
|
+
|
|
75
|
+
/**
 * XML prompt definition for the `listFiles` tool, injected into the system
 * message so the model knows the tool's parameters and call syntax.
 */
export const listFilesToolDefinition = `
## listFiles
Description: List files and directories in a specified location.

Parameters:
- directory: (optional) The directory path to list files from. Defaults to current directory if not specified.

Usage Example:

<examples>

User: Can you list the files in the src directory?
<listFiles>
<directory>src</directory>
</listFiles>

User: What files are in the current directory?
<listFiles>
</listFiles>

</examples>
`;
|
|
98
|
+
|
|
99
|
+
/**
 * XML prompt definition for the `searchFiles` tool, injected into the system
 * message so the model knows the tool's parameters and call syntax.
 */
export const searchFilesToolDefinition = `
## searchFiles
Description: Find files with name matching a glob pattern with recursive search capability.

Parameters:
- pattern: (required) The glob pattern to search for (e.g., "**/*.js", "*.md").
- directory: (optional) The directory to search in. Defaults to current directory if not specified.
- recursive: (optional) Whether to search recursively. Defaults to true.

Usage Example:

<examples>

User: Can you find all JavaScript files in the project?
<searchFiles>
<pattern>**/*.js</pattern>
</searchFiles>

User: Find all markdown files in the docs directory, but only at the top level.
<searchFiles>
<pattern>*.md</pattern>
<directory>docs</directory>
<recursive>false</recursive>
</searchFiles>

</examples>
`;
|
|
127
|
+
|
|
128
|
+
/**
 * Parse an XML tool call from model output, tolerating <thinking> blocks
 * and the bare <attempt_complete> shorthand.
 *
 * Every <thinking>...</thinking> section is stripped before delegating to
 * the standard parseXmlToolCall. If the cleaned string is exactly a lone
 * opening <attempt_complete> tag, it is expanded into a full
 * attempt_completion call whose result is the '__PREVIOUS_RESPONSE__'
 * marker understood downstream.
 *
 * @param {string} xmlString - The raw model output to parse.
 * @param {string[]} [validTools] - Optional list of valid tool names.
 * @returns {Object|null} The parsed tool call, or null if none is found.
 */
export function parseXmlToolCallWithThinking(xmlString, validTools) {
  // Capture the first thinking section (kept only for debug logging).
  const thinking = /<thinking>([\s\S]*?)<\/thinking>/.exec(xmlString);
  const thinkingContent = thinking === null ? null : thinking[1].trim();

  // Drop every thinking section, then trim surrounding whitespace.
  const cleaned = xmlString.replace(/<thinking>[\s\S]*?<\/thinking>/g, '').trim();

  // Bare "<attempt_complete>" shorthand (no params, no closing tag)
  // becomes a full attempt_completion call with the special marker.
  if (/^<attempt_complete>\s*$/.test(cleaned)) {
    return {
      toolName: 'attempt_completion',
      params: { result: '__PREVIOUS_RESPONSE__' }
    };
  }

  // Delegate the cleaned string to the standard parser.
  const parsedTool = parseXmlToolCall(cleaned, validTools);

  if (thinkingContent && process.env.DEBUG === '1') {
    console.log(`[DEBUG] AI Thinking Process:\n${thinkingContent}`);
  }

  return parsedTool;
}
|
package/build/cli.js
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* CLI wrapper for the probe binary
|
|
5
|
+
*
|
|
6
|
+
* This script ensures the probe binary is downloaded and then executes it with the provided arguments.
|
|
7
|
+
* It's designed to be as lightweight as possible, essentially just passing through to the actual binary.
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import { spawn } from 'child_process';
|
|
11
|
+
import { getBinaryPath } from './utils.js';
|
|
12
|
+
|
|
13
|
+
/**
 * Main entry point: resolve the probe binary (downloading it on first use)
 * and re-exec it with this CLI's arguments, mirroring its exit status.
 */
async function main() {
  try {
    // Get the path to the probe binary (this will download it if needed)
    const binaryPath = await getBinaryPath();

    // Forward every argument after the node script itself
    const args = process.argv.slice(2);

    // Spawn the probe binary, sharing stdin/stdout/stderr with this process
    const probeProcess = spawn(binaryPath, args, {
      stdio: 'inherit'
    });

    probeProcess.on('close', (code) => {
      // `code` is null when the child was terminated by a signal; exiting
      // with 0 in that case would mask the failure, so report failure (1).
      process.exit(code === null ? 1 : code);
    });

    // Handle spawn failures (e.g. binary missing or not executable)
    probeProcess.on('error', (error) => {
      console.error(`Error executing probe binary: ${error.message}`);
      process.exit(1);
    });
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}

// Execute the main function
main().catch(error => {
  console.error(`Unexpected error: ${error.message}`);
  process.exit(1);
});
|