lynkr 7.2.5 → 8.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -2
- package/config/model-tiers.json +89 -0
- package/docs/docs.html +1 -0
- package/docs/index.md +7 -0
- package/docs/toon-integration-spec.md +130 -0
- package/documentation/README.md +3 -2
- package/documentation/claude-code-cli.md +23 -16
- package/documentation/cursor-integration.md +17 -14
- package/documentation/docker.md +11 -4
- package/documentation/embeddings.md +7 -5
- package/documentation/faq.md +66 -12
- package/documentation/features.md +22 -15
- package/documentation/installation.md +66 -14
- package/documentation/production.md +43 -8
- package/documentation/providers.md +145 -42
- package/documentation/routing.md +476 -0
- package/documentation/token-optimization.md +7 -5
- package/documentation/troubleshooting.md +81 -5
- package/install.sh +6 -1
- package/package.json +4 -2
- package/scripts/setup.js +0 -1
- package/src/agents/executor.js +14 -6
- package/src/api/middleware/session.js +15 -2
- package/src/api/openai-router.js +130 -37
- package/src/api/providers-handler.js +15 -1
- package/src/api/router.js +107 -2
- package/src/budget/index.js +4 -3
- package/src/clients/databricks.js +431 -234
- package/src/clients/gpt-utils.js +181 -0
- package/src/clients/ollama-utils.js +66 -140
- package/src/clients/routing.js +0 -1
- package/src/clients/standard-tools.js +76 -3
- package/src/config/index.js +113 -35
- package/src/context/toon.js +173 -0
- package/src/logger/index.js +23 -0
- package/src/orchestrator/index.js +686 -211
- package/src/routing/agentic-detector.js +320 -0
- package/src/routing/complexity-analyzer.js +202 -2
- package/src/routing/cost-optimizer.js +305 -0
- package/src/routing/index.js +168 -159
- package/src/routing/model-tiers.js +365 -0
- package/src/server.js +2 -2
- package/src/sessions/cleanup.js +3 -3
- package/src/sessions/record.js +10 -1
- package/src/sessions/store.js +7 -2
- package/src/tools/agent-task.js +48 -1
- package/src/tools/index.js +15 -2
- package/te +11622 -0
- package/test/README.md +1 -1
- package/test/azure-openai-config.test.js +17 -8
- package/test/azure-openai-integration.test.js +7 -1
- package/test/azure-openai-routing.test.js +41 -43
- package/test/bedrock-integration.test.js +18 -32
- package/test/hybrid-routing-integration.test.js +35 -20
- package/test/hybrid-routing-performance.test.js +74 -64
- package/test/llamacpp-integration.test.js +28 -9
- package/test/lmstudio-integration.test.js +20 -8
- package/test/openai-integration.test.js +17 -20
- package/test/performance-tests.js +1 -1
- package/test/routing.test.js +65 -59
- package/test/toon-compression.test.js +131 -0
- package/CLAWROUTER_ROUTING_PLAN.md +0 -910
- package/ROUTER_COMPARISON.md +0 -173
- package/TIER_ROUTING_PLAN.md +0 -771
|
@@ -0,0 +1,365 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Model Tier Selector
|
|
3
|
+
* Maps complexity scores to appropriate models per provider
|
|
4
|
+
* Uses config/model-tiers.json for tier preferences
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
const fs = require('fs');
|
|
8
|
+
const path = require('path');
|
|
9
|
+
const logger = require('../logger');
|
|
10
|
+
const config = require('../config');
|
|
11
|
+
|
|
12
|
+
// Load tier config
|
|
13
|
+
const TIER_CONFIG_PATH = path.join(__dirname, '../../config/model-tiers.json');
|
|
14
|
+
|
|
15
|
+
// Tier definitions with complexity ranges.
// `range` is an inclusive [min, max] band of the 0-100 complexity score;
// `priority` orders tiers from cheapest/simplest (1) to most capable (4).
// NOTE(review): the bands are integer-valued with gaps between them
// (25 -> 26, 50 -> 51, 75 -> 76); callers must round fractional scores
// or a value like 25.5 will match no band.
const TIER_DEFINITIONS = {
  SIMPLE: {
    description: 'Greetings, simple Q&A, confirmations',
    range: [0, 25],
    priority: 1,
  },
  MEDIUM: {
    description: 'Code reading, simple edits, research',
    range: [26, 50],
    priority: 2,
  },
  COMPLEX: {
    description: 'Multi-file changes, debugging, architecture',
    range: [51, 75],
    priority: 3,
  },
  REASONING: {
    description: 'Complex analysis, security audits, novel problems',
    range: [76, 100],
    priority: 4,
  },
};
|
|
38
|
+
|
|
39
|
+
/**
 * Maps complexity scores to tiers and resolves tier -> provider/model
 * choices, combining config/model-tiers.json preferences with mandatory
 * TIER_* settings from the application config.
 */
class ModelTierSelector {
  constructor() {
    // Populated by _loadConfig(); kept as instance state so reload() can refresh.
    this.tierConfig = null;
    this.localProviders = {};
    this.providerAliases = {};
    this._loadConfig();
  }

  /**
   * Load tier configuration from the JSON file at TIER_CONFIG_PATH.
   * Any read/parse failure logs a warning and falls back to built-in
   * defaults rather than throwing.
   */
  _loadConfig() {
    try {
      if (fs.existsSync(TIER_CONFIG_PATH)) {
        const data = JSON.parse(fs.readFileSync(TIER_CONFIG_PATH, 'utf8'));
        this.tierConfig = data.tiers || {};
        this.localProviders = data.localProviders || {};
        this.providerAliases = data.providerAliases || {};
        logger.debug({ tiers: Object.keys(this.tierConfig) }, '[ModelTiers] Config loaded');
      } else {
        logger.warn('[ModelTiers] Config file not found, using defaults');
        this._loadDefaults();
      }
    } catch (err) {
      logger.warn({ err: err.message }, '[ModelTiers] Config load failed, using defaults');
      this._loadDefaults();
    }
  }

  /**
   * Install hard-coded default tier preferences and local-provider flags.
   */
  _loadDefaults() {
    this.tierConfig = {
      SIMPLE: { preferred: { ollama: ['llama3.2'], openai: ['gpt-4o-mini'] } },
      MEDIUM: { preferred: { openai: ['gpt-4o'], anthropic: ['claude-sonnet-4-20250514'] } },
      COMPLEX: { preferred: { openai: ['o1-mini'], anthropic: ['claude-sonnet-4-20250514'] } },
      REASONING: { preferred: { openai: ['o1'], anthropic: ['claude-opus-4-20250514'] } },
    };
    this.localProviders = {
      ollama: { free: true, defaultTier: 'SIMPLE' },
      llamacpp: { free: true, defaultTier: 'SIMPLE' },
      lmstudio: { free: true, defaultTier: 'SIMPLE' },
    };
  }

  /**
   * Normalize a provider name using configured aliases (case-insensitive).
   * @param {string} provider - Raw provider name; falsy defaults to 'openai'.
   * @returns {string} Canonical lowercase provider name.
   */
  _normalizeProvider(provider) {
    if (!provider) return 'openai';
    const lower = provider.toLowerCase();
    return this.providerAliases[lower] || lower;
  }

  /**
   * Get tier from complexity score.
   * @param {number} complexityScore - Score from 0-100 (clamped; falsy/NaN treated as 0)
   * @returns {string} Tier name (SIMPLE, MEDIUM, COMPLEX, REASONING)
   */
  getTier(complexityScore) {
    // Round after clamping: the tier ranges are inclusive integer bands with
    // gaps between them ([0,25] then [26,50], ...), so a fractional score
    // such as 25.5 or 50.5 would otherwise match no band and fall through
    // to the crude fallback below, misclassifying as SIMPLE.
    const clamped = Math.max(0, Math.min(100, complexityScore || 0));
    const score = Math.round(clamped);

    for (const [tier, def] of Object.entries(TIER_DEFINITIONS)) {
      if (score >= def.range[0] && score <= def.range[1]) {
        return tier;
      }
    }

    // Defensive fallback; unreachable now that the score is an integer in [0,100].
    return score > 75 ? 'REASONING' : 'SIMPLE';
  }

  /**
   * Get the definition object for a tier; unknown tiers map to MEDIUM.
   * @param {string} tier
   * @returns {{description: string, range: number[], priority: number}}
   */
  getTierDefinition(tier) {
    return TIER_DEFINITIONS[tier] || TIER_DEFINITIONS.MEDIUM;
  }

  /**
   * Get tier priority (1-4); unknown tiers default to 2 (MEDIUM).
   * @param {string} tier
   * @returns {number}
   */
  getTierPriority(tier) {
    return TIER_DEFINITIONS[tier]?.priority || 2;
  }

  /**
   * Compare two tiers by priority.
   * @returns {number} Positive if tier1 > tier2, negative if lower, 0 if equal.
   */
  compareTiers(tier1, tier2) {
    return this.getTierPriority(tier1) - this.getTierPriority(tier2);
  }

  /**
   * Get preferred models for a tier and provider.
   * @param {string} tier - Tier name
   * @param {string} provider - Provider name (aliases accepted)
   * @returns {string[]} Array of model names; empty if none configured.
   */
  getPreferredModels(tier, provider) {
    const normalizedProvider = this._normalizeProvider(provider);
    return this.tierConfig[tier]?.preferred?.[normalizedProvider] || [];
  }

  /**
   * Select model for tier from TIER_* env var (mandatory).
   * @param {string} tier - Tier name (SIMPLE, MEDIUM, COMPLEX, REASONING)
   * @param {string} _unused - Deprecated parameter, kept for call compatibility.
   * @returns {Object} { model, provider, source, tier }
   * @throws {Error} When TIER_<tier> is missing or malformed.
   */
  selectModel(tier, _unused = null) {
    const tierConfig = config.modelTiers?.[tier];
    if (!tierConfig) {
      throw new Error(`TIER_${tier} not configured. Set TIER_${tier}=provider:model in .env`);
    }

    const parsed = this._parseTierConfig(tierConfig);
    if (!parsed) {
      throw new Error(`Invalid TIER_${tier} format. Expected provider:model, got: ${tierConfig}`);
    }

    return {
      model: parsed.model,
      provider: parsed.provider,
      source: 'env_tier',
      tier,
    };
  }

  /**
   * Parse a tier config string (format: provider:model).
   * Examples: "ollama:llama3.2", "azure-openai:gpt-5.2-chat", "openai:gpt-4o".
   * A string without a colon is treated as a bare model name on the default
   * provider. Splits on the FIRST colon so model names may contain colons.
   * @param {string} configStr
   * @returns {{provider: string, model: string}|null} null when unparsable.
   */
  _parseTierConfig(configStr) {
    if (!configStr || typeof configStr !== 'string') return null;

    const colonIndex = configStr.indexOf(':');
    if (colonIndex === -1) {
      // No colon - treat as model name, use default provider
      return {
        provider: config.modelProvider?.type || 'openai',
        model: configStr.trim(),
      };
    }

    const provider = configStr.substring(0, colonIndex).trim().toLowerCase();
    const model = configStr.substring(colonIndex + 1).trim();

    if (!provider || !model) return null;

    return { provider, model };
  }

  /**
   * Get the model configured for a provider from .env-backed config.
   * @param {string} provider - Canonical provider name.
   * @returns {string|null} Configured model/deployment id, or null if unknown.
   */
  _getProviderModel(provider) {
    switch (provider) {
      case 'azure-openai':
      case 'azureopenai':
        // Azure identifies models by deployment name rather than model id.
        return config.azureOpenAI?.deployment || null;
      case 'openai':
        return config.openai?.model || null;
      case 'ollama':
        return config.ollama?.model || null;
      case 'openrouter':
        return config.openrouter?.model || null;
      case 'llamacpp':
        return config.llamacpp?.model || null;
      case 'lmstudio':
        return config.lmstudio?.model || null;
      case 'bedrock':
        return config.bedrock?.modelId || null;
      case 'zai':
        return config.zai?.model || null;
      case 'moonshot':
        return config.moonshot?.model || null;
      case 'vertex':
        return config.vertex?.model || null;
      case 'databricks':
        return config.modelProvider?.defaultModel || null;
      default:
        return null;
    }
  }

  /**
   * Get the provider for a specific tier (from env TIER_* or fallback to
   * the globally configured provider).
   * @param {string} tier
   * @returns {string}
   */
  getProviderForTier(tier) {
    const tierConfig = config.modelTiers?.[tier];
    if (tierConfig) {
      const parsed = this._parseTierConfig(tierConfig);
      if (parsed) return parsed.provider;
    }
    return config.modelProvider?.type || 'openai';
  }

  /**
   * Get a fallback model if the provider can't handle the requested tier,
   * walking DOWN the tier ladder until the provider has a preferred model.
   * @param {string} requestedTier
   * @param {string} provider
   * @returns {{model: string, tier: string}|null} null when no lower tier works.
   */
  _getFallbackModel(requestedTier, provider) {
    const tierOrder = ['REASONING', 'COMPLEX', 'MEDIUM', 'SIMPLE'];
    const startIndex = tierOrder.indexOf(requestedTier);

    // Try lower tiers
    for (let i = startIndex + 1; i < tierOrder.length; i++) {
      const fallbackTier = tierOrder[i];
      const models = this.getPreferredModels(fallbackTier, provider);

      if (models.length > 0) {
        logger.debug({
          from: requestedTier,
          to: fallbackTier,
          provider,
          model: models[0],
        }, '[ModelTiers] Downgrading tier');

        return { model: models[0], tier: fallbackTier };
      }
    }

    return null;
  }

  /**
   * Check if a provider has any preferred model for a tier.
   * @returns {boolean}
   */
  canHandleTier(provider, tier) {
    const normalizedProvider = this._normalizeProvider(provider);
    const models = this.getPreferredModels(tier, normalizedProvider);
    return models.length > 0;
  }

  /**
   * Check if provider is local/free per the localProviders config.
   * @returns {boolean}
   */
  isLocalProvider(provider) {
    const normalizedProvider = this._normalizeProvider(provider);
    return this.localProviders[normalizedProvider]?.free === true;
  }

  /**
   * Get all providers that have preferred models for a tier.
   * @returns {string[]}
   */
  getProvidersForTier(tier) {
    const tierConfig = this.tierConfig[tier];
    if (!tierConfig?.preferred) return [];
    return Object.keys(tierConfig.preferred);
  }

  /**
   * Get all tiers a provider can handle.
   * @returns {string[]}
   */
  getTiersForProvider(provider) {
    const normalizedProvider = this._normalizeProvider(provider);
    const tiers = [];

    for (const tier of Object.keys(TIER_DEFINITIONS)) {
      if (this.canHandleTier(normalizedProvider, tier)) {
        tiers.push(tier);
      }
    }

    return tiers;
  }

  /**
   * Build tier/provider stats for the metrics endpoint.
   * @returns {{tiers: Object, providers: Object}}
   */
  getTierStats() {
    const stats = {
      tiers: {},
      providers: {},
    };

    for (const [tier, def] of Object.entries(TIER_DEFINITIONS)) {
      const providers = this.getProvidersForTier(tier);
      stats.tiers[tier] = {
        ...def,
        providerCount: providers.length,
        providers: providers,
      };
    }

    // Count models per provider
    const allProviders = new Set();
    for (const tierConfig of Object.values(this.tierConfig)) {
      if (tierConfig.preferred) {
        Object.keys(tierConfig.preferred).forEach(p => allProviders.add(p));
      }
    }

    for (const provider of allProviders) {
      stats.providers[provider] = {
        tiers: this.getTiersForProvider(provider),
        isLocal: this.isLocalProvider(provider),
      };
    }

    return stats;
  }

  /**
   * Reload configuration from disk (for hot reload).
   */
  reload() {
    this._loadConfig();
    logger.info('[ModelTiers] Configuration reloaded');
  }
}
|
|
350
|
+
|
|
351
|
+
// Singleton instance
|
|
352
|
+
let instance = null;
|
|
353
|
+
|
|
354
|
+
function getModelTierSelector() {
|
|
355
|
+
if (!instance) {
|
|
356
|
+
instance = new ModelTierSelector();
|
|
357
|
+
}
|
|
358
|
+
return instance;
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
module.exports = {
|
|
362
|
+
ModelTierSelector,
|
|
363
|
+
getModelTierSelector,
|
|
364
|
+
TIER_DEFINITIONS,
|
|
365
|
+
};
|
package/src/server.js
CHANGED
|
@@ -201,9 +201,9 @@ async function start() {
|
|
|
201
201
|
|
|
202
202
|
const app = createApp();
|
|
203
203
|
|
|
204
|
-
// Wait for Ollama if it's the configured provider or
|
|
204
|
+
// Wait for Ollama if it's the configured provider or referenced in tier config
|
|
205
205
|
const provider = config.modelProvider?.type?.toLowerCase();
|
|
206
|
-
if (provider === "ollama" || config.
|
|
206
|
+
if (provider === "ollama" || config.tiersReferenceOllama()) {
|
|
207
207
|
await waitForOllama();
|
|
208
208
|
}
|
|
209
209
|
|
package/src/sessions/cleanup.js
CHANGED
|
@@ -4,9 +4,9 @@ const { cleanupOldSessions, cleanupOldHistory } = require("./store");
|
|
|
4
4
|
class SessionCleanupManager {
|
|
5
5
|
constructor(options = {}) {
|
|
6
6
|
this.enabled = options.enabled !== false;
|
|
7
|
-
this.intervalMs = options.intervalMs ||
|
|
8
|
-
this.sessionMaxAgeMs = options.sessionMaxAgeMs ||
|
|
9
|
-
this.historyMaxAgeMs = options.historyMaxAgeMs ||
|
|
7
|
+
this.intervalMs = options.intervalMs || 300000; // 5 minutes (was 1 hour)
|
|
8
|
+
this.sessionMaxAgeMs = options.sessionMaxAgeMs || 24 * 60 * 60 * 1000; // 1 day (was 7 days)
|
|
9
|
+
this.historyMaxAgeMs = options.historyMaxAgeMs || 7 * 24 * 60 * 60 * 1000; // 7 days (was 30 days)
|
|
10
10
|
this.timer = null;
|
|
11
11
|
}
|
|
12
12
|
|
package/src/sessions/record.js
CHANGED
|
@@ -1,5 +1,8 @@
|
|
|
1
1
|
const { appendSessionTurn } = require("./store");
|
|
2
2
|
|
|
3
|
+
// Cap in-memory history to prevent unbounded growth during long tool loops
|
|
4
|
+
const MAX_IN_MEMORY_HISTORY = 100;
|
|
5
|
+
|
|
3
6
|
function ensureSessionShape(session) {
|
|
4
7
|
if (!session) return null;
|
|
5
8
|
if (!Array.isArray(session.history)) {
|
|
@@ -19,7 +22,13 @@ function appendTurnToSession(session, entry) {
|
|
|
19
22
|
target.history.push(turn);
|
|
20
23
|
target.updatedAt = turn.timestamp;
|
|
21
24
|
|
|
22
|
-
if
|
|
25
|
+
// Trim in-memory history if it exceeds the cap
|
|
26
|
+
if (target.history.length > MAX_IN_MEMORY_HISTORY) {
|
|
27
|
+
target.history = target.history.slice(-MAX_IN_MEMORY_HISTORY);
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
// Skip DB write for ephemeral sessions (auto-generated, no client session ID)
|
|
31
|
+
if (target.id && !target._ephemeral) {
|
|
23
32
|
appendSessionTurn(target.id, turn, target.metadata ?? {});
|
|
24
33
|
}
|
|
25
34
|
|
package/src/sessions/store.js
CHANGED
|
@@ -4,11 +4,15 @@ const logger = require("../logger");
|
|
|
4
4
|
const selectSessionStmt = db.prepare(
|
|
5
5
|
"SELECT id, created_at, updated_at, metadata FROM sessions WHERE id = ?",
|
|
6
6
|
);
|
|
7
|
+
// Limit history to last 50 entries to prevent unbounded memory growth.
|
|
8
|
+
// Older entries remain in DB for auditing but aren't loaded into memory.
|
|
9
|
+
const MAX_HISTORY_ROWS = 50;
|
|
7
10
|
const selectHistoryStmt = db.prepare(
|
|
8
11
|
`SELECT role, type, status, content, metadata, timestamp
|
|
9
12
|
FROM session_history
|
|
10
13
|
WHERE session_id = ?
|
|
11
|
-
ORDER BY timestamp
|
|
14
|
+
ORDER BY timestamp DESC, id DESC
|
|
15
|
+
LIMIT ${MAX_HISTORY_ROWS}`,
|
|
12
16
|
);
|
|
13
17
|
const insertSessionStmt = db.prepare(
|
|
14
18
|
"INSERT INTO sessions (id, created_at, updated_at, metadata) VALUES (@id, @created_at, @updated_at, @metadata)",
|
|
@@ -75,7 +79,8 @@ function getSession(sessionId) {
|
|
|
75
79
|
if (!sessionId) return null;
|
|
76
80
|
const sessionRow = selectSessionStmt.get(sessionId);
|
|
77
81
|
if (!sessionRow) return null;
|
|
78
|
-
|
|
82
|
+
// Query returns rows in DESC order (for LIMIT to grab newest), reverse to ASC
|
|
83
|
+
const historyRows = selectHistoryStmt.all(sessionId).reverse();
|
|
79
84
|
return toSession(sessionRow, historyRows);
|
|
80
85
|
}
|
|
81
86
|
|
package/src/tools/agent-task.js
CHANGED
|
@@ -2,6 +2,50 @@ const { registerTool } = require(".");
|
|
|
2
2
|
const { spawnAgent, autoSelectAgent } = require("../agents");
|
|
3
3
|
const logger = require("../logger");
|
|
4
4
|
|
|
5
|
+
/**
 * Extract plain text from an Anthropic-style content-block array.
 * Handles: '[{"type":"text","text":"..."}]' -> "...".
 * Non-string input, strings that are not JSON-array-shaped, unparsable
 * JSON, and arrays containing no text blocks are all returned unchanged.
 * @param {*} content - Raw agent result, possibly a JSON-encoded block array.
 * @returns {*} Joined text of the blocks (double-newline separated), or the original value.
 */
function extractTextFromContentBlocks(content) {
  if (typeof content !== 'string') {
    return content;
  }

  const candidate = content.trim();
  if (!candidate.startsWith('[')) {
    return content;
  }

  let blocks;
  try {
    blocks = JSON.parse(candidate);
  } catch {
    // Not valid JSON — pass the raw string through untouched.
    return content;
  }

  if (!Array.isArray(blocks)) {
    return content;
  }

  // Collect the text of every object block that carries a string `text`
  // field (covers both typed text blocks and untyped ones).
  const pieces = [];
  for (const entry of blocks) {
    if (entry && typeof entry === 'object' && typeof entry.text === 'string') {
      pieces.push(entry.text);
    }
  }

  return pieces.length > 0 ? pieces.join('\n\n') : content;
}
|
|
48
|
+
|
|
5
49
|
function registerAgentTaskTool() {
|
|
6
50
|
registerTool(
|
|
7
51
|
"Task",
|
|
@@ -49,10 +93,13 @@ function registerAgentTaskTool() {
|
|
|
49
93
|
});
|
|
50
94
|
|
|
51
95
|
if (result.success) {
|
|
96
|
+
// Extract text from Anthropic content blocks if present
|
|
97
|
+
const cleanContent = extractTextFromContentBlocks(result.result);
|
|
98
|
+
|
|
52
99
|
return {
|
|
53
100
|
ok: true,
|
|
54
101
|
status: 200,
|
|
55
|
-
content:
|
|
102
|
+
content: cleanContent,
|
|
56
103
|
metadata: {
|
|
57
104
|
agentType: subagentType,
|
|
58
105
|
agentId: result.stats.agentId,
|
package/src/tools/index.js
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
const logger = require("../logger");
|
|
2
2
|
const { truncateToolOutput } = require("./truncate");
|
|
3
|
+
const { isGPTProvider, formatToolResultForGPT } = require("../clients/gpt-utils");
|
|
3
4
|
|
|
4
5
|
const registry = new Map();
|
|
5
6
|
const registryLowercase = new Map();
|
|
@@ -254,7 +255,18 @@ async function executeToolCall(call, context = {}) {
|
|
|
254
255
|
const formatted = normalizeHandlerResult(result);
|
|
255
256
|
|
|
256
257
|
// Apply tool output truncation for token efficiency
|
|
257
|
-
|
|
258
|
+
let truncatedContent = truncateToolOutput(normalisedCall.name, formatted.content);
|
|
259
|
+
|
|
260
|
+
// GPT-specific formatting temporarily disabled for testing
|
|
261
|
+
// const isGPT = context?.provider && isGPTProvider(context.provider);
|
|
262
|
+
// if (isGPT) {
|
|
263
|
+
// truncatedContent = formatToolResultForGPT(
|
|
264
|
+
// normalisedCall.name,
|
|
265
|
+
// truncatedContent,
|
|
266
|
+
// normalisedCall.arguments
|
|
267
|
+
// );
|
|
268
|
+
// }
|
|
269
|
+
const isGPT = false; // Disabled for testing
|
|
258
270
|
|
|
259
271
|
return {
|
|
260
272
|
id: normalisedCall.id,
|
|
@@ -267,7 +279,8 @@ async function executeToolCall(call, context = {}) {
|
|
|
267
279
|
registered: true,
|
|
268
280
|
truncated: truncatedContent !== formatted.content,
|
|
269
281
|
originalLength: formatted.content?.length,
|
|
270
|
-
truncatedLength: truncatedContent?.length
|
|
282
|
+
truncatedLength: truncatedContent?.length,
|
|
283
|
+
gptFormatted: isGPT,
|
|
271
284
|
},
|
|
272
285
|
};
|
|
273
286
|
} catch (err) {
|