@librechat/agents 3.1.62 → 3.1.63

This diff shows the changes between two publicly released versions of this package. It is provided for informational purposes only and reflects the package contents as published to the supported public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@librechat/agents",
3
- "version": "3.1.62",
3
+ "version": "3.1.63",
4
4
  "main": "./dist/cjs/main.cjs",
5
5
  "module": "./dist/esm/main.mjs",
6
6
  "types": "./dist/types/index.d.ts",
@@ -136,7 +136,7 @@
136
136
  "@opentelemetry/sdk-node": "^0.207.0",
137
137
  "@scarf/scarf": "^1.4.0",
138
138
  "ai-tokenizer": "^1.0.6",
139
- "axios": "^1.13.5",
139
+ "axios": "1.13.6",
140
140
  "cheerio": "^1.0.0",
141
141
  "dotenv": "^16.4.7",
142
142
  "https-proxy-agent": "^7.0.6",
@@ -153,7 +153,7 @@
153
153
  "@anthropic-ai/vertex-sdk": "^0.12.0",
154
154
  "@eslint/compat": "^1.2.7",
155
155
  "@rollup/plugin-alias": "^5.1.0",
156
- "@rollup/plugin-commonjs": "^28.0.3",
156
+ "@rollup/plugin-commonjs": "^29.0.2",
157
157
  "@rollup/plugin-json": "^6.1.0",
158
158
  "@rollup/plugin-node-resolve": "^15.2.3",
159
159
  "@rollup/plugin-typescript": "^12.1.2",
@@ -170,11 +170,11 @@
170
170
  "husky": "^9.1.7",
171
171
  "jest": "^30.2.0",
172
172
  "jest-util": "^30.2.0",
173
- "lint-staged": "^15.2.7",
173
+ "lint-staged": "^16.4.0",
174
174
  "prettier": "^3.6.2",
175
175
  "rollup": "^4.59.0",
176
176
  "rollup-plugin-cleandir": "^2.0.0",
177
- "ts-jest": "^29.4.5",
177
+ "ts-jest": "^29.4.6",
178
178
  "ts-node": "^10.9.2",
179
179
  "tsc-alias": "^1.8.10",
180
180
  "tsconfig-paths": "^4.2.0",
@@ -7,29 +7,19 @@ import type {
7
7
  BaseMessageFields,
8
8
  } from '@langchain/core/messages';
9
9
  import type { RunnableConfig, Runnable } from '@langchain/core/runnables';
10
- import type * as t from '@/types';
11
10
  import type { createPruneMessages } from '@/messages';
11
+ import type * as t from '@/types';
12
+ import {
13
+ ANTHROPIC_TOOL_TOKEN_MULTIPLIER,
14
+ DEFAULT_TOOL_TOKEN_MULTIPLIER,
15
+ ContentTypes,
16
+ Providers,
17
+ } from '@/common';
12
18
  import { createSchemaOnlyTools } from '@/tools/schema';
13
19
  import { addCacheControl } from '@/messages/cache';
14
- import { ContentTypes, Providers } from '@/common';
15
20
  import { DEFAULT_RESERVE_RATIO } from '@/messages';
16
21
  import { toJsonSchema } from '@/utils/schema';
17
22
 
18
- /**
19
- * Anthropic direct API tool schema overhead multiplier.
20
- * Empirically calibrated against real MCP tool sets (29 tools).
21
- * Accounts for Anthropic's internal XML-like tool encoding plus
22
- * a ~300-token hidden tool-system preamble.
23
- */
24
- const ANTHROPIC_TOOL_TOKEN_MULTIPLIER = 2.6;
25
-
26
- /**
27
- * Default tool schema overhead multiplier for all non-Anthropic providers.
28
- * Covers OpenAI function-calling format, Bedrock, and other providers.
29
- * Empirically calibrated at ~1.4× the raw JSON token count.
30
- */
31
- const DEFAULT_TOOL_TOKEN_MULTIPLIER = 1.4;
32
-
33
23
  /**
34
24
  * Encapsulates agent-specific state that can vary between agents in a multi-agent system
35
25
  */
@@ -64,6 +54,7 @@ export class AgentContext {
64
54
  initialSummary,
65
55
  contextPruningConfig,
66
56
  maxToolResultChars,
57
+ toolSchemaTokens,
67
58
  } = agentConfig;
68
59
 
69
60
  const agentContext = new AgentContext({
@@ -104,14 +95,22 @@ export class AgentContext {
104
95
  const tokenMap = indexTokenCountMap || {};
105
96
  agentContext.baseIndexTokenCountMap = { ...tokenMap };
106
97
  agentContext.indexTokenCountMap = tokenMap;
107
- agentContext.tokenCalculationPromise = agentContext
108
- .calculateInstructionTokens(tokenCounter)
109
- .then(() => {
110
- agentContext.updateTokenMapWithInstructions(tokenMap);
111
- })
112
- .catch((err) => {
113
- console.error('Error calculating instruction tokens:', err);
114
- });
98
+
99
+ if (toolSchemaTokens != null && toolSchemaTokens > 0) {
100
+ /** Use pre-computed (cached) tool schema tokens — skip calculateInstructionTokens */
101
+ agentContext.toolSchemaTokens = toolSchemaTokens;
102
+ agentContext.tokenCalculationPromise = Promise.resolve();
103
+ agentContext.updateTokenMapWithInstructions(tokenMap);
104
+ } else {
105
+ agentContext.tokenCalculationPromise = agentContext
106
+ .calculateInstructionTokens(tokenCounter)
107
+ .then(() => {
108
+ agentContext.updateTokenMapWithInstructions(tokenMap);
109
+ })
110
+ .catch((err) => {
111
+ console.error('Error calculating instruction tokens:', err);
112
+ });
113
+ }
115
114
  } else if (indexTokenCountMap) {
116
115
  agentContext.baseIndexTokenCountMap = { ...indexTokenCountMap };
117
116
  agentContext.indexTokenCountMap = indexTokenCountMap;
@@ -0,0 +1,14 @@
1
+ /**
2
+ * Anthropic direct API tool schema overhead multiplier.
3
+ * Empirically calibrated against real MCP tool sets (29 tools).
4
+ * Accounts for Anthropic's internal XML-like tool encoding plus
5
+ * a ~300-token hidden tool-system preamble.
6
+ */
7
+ export const ANTHROPIC_TOOL_TOKEN_MULTIPLIER = 2.6;
8
+
9
+ /**
10
+ * Default tool schema overhead multiplier for all non-Anthropic providers.
11
+ * Covers OpenAI function-calling format, Bedrock, and other providers.
12
+ * Empirically calibrated at ~1.4× the raw JSON token count.
13
+ */
14
+ export const DEFAULT_TOOL_TOKEN_MULTIPLIER = 1.4;
@@ -1,2 +1,3 @@
1
1
  // src/common/index.ts
2
- export * from './enum';
2
+ export * from './constants';
3
+ export * from './enum';
package/src/run.ts CHANGED
@@ -305,9 +305,13 @@ export class Run<_T extends t.BaseGraphState> {
305
305
  ) {
306
306
  const userId = config.configurable?.user_id;
307
307
  const sessionId = config.configurable?.thread_id;
308
+ const primaryContext = this.Graph.agentContexts.get(
309
+ this.Graph.defaultAgentId
310
+ );
308
311
  const traceMetadata = {
309
312
  messageId: this.id,
310
313
  parentMessageId: config.configurable?.requestBody?.parentMessageId,
314
+ agentName: primaryContext?.name,
311
315
  };
312
316
  const handler = new CallbackHandler({
313
317
  userId,
@@ -453,8 +457,12 @@ export class Run<_T extends t.BaseGraphState> {
453
457
  ) {
454
458
  const userId = chainOptions.configurable?.user_id;
455
459
  const sessionId = chainOptions.configurable?.thread_id;
460
+ const titleContext = this.Graph?.agentContexts.get(
461
+ this.Graph.defaultAgentId
462
+ );
456
463
  const traceMetadata = {
457
464
  messageId: 'title-' + this.id,
465
+ agentName: titleContext?.name,
458
466
  };
459
467
  const handler = new CallbackHandler({
460
468
  userId,
@@ -0,0 +1,91 @@
1
+ import { CallbackHandler } from '@langfuse/langchain';
2
+ import { Providers } from '@/common';
3
+ import { Run } from '@/run';
4
+
5
+ jest.mock('@langfuse/langchain', () => ({
6
+ CallbackHandler: jest.fn().mockImplementation(() => ({})),
7
+ }));
8
+
9
+ const MockedCallbackHandler = CallbackHandler as jest.MockedClass<
10
+ typeof CallbackHandler
11
+ >;
12
+
13
+ async function createTestRun(agentName?: string): Promise<Run<never>> {
14
+ const run = await Run.create({
15
+ runId: 'test-run-id',
16
+ graphConfig: {
17
+ type: 'standard',
18
+ agents: [
19
+ {
20
+ agentId: 'agent_abc123',
21
+ ...(agentName != null && { name: agentName }),
22
+ provider: Providers.OPENAI,
23
+ clientOptions: { model: 'gpt-4' },
24
+ tools: [],
25
+ },
26
+ ],
27
+ },
28
+ });
29
+
30
+ const emptyStream = (async function* (): AsyncGenerator {
31
+ /* no events */
32
+ })();
33
+ run.graphRunnable = { streamEvents: () => emptyStream } as never;
34
+
35
+ return run;
36
+ }
37
+
38
+ describe('Langfuse trace metadata includes agentName', () => {
39
+ const originalEnv = process.env;
40
+
41
+ beforeEach(() => {
42
+ jest.clearAllMocks();
43
+ process.env = {
44
+ ...originalEnv,
45
+ LANGFUSE_SECRET_KEY: 'sk-test',
46
+ LANGFUSE_PUBLIC_KEY: 'pk-test',
47
+ LANGFUSE_BASE_URL: 'https://langfuse.test',
48
+ };
49
+ });
50
+
51
+ afterEach(() => {
52
+ process.env = originalEnv;
53
+ });
54
+
55
+ it('passes agentName in processStream traceMetadata when agent has a name', async () => {
56
+ const run = await createTestRun('DWAINE');
57
+ await run.processStream(
58
+ { messages: [] },
59
+ { configurable: { thread_id: 't1', user_id: 'u1' }, version: 'v2' }
60
+ );
61
+
62
+ expect(MockedCallbackHandler).toHaveBeenCalledTimes(1);
63
+ const ctorArgs = MockedCallbackHandler.mock.calls[0][0];
64
+ expect(ctorArgs?.traceMetadata).toMatchObject({ agentName: 'DWAINE' });
65
+ });
66
+
67
+ it('falls back to agentId when agent has no explicit name', async () => {
68
+ const run = await createTestRun();
69
+ await run.processStream(
70
+ { messages: [] },
71
+ { configurable: { thread_id: 't1', user_id: 'u1' }, version: 'v2' }
72
+ );
73
+
74
+ expect(MockedCallbackHandler).toHaveBeenCalledTimes(1);
75
+ const ctorArgs = MockedCallbackHandler.mock.calls[0][0];
76
+ expect(ctorArgs?.traceMetadata).toMatchObject({
77
+ agentName: 'agent_abc123',
78
+ });
79
+ });
80
+
81
+ it('does not create CallbackHandler when Langfuse env vars are missing', async () => {
82
+ delete process.env.LANGFUSE_SECRET_KEY;
83
+ const run = await createTestRun('MAIA');
84
+ await run.processStream(
85
+ { messages: [] },
86
+ { configurable: { thread_id: 't1', user_id: 'u1' }, version: 'v2' }
87
+ );
88
+
89
+ expect(MockedCallbackHandler).not.toHaveBeenCalled();
90
+ });
91
+ });
@@ -429,6 +429,8 @@ export interface AgentInputs {
429
429
  initialSummary?: { text: string; tokenCount: number };
430
430
  contextPruningConfig?: ContextPruningConfig;
431
431
  maxToolResultChars?: number;
432
+ /** Pre-computed tool schema token count (from cache). Skips recalculation when provided. */
433
+ toolSchemaTokens?: number;
432
434
  }
433
435
 
434
436
  export interface ContextPruningConfig {