@librechat/agents 2.4.78 → 2.4.80

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/dist/cjs/llm/anthropic/index.cjs +3 -0
  2. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  3. package/dist/cjs/llm/google/index.cjs +3 -0
  4. package/dist/cjs/llm/google/index.cjs.map +1 -1
  5. package/dist/cjs/llm/ollama/index.cjs +3 -0
  6. package/dist/cjs/llm/ollama/index.cjs.map +1 -1
  7. package/dist/cjs/llm/openai/index.cjs +18 -1
  8. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  9. package/dist/cjs/llm/openai/utils/index.cjs +6 -1
  10. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
  11. package/dist/cjs/llm/openrouter/index.cjs +3 -0
  12. package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
  13. package/dist/cjs/llm/vertexai/index.cjs +1 -1
  14. package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
  15. package/dist/cjs/messages/prune.cjs +28 -0
  16. package/dist/cjs/messages/prune.cjs.map +1 -1
  17. package/dist/esm/llm/anthropic/index.mjs +3 -0
  18. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  19. package/dist/esm/llm/google/index.mjs +3 -0
  20. package/dist/esm/llm/google/index.mjs.map +1 -1
  21. package/dist/esm/llm/ollama/index.mjs +3 -0
  22. package/dist/esm/llm/ollama/index.mjs.map +1 -1
  23. package/dist/esm/llm/openai/index.mjs +18 -1
  24. package/dist/esm/llm/openai/index.mjs.map +1 -1
  25. package/dist/esm/llm/openai/utils/index.mjs +6 -1
  26. package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
  27. package/dist/esm/llm/openrouter/index.mjs +3 -0
  28. package/dist/esm/llm/openrouter/index.mjs.map +1 -1
  29. package/dist/esm/llm/vertexai/index.mjs +1 -1
  30. package/dist/esm/llm/vertexai/index.mjs.map +1 -1
  31. package/dist/esm/messages/prune.mjs +28 -0
  32. package/dist/esm/messages/prune.mjs.map +1 -1
  33. package/dist/types/llm/anthropic/index.d.ts +1 -0
  34. package/dist/types/llm/google/index.d.ts +1 -0
  35. package/dist/types/llm/ollama/index.d.ts +1 -0
  36. package/dist/types/llm/openai/index.d.ts +4 -0
  37. package/dist/types/llm/openrouter/index.d.ts +1 -0
  38. package/dist/types/llm/vertexai/index.d.ts +1 -1
  39. package/package.json +2 -2
  40. package/src/llm/anthropic/index.ts +4 -0
  41. package/src/llm/google/index.ts +4 -0
  42. package/src/llm/ollama/index.ts +3 -0
  43. package/src/llm/openai/index.ts +19 -1
  44. package/src/llm/openai/utils/index.ts +7 -1
  45. package/src/llm/openrouter/index.ts +3 -0
  46. package/src/llm/vertexai/index.ts +2 -2
  47. package/src/messages/prune.ts +51 -0
  48. package/src/scripts/tools.ts +4 -1
  49. package/src/utils/llmConfig.ts +24 -0
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@librechat/agents",
-  "version": "2.4.78",
+  "version": "2.4.80",
   "main": "./dist/cjs/main.cjs",
   "module": "./dist/esm/main.mjs",
   "types": "./dist/types/index.d.ts",
@@ -51,7 +51,7 @@
     "caching": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/caching.ts --name 'Jo' --location 'New York, NY'",
     "thinking": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/thinking.ts --name 'Jo' --location 'New York, NY'",
     "memory": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/memory.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
-    "tool-test": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tools.ts --provider 'bedrock' --name 'Jo' --location 'New York, NY'",
+    "tool-test": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tools.ts --provider 'openrouter' --name 'Jo' --location 'New York, NY'",
     "search": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/search.ts --provider 'anthropic' --name 'Jo' --location 'New York, NY'",
     "ant_web_search": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/ant_web_search.ts --name 'Jo' --location 'New York, NY'",
     "abort": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/abort.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
package/src/llm/anthropic/index.ts CHANGED
@@ -141,6 +141,10 @@ export class CustomAnthropic extends ChatAnthropicMessages {
     this._lc_stream_delay = fields?._lc_stream_delay ?? 25;
   }
 
+  static lc_name(): 'LibreChatAnthropic' {
+    return 'LibreChatAnthropic';
+  }
+
   /**
    * Get the parameters used to invoke the model
    */
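
Note: this release adds a LibreChat-specific static lc_name() override to every custom chat class (this hunk and the matching ones below). lc_name() is the class identifier LangChain uses for serialization, so the overrides let callers tell these wrappers apart from the upstream classes they extend. A minimal sketch of reading it at runtime; the model binding is illustrative, and whether CustomAnthropic is re-exported from the package root is an assumption:

    // lc_name() is static, so it is read off the constructor.
    declare const model: CustomAnthropic; // assumed importable from the package
    const name = (model.constructor as typeof CustomAnthropic).lc_name();
    // name === 'LibreChatAnthropic'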
package/src/llm/google/index.ts CHANGED
@@ -107,6 +107,10 @@ export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
     this.streamUsage = fields.streamUsage ?? this.streamUsage;
   }
 
+  static lc_name(): 'LibreChatGoogleGenerativeAI' {
+    return 'LibreChatGoogleGenerativeAI';
+  }
+
   invocationParams(
     options?: this['ParsedCallOptions']
   ): Omit<GenerateContentRequest, 'contents'> {
package/src/llm/ollama/index.ts CHANGED
@@ -13,6 +13,9 @@ import {
 } from './utils';
 
 export class ChatOllama extends BaseChatOllama {
+  static lc_name(): 'LibreChatOllama' {
+    return 'LibreChatOllama';
+  }
   async *_streamResponseChunks(
     messages: BaseMessage[],
     options: this['ParsedCallOptions'],
package/src/llm/openai/index.ts CHANGED
@@ -198,6 +198,9 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
+  static lc_name(): string {
+    return 'LibreChatOpenAI';
+  }
   protected _getClientOptions(
     options?: OpenAICoreRequestOptions
   ): OpenAICoreRequestOptions {
@@ -233,7 +236,8 @@
   getReasoningParams(
     options?: this['ParsedCallOptions']
   ): OpenAIClient.Reasoning | undefined {
-    if (!isReasoningModel(this.model)) {
+    const lc_name = (this.constructor as typeof ChatOpenAI).lc_name();
+    if (lc_name === 'LibreChatOpenAI' && !isReasoningModel(this.model)) {
       return;
     }
 
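Note: getReasoningParams() is inherited by the OpenAI-compatible subclasses further down (AzureChatOpenAI, ChatDeepSeek, ChatXAI, and ChatOpenRouter), each of which now reports its own lc_name(). Gating the isReasoningModel() check on lc_name === 'LibreChatOpenAI' confines that OpenAI-centric model-name heuristic to the base class, so a subclass can still return reasoning params for models the heuristic would reject. A rough sketch of the dispatch, with hypothetical class names:

    // Hypothetical illustration of how the guard behaves for a subclass:
    class Base {
      static lc_name(): string { return 'LibreChatOpenAI'; }
    }
    class Sub extends Base {
      static lc_name(): string { return 'LibreChatOpenRouter'; }
    }
    const name = (new Sub().constructor as typeof Base).lc_name();
    // name === 'LibreChatOpenRouter', so the isReasoningModel() early
    // return in getReasoningParams() is skipped for the subclass.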
@@ -345,6 +349,10 @@
       } else if ('reasoning' in delta) {
         chunk.additional_kwargs.reasoning_content = delta.reasoning;
       }
+      if ('provider_specific_fields' in delta) {
+        chunk.additional_kwargs.provider_specific_fields =
+          delta.provider_specific_fields;
+      }
       defaultRole = delta.role ?? defaultRole;
       const newTokenIndices = {
         prompt: options.promptIndex ?? 0,
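
Note: OpenAI-compatible gateways such as LiteLLM can attach non-OpenAI data to a streaming delta under provider_specific_fields (for example, Anthropic thinking blocks). This hunk copies the field onto additional_kwargs so it survives into the accumulated message chunk. A sketch of the delta shape this appears intended to handle; all values are illustrative:

    const delta = {
      role: 'assistant',
      content: '',
      reasoning: 'model reasoning text',
      provider_specific_fields: {
        thinking_blocks: [
          { type: 'thinking', thinking: 'model reasoning text', signature: 'sig' },
        ],
      },
    };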
@@ -435,6 +443,9 @@ export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
+  static lc_name(): 'LibreChatAzureOpenAI' {
+    return 'LibreChatAzureOpenAI';
+  }
   /**
    * Returns backwards compatible reasoning parameters from constructor params and call options
    * @internal
@@ -576,6 +587,9 @@ export class ChatDeepSeek extends OriginalChatDeepSeek {
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
+  static lc_name(): 'LibreChatDeepSeek' {
+    return 'LibreChatDeepSeek';
+  }
   protected _getClientOptions(
     options?: OpenAICoreRequestOptions
   ): OpenAICoreRequestOptions {
@@ -644,6 +658,10 @@ export class ChatXAI extends OriginalChatXAI {
     }
   }
 
+  static lc_name(): 'LibreChatXAI' {
+    return 'LibreChatXAI';
+  }
+
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
package/src/llm/openai/utils/index.ts CHANGED
@@ -298,10 +298,16 @@ export function _convertMessagesToOpenAIParams(
       role = 'developer';
     }
 
+    let hasAnthropicThinkingBlock: boolean = false;
+
     const content =
       typeof message.content === 'string'
         ? message.content
         : message.content.map((m) => {
+            if ('type' in m && m.type === 'thinking') {
+              hasAnthropicThinkingBlock = true;
+              return m;
+            }
            if (isDataContentBlock(m)) {
              return convertToProviderContentBlock(
                m,
@@ -326,7 +332,7 @@
       completionParam.tool_calls = message.tool_calls.map(
         convertLangChainToolCallToOpenAI
       );
-      completionParam.content = '';
+      completionParam.content = hasAnthropicThinkingBlock ? content : '';
     } else {
       if (message.additional_kwargs.tool_calls != null) {
         completionParam.tool_calls = message.additional_kwargs.tool_calls;
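
Note: before this change, completionParam.content was unconditionally blanked to '' whenever an assistant message carried tool_calls. These two hunks keep Anthropic-style { type: 'thinking' } content blocks in that case, since providers that sign thinking blocks expect them to be replayed verbatim alongside the tool calls on later turns. A sketch of the completion param that now round-trips; the shape is inferred from the hunks above, values are illustrative:

    const completionParam = {
      role: 'assistant',
      tool_calls: [
        { id: 'call_1', type: 'function', function: { name: 'search', arguments: '{}' } },
      ],
      // Preserved instead of '' because a thinking block is present:
      content: [{ type: 'thinking', thinking: 'model reasoning text', signature: 'sig' }],
    };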
package/src/llm/openrouter/index.ts CHANGED
@@ -22,6 +22,9 @@ export class ChatOpenRouter extends ChatOpenAI {
       },
     });
   }
+  static lc_name(): 'LibreChatOpenRouter' {
+    return 'LibreChatOpenRouter';
+  }
   protected override _convertOpenAIDeltaToBaseMessageChunk(
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     delta: Record<string, any>,
package/src/llm/vertexai/index.ts CHANGED
@@ -313,8 +313,8 @@ export class ChatVertexAI extends ChatGoogle {
   lc_namespace = ['langchain', 'chat_models', 'vertexai'];
   dynamicThinkingBudget = false;
 
-  static lc_name(): 'ChatVertexAI' {
-    return 'ChatVertexAI';
+  static lc_name(): 'LibreChatVertexAI' {
+    return 'LibreChatVertexAI';
   }
 
   constructor(fields?: VertexAIClientOptions) {
package/src/messages/prune.ts CHANGED
@@ -389,6 +389,14 @@ export function checkValidNumber(value: unknown): value is number {
   return typeof value === 'number' && !isNaN(value) && value > 0;
 }
 
+type ThinkingBlocks = {
+  thinking_blocks?: Array<{
+    type: 'thinking';
+    thinking: string;
+    signature: string;
+  }>;
+};
+
 export function createPruneMessages(factoryParams: PruneMessagesFactoryParams) {
   const indexTokenCountMap = { ...factoryParams.indexTokenCountMap };
   let lastTurnStartIndex = factoryParams.startIndex;
@@ -402,6 +410,49 @@
     context: BaseMessage[];
     indexTokenCountMap: Record<string, number | undefined>;
   } {
+    if (
+      factoryParams.provider === Providers.OPENAI &&
+      factoryParams.thinkingEnabled === true
+    ) {
+      for (let i = lastTurnStartIndex; i < params.messages.length; i++) {
+        const m = params.messages[i];
+        if (
+          m.getType() === 'ai' &&
+          typeof m.additional_kwargs.reasoning_content === 'string' &&
+          Array.isArray(
+            (
+              m.additional_kwargs.provider_specific_fields as
+                | ThinkingBlocks
+                | undefined
+            )?.thinking_blocks
+          ) &&
+          (m as AIMessage).tool_calls &&
+          ((m as AIMessage).tool_calls?.length ?? 0) > 0
+        ) {
+          const message = m as AIMessage;
+          const thinkingBlocks = (
+            message.additional_kwargs.provider_specific_fields as ThinkingBlocks
+          ).thinking_blocks;
+          const signature =
+            thinkingBlocks?.[thinkingBlocks.length - 1].signature;
+          const thinkingBlock: ThinkingContentText = {
+            signature,
+            type: ContentTypes.THINKING,
+            thinking: message.additional_kwargs.reasoning_content as string,
+          };
+
+          params.messages[i] = new AIMessage({
+            ...message,
+            content: [thinkingBlock],
+            additional_kwargs: {
+              ...message.additional_kwargs,
+              reasoning_content: undefined,
+            },
+          });
+        }
+      }
+    }
+
     let currentUsage: UsageMetadata | undefined;
     if (
       params.usageMetadata &&
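
Note: this block runs before pruning when the OPENAI provider has thinking enabled. An assistant message qualifies when it (a) holds reasoning_content as a string, (b) carries signed thinking_blocks under provider_specific_fields, and (c) made tool calls; it is then rewritten so its content is a single THINKING block reusing the last block's signature, with reasoning_content cleared. A before/after sketch using made-up values:

    import { AIMessage } from '@langchain/core/messages';

    const before = new AIMessage({
      content: '',
      tool_calls: [{ name: 'search', args: {}, id: 'call_1' }],
      additional_kwargs: {
        reasoning_content: 'model thoughts',
        provider_specific_fields: {
          thinking_blocks: [
            { type: 'thinking', thinking: 'model thoughts', signature: 'sig' },
          ],
        },
      },
    });

    // Equivalent to what the loop above produces:
    const after = new AIMessage({
      ...before,
      content: [{ type: 'thinking', thinking: 'model thoughts', signature: 'sig' }],
      additional_kwargs: { ...before.additional_kwargs, reasoning_content: undefined },
    });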
package/src/scripts/tools.ts CHANGED
@@ -127,7 +127,10 @@ async function testStandardStreaming(): Promise<void> {
   const inputs = {
     messages: conversationHistory,
   };
-  const finalContentParts = await run.processStream(inputs, config);
+  const finalContentParts = await run.processStream(inputs, config, {
+    indexTokenCountMap: { 0: 35 },
+    maxContextTokens: 89000,
+  });
   const finalMessages = run.getRunMessages();
   if (finalMessages) {
     conversationHistory.push(...finalMessages);
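
Note: the new third argument to processStream() wires the test script into the pruning path added in prune.ts: indexTokenCountMap seeds per-message token counts ({ 0: 35 } assigns 35 tokens to the first message) and maxContextTokens caps the context the pruner trims to.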
package/src/utils/llmConfig.ts CHANGED
@@ -12,6 +12,30 @@ export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
     streamUsage: true,
     // disableStreaming: true,
   },
+  anthropicLITELLM: {
+    provider: Providers.OPENAI,
+    streaming: true,
+    streamUsage: false,
+    apiKey: 'sk-1234',
+    model: 'claude-sonnet-4',
+    maxTokens: 8192,
+    modelKwargs: {
+      metadata: {
+        user_id: 'some_user_id',
+      },
+      thinking: {
+        type: 'enabled',
+        budget_tokens: 2000,
+      },
+    },
+    configuration: {
+      baseURL: 'http://host.docker.internal:4000/v1',
+      defaultHeaders: {
+        'anthropic-beta': 'prompt-caching-2024-07-31,context-1m-2025-08-07',
+      },
+    },
+    // disableStreaming: true,
+  },
   [Providers.XAI]: {
     provider: Providers.XAI,
     model: 'grok-2-latest',
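
Note: the new anthropicLITELLM entry drives a Claude model through a LiteLLM proxy using the OPENAI provider, with Anthropic thinking enabled via modelKwargs; this is the configuration that exercises the thinking-block plumbing added above (the apiKey and baseURL are local test values). A hedged sketch of constructing a model from such a config, assuming the package's ChatOpenAI accepts these fields like the upstream @langchain/openai class:

    import { ChatOpenAI } from '@librechat/agents'; // export path assumed

    const llm = new ChatOpenAI({
      apiKey: 'sk-1234', // local LiteLLM proxy key from the test config
      model: 'claude-sonnet-4',
      streaming: true,
      maxTokens: 8192,
      modelKwargs: {
        thinking: { type: 'enabled', budget_tokens: 2000 },
      },
      configuration: { baseURL: 'http://host.docker.internal:4000/v1' },
    });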