@librechat/agents 3.1.75-dev.1 → 3.1.76

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/dist/cjs/llm/openai/index.cjs +43 -0
  2. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  3. package/dist/cjs/llm/openai/utils/index.cjs +19 -10
  4. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
  5. package/dist/cjs/messages/format.cjs +67 -10
  6. package/dist/cjs/messages/format.cjs.map +1 -1
  7. package/dist/cjs/tools/search/search.cjs +55 -66
  8. package/dist/cjs/tools/search/search.cjs.map +1 -1
  9. package/dist/cjs/tools/search/tavily-scraper.cjs +189 -0
  10. package/dist/cjs/tools/search/tavily-scraper.cjs.map +1 -0
  11. package/dist/cjs/tools/search/tavily-search.cjs +372 -0
  12. package/dist/cjs/tools/search/tavily-search.cjs.map +1 -0
  13. package/dist/cjs/tools/search/tool.cjs +26 -4
  14. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  15. package/dist/cjs/tools/search/utils.cjs +10 -3
  16. package/dist/cjs/tools/search/utils.cjs.map +1 -1
  17. package/dist/esm/llm/openai/index.mjs +43 -0
  18. package/dist/esm/llm/openai/index.mjs.map +1 -1
  19. package/dist/esm/llm/openai/utils/index.mjs +19 -10
  20. package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
  21. package/dist/esm/messages/format.mjs +67 -10
  22. package/dist/esm/messages/format.mjs.map +1 -1
  23. package/dist/esm/tools/search/search.mjs +55 -66
  24. package/dist/esm/tools/search/search.mjs.map +1 -1
  25. package/dist/esm/tools/search/tavily-scraper.mjs +186 -0
  26. package/dist/esm/tools/search/tavily-scraper.mjs.map +1 -0
  27. package/dist/esm/tools/search/tavily-search.mjs +370 -0
  28. package/dist/esm/tools/search/tavily-search.mjs.map +1 -0
  29. package/dist/esm/tools/search/tool.mjs +26 -4
  30. package/dist/esm/tools/search/tool.mjs.map +1 -1
  31. package/dist/esm/tools/search/utils.mjs +10 -3
  32. package/dist/esm/tools/search/utils.mjs.map +1 -1
  33. package/dist/types/messages/format.d.ts +4 -1
  34. package/dist/types/tools/search/tavily-scraper.d.ts +19 -0
  35. package/dist/types/tools/search/tavily-search.d.ts +4 -0
  36. package/dist/types/tools/search/types.d.ts +99 -5
  37. package/dist/types/tools/search/utils.d.ts +2 -2
  38. package/package.json +1 -1
  39. package/src/llm/custom-chat-models.smoke.test.ts +175 -1
  40. package/src/llm/openai/index.ts +124 -0
  41. package/src/llm/openai/utils/index.ts +23 -14
  42. package/src/llm/openai/utils/messages.test.ts +159 -0
  43. package/src/messages/format.ts +90 -13
  44. package/src/messages/formatAgentMessages.test.ts +166 -1
  45. package/src/tools/search/search.ts +83 -73
  46. package/src/tools/search/tavily-scraper.ts +235 -0
  47. package/src/tools/search/tavily-search.ts +424 -0
  48. package/src/tools/search/tavily.test.ts +965 -0
  49. package/src/tools/search/tool.ts +36 -26
  50. package/src/tools/search/types.ts +134 -11
  51. package/src/tools/search/utils.ts +13 -5
@@ -84,6 +84,9 @@ interface LangChainMessage {
   * @returns - The formatted LangChain message.
   */
  export declare const formatFromLangChain: (message: LangChainMessage) => Record<string, any>;
+ interface FormatAgentMessagesOptions {
+ provider?: Providers;
+ }
  /**
   * Groups content parts by agent and formats them with agent labels
   * This preprocesses multi-agent content to prevent identity confusion
@@ -107,7 +110,7 @@ export declare const labelContentByAgent: (contentParts: MessageContentComplex[]
   * @param skills - Optional map of skill name to body for reconstructing skill HumanMessages.
   * @returns - Object containing formatted messages and updated indexTokenCountMap if provided.
   */
- export declare const formatAgentMessages: (payload: TPayload, indexTokenCountMap?: Record<number, number | undefined>, tools?: Set<string>, skills?: Map<string, string>) => {
+ export declare const formatAgentMessages: (payload: TPayload, indexTokenCountMap?: Record<number, number | undefined>, tools?: Set<string>, skills?: Map<string, string>, options?: FormatAgentMessagesOptions) => {
  messages: Array<HumanMessage | AIMessage | SystemMessage | ToolMessage>;
  indexTokenCountMap?: Record<number, number>;
  /** Cross-run summary extracted from the payload. Should be forwarded to the
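For context on the new optional parameter above: a minimal calling sketch (not part of the published diff), assuming formatAgentMessages, Providers, and TPayload are all reachable from the package's public exports and that Providers.OPENAI is a valid enum member:

import { formatAgentMessages, Providers } from '@librechat/agents';
import type { TPayload } from '@librechat/agents';

declare const payload: TPayload; // an already-built agent payload

// The fifth argument is the new FormatAgentMessagesOptions bag; existing
// four-argument callers keep compiling unchanged.
const { messages, indexTokenCountMap } = formatAgentMessages(
  payload,
  undefined, // indexTokenCountMap
  undefined, // tools
  undefined, // skills
  { provider: Providers.OPENAI },
);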
@@ -0,0 +1,19 @@
+ import type * as t from './types';
+ export declare class TavilyScraper implements t.BaseScraper {
+ private apiKey;
+ private apiUrl;
+ private timeout;
+ private payloadTimeout;
+ private logger;
+ private extractDepth;
+ private includeImages;
+ private includeFavicon;
+ private format;
+ constructor(config?: t.TavilyScraperConfig);
+ scrapeUrl(url: string, options?: t.TavilyScrapeOptions): Promise<[string, t.TavilyScrapeResponse]>;
+ scrapeUrls(urls: string[], options?: t.TavilyScrapeOptions): Promise<Array<[string, t.TavilyScrapeResponse]>>;
+ private extractBatch;
+ extractContent(response: t.TavilyScrapeResponse): [string, undefined | t.References];
+ extractMetadata(response: t.TavilyScrapeResponse): t.GenericScrapeMetadata;
+ }
+ export declare const createTavilyScraper: (config?: t.TavilyScraperConfig) => TavilyScraper;
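A hedged usage sketch for the scraper declared above (not part of the diff); the package-root import path and the TAVILY_API_KEY environment variable are assumptions:

import { createTavilyScraper } from '@librechat/agents';

async function scrapeDocs(): Promise<void> {
  const scraper = createTavilyScraper({
    apiKey: process.env.TAVILY_API_KEY,
    extractDepth: 'advanced',
    format: 'markdown',
  });

  // scrapeUrls batches the extract request; each tuple pairs a URL with its response.
  const results = await scraper.scrapeUrls(['https://www.librechat.ai/docs']);
  for (const [url, response] of results) {
    const [content, references] = scraper.extractContent(response);
    const metadata = scraper.extractMetadata(response);
    console.log(url, content.length, references?.links.length, metadata);
  }
}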
@@ -0,0 +1,4 @@
+ import type * as t from './types';
+ export declare const createTavilyAPI: (apiKey?: string, apiUrl?: string, options?: t.TavilySearchOptions) => {
+ getSources: (params: t.GetSourcesParams) => Promise<t.SearchResult>;
+ };
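A matching sketch for the search factory (again not part of the diff); GetSourcesParams is not shown in this excerpt, so the query field below is an assumption, as is the import path:

import { createTavilyAPI } from '@librechat/agents';

async function searchNews(): Promise<void> {
  const tavily = createTavilyAPI(process.env.TAVILY_API_KEY, undefined, {
    searchDepth: 'advanced',
    topic: 'news',
    maxResults: 5,
  });
  // Hypothetical params shape; only the return type (SearchResult) is declared above.
  const result = await tavily.getSources({ query: 'LibreChat agents Tavily integration' });
  console.log(result);
}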
@@ -2,8 +2,8 @@ import type { Logger as WinstonLogger } from 'winston';
  import type { RunnableConfig } from '@langchain/core/runnables';
  import type { BaseReranker } from './rerankers';
  import { DATE_RANGE } from './schema';
- export type SearchProvider = 'serper' | 'searxng';
- export type ScraperProvider = 'firecrawl' | 'serper';
+ export type SearchProvider = 'serper' | 'searxng' | 'tavily';
+ export type ScraperProvider = 'firecrawl' | 'serper' | 'tavily';
  export type RerankerType = 'infinity' | 'jina' | 'cohere' | 'none';
  export interface Highlight {
  score: number;
@@ -56,11 +56,50 @@ export interface Source {
  snippet?: string;
  date?: string;
  }
+ export type TavilyTimeRange = 'day' | 'week' | 'month' | 'year';
+ export type TavilyTimeRangeInput = TavilyTimeRange | 'h' | 'd' | 'w' | 'm' | 'y';
+ export interface TavilySearchOptions {
+ searchDepth?: 'basic' | 'advanced' | 'fast' | 'ultra-fast';
+ maxResults?: number;
+ includeImages?: boolean;
+ includeAnswer?: boolean | 'basic' | 'advanced';
+ includeRawContent?: boolean | 'markdown' | 'text';
+ includeDomains?: string[];
+ excludeDomains?: string[];
+ topic?: 'general' | 'news' | 'finance';
+ timeRange?: TavilyTimeRangeInput;
+ includeImageDescriptions?: boolean;
+ includeFavicon?: boolean;
+ chunksPerSource?: number;
+ safeSearch?: boolean;
+ timeout?: number;
+ }
+ export interface TavilySearchPayload {
+ query: string;
+ search_depth: NonNullable<TavilySearchOptions['searchDepth']>;
+ topic: NonNullable<TavilySearchOptions['topic']>;
+ max_results: number;
+ safe_search?: boolean;
+ time_range?: TavilyTimeRange;
+ country?: string;
+ include_images?: boolean;
+ include_answer?: NonNullable<TavilySearchOptions['includeAnswer']>;
+ include_raw_content?: NonNullable<TavilySearchOptions['includeRawContent']>;
+ include_domains?: string[];
+ exclude_domains?: string[];
+ include_image_descriptions?: boolean;
+ include_favicon?: boolean;
+ chunks_per_source?: number;
+ }
  export interface SearchConfig {
  searchProvider?: SearchProvider;
  serperApiKey?: string;
  searxngInstanceUrl?: string;
  searxngApiKey?: string;
+ tavilyApiKey?: string;
+ tavilySearchUrl?: string;
+ tavilyExtractUrl?: string;
+ tavilySearchOptions?: TavilySearchOptions;
  }
  export type References = {
  links: MediaReference[];
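The options interface above is camelCase while the wire payload is snake_case; the actual mapping is presumably performed inside the new src/tools/search/tavily-search.ts, which is not shown in this excerpt, so the helper below is only an illustrative sketch of how the two shapes line up:

import type { TavilySearchOptions, TavilySearchPayload } from '@librechat/agents';

function toTavilyPayload(query: string, opts: TavilySearchOptions): TavilySearchPayload {
  return {
    query,
    search_depth: opts.searchDepth ?? 'basic',
    topic: opts.topic ?? 'general',
    max_results: opts.maxResults ?? 5,
    safe_search: opts.safeSearch,
    include_images: opts.includeImages,
    include_answer: opts.includeAnswer,
    include_raw_content: opts.includeRawContent,
    include_domains: opts.includeDomains,
    exclude_domains: opts.excludeDomains,
    include_image_descriptions: opts.includeImageDescriptions,
    include_favicon: opts.includeFavicon,
    chunks_per_source: opts.chunksPerSource,
    // timeRange accepts shorthands ('d', 'w', ...) and would need normalizing
    // to TavilyTimeRange before being sent as time_range; omitted here.
  };
}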
@@ -95,6 +134,16 @@ export interface SerperScraperConfig {
  logger?: Logger;
  includeMarkdown?: boolean;
  }
+ export interface TavilyScraperConfig {
+ apiKey?: string;
+ apiUrl?: string;
+ timeout?: number;
+ logger?: Logger;
+ extractDepth?: 'basic' | 'advanced';
+ includeImages?: boolean;
+ includeFavicon?: boolean;
+ format?: 'markdown' | 'text';
+ }
  export interface ScraperContentResult {
  content: string;
  }
@@ -135,6 +184,7 @@ export interface CohereRerankerResponse {
  export type SafeSearchLevel = 0 | 1 | 2;
  export type Logger = WinstonLogger;
  export interface SearchToolConfig extends SearchConfig, ProcessSourcesConfig, FirecrawlConfig {
+ tavilyScraperOptions?: TavilyScraperConfig;
  logger?: Logger;
  safeSearch?: SafeSearchLevel;
  jinaApiKey?: string;
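Putting the new SearchConfig and SearchToolConfig fields together, a hypothetical Tavily-backed configuration might look like the sketch below; the field names come from the declarations above, while the type's availability from the package root is an assumption:

import type { SearchToolConfig } from '@librechat/agents';

const searchToolConfig: SearchToolConfig = {
  searchProvider: 'tavily',
  tavilyApiKey: process.env.TAVILY_API_KEY,
  tavilySearchOptions: {
    searchDepth: 'advanced',
    maxResults: 5,
    timeRange: 'week',
  },
  tavilyScraperOptions: {
    extractDepth: 'basic',
    format: 'markdown',
  },
  safeSearch: 1,
};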
@@ -157,15 +207,27 @@ export type UsedReferences = {
  originalIndex: number;
  reference: MediaReference;
  }[];
+ export type AnyScraperResponse = FirecrawlScrapeResponse | SerperScrapeResponse | TavilyScrapeResponse;
  /** Base Scraper Interface */
  export interface BaseScraper {
- scrapeUrl(url: string, options?: unknown): Promise<[string, FirecrawlScrapeResponse | SerperScrapeResponse]>;
- extractContent(response: FirecrawlScrapeResponse | SerperScrapeResponse): [string, undefined | References];
- extractMetadata(response: FirecrawlScrapeResponse | SerperScrapeResponse): ScrapeMetadata | Record<string, string | number | boolean | null | undefined>;
+ scrapeUrl(url: string, options?: unknown): Promise<[string, AnyScraperResponse]>;
+ scrapeUrls?(urls: string[], options?: unknown): Promise<Array<[string, AnyScraperResponse]>>;
+ extractContent(response: AnyScraperResponse): [string, undefined | References];
+ extractMetadata(response: AnyScraperResponse): ScrapeMetadata | GenericScrapeMetadata;
  }
  /** Firecrawl */
  export type FirecrawlScrapeOptions = Omit<FirecrawlScraperConfig, 'apiKey' | 'apiUrl' | 'version' | 'logger'>;
  export type SerperScrapeOptions = Omit<SerperScraperConfig, 'apiKey' | 'apiUrl' | 'logger'>;
+ export type TavilyScrapeOptions = Omit<TavilyScraperConfig, 'apiKey' | 'apiUrl' | 'logger'>;
+ export interface TavilyExtractPayload {
+ urls: string[];
+ extract_depth: NonNullable<TavilyScraperConfig['extractDepth']>;
+ include_images: boolean;
+ include_favicon?: boolean;
+ format?: NonNullable<TavilyScraperConfig['format']>;
+ timeout?: number;
+ }
+ export type GenericScrapeMetadata = Record<string, string | number | boolean | null | undefined>;
  export interface ScrapeMetadata {
  sourceURL?: string;
  url?: string;
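scrapeUrls is optional on BaseScraper, so callers that want batching have to fall back to per-URL calls when a scraper does not implement it. A small hypothetical helper illustrating that contract (import path assumed):

import type { BaseScraper, AnyScraperResponse } from '@librechat/agents';

async function scrapeAll(
  scraper: BaseScraper,
  urls: string[],
): Promise<Array<[string, AnyScraperResponse]>> {
  // Prefer the batch method added in this release when the scraper provides it.
  if (scraper.scrapeUrls) {
    return scraper.scrapeUrls(urls);
  }
  // Otherwise fan out to the single-URL method every scraper must implement.
  return Promise.all(urls.map((url) => scraper.scrapeUrl(url)));
}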
@@ -241,6 +303,38 @@ export interface SerperScrapeResponse {
  };
  error?: string;
  }
+ export interface TavilyScrapeResponse {
+ success: boolean;
+ data?: {
+ rawContent?: string;
+ images?: string[];
+ favicon?: string;
+ };
+ error?: string;
+ }
+ export interface TavilySearchResult {
+ title?: string;
+ url?: string;
+ content?: string;
+ score?: number;
+ published_date?: string;
+ }
+ export type TavilyImageResult = string | {
+ url?: string;
+ description?: string;
+ };
+ export interface TavilySearchResponse {
+ answer?: string;
+ images?: TavilyImageResult[];
+ results?: TavilySearchResult[];
+ }
+ export interface TavilyExtractResult {
+ url: string;
+ raw_content?: string;
+ images?: string[];
+ favicon?: string;
+ error?: string;
+ }
  export interface FirecrawlScraperConfig {
  apiKey?: string;
  apiUrl?: string;
@@ -6,5 +6,5 @@ import type * as t from './types';
  */
  export declare const createDefaultLogger: () => t.Logger;
  export declare const fileExtRegex: RegExp;
- export declare const getDomainName: (link: string, metadata?: t.ScrapeMetadata, logger?: t.Logger) => string | undefined;
- export declare function getAttribution(link: string, metadata?: t.ScrapeMetadata, logger?: t.Logger): string | undefined;
+ export declare const getDomainName: (link: string, metadata?: t.ScrapeMetadata | t.GenericScrapeMetadata, logger?: t.Logger) => string | undefined;
+ export declare function getAttribution(link: string, metadata?: t.ScrapeMetadata | t.GenericScrapeMetadata, logger?: t.Logger): string | undefined;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@librechat/agents",
- "version": "3.1.75-dev.1",
+ "version": "3.1.76",
  "main": "./dist/cjs/main.cjs",
  "module": "./dist/esm/main.mjs",
  "types": "./dist/types/index.d.ts",
@@ -1,4 +1,8 @@
- import { AIMessage, AIMessageChunk } from '@langchain/core/messages';
+ import {
+ AIMessage,
+ AIMessageChunk,
+ HumanMessage,
+ } from '@langchain/core/messages';
  import type { OpenAIChatInput, OpenAIClient } from '@langchain/openai';
  import type { ChatOpenRouterCallOptions } from '@/llm/openrouter';
  import type { CustomAnthropicInput } from '@/llm/anthropic';
@@ -69,6 +73,29 @@ type StreamingCompletionDelegate = {
  type StreamingCompletionBackedModel = {
  completions: StreamingCompletionDelegate;
  };
+ type OpenAIStreamEvent = {
+ event: string;
+ data?: unknown;
+ };
+ type OpenAIStreamItem =
+ | OpenAIClient.Chat.Completions.ChatCompletionChunk
+ | OpenAIStreamEvent;
+ type MockableCompletionCreate = (
+ request: unknown,
+ options?: unknown
+ ) => Promise<
+ AsyncIterable<OpenAIStreamItem> | OpenAIClient.Chat.Completions.ChatCompletion
+ >;
+ type MockableCompletionClient = {
+ chat: {
+ completions: {
+ create: MockableCompletionCreate;
+ };
+ };
+ };
+ type MockableCompletionDelegate = OpenAIResponsesDelegate & {
+ client?: MockableCompletionClient;
+ };
  type OpenRouterReasoningStreamDelta =
  OpenAIClient.Chat.Completions.ChatCompletionChunk.Choice.Delta & {
  reasoning_details?: Array<
@@ -93,6 +120,7 @@ type OpenRouterReasoningStreamChoice = Omit<
  > & {
  delta: OpenRouterReasoningStreamDelta;
  };
+ type OpenAIStreamModel = ChatOpenAI | AzureChatOpenAI;

  const baseAzureFields = {
  azureOpenAIApiKey: 'test-azure-key',
@@ -109,6 +137,101 @@ const baseBedrockFields = {
  },
  };

+ const createOpenAIStreamChunk = (
+ content: string,
+ finishReason: OpenAIClient.Chat.Completions.ChatCompletionChunk.Choice['finish_reason'] = null
+ ): OpenAIClient.Chat.Completions.ChatCompletionChunk => ({
+ id: 'chatcmpl-hermes-test',
+ object: 'chat.completion.chunk',
+ created: 0,
+ model: 'hermes-agent',
+ choices: [
+ {
+ index: 0,
+ delta: { content },
+ finish_reason: finishReason,
+ },
+ ],
+ });
+
+ async function* createOpenAIStreamWithCustomEvents(): AsyncGenerator<OpenAIStreamItem> {
+ yield createOpenAIStreamChunk('Hello ');
+ yield {
+ event: 'hermes.tool.progress',
+ data: {
+ tool: 'execute_code',
+ toolCallId: 'call_1',
+ status: 'running',
+ },
+ };
+ yield {
+ event: 'hermes.tool.progress',
+ data: null,
+ };
+ yield {
+ event: 'message',
+ data: createOpenAIStreamChunk('world', 'stop'),
+ };
+ }
+
+ function mockCompletionStream(
+ model: OpenAIStreamModel
+ ): MockableCompletionCreate {
+ const completions = (
+ model as unknown as { completions: MockableCompletionDelegate }
+ ).completions;
+ completions._getClientOptions(undefined);
+ const client = completions.client;
+ if (client == null) {
+ throw new Error('Expected OpenAI completions client');
+ }
+
+ const createMock = jest.fn(async () =>
+ createOpenAIStreamWithCustomEvents()
+ ) as MockableCompletionCreate;
+ client.chat.completions.create = createMock;
+ return createMock;
+ }
+
+ function mockCompletion(
+ model: ChatOpenAI,
+ response: OpenAIClient.Chat.Completions.ChatCompletion
+ ): MockableCompletionCreate {
+ const completions = (
+ model as unknown as { completions: MockableCompletionDelegate }
+ ).completions;
+ completions._getClientOptions(undefined);
+ const client = completions.client;
+ if (client == null) {
+ throw new Error('Expected OpenAI completions client');
+ }
+
+ const createMock = jest.fn(async () => response) as MockableCompletionCreate;
+ client.chat.completions.create = createMock;
+ return createMock;
+ }
+
+ async function expectCustomSSEEventsSkipped(
+ model: OpenAIStreamModel
+ ): Promise<void> {
+ const createMock = mockCompletionStream(model);
+ const chunks: AIMessageChunk[] = [];
+ const stream = await model.stream([new HumanMessage('use a tool')]);
+ for await (const chunk of stream) {
+ chunks.push(chunk);
+ }
+
+ const text = chunks
+ .map((chunk) => (typeof chunk.content === 'string' ? chunk.content : ''))
+ .join('');
+ expect(chunks).toHaveLength(2);
+ expect(text).toBe('Hello world');
+ expect(createMock).toHaveBeenCalledWith(
+ expect.objectContaining({ stream: true }),
+ expect.any(Object)
+ );
+ }
+
  describe('custom chat model class smoke tests', () => {
  it('keeps the custom OpenAI client, stream delay, and reasoning precedence', () => {
  const model = new ChatOpenAI({
@@ -252,6 +375,57 @@ describe('custom chat model class smoke tests', () => {
  expect(xaiRequestOptions.baseURL).toBe('https://xai.test/v1');
  });

+ it('skips custom OpenAI-compatible SSE events during OpenAI streaming', async () => {
+ await expectCustomSSEEventsSkipped(
+ new ChatOpenAI({
+ model: 'hermes-agent',
+ apiKey: 'test-key',
+ streaming: true,
+ })
+ );
+ });
+
+ it('skips custom OpenAI-compatible SSE events during Azure streaming', async () => {
+ await expectCustomSSEEventsSkipped(
+ new AzureChatOpenAI({
+ ...baseAzureFields,
+ })
+ );
+ });
+
+ it('passes non-streaming OpenAI completions through unchanged', async () => {
+ const model = new ChatOpenAI({
+ model: 'hermes-agent',
+ apiKey: 'test-key',
+ });
+ const createMock = mockCompletion(model, {
+ id: 'chatcmpl-nonstream-test',
+ object: 'chat.completion',
+ created: 0,
+ model: 'hermes-agent',
+ choices: [
+ {
+ index: 0,
+ finish_reason: 'stop',
+ logprobs: null,
+ message: {
+ role: 'assistant',
+ content: 'plain response',
+ refusal: null,
+ },
+ },
+ ],
+ });
+
+ const response = await model.invoke([new HumanMessage('no stream')]);
+
+ expect(response.content).toBe('plain response');
+ expect(createMock).toHaveBeenCalledWith(
+ expect.objectContaining({ stream: false }),
+ expect.any(Object)
+ );
+ });
+
  it('keeps Moonshot reasoning content in completion requests', async () => {
  const moonshot = new ChatMoonshot({
  model: 'moonshot-v1-8k',
@@ -119,6 +119,27 @@ type OpenAIClientDelegate = {
  options: OpenAICoreRequestOptions | undefined
  ): OpenAICoreRequestOptions;
  };
+ type OpenAIChatCompletion = OpenAIClient.Chat.Completions.ChatCompletion;
+ type OpenAIChatCompletionChunk =
+ OpenAIClient.Chat.Completions.ChatCompletionChunk;
+ type OpenAIChatCompletionStreamItem =
+ | OpenAIChatCompletionChunk
+ | {
+ event: string;
+ data?: unknown;
+ };
+ type OpenAIChatCompletionRequest =
+ | OpenAIClient.Chat.ChatCompletionCreateParamsStreaming
+ | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming;
+ type OpenAIChatCompletionResult =
+ | AsyncIterable<OpenAIChatCompletionChunk>
+ | OpenAIChatCompletion;
+ type OpenAIChatCompletionRetry = (
+ request: OpenAIChatCompletionRequest,
+ requestOptions?: OpenAICoreRequestOptions
+ ) => Promise<
+ AsyncIterable<OpenAIChatCompletionStreamItem> | OpenAIChatCompletion
+ >;

  function getExposedOpenAIClient(
  completions: OpenAIClientDelegate,
@@ -179,6 +200,67 @@ function getGatedReasoningParams(
  return getReasoningParams(baseReasoning, options);
  }

+ function isObject(value: unknown): value is object {
+ return typeof value === 'object' && value !== null;
+ }
+
+ function isOpenAIChatCompletionChunk(
+ value: unknown
+ ): value is OpenAIChatCompletionChunk {
+ if (!isObject(value)) {
+ return false;
+ }
+
+ // Intentionally loose: downstream handlers already tolerate empty choices.
+ const { choices } = value as { choices?: unknown };
+ return Array.isArray(choices);
+ }
+
+ function getOpenAIChatCompletionChunk(
+ value: OpenAIChatCompletionStreamItem
+ ): OpenAIChatCompletionChunk | undefined {
+ if (isOpenAIChatCompletionChunk(value)) {
+ return value;
+ }
+
+ const { data } = value;
+ if (isOpenAIChatCompletionChunk(data)) {
+ return data;
+ }
+
+ return undefined;
+ }
+
+ async function* filterOpenAIChatCompletionStream(
+ stream: AsyncIterable<OpenAIChatCompletionStreamItem>
+ ): AsyncGenerator<OpenAIChatCompletionChunk> {
+ for await (const item of stream) {
+ const chunk = getOpenAIChatCompletionChunk(item);
+ if (chunk == null) {
+ continue;
+ }
+ yield chunk;
+ }
+ }
+
+ async function completionWithFilteredOpenAIStream(
+ request: OpenAIChatCompletionRequest,
+ requestOptions: OpenAICoreRequestOptions | undefined,
+ completionWithRetry: OpenAIChatCompletionRetry
+ ): Promise<OpenAIChatCompletionResult> {
+ if (request.stream !== true) {
+ return (await completionWithRetry(
+ request,
+ requestOptions
+ )) as OpenAIChatCompletion;
+ }
+
+ const stream = await completionWithRetry(request, requestOptions);
+ return filterOpenAIChatCompletionStream(
+ stream as AsyncIterable<OpenAIChatCompletionStreamItem>
+ );
+ }
+
  function attachLibreChatDeltaFields(
  chunk: BaseMessageChunk,
  delta: Record<string, unknown>
@@ -406,6 +488,27 @@ class LibreChatOpenAICompletions extends OriginalChatOpenAICompletions {
  return getCustomOpenAIClientOptions(this, options);
  }

+ async completionWithRetry(
+ request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
+ requestOptions?: OpenAICoreRequestOptions
+ ): Promise<AsyncIterable<OpenAIChatCompletionChunk>>;
+ async completionWithRetry(
+ request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
+ requestOptions?: OpenAICoreRequestOptions
+ ): Promise<OpenAIChatCompletion>;
+ async completionWithRetry(
+ request:
+ | OpenAIClient.Chat.ChatCompletionCreateParamsStreaming
+ | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
+ requestOptions?: OpenAICoreRequestOptions
+ ): Promise<AsyncIterable<OpenAIChatCompletionChunk> | OpenAIChatCompletion> {
+ return completionWithFilteredOpenAIStream(
+ request,
+ requestOptions,
+ super.completionWithRetry.bind(this) as OpenAIChatCompletionRetry
+ );
+ }
+
  protected _convertCompletionsDeltaToBaseMessageChunk(
  delta: Record<string, unknown>,
  rawResponse: OpenAIClient.Chat.Completions.ChatCompletionChunk,
@@ -829,6 +932,27 @@ class LibreChatAzureOpenAICompletions extends OriginalAzureChatOpenAICompletions
  }
  return requestOptions;
  }
+
+ async completionWithRetry(
+ request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
+ requestOptions?: OpenAICoreRequestOptions
+ ): Promise<AsyncIterable<OpenAIChatCompletionChunk>>;
+ async completionWithRetry(
+ request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
+ requestOptions?: OpenAICoreRequestOptions
+ ): Promise<OpenAIChatCompletion>;
+ async completionWithRetry(
+ request:
+ | OpenAIClient.Chat.ChatCompletionCreateParamsStreaming
+ | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
+ requestOptions?: OpenAICoreRequestOptions
+ ): Promise<AsyncIterable<OpenAIChatCompletionChunk> | OpenAIChatCompletion> {
+ return completionWithFilteredOpenAIStream(
+ request,
+ requestOptions,
+ super.completionWithRetry.bind(this) as OpenAIChatCompletionRetry
+ );
+ }
  }

  class LibreChatAzureOpenAIResponses extends OriginalAzureChatOpenAIResponses {
@@ -303,6 +303,7 @@ export function _convertMessagesToOpenAIParams(
  model?: string,
  options?: ConvertMessagesOptions
  ): OpenAICompletionParam[] {
+ let hasReasoningToolCallContext = false;
  // TODO: Function messages do not support array content, fix cast
  return messages.flatMap((message) => {
  let role = messageToOpenAIRole(message);
@@ -333,6 +334,8 @@
  role,
  content,
  };
+ let messageHasToolCalls = false;
+ let messageIsToolResult = false;
  if (message.name != null) {
  completionParam.name = message.name;
  }
@@ -341,17 +344,11 @@
  completionParam.content = '';
  }
  if (isAIMessage(message) && !!message.tool_calls?.length) {
+ messageHasToolCalls = true;
  completionParam.tool_calls = message.tool_calls.map(
  convertLangChainToolCallToOpenAI
  );
  completionParam.content = hasAnthropicThinkingBlock ? content : '';
- if (
- options?.includeReasoningContent === true &&
- message.additional_kwargs.reasoning_content != null
- ) {
- completionParam.reasoning_content =
- message.additional_kwargs.reasoning_content;
- }
  if (
  options?.includeReasoningDetails === true &&
  message.additional_kwargs.reasoning_details != null
@@ -399,14 +396,10 @@
  }
  } else {
  if (message.additional_kwargs.tool_calls != null) {
+ messageHasToolCalls =
+ !Array.isArray(message.additional_kwargs.tool_calls) ||
+ message.additional_kwargs.tool_calls.length > 0;
  completionParam.tool_calls = message.additional_kwargs.tool_calls;
- if (
- options?.includeReasoningContent === true &&
- message.additional_kwargs.reasoning_content != null
- ) {
- completionParam.reasoning_content =
- message.additional_kwargs.reasoning_content;
- }
  if (
  options?.includeReasoningDetails === true &&
  message.additional_kwargs.reasoning_details != null
@@ -454,10 +447,26 @@
  }
  }
  if ((message as ToolMessage).tool_call_id != null) {
+ messageIsToolResult = true;
  completionParam.tool_call_id = (message as ToolMessage).tool_call_id;
  }
  }

+ if (
+ options?.includeReasoningContent === true &&
+ isAIMessage(message) &&
+ (hasReasoningToolCallContext || messageHasToolCalls) &&
+ typeof message.additional_kwargs.reasoning_content === 'string' &&
+ message.additional_kwargs.reasoning_content !== ''
+ ) {
+ completionParam.reasoning_content =
+ message.additional_kwargs.reasoning_content;
+ }
+
+ if (messageHasToolCalls || messageIsToolResult) {
+ hasReasoningToolCallContext = true;
+ }
+
  if (
  message.additional_kwargs.audio &&
  typeof message.additional_kwargs.audio === 'object' &&
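The hunks above move reasoning_content forwarding behind a tool-call check: with includeReasoningContent enabled, the field is now attached only to AI messages that carry tool calls or that appear after a tool call or tool result earlier in the payload. A hedged illustration, assuming _convertMessagesToOpenAIParams is reachable from the package exports and using an arbitrary model name:

import { AIMessage } from '@langchain/core/messages';
import { _convertMessagesToOpenAIParams } from '@librechat/agents'; // assumed export path

const greeting = new AIMessage({
  content: 'Hello!',
  additional_kwargs: { reasoning_content: 'simple greeting, no tools needed' },
});
const toolCall = new AIMessage({
  content: '',
  tool_calls: [{ id: 'call_1', name: 'web_search', args: { query: 'tavily' } }],
  additional_kwargs: { reasoning_content: 'need to search before answering' },
});

// greeting comes first with no tool-call context, so its reasoning_content is
// dropped; toolCall has tool calls, so its reasoning_content is forwarded.
const params = _convertMessagesToOpenAIParams(
  [greeting, toolCall],
  'deepseek-reasoner',
  { includeReasoningContent: true },
);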