@librechat/agents 3.1.73 → 3.1.75-dev.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +66 -0
- package/dist/cjs/agents/AgentContext.cjs +146 -57
- package/dist/cjs/agents/AgentContext.cjs.map +1 -1
- package/dist/cjs/graphs/Graph.cjs +13 -3
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/index.cjs +145 -52
- package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +25 -15
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +84 -70
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
- package/dist/cjs/llm/bedrock/index.cjs +1 -1
- package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
- package/dist/cjs/llm/bedrock/utils/message_inputs.cjs +213 -3
- package/dist/cjs/llm/bedrock/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/bedrock/utils/message_outputs.cjs +2 -1
- package/dist/cjs/llm/bedrock/utils/message_outputs.cjs.map +1 -1
- package/dist/cjs/llm/google/utils/common.cjs +5 -4
- package/dist/cjs/llm/google/utils/common.cjs.map +1 -1
- package/dist/cjs/llm/openai/index.cjs +468 -647
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/cjs/llm/openai/utils/index.cjs +1 -448
- package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
- package/dist/cjs/llm/openrouter/index.cjs +57 -175
- package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
- package/dist/cjs/llm/vertexai/index.cjs +5 -3
- package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
- package/dist/cjs/main.cjs +1 -0
- package/dist/cjs/main.cjs.map +1 -1
- package/dist/cjs/messages/cache.cjs +39 -4
- package/dist/cjs/messages/cache.cjs.map +1 -1
- package/dist/cjs/messages/core.cjs +7 -6
- package/dist/cjs/messages/core.cjs.map +1 -1
- package/dist/cjs/messages/format.cjs +7 -6
- package/dist/cjs/messages/format.cjs.map +1 -1
- package/dist/cjs/messages/langchain.cjs +26 -0
- package/dist/cjs/messages/langchain.cjs.map +1 -0
- package/dist/cjs/messages/prune.cjs +7 -6
- package/dist/cjs/messages/prune.cjs.map +1 -1
- package/dist/cjs/tools/BashExecutor.cjs +21 -11
- package/dist/cjs/tools/BashExecutor.cjs.map +1 -1
- package/dist/cjs/tools/CodeExecutor.cjs +37 -10
- package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
- package/dist/cjs/tools/ProgrammaticToolCalling.cjs +16 -11
- package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -1
- package/dist/cjs/tools/ToolNode.cjs +5 -1
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/esm/agents/AgentContext.mjs +147 -58
- package/dist/esm/agents/AgentContext.mjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +13 -3
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/llm/anthropic/index.mjs +146 -54
- package/dist/esm/llm/anthropic/index.mjs.map +1 -1
- package/dist/esm/llm/anthropic/types.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs +25 -15
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs +84 -71
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
- package/dist/esm/llm/bedrock/index.mjs +1 -1
- package/dist/esm/llm/bedrock/index.mjs.map +1 -1
- package/dist/esm/llm/bedrock/utils/message_inputs.mjs +214 -4
- package/dist/esm/llm/bedrock/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/bedrock/utils/message_outputs.mjs +2 -1
- package/dist/esm/llm/bedrock/utils/message_outputs.mjs.map +1 -1
- package/dist/esm/llm/google/utils/common.mjs +5 -4
- package/dist/esm/llm/google/utils/common.mjs.map +1 -1
- package/dist/esm/llm/openai/index.mjs +469 -648
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/esm/llm/openai/utils/index.mjs +4 -449
- package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
- package/dist/esm/llm/openrouter/index.mjs +57 -175
- package/dist/esm/llm/openrouter/index.mjs.map +1 -1
- package/dist/esm/llm/vertexai/index.mjs +5 -3
- package/dist/esm/llm/vertexai/index.mjs.map +1 -1
- package/dist/esm/main.mjs +1 -1
- package/dist/esm/messages/cache.mjs +39 -4
- package/dist/esm/messages/cache.mjs.map +1 -1
- package/dist/esm/messages/core.mjs +7 -6
- package/dist/esm/messages/core.mjs.map +1 -1
- package/dist/esm/messages/format.mjs +7 -6
- package/dist/esm/messages/format.mjs.map +1 -1
- package/dist/esm/messages/langchain.mjs +23 -0
- package/dist/esm/messages/langchain.mjs.map +1 -0
- package/dist/esm/messages/prune.mjs +7 -6
- package/dist/esm/messages/prune.mjs.map +1 -1
- package/dist/esm/tools/BashExecutor.mjs +22 -12
- package/dist/esm/tools/BashExecutor.mjs.map +1 -1
- package/dist/esm/tools/CodeExecutor.mjs +37 -11
- package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
- package/dist/esm/tools/ProgrammaticToolCalling.mjs +17 -12
- package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -1
- package/dist/esm/tools/ToolNode.mjs +5 -1
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/types/agents/AgentContext.d.ts +29 -4
- package/dist/types/agents/__tests__/promptCacheLiveHelpers.d.ts +46 -0
- package/dist/types/llm/anthropic/index.d.ts +22 -9
- package/dist/types/llm/anthropic/types.d.ts +5 -1
- package/dist/types/llm/anthropic/utils/message_outputs.d.ts +13 -6
- package/dist/types/llm/anthropic/utils/output_parsers.d.ts +1 -1
- package/dist/types/llm/openai/index.d.ts +21 -24
- package/dist/types/llm/openrouter/index.d.ts +11 -9
- package/dist/types/llm/vertexai/index.d.ts +1 -0
- package/dist/types/messages/cache.d.ts +4 -1
- package/dist/types/messages/langchain.d.ts +27 -0
- package/dist/types/tools/CodeExecutor.d.ts +6 -0
- package/dist/types/types/graph.d.ts +26 -38
- package/dist/types/types/llm.d.ts +3 -3
- package/dist/types/types/run.d.ts +2 -0
- package/dist/types/types/stream.d.ts +1 -1
- package/dist/types/types/tools.d.ts +9 -0
- package/package.json +17 -16
- package/src/agents/AgentContext.ts +189 -71
- package/src/agents/__tests__/AgentContext.anthropic.live.test.ts +116 -0
- package/src/agents/__tests__/AgentContext.bedrock.live.test.ts +149 -0
- package/src/agents/__tests__/AgentContext.test.ts +333 -2
- package/src/agents/__tests__/promptCacheLiveHelpers.ts +165 -0
- package/src/graphs/Graph.ts +24 -4
- package/src/graphs/__tests__/composition.smoke.test.ts +188 -0
- package/src/llm/anthropic/index.ts +252 -84
- package/src/llm/anthropic/llm.spec.ts +751 -102
- package/src/llm/anthropic/types.ts +9 -1
- package/src/llm/anthropic/utils/message_inputs.ts +43 -20
- package/src/llm/anthropic/utils/message_outputs.ts +119 -101
- package/src/llm/anthropic/utils/server-tool-inputs.test.ts +77 -0
- package/src/llm/bedrock/index.ts +2 -2
- package/src/llm/bedrock/llm.spec.ts +341 -0
- package/src/llm/bedrock/utils/message_inputs.ts +303 -4
- package/src/llm/bedrock/utils/message_outputs.ts +2 -1
- package/src/llm/custom-chat-models.smoke.test.ts +662 -0
- package/src/llm/google/llm.spec.ts +339 -57
- package/src/llm/google/utils/common.ts +53 -48
- package/src/llm/openai/contentBlocks.test.ts +346 -0
- package/src/llm/openai/index.ts +736 -837
- package/src/llm/openai/utils/index.ts +84 -64
- package/src/llm/openrouter/index.ts +124 -247
- package/src/llm/openrouter/reasoning.test.ts +8 -1
- package/src/llm/vertexai/index.ts +11 -5
- package/src/llm/vertexai/llm.spec.ts +28 -1
- package/src/messages/cache.test.ts +106 -4
- package/src/messages/cache.ts +57 -5
- package/src/messages/core.ts +16 -9
- package/src/messages/format.ts +9 -6
- package/src/messages/langchain.ts +39 -0
- package/src/messages/prune.ts +12 -8
- package/src/scripts/caching.ts +2 -3
- package/src/specs/anthropic.simple.test.ts +61 -0
- package/src/specs/summarization.test.ts +58 -61
- package/src/tools/BashExecutor.ts +37 -13
- package/src/tools/CodeExecutor.ts +55 -11
- package/src/tools/ProgrammaticToolCalling.ts +29 -14
- package/src/tools/ToolNode.ts +5 -1
- package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +60 -0
- package/src/types/graph.ts +35 -88
- package/src/types/llm.ts +3 -3
- package/src/types/run.ts +2 -0
- package/src/types/stream.ts +1 -1
- package/src/types/tools.ts +9 -0
- package/src/utils/llmConfig.ts +1 -6

package/dist/types/llm/anthropic/index.d.ts

@@ -4,18 +4,31 @@ import type { BaseChatModelParams } from '@langchain/core/language_models/chat_m
 import type { BaseMessage, UsageMetadata } from '@langchain/core/messages';
 import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
 import type { AnthropicInput } from '@langchain/anthropic';
-import type {
+import type { Anthropic } from '@anthropic-ai/sdk';
+import type { AnthropicMessageCreateParams, AnthropicStreamingMessageCreateParams, AnthropicOutputConfig, AnthropicBeta, AnthropicMCPServerURLDefinition, AnthropicContextManagementConfigParam } from '@/llm/anthropic/types';
+export declare function _documentsInParams(params: AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams): boolean;
 export type CustomAnthropicInput = AnthropicInput & {
     _lc_stream_delay?: number;
     outputConfig?: AnthropicOutputConfig;
     inferenceGeo?: string;
-    contextManagement?:
+    contextManagement?: AnthropicContextManagementConfigParam;
 } & BaseChatModelParams;
-
-
-
-
-
+export type CustomAnthropicCallOptions = {
+    outputConfig?: AnthropicOutputConfig;
+    outputFormat?: Anthropic.Messages.JSONOutputFormat;
+    inferenceGeo?: string;
+    betas?: AnthropicBeta[];
+    container?: string;
+    mcp_servers?: AnthropicMCPServerURLDefinition[];
+};
+type CustomAnthropicInvocationParams = {
+    betas?: AnthropicBeta[];
+    container?: string;
+    context_management?: AnthropicContextManagementConfigParam;
+    inference_geo?: string;
+    mcp_servers?: AnthropicMCPServerURLDefinition[];
+    output_config?: AnthropicOutputConfig;
+};
 export declare class CustomAnthropic extends ChatAnthropicMessages {
     _lc_stream_delay: number;
     private message_start;
@@ -25,13 +38,13 @@ export declare class CustomAnthropic extends ChatAnthropicMessages {
     top_k: number | undefined;
     outputConfig?: AnthropicOutputConfig;
     inferenceGeo?: string;
-    contextManagement?:
+    contextManagement?: AnthropicContextManagementConfigParam;
     constructor(fields?: CustomAnthropicInput);
     static lc_name(): 'LibreChatAnthropic';
     /**
      * Get the parameters used to invoke the model
      */
-    invocationParams(options?: this['ParsedCallOptions']): Omit<AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams, 'messages'> &
+    invocationParams(options?: this['ParsedCallOptions'] & CustomAnthropicCallOptions): Omit<AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams, 'messages'> & CustomAnthropicInvocationParams;
     /**
      * Get stream usage as returned by this client's API response.
      * @returns The stream usage object.
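
A minimal construction sketch for the reworked Anthropic typings above (the `CustomAnthropic` export path, model name, and option values are assumptions for illustration, not confirmed by this diff):

```typescript
// Sketch only: export path and values are assumed for illustration.
import { CustomAnthropic } from '@librechat/agents';

const model = new CustomAnthropic({
  model: 'claude-sonnet-4-5',
  inferenceGeo: 'us', // plain string field on CustomAnthropicInput
  _lc_stream_delay: 10, // stream-delay knob carried on the class
});
// `contextManagement` is now typed as AnthropicContextManagementConfigParam
// (Anthropic.Beta.BetaContextManagementConfig) instead of a loose inline type,
// and per-call fields such as `betas`, `container`, and `mcp_servers` flow
// through `invocationParams` via CustomAnthropicCallOptions.
```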

package/dist/types/llm/anthropic/types.d.ts

@@ -1,5 +1,7 @@
 import Anthropic from '@anthropic-ai/sdk';
-import { BindToolsInput } from '@langchain/core/language_models/chat_models';
+import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
+import type { AnthropicBeta } from '@anthropic-ai/sdk/resources';
+export type { AnthropicBeta };
 export type AnthropicStreamUsage = Anthropic.Usage;
 export type AnthropicMessageDeltaEvent = Anthropic.MessageDeltaEvent;
 export type AnthropicMessageStartEvent = Anthropic.MessageStartEvent;
@@ -14,8 +16,10 @@ export type AnthropicMessageResponse = Anthropic.ContentBlock | AnthropicToolRes
 export type AnthropicMessageCreateParams = Anthropic.MessageCreateParamsNonStreaming;
 export type AnthropicStreamingMessageCreateParams = Anthropic.MessageCreateParamsStreaming;
 export type AnthropicThinkingConfigParam = Anthropic.ThinkingConfigParam;
+export type AnthropicContextManagementConfigParam = Anthropic.Beta.BetaContextManagementConfig;
 export type AnthropicMessageStreamEvent = Anthropic.MessageStreamEvent;
 export type AnthropicRequestOptions = Anthropic.RequestOptions;
+export type AnthropicMCPServerURLDefinition = Anthropic.Beta.Messages.BetaRequestMCPServerURLDefinition;
 export type AnthropicToolChoice = {
     type: 'tool';
     name: string;

package/dist/types/llm/anthropic/utils/message_outputs.d.ts

@@ -1,10 +1,16 @@
-/**
- * This util file contains functions for converting Anthropic messages to LangChain messages.
- */
-import Anthropic from '@anthropic-ai/sdk';
+/** This util file contains functions for converting Anthropic messages to LangChain messages. */
 import { AIMessageChunk } from '@langchain/core/messages';
-import
-import {
+import type Anthropic from '@anthropic-ai/sdk';
+import type { UsageMetadata } from '@langchain/core/messages';
+import type { ChatGeneration } from '@langchain/core/outputs';
+import type { AnthropicMessageResponse } from '../types';
+interface AnthropicUsageData {
+    input_tokens?: number | null;
+    output_tokens?: number | null;
+    cache_creation_input_tokens?: number | null;
+    cache_read_input_tokens?: number | null;
+}
+export declare function getAnthropicUsageMetadata(usage: AnthropicUsageData | null | undefined): UsageMetadata | undefined;
 export declare function _makeMessageChunkFromAnthropicEvent(data: Anthropic.Beta.Messages.BetaRawMessageStreamEvent, fields: {
     streamUsage: boolean;
     coerceContentToString: boolean;
@@ -12,3 +18,4 @@ export declare function _makeMessageChunkFromAnthropicEvent(data: Anthropic.Beta
     chunk: AIMessageChunk;
 } | null;
 export declare function anthropicResponseToChatMessages(messages: AnthropicMessageResponse[], additionalKwargs: Record<string, unknown>): ChatGeneration[];
+export {};

package/dist/types/llm/anthropic/utils/output_parsers.d.ts

@@ -18,5 +18,5 @@ export declare class AnthropicToolsOutputParser<T extends Record<string, any> =
     protected _validateResult(result: unknown): Promise<T>;
     parseResult(generations: ChatGeneration[]): Promise<T>;
 }
-export declare function extractToolCalls(content: Record<string, any>[]): ToolCall[];
+export declare function extractToolCalls(content: Record<string, any>[]): ToolCall<string, Record<string, any>>[];
 export {};

package/dist/types/llm/openai/index.d.ts

@@ -1,20 +1,26 @@
 import { AzureOpenAI as AzureOpenAIClient } from 'openai';
 import { ChatXAI as OriginalChatXAI } from '@langchain/xai';
 import { ChatGenerationChunk } from '@langchain/core/outputs';
-import { AIMessage } from '@langchain/core/messages';
 import { ChatDeepSeek as OriginalChatDeepSeek } from '@langchain/deepseek';
 import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
 import { OpenAIClient, ChatOpenAI as OriginalChatOpenAI, AzureChatOpenAI as OriginalAzureChatOpenAI } from '@langchain/openai';
 import type { HeaderValue, HeadersLike } from './types';
-import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
 import type { BaseMessage } from '@langchain/core/messages';
-import type {
+import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
 import type { ChatXAIInput } from '@langchain/xai';
 import type * as t from '@langchain/openai';
 export declare function isHeaders(headers: unknown): headers is Headers;
 export declare function normalizeHeaders(headers: HeadersLike): Record<string, HeaderValue | readonly HeaderValue[]>;
-type OpenAICompletionParam = OpenAIClient.Chat.Completions.ChatCompletionMessageParam;
 type OpenAICoreRequestOptions = OpenAIClient.RequestOptions;
+type LibreChatOpenAIFields = t.ChatOpenAIFields & {
+    _lc_stream_delay?: number;
+    includeReasoningContent?: boolean;
+    includeReasoningDetails?: boolean;
+    convertReasoningDetailsToContent?: boolean;
+};
+type LibreChatAzureOpenAIFields = t.AzureOpenAIInput & {
+    _lc_stream_delay?: number;
+};
 /**
  * Formats a tool in either OpenAI format, or LangChain structured tool format
  * into an OpenAI tool format. If the tool is already in OpenAI format, return without
@@ -41,15 +47,12 @@ export declare class CustomAzureOpenAIClient extends AzureOpenAIClient {
     abortHandler?: () => void;
     fetchWithTimeout(url: RequestInfo, init: RequestInit | undefined, ms: number, controller: AbortController): Promise<Response>;
 }
-/** @ts-expect-error We are intentionally overriding `getReasoningParams` */
 export declare class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
     _lc_stream_delay?: number;
-    constructor(fields?:
-        _lc_stream_delay?: number;
-    } & t.OpenAIChatInput['modelKwargs']);
+    constructor(fields?: LibreChatOpenAIFields & t.OpenAIChatInput['modelKwargs']);
     get exposedClient(): CustomOpenAIClient;
     static lc_name(): string;
-
+    _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
     /**
      * Returns backwards compatible reasoning parameters from constructor params and call options
      * @internal
@@ -57,14 +60,10 @@ export declare class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptio
     getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
     protected _getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
     _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
-    _streamResponseChunks2(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
-/** @ts-expect-error We are intentionally overriding `getReasoningParams` */
 export declare class AzureChatOpenAI extends OriginalAzureChatOpenAI {
     _lc_stream_delay?: number;
-    constructor(fields?:
-        _lc_stream_delay?: number;
-    });
+    constructor(fields?: LibreChatAzureOpenAIFields);
     get exposedClient(): CustomOpenAIClient;
     static lc_name(): 'LibreChatAzureOpenAI';
     /**
@@ -73,16 +72,17 @@ export declare class AzureChatOpenAI extends OriginalAzureChatOpenAI {
      */
     getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
     protected _getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
-
+    _getClientOptions(options: OpenAICoreRequestOptions | undefined): OpenAICoreRequestOptions;
     _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
 export declare class ChatDeepSeek extends OriginalChatDeepSeek {
+    _lc_stream_delay?: number;
+    constructor(fields?: ConstructorParameters<typeof OriginalChatDeepSeek>[0] & {
+        _lc_stream_delay?: number;
+    });
     get exposedClient(): CustomOpenAIClient;
     static lc_name(): 'LibreChatDeepSeek';
-
-    _generate(messages: BaseMessage[], options: this['ParsedCallOptions'] | undefined, runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
-    protected _convertResponseToMessage(choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice, data: OpenAIClient.Chat.Completions.ChatCompletion): AIMessage;
-    protected _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
+    _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
     _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
 /** xAI-specific usage metadata type */
@@ -102,11 +102,8 @@ export interface XAIUsageMetadata extends OpenAIClient.Completions.CompletionUsa
     num_sources_used?: number;
 }
 export declare class ChatMoonshot extends ChatOpenAI {
+    constructor(fields?: LibreChatOpenAIFields & t.OpenAIChatInput['modelKwargs']);
     static lc_name(): 'LibreChatMoonshot';
-    protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[];
-    _generate(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
-    protected _convertResponseToMessage(choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice, data: OpenAIClient.Chat.Completions.ChatCompletion): AIMessage;
-    _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
 export declare class ChatXAI extends OriginalChatXAI {
     _lc_stream_delay?: number;
@@ -121,7 +118,7 @@ export declare class ChatXAI extends OriginalChatXAI {
     });
     static lc_name(): 'LibreChatXAI';
     get exposedClient(): CustomOpenAIClient;
-
+    _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
     _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
 export {};

package/dist/types/llm/openrouter/index.d.ts

@@ -1,7 +1,7 @@
 import { ChatOpenAI } from '@/llm/openai';
-import {
-import {
-import type {
+import type { BaseMessage } from '@langchain/core/messages';
+import type { ChatGenerationChunk } from '@langchain/core/outputs';
+import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
 import type { ChatOpenAICallOptions, OpenAIChatInput, OpenAIClient } from '@langchain/openai';
 export type OpenRouterReasoningEffort = 'xhigh' | 'high' | 'medium' | 'low' | 'minimal' | 'none';
 export interface OpenRouterReasoning {
@@ -16,20 +16,22 @@ export interface ChatOpenRouterCallOptions extends Omit<ChatOpenAICallOptions, '
     reasoning?: OpenRouterReasoning;
     modelKwargs?: OpenAIChatInput['modelKwargs'];
 }
+export type ChatOpenRouterInput = Partial<ChatOpenRouterCallOptions & OpenAIChatInput>;
 /** invocationParams return type extended with OpenRouter reasoning */
 export type OpenRouterInvocationParams = Omit<OpenAIClient.Chat.ChatCompletionCreateParams, 'messages'> & {
     reasoning?: OpenRouterReasoning;
 };
+type InvocationParamsExtra = {
+    streaming?: boolean;
+};
 export declare class ChatOpenRouter extends ChatOpenAI {
     private openRouterReasoning?;
     /** @deprecated Use `reasoning` object instead */
     private includeReasoning?;
-    constructor(_fields:
+    constructor(_fields: ChatOpenRouterInput);
     static lc_name(): 'LibreChatOpenRouter';
-    invocationParams(options?: this['ParsedCallOptions'], extra?:
-        streaming?: boolean;
-    }): OpenRouterInvocationParams;
+    invocationParams(options?: this['ParsedCallOptions'], extra?: InvocationParamsExtra): OpenRouterInvocationParams;
     private buildOpenRouterReasoning;
-
-    _streamResponseChunks2(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+    _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
+export {};

package/dist/types/llm/vertexai/index.d.ts

@@ -288,6 +288,7 @@ export declare class ChatVertexAI extends ChatGoogle {
     dynamicThinkingBudget: boolean;
     thinkingConfig?: GoogleThinkingConfig;
     static lc_name(): 'LibreChatVertexAI';
+    constructor(model: string, fields?: Omit<VertexAIClientOptions, 'model'>);
     constructor(fields?: VertexAIClientOptions);
     invocationParams(options?: this['ParsedCallOptions'] | undefined): GoogleAIModelRequestParams;
     buildConnection(fields: VertexAIClientOptions | undefined, client: GoogleAbstractedClient): void;

package/dist/types/messages/cache.d.ts

@@ -36,5 +36,8 @@ export declare function stripBedrockCacheControl<T extends MessageWithContent>(m
  * @param messages - The array of message objects.
  * @returns - A new array of message objects with cache points added.
  */
-export declare function addBedrockCacheControl<T extends
+export declare function addBedrockCacheControl<T extends MessageWithContent & {
+    getType?: () => string;
+    role?: string;
+}>(messages: T[]): T[];
 export {};

package/dist/types/messages/langchain.d.ts

@@ -0,0 +1,27 @@
+import type { MessageContent } from '@langchain/core/messages';
+import type * as t from '@/types';
+type LibreChatMessageContent = MessageContent | string | t.MessageContentComplex[] | t.ExtendedMessageContent[];
+type WithLangChainContent<T extends {
+    content: LibreChatMessageContent;
+}> = Omit<T, 'content'> & {
+    content: MessageContent;
+};
+/**
+ * Bridges LibreChat's extended content blocks to LangChain 1.x MessageContent.
+ *
+ * LangChain 1.x narrowed message constructor types around ContentBlock, while
+ * LibreChat still carries provider-specific blocks through the same content
+ * field. This helper keeps the runtime shape unchanged during the dependency
+ * upgrade; tracking issue: https://github.com/danny-avila/agents/issues/130.
+ */
+export declare function toLangChainContent(content: LibreChatMessageContent): MessageContent;
+/**
+ * Applies the same LangChain 1.x content bridge to message constructor fields.
+ *
+ * Keep this cast-only helper local to constructor boundaries so follow-up work
+ * can replace it with aligned content types or explicit conversion logic.
+ */
+export declare function toLangChainMessageFields<T extends {
+    content: LibreChatMessageContent;
+}>(message: T): WithLangChainContent<T>;
+export {};
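
A usage sketch for the new content bridge declared above (re-export from the package root is an assumption; the content blocks are illustrative):

```typescript
// Sketch only: assumes toLangChainMessageFields is reachable from the package root.
import { HumanMessage } from '@langchain/core/messages';
import { toLangChainMessageFields } from '@librechat/agents';

// LibreChat-style content mixing a standard text block with a provider-specific block.
const fields = {
  content: [
    { type: 'text', text: 'Summarize the attached report.' },
    { type: 'document', source: { type: 'url', url: 'https://example.com/report.pdf' } },
  ],
};

// The bridge keeps the runtime shape and only widens the type to MessageContent,
// so LangChain 1.x's narrowed message constructors accept it unchanged.
const message = new HumanMessage(toLangChainMessageFields(fields));
```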

package/dist/types/tools/CodeExecutor.d.ts

@@ -3,6 +3,12 @@ import type * as t from '@/types';
 import { Constants } from '@/common';
 export declare const imageExtRegex: RegExp;
 export declare const getCodeBaseURL: () => string;
+/**
+ * Renders one section of the post-execution file listing. Used by the
+ * code/bash tool formatters to keep generated outputs and inherited
+ * inputs visually separated. See BashExecutor for full docs.
+ */
+export declare function renderFileSection(header: string, files: t.FileRefs, defaultMessage: string): string;
 export declare const CodeExecutionToolSchema: {
     readonly type: "object";
     readonly properties: {

package/dist/types/types/graph.d.ts

@@ -1,4 +1,4 @@
-import type { START,
+import type { START, StateGraph, StateGraphArgs } from '@langchain/langgraph';
 import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
 import type { BaseMessage, AIMessageChunk, SystemMessage } from '@langchain/core/messages';
 import type { RunnableConfig, Runnable } from '@langchain/core/runnables';
@@ -51,51 +51,36 @@ export interface EventHandler {
 }
 export type GraphStateChannels<T extends BaseGraphState> = StateGraphArgs<T>['channels'];
 export type Workflow<T extends BaseGraphState = BaseGraphState, U extends Partial<T> = Partial<T>, N extends string = string> = StateGraph<T, U, N>;
-
-export type
-
-
-
-}
-
-
-
-
-
-
-
-
-
-
-}
-
-
-
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-    agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-}, StateDefinition>;
-export type CompiledAgentWorfklow = CompiledStateGraph<AgentSubgraphState, Partial<AgentSubgraphState>, '__start__' | `agent=${string}` | `tools=${string}` | `summarize=${string}`, {
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-    summarizationRequest: BinaryOperatorAggregate<SummarizationNodeInput | undefined, SummarizationNodeInput | undefined>;
-}, {
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-    summarizationRequest: BinaryOperatorAggregate<SummarizationNodeInput | undefined, SummarizationNodeInput | undefined>;
-}, StateDefinition, {
-    [x: `agent=${string}`]: Partial<BaseGraphState>;
-    [x: `tools=${string}`]: any;
-    [x: `summarize=${string}`]: any;
-}>;
+type LangChainEventStreamCallbackHandlerInput = NonNullable<Parameters<Runnable['streamEvents']>[2]>;
+export type EventStreamCallbackHandlerInput = LangChainEventStreamCallbackHandlerInput & {
+    autoClose?: boolean;
+    raiseError?: boolean;
+    ignoreCustomEvent?: boolean;
+};
+export type WorkflowValuesStreamConfig = RunnableConfig & {
+    streamMode: 'values';
+};
+/**
+ * LangGraph stream output is mode-dependent (`values`, `updates`, SSE, etc.).
+ * Keep the base Runnable stream output as unknown and narrow at callsites that
+ * choose a concrete streamMode.
+ */
+export type CompiledWorkflow<TInput extends BaseGraphState = BaseGraphState, TOutput extends BaseGraphState = TInput> = Omit<Runnable<TInput, unknown>, 'invoke'> & {
+    invoke(input: TInput, config?: RunnableConfig): Promise<TOutput>;
+};
+export type CompiledStateWorkflow = CompiledWorkflow;
+export type CompiledMultiAgentWorkflow = CompiledWorkflow<MultiAgentGraphState>;
+export type CompiledAgentWorfklow = CompiledWorkflow<AgentSubgraphState, AgentSubgraphState>;
 export type SystemRunnable = Runnable<BaseMessage[], (BaseMessage | SystemMessage)[], RunnableConfig<Record<string, unknown>>> | undefined;
 /**
  * Optional compile options passed to workflow.compile().
  * These are intentionally untyped to avoid coupling to library internals.
  */
 export type CompileOptions = {
-    checkpointer?:
+    checkpointer?: unknown;
     interruptBefore?: string[];
     interruptAfter?: string[];
 };
-export type EventStreamCallbackHandlerInput = Parameters<CompiledWorkflow['streamEvents']>[2] extends Omit<infer T, 'autoClose'> ? T : never;
 export type StreamChunk = (ChatGenerationChunk & {
     message: AIMessageChunk;
 }) | AIMessageChunk;
@@ -311,10 +296,12 @@ export interface AgentInputs {
     toolMap?: ToolMap;
     tools?: GraphTools;
     provider: Providers;
+    /** Stable/cacheable system instructions. */
     instructions?: string;
     streamBuffer?: number;
     maxContextTokens?: number;
     clientOptions?: ClientOptions;
+    /** Dynamic system tail appended after stable instructions without provider cache markers. */
     additional_instructions?: string;
     reasoningKey?: 'reasoning_content' | 'reasoning';
     /** Format content blocks as strings (for legacy compatibility i.e. Ollama/Azure Serverless) */
@@ -340,7 +327,7 @@ export interface AgentInputs {
     summarizationEnabled?: boolean;
     summarizationConfig?: SummarizationConfig;
     /** Cross-run summary from a previous run, forwarded from formatAgentMessages.
-     * Injected into the system
+     * Injected into the dynamic system tail via AgentContext. */
     initialSummary?: {
         text: string;
         tokenCount: number;
@@ -370,3 +357,4 @@ export interface ContextPruningConfig {
         placeholder?: string;
     };
 }
+export {};
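
The stable vs. dynamic split documented on `AgentInputs` above can be illustrated with a small sketch (field names come from this diff; the provider value and prompt text are illustrative):

```typescript
// Sketch only: a plain object mirroring the AgentInputs fields documented above.
const agentInputs = {
  provider: 'anthropic',
  // Stable, cacheable system instructions: keep these byte-identical across turns
  // so provider-side prompt caching can reuse the cached prefix.
  instructions: 'You are a careful coding assistant for this repository.',
  // Dynamic system tail: per-turn context appended after the stable block, kept
  // free of provider cache markers so it never invalidates the cached prefix.
  additional_instructions: `Current date: ${new Date().toISOString().slice(0, 10)}.`,
};
```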

package/dist/types/types/llm.d.ts

@@ -3,7 +3,7 @@ import type { BindToolsInput, BaseChatModelParams } from '@langchain/core/langua
 import type { OpenAIChatInput, ChatOpenAIFields, AzureOpenAIInput, ClientOptions as OAIClientOptions } from '@langchain/openai';
 import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
 import type { ChatVertexAIInput } from '@langchain/google-vertexai';
-import type {
+import type { ChatDeepSeekInput } from '@langchain/deepseek';
 import type { ChatOpenRouterCallOptions } from '@/llm/openrouter';
 import type { ChatBedrockConverseInput } from '@langchain/aws';
 import type { ChatMistralAIInput } from '@langchain/mistralai';
@@ -49,7 +49,7 @@ export type AnthropicReasoning = {
 export type GoogleThinkingConfig = {
     thinkingBudget?: number;
     includeThoughts?: boolean;
-    thinkingLevel?:
+    thinkingLevel?: 'THINKING_LEVEL_UNSPECIFIED' | 'LOW' | 'MEDIUM' | 'HIGH';
 };
 export type OpenAIClientOptions = ChatOpenAIFields;
 export type AnthropicClientOptions = Omit<AnthropicInput, 'thinking'> & {
@@ -71,7 +71,7 @@ export type GoogleClientOptions = GoogleGenerativeAIChatInput & {
     customHeaders?: RequestOptions['customHeaders'];
     thinkingConfig?: GoogleThinkingConfig;
 };
-export type DeepSeekClientOptions =
+export type DeepSeekClientOptions = Partial<ChatDeepSeekInput>;
 export type XAIClientOptions = ChatXAIInput;
 export type ClientOptions = OpenAIClientOptions | AzureClientOptions | AnthropicClientOptions | MistralAIClientOptions | VertexAIClientOptions | BedrockConverseClientOptions | GoogleClientOptions | DeepSeekClientOptions | XAIClientOptions;
 export type SharedLLMConfig = {

package/dist/types/types/run.d.ts

@@ -63,7 +63,9 @@ export interface AgentStateChannels {
     messages: BaseMessage[];
     next: string;
     [key: string]: unknown;
+    /** Stable/cacheable system instructions. */
     instructions?: string;
+    /** Dynamic system tail appended after stable instructions. */
     additional_instructions?: string;
 }
 export interface Member {

package/dist/types/types/tools.d.ts

@@ -96,6 +96,15 @@ export type FileRef = {
     path?: string;
     /** Session ID this file belongs to (for multi-session file tracking) */
     session_id?: string;
+    /**
+     * `true` when the codeapi sandbox echoed this entry as an unchanged
+     * passthrough of an input the caller already owns (skill files,
+     * downloaded inputs whose hash matched the baseline, inherited
+     * `.dirkeep` markers). The tool-result formatter renders these as
+     * "Available files" rather than "Generated files" so the LLM doesn't
+     * conflate infrastructure inputs with newly-produced outputs.
+     */
+    inherited?: true;
 };
 export type FileRefs = FileRef[];
 export type ExecuteResult = {
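
A small sketch of how a tool-result formatter might consume the new `inherited` flag (the local `FileRef` type below only mirrors the fields shown in this diff):

```typescript
// Sketch only: minimal local mirror of the FileRef shape from the diff above.
type FileRef = { path?: string; session_id?: string; inherited?: true };

function splitFileSections(files: FileRef[]): { generated: FileRef[]; available: FileRef[] } {
  // Inherited entries are unchanged passthrough inputs (skill files, baseline
  // downloads, `.dirkeep` markers); everything else is newly produced output.
  return {
    generated: files.filter((file) => file.inherited !== true),
    available: files.filter((file) => file.inherited === true),
  };
}
```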

package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@librechat/agents",
-  "version": "3.1.
+  "version": "3.1.75-dev.0",
   "main": "./dist/cjs/main.cjs",
   "module": "./dist/esm/main.mjs",
   "types": "./dist/types/index.d.ts",
@@ -27,7 +27,7 @@
   ],
   "packageManager": "npm@10.5.2",
   "engines": {
-    "node": ">=
+    "node": ">=20.0.0"
   },
   "files": [
     "dist",
@@ -111,7 +111,7 @@
     "format": "prettier --write ."
   },
   "overrides": {
-    "@langchain/openai": "
+    "@langchain/openai": "1.4.5",
     "@anthropic-ai/sdk": "$@anthropic-ai/sdk",
     "@browserbasehq/stagehand": {
       "openai": "$openai"
@@ -121,19 +121,19 @@
     "minimatch": "3.1.4"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.
+    "@anthropic-ai/sdk": "^0.92.0",
     "@aws-sdk/client-bedrock-runtime": "^3.1013.0",
-    "@langchain/anthropic": "^
-    "@langchain/aws": "^
-    "@langchain/core": "^
-    "@langchain/deepseek": "^
-    "@langchain/google-genai": "^
-    "@langchain/google-vertexai": "^
-    "@langchain/langgraph": "^
-    "@langchain/mistralai": "^0.
-    "@langchain/openai": "
-    "@langchain/textsplitters": "^0.1
-    "@langchain/xai": "^
+    "@langchain/anthropic": "^1.3.28",
+    "@langchain/aws": "^1.3.5",
+    "@langchain/core": "^1.1.42",
+    "@langchain/deepseek": "^1.0.25",
+    "@langchain/google-genai": "^2.1.29",
+    "@langchain/google-vertexai": "^2.1.29",
+    "@langchain/langgraph": "^1.2.9",
+    "@langchain/mistralai": "^1.0.8",
+    "@langchain/openai": "1.4.5",
+    "@langchain/textsplitters": "^1.0.1",
+    "@langchain/xai": "^1.3.17",
     "@langfuse/langchain": "^4.3.0",
     "@langfuse/otel": "^4.3.0",
     "@langfuse/tracing": "^4.3.0",
@@ -147,7 +147,8 @@
     "mathjs": "^15.2.0",
     "nanoid": "^3.3.7",
     "okapibm25": "^1.4.1",
-    "openai": "
+    "openai": "^6.35.0",
+    "uuid": "^11.1.1"
   },
   "imports": {
     "@/*": "./src/*",