@librechat/agents 2.4.37 → 2.4.40
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions exactly as the packages appear in their public registry.
- package/dist/cjs/graphs/Graph.cjs +2 -1
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +13 -3
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +22 -2
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
- package/dist/cjs/llm/google/index.cjs +73 -0
- package/dist/cjs/llm/google/index.cjs.map +1 -0
- package/dist/cjs/llm/openai/index.cjs +101 -2
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/cjs/llm/providers.cjs +11 -11
- package/dist/cjs/llm/providers.cjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +2 -1
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/llm/anthropic/types.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs +13 -3
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs +22 -2
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
- package/dist/esm/llm/google/index.mjs +71 -0
- package/dist/esm/llm/google/index.mjs.map +1 -0
- package/dist/esm/llm/openai/index.mjs +100 -4
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/esm/llm/providers.mjs +2 -2
- package/dist/esm/llm/providers.mjs.map +1 -1
- package/dist/types/llm/anthropic/types.d.ts +7 -3
- package/dist/types/llm/anthropic/utils/message_inputs.d.ts +1 -1
- package/dist/types/llm/anthropic/utils/output_parsers.d.ts +2 -2
- package/dist/types/llm/google/index.d.ts +8 -0
- package/dist/types/llm/openai/index.d.ts +37 -3
- package/dist/types/types/llm.d.ts +6 -3
- package/package.json +14 -14
- package/src/graphs/Graph.ts +3 -4
- package/src/llm/anthropic/types.ts +23 -3
- package/src/llm/anthropic/utils/message_inputs.ts +21 -5
- package/src/llm/anthropic/utils/message_outputs.ts +21 -2
- package/src/llm/anthropic/utils/output_parsers.ts +23 -4
- package/src/llm/google/index.ts +97 -0
- package/src/llm/openai/index.ts +170 -14
- package/src/llm/providers.ts +2 -2
- package/src/scripts/simple.ts +1 -1
- package/src/types/llm.ts +6 -3
- package/src/utils/llmConfig.ts +2 -1
package/src/llm/google/index.ts
ADDED
@@ -0,0 +1,97 @@
+import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
+import { getEnvironmentVariable } from '@langchain/core/utils/env';
+import { GoogleGenerativeAI as GenerativeAI } from '@google/generative-ai';
+import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
+import type { RequestOptions, SafetySetting } from '@google/generative-ai';
+
+export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
+  constructor(
+    fields: GoogleGenerativeAIChatInput & {
+      customHeaders?: RequestOptions['customHeaders'];
+    }
+  ) {
+    super(fields);
+
+    this.model = fields.model.replace(/^models\//, '');
+
+    this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;
+
+    if (this.maxOutputTokens != null && this.maxOutputTokens < 0) {
+      throw new Error('`maxOutputTokens` must be a positive integer');
+    }
+
+    this.temperature = fields.temperature ?? this.temperature;
+    if (
+      this.temperature != null &&
+      (this.temperature < 0 || this.temperature > 2)
+    ) {
+      throw new Error('`temperature` must be in the range of [0.0,2.0]');
+    }
+
+    this.topP = fields.topP ?? this.topP;
+    if (this.topP != null && this.topP < 0) {
+      throw new Error('`topP` must be a positive integer');
+    }
+
+    if (this.topP != null && this.topP > 1) {
+      throw new Error('`topP` must be below 1.');
+    }
+
+    this.topK = fields.topK ?? this.topK;
+    if (this.topK != null && this.topK < 0) {
+      throw new Error('`topK` must be a positive integer');
+    }
+
+    this.stopSequences = fields.stopSequences ?? this.stopSequences;
+
+    this.apiKey = fields.apiKey ?? getEnvironmentVariable('GOOGLE_API_KEY');
+    if (this.apiKey == null || this.apiKey === '') {
+      throw new Error(
+        'Please set an API key for Google GenerativeAI ' +
+          'in the environment variable GOOGLE_API_KEY ' +
+          'or in the `apiKey` field of the ' +
+          'ChatGoogleGenerativeAI constructor'
+      );
+    }
+
+    this.safetySettings = fields.safetySettings ?? this.safetySettings;
+    if (this.safetySettings && this.safetySettings.length > 0) {
+      const safetySettingsSet = new Set(
+        this.safetySettings.map((s) => s.category)
+      );
+      if (safetySettingsSet.size !== this.safetySettings.length) {
+        throw new Error(
+          'The categories in `safetySettings` array must be unique'
+        );
+      }
+    }
+
+    this.streaming = fields.streaming ?? this.streaming;
+    this.json = fields.json;
+
+    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+    // @ts-ignore - Accessing private property from parent class
+    this.client = new GenerativeAI(this.apiKey).getGenerativeModel(
+      {
+        model: this.model,
+        safetySettings: this.safetySettings as SafetySetting[],
+        generationConfig: {
+          stopSequences: this.stopSequences,
+          maxOutputTokens: this.maxOutputTokens,
+          temperature: this.temperature,
+          topP: this.topP,
+          topK: this.topK,
+          ...(this.json != null
+            ? { responseMimeType: 'application/json' }
+            : {}),
+        },
+      },
+      {
+        apiVersion: fields.apiVersion,
+        baseUrl: fields.baseUrl,
+        customHeaders: fields.customHeaders,
+      }
+    );
+    this.streamUsage = fields.streamUsage ?? this.streamUsage;
+  }
+}
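
For orientation, a minimal usage sketch of the new class (the model name and the package-internal '@/llm/google' import path mirror values seen elsewhere in this diff; the custom header itself is hypothetical):

import { CustomChatGoogleGenerativeAI } from '@/llm/google';

async function demo(): Promise<void> {
  const model = new CustomChatGoogleGenerativeAI({
    // The constructor strips a leading `models/` prefix.
    model: 'models/gemini-2.5-flash-preview-04-17',
    apiKey: process.env.GOOGLE_API_KEY,
    temperature: 0.7,
    // `customHeaders` is the one field added on top of
    // GoogleGenerativeAIChatInput; it is forwarded to
    // getGenerativeModel() as part of RequestOptions.
    customHeaders: { 'x-example-proxy-key': 'secret' }, // hypothetical header
  });
  const res = await model.invoke('Hello');
  console.log(res.content);
}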
package/src/llm/openai/index.ts
CHANGED
@@ -4,17 +4,150 @@ import { ChatDeepSeek as OriginalChatDeepSeek } from '@langchain/deepseek';
 import {
   getEndpoint,
   OpenAIClient,
+  formatToOpenAITool,
   ChatOpenAI as OriginalChatOpenAI,
   AzureChatOpenAI as OriginalAzureChatOpenAI,
 } from '@langchain/openai';
-import
+import { isLangChainTool } from '@langchain/core/utils/function_calling';
+import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
+import type { OpenAIEndpointConfig } from '@langchain/openai/dist/utils/azure';
+import type { AIMessageChunk } from '@langchain/core/messages';
+import type { Runnable } from '@langchain/core/runnables';
 import type * as t from '@langchain/openai';
+import {
+  isOpenAITool,
+  ToolDefinition,
+  BaseLanguageModelInput,
+} from '@langchain/core/language_models/base';
+
+type ResponsesCreateParams = Parameters<OpenAIClient.Responses['create']>[0];
+type ResponsesTool = Exclude<ResponsesCreateParams['tools'], undefined>[number];
+
+type ChatOpenAIToolType =
+  | BindToolsInput
+  | OpenAIClient.ChatCompletionTool
+  | ResponsesTool;
+
+type HeaderValue = string | undefined | null;
+export type HeadersLike =
+  | Headers
+  | readonly HeaderValue[][]
+  | Record<string, HeaderValue | readonly HeaderValue[]>
+  | undefined
+  | null
+  // NullableHeaders
+  | { values: Headers; [key: string]: unknown };
+
+// eslint-disable-next-line @typescript-eslint/explicit-function-return-type
+const iife = <T>(fn: () => T) => fn();
+
+export function isHeaders(headers: unknown): headers is Headers {
+  return (
+    typeof Headers !== 'undefined' &&
+    headers !== null &&
+    typeof headers === 'object' &&
+    Object.prototype.toString.call(headers) === '[object Headers]'
+  );
+}
+
+export function normalizeHeaders(
+  headers: HeadersLike
+): Record<string, HeaderValue | readonly HeaderValue[]> {
+  const output = iife(() => {
+    // If headers is a Headers instance
+    if (isHeaders(headers)) {
+      return headers;
+    }
+    // If headers is an array of [key, value] pairs
+    else if (Array.isArray(headers)) {
+      return new Headers(headers);
+    }
+    // If headers is a NullableHeaders-like object (has 'values' property that is a Headers)
+    else if (
+      typeof headers === 'object' &&
+      headers !== null &&
+      'values' in headers &&
+      isHeaders(headers.values)
+    ) {
+      return headers.values;
+    }
+    // If headers is a plain object
+    else if (typeof headers === 'object' && headers !== null) {
+      const entries: [string, string][] = Object.entries(headers)
+        .filter(([, v]) => typeof v === 'string')
+        .map(([k, v]) => [k, v as string]);
+      return new Headers(entries);
+    }
+    return new Headers();
+  });
+
+  return Object.fromEntries(output.entries());
+}
+
+type OpenAICoreRequestOptions = OpenAIClient.RequestOptions;
 
 function createAbortHandler(controller: AbortController): () => void {
   return function (): void {
     controller.abort();
   };
 }
+/**
+ * Formats a tool in either OpenAI format, or LangChain structured tool format
+ * into an OpenAI tool format. If the tool is already in OpenAI format, return without
+ * any changes. If it is in LangChain structured tool format, convert it to OpenAI tool format
+ * using OpenAI's `zodFunction` util, falling back to `convertToOpenAIFunction` if the parameters
+ * returned from the `zodFunction` util are not defined.
+ *
+ * @param {BindToolsInput} tool The tool to convert to an OpenAI tool.
+ * @param {Object} [fields] Additional fields to add to the OpenAI tool.
+ * @returns {ToolDefinition} The inputted tool in OpenAI tool format.
+ */
+export function _convertToOpenAITool(
+  tool: BindToolsInput,
+  fields?: {
+    /**
+     * If `true`, model output is guaranteed to exactly match the JSON Schema
+     * provided in the function definition.
+     */
+    strict?: boolean;
+  }
+): OpenAIClient.ChatCompletionTool {
+  let toolDef: OpenAIClient.ChatCompletionTool | undefined;
+
+  if (isLangChainTool(tool)) {
+    toolDef = formatToOpenAITool(tool);
+  } else {
+    toolDef = tool as ToolDefinition;
+  }
+
+  if (fields?.strict !== undefined) {
+    toolDef.function.strict = fields.strict;
+  }
+
+  return toolDef;
+}
+
+function _convertChatOpenAIToolTypeToOpenAITool(
+  tool: ChatOpenAIToolType,
+  fields?: {
+    strict?: boolean;
+  }
+): OpenAIClient.ChatCompletionTool {
+  if (isOpenAITool(tool)) {
+    if (fields?.strict !== undefined) {
+      return {
+        ...tool,
+        function: {
+          ...tool.function,
+          strict: fields.strict,
+        },
+      };
+    }
+
+    return tool;
+  }
+  return _convertToOpenAITool(tool, fields);
+}
 
 export class CustomOpenAIClient extends OpenAIClient {
   abortHandler?: () => void;
@@ -87,13 +220,36 @@ export class CustomAzureOpenAIClient extends AzureOpenAIClient {
   }
 }
 
+function isBuiltInTool(tool: ChatOpenAIToolType): tool is ResponsesTool {
+  return 'type' in tool && tool.type !== 'function';
+}
+
 export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
+  override bindTools(
+    tools: ChatOpenAIToolType[],
+    kwargs?: Partial<t.ChatOpenAICallOptions>
+  ): Runnable<BaseLanguageModelInput, AIMessageChunk, t.ChatOpenAICallOptions> {
+    let strict: boolean | undefined;
+    if (kwargs?.strict !== undefined) {
+      strict = kwargs.strict;
+    } else if (this.supportsStrictToolCalling !== undefined) {
+      strict = this.supportsStrictToolCalling;
+    }
+    return this.withConfig({
+      tools: tools.map((tool) =>
+        isBuiltInTool(tool)
+          ? tool
+          : _convertChatOpenAIToolTypeToOpenAITool(tool, { strict })
+      ),
+      ...kwargs,
+    } as Partial<t.ChatOpenAICallOptions>);
+  }
   protected _getClientOptions(
-    options?:
-  ):
+    options?: OpenAICoreRequestOptions
+  ): OpenAICoreRequestOptions {
     if (!(this.client as OpenAIClient | undefined)) {
       const openAIEndpointConfig: t.OpenAIEndpointConfig = {
         baseURL: this.clientConfig.baseURL,
@@ -115,7 +271,7 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
     const requestOptions = {
       ...this.clientConfig,
       ...options,
-    } as
+    } as OpenAICoreRequestOptions;
     return requestOptions;
   }
 }
@@ -125,10 +281,10 @@ export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
     return this.client;
   }
   protected _getClientOptions(
-    options:
-  ):
-    if (!(this.client as AzureOpenAIClient | undefined)) {
-      const openAIEndpointConfig:
+    options: OpenAICoreRequestOptions | undefined
+  ): OpenAICoreRequestOptions {
+    if (!(this.client as unknown as AzureOpenAIClient | undefined)) {
+      const openAIEndpointConfig: OpenAIEndpointConfig = {
         azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
         azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
         azureOpenAIApiKey: this.azureOpenAIApiKey,
@@ -154,25 +310,26 @@ export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
        delete params.baseURL;
      }
 
+      const defaultHeaders = normalizeHeaders(params.defaultHeaders);
       params.defaultHeaders = {
         ...params.defaultHeaders,
         'User-Agent':
-
-          ? `${
+          defaultHeaders['User-Agent'] != null
+            ? `${defaultHeaders['User-Agent']}: langchainjs-azure-openai-v2`
             : 'langchainjs-azure-openai-v2',
       };
 
       this.client = new CustomAzureOpenAIClient({
         apiVersion: this.azureOpenAIApiVersion,
         azureADTokenProvider: this.azureADTokenProvider,
-        ...params,
-      });
+        ...(params as t.AzureOpenAIInput),
+      }) as unknown as CustomOpenAIClient;
     }
 
     const requestOptions = {
       ...this.clientConfig,
       ...options,
-    } as
+    } as OpenAICoreRequestOptions;
     if (this.azureOpenAIApiKey != null) {
       requestOptions.headers = {
         'api-key': this.azureOpenAIApiKey,
@@ -186,7 +343,6 @@ export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
     return requestOptions;
   }
 }
-
 export class ChatDeepSeek extends OriginalChatDeepSeek {
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
package/src/llm/providers.ts
CHANGED
@@ -4,13 +4,13 @@ import { ChatMistralAI } from '@langchain/mistralai';
 import { ChatBedrockConverse } from '@langchain/aws';
 // import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatVertexAI } from '@langchain/google-vertexai';
-import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
 import { BedrockChat } from '@langchain/community/chat_models/bedrock/web';
 import type {
   ChatModelConstructorMap,
   ProviderOptionsMap,
   ChatModelMap,
 } from '@/types';
+import { CustomChatGoogleGenerativeAI } from '@/llm/google';
 import { CustomAnthropic } from '@/llm/anthropic';
 import { ChatOpenRouter } from '@/llm/openrouter';
 import {
@@ -35,7 +35,7 @@ export const llmProviders: Partial<ChatModelConstructorMap> = {
   [Providers.BEDROCK_LEGACY]: BedrockChat,
   [Providers.BEDROCK]: ChatBedrockConverse,
   // [Providers.ANTHROPIC]: ChatAnthropic,
-  [Providers.GOOGLE]:
+  [Providers.GOOGLE]: CustomChatGoogleGenerativeAI,
 };
 
 export const manualToolStreamProviders = new Set<Providers | string>([
package/src/scripts/simple.ts
CHANGED
@@ -99,7 +99,7 @@ async function testStandardStreaming(): Promise<void> {
   const openAIConfig = llmConfig as t.OpenAIClientOptions;
   if (openAIConfig.configuration) {
     openAIConfig.configuration.fetch = (
-      url:
+      url: string | URL | Request,
       init?: RequestInit
     ) => {
       console.log('Fetching:', url);
package/src/types/llm.ts
CHANGED
@@ -4,7 +4,6 @@ import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatMistralAI } from '@langchain/mistralai';
 import { ChatBedrockConverse } from '@langchain/aws';
 import { ChatVertexAI } from '@langchain/google-vertexai';
-import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
 import { BedrockChat } from '@langchain/community/chat_models/bedrock/web';
 import type {
   BindToolsInput,
@@ -23,6 +22,7 @@ import type { ChatDeepSeekCallOptions } from '@langchain/deepseek';
 import type { ChatOpenRouterCallOptions } from '@/llm/openrouter';
 import type { ChatBedrockConverseInput } from '@langchain/aws';
 import type { ChatMistralAIInput } from '@langchain/mistralai';
+import type { RequestOptions } from '@google/generative-ai';
 import type { StructuredTool } from '@langchain/core/tools';
 import type { AnthropicInput } from '@langchain/anthropic';
 import type { Runnable } from '@langchain/core/runnables';
@@ -35,6 +35,7 @@ import {
   ChatDeepSeek,
   AzureChatOpenAI,
 } from '@/llm/openai';
+import { CustomChatGoogleGenerativeAI } from '@/llm/google';
 import { ChatOpenRouter } from '@/llm/openrouter';
 import { Providers } from '@/common';
 
@@ -67,7 +68,9 @@ export type BedrockAnthropicInput = ChatBedrockConverseInput & {
     AnthropicReasoning;
 };
 export type BedrockConverseClientOptions = ChatBedrockConverseInput;
-export type GoogleClientOptions = GoogleGenerativeAIChatInput
+export type GoogleClientOptions = GoogleGenerativeAIChatInput & {
+  customHeaders?: RequestOptions['customHeaders'];
+};
 export type DeepSeekClientOptions = ChatDeepSeekCallOptions;
 export type XAIClientOptions = ChatXAIInput;
 
@@ -117,7 +120,7 @@ export type ChatModelMap = {
   [Providers.OPENROUTER]: ChatOpenRouter;
   [Providers.BEDROCK_LEGACY]: BedrockChat;
   [Providers.BEDROCK]: ChatBedrockConverse;
-  [Providers.GOOGLE]:
+  [Providers.GOOGLE]: CustomChatGoogleGenerativeAI;
 };
 
 export type ChatModelConstructorMap = {
package/src/utils/llmConfig.ts
CHANGED
@@ -100,7 +100,7 @@ export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
   } as t.VertexAIClientOptions & t.LLMConfig,
   [Providers.GOOGLE]: {
     provider: Providers.GOOGLE,
-    model: 'gemini-2.5-
+    model: 'gemini-2.5-flash-preview-04-17',
     streaming: true,
     streamUsage: true,
   },
@@ -108,6 +108,7 @@ export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
     provider: Providers.BEDROCK,
     // model: 'anthropic.claude-3-sonnet-20240229-v1:0',
     // model: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
+    // model: 'us.amazon.nova-pro-v1:0',
     model: 'us.anthropic.claude-sonnet-4-20250514-v1:0',
     // additionalModelRequestFields: { thinking: { type: 'enabled', budget_tokens: 2000 } },
     region: process.env.BEDROCK_AWS_REGION,