illuma-agents 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +25 -21
- package/dist/cjs/agents/AgentContext.cjs +222 -0
- package/dist/cjs/agents/AgentContext.cjs.map +1 -0
- package/dist/cjs/common/enum.cjs +5 -4
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/events.cjs +7 -5
- package/dist/cjs/events.cjs.map +1 -1
- package/dist/cjs/graphs/Graph.cjs +328 -207
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs +507 -0
- package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
- package/dist/cjs/llm/google/index.cjs.map +1 -1
- package/dist/cjs/llm/ollama/index.cjs.map +1 -1
- package/dist/cjs/llm/openai/index.cjs +35 -0
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/cjs/llm/openai/utils/index.cjs +3 -1
- package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
- package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
- package/dist/cjs/llm/providers.cjs +0 -2
- package/dist/cjs/llm/providers.cjs.map +1 -1
- package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
- package/dist/cjs/main.cjs +12 -1
- package/dist/cjs/main.cjs.map +1 -1
- package/dist/cjs/messages/cache.cjs +123 -0
- package/dist/cjs/messages/cache.cjs.map +1 -0
- package/dist/cjs/messages/content.cjs +53 -0
- package/dist/cjs/messages/content.cjs.map +1 -0
- package/dist/cjs/messages/format.cjs +17 -29
- package/dist/cjs/messages/format.cjs.map +1 -1
- package/dist/cjs/run.cjs +119 -74
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/cjs/stream.cjs +77 -73
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/tools/Calculator.cjs +45 -0
- package/dist/cjs/tools/Calculator.cjs.map +1 -0
- package/dist/cjs/tools/CodeExecutor.cjs +22 -22
- package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
- package/dist/cjs/tools/ToolNode.cjs +5 -3
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/cjs/tools/handlers.cjs +20 -20
- package/dist/cjs/tools/handlers.cjs.map +1 -1
- package/dist/cjs/utils/events.cjs +31 -0
- package/dist/cjs/utils/events.cjs.map +1 -0
- package/dist/cjs/utils/handlers.cjs +70 -0
- package/dist/cjs/utils/handlers.cjs.map +1 -0
- package/dist/cjs/utils/tokens.cjs +54 -7
- package/dist/cjs/utils/tokens.cjs.map +1 -1
- package/dist/esm/agents/AgentContext.mjs +220 -0
- package/dist/esm/agents/AgentContext.mjs.map +1 -0
- package/dist/esm/common/enum.mjs +5 -4
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/events.mjs +7 -5
- package/dist/esm/events.mjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +330 -209
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs +505 -0
- package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
- package/dist/esm/llm/anthropic/index.mjs.map +1 -1
- package/dist/esm/llm/google/index.mjs.map +1 -1
- package/dist/esm/llm/ollama/index.mjs.map +1 -1
- package/dist/esm/llm/openai/index.mjs +35 -0
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/esm/llm/openai/utils/index.mjs +3 -1
- package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
- package/dist/esm/llm/openrouter/index.mjs.map +1 -1
- package/dist/esm/llm/providers.mjs +0 -2
- package/dist/esm/llm/providers.mjs.map +1 -1
- package/dist/esm/llm/vertexai/index.mjs.map +1 -1
- package/dist/esm/main.mjs +7 -2
- package/dist/esm/main.mjs.map +1 -1
- package/dist/esm/messages/cache.mjs +120 -0
- package/dist/esm/messages/cache.mjs.map +1 -0
- package/dist/esm/messages/content.mjs +51 -0
- package/dist/esm/messages/content.mjs.map +1 -0
- package/dist/esm/messages/format.mjs +18 -29
- package/dist/esm/messages/format.mjs.map +1 -1
- package/dist/esm/run.mjs +119 -74
- package/dist/esm/run.mjs.map +1 -1
- package/dist/esm/stream.mjs +77 -73
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/tools/Calculator.mjs +24 -0
- package/dist/esm/tools/Calculator.mjs.map +1 -0
- package/dist/esm/tools/CodeExecutor.mjs +22 -22
- package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
- package/dist/esm/tools/ToolNode.mjs +5 -3
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/esm/tools/handlers.mjs +20 -20
- package/dist/esm/tools/handlers.mjs.map +1 -1
- package/dist/esm/utils/events.mjs +29 -0
- package/dist/esm/utils/events.mjs.map +1 -0
- package/dist/esm/utils/handlers.mjs +68 -0
- package/dist/esm/utils/handlers.mjs.map +1 -0
- package/dist/esm/utils/tokens.mjs +54 -8
- package/dist/esm/utils/tokens.mjs.map +1 -1
- package/dist/types/agents/AgentContext.d.ts +94 -0
- package/dist/types/common/enum.d.ts +7 -5
- package/dist/types/events.d.ts +3 -3
- package/dist/types/graphs/Graph.d.ts +60 -66
- package/dist/types/graphs/MultiAgentGraph.d.ts +47 -0
- package/dist/types/graphs/index.d.ts +1 -0
- package/dist/types/index.d.ts +1 -0
- package/dist/types/llm/openai/index.d.ts +10 -0
- package/dist/types/messages/cache.d.ts +20 -0
- package/dist/types/messages/content.d.ts +7 -0
- package/dist/types/messages/format.d.ts +1 -7
- package/dist/types/messages/index.d.ts +2 -0
- package/dist/types/messages/reducer.d.ts +9 -0
- package/dist/types/run.d.ts +16 -10
- package/dist/types/stream.d.ts +4 -3
- package/dist/types/tools/Calculator.d.ts +8 -0
- package/dist/types/tools/ToolNode.d.ts +1 -1
- package/dist/types/tools/handlers.d.ts +9 -7
- package/dist/types/tools/search/tool.d.ts +4 -4
- package/dist/types/types/graph.d.ts +124 -11
- package/dist/types/types/llm.d.ts +13 -9
- package/dist/types/types/messages.d.ts +4 -0
- package/dist/types/types/run.d.ts +46 -8
- package/dist/types/types/stream.d.ts +3 -2
- package/dist/types/utils/events.d.ts +6 -0
- package/dist/types/utils/handlers.d.ts +34 -0
- package/dist/types/utils/index.d.ts +1 -0
- package/dist/types/utils/tokens.d.ts +24 -0
- package/package.json +162 -145
- package/src/agents/AgentContext.ts +323 -0
- package/src/common/enum.ts +177 -176
- package/src/events.ts +197 -191
- package/src/graphs/Graph.ts +1058 -846
- package/src/graphs/MultiAgentGraph.ts +598 -0
- package/src/graphs/index.ts +2 -1
- package/src/index.ts +25 -24
- package/src/llm/anthropic/index.ts +413 -413
- package/src/llm/google/index.ts +222 -222
- package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -88
- package/src/llm/ollama/index.ts +92 -92
- package/src/llm/openai/index.ts +894 -853
- package/src/llm/openai/utils/index.ts +920 -918
- package/src/llm/openrouter/index.ts +60 -60
- package/src/llm/providers.ts +55 -57
- package/src/llm/vertexai/index.ts +360 -360
- package/src/messages/cache.test.ts +461 -0
- package/src/messages/cache.ts +151 -0
- package/src/messages/content.test.ts +362 -0
- package/src/messages/content.ts +63 -0
- package/src/messages/format.ts +611 -625
- package/src/messages/formatAgentMessages.test.ts +1144 -917
- package/src/messages/index.ts +6 -4
- package/src/messages/reducer.ts +80 -0
- package/src/run.ts +447 -381
- package/src/scripts/abort.ts +157 -138
- package/src/scripts/ant_web_search.ts +158 -158
- package/src/scripts/cli.ts +172 -167
- package/src/scripts/cli2.ts +133 -125
- package/src/scripts/cli3.ts +184 -178
- package/src/scripts/cli4.ts +191 -184
- package/src/scripts/cli5.ts +191 -184
- package/src/scripts/code_exec.ts +213 -214
- package/src/scripts/code_exec_simple.ts +147 -129
- package/src/scripts/content.ts +138 -120
- package/src/scripts/handoff-test.ts +135 -0
- package/src/scripts/multi-agent-chain.ts +278 -0
- package/src/scripts/multi-agent-conditional.ts +220 -0
- package/src/scripts/multi-agent-document-review-chain.ts +197 -0
- package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
- package/src/scripts/multi-agent-parallel.ts +343 -0
- package/src/scripts/multi-agent-sequence.ts +212 -0
- package/src/scripts/multi-agent-supervisor.ts +364 -0
- package/src/scripts/multi-agent-test.ts +186 -0
- package/src/scripts/search.ts +146 -150
- package/src/scripts/simple.ts +225 -225
- package/src/scripts/stream.ts +140 -122
- package/src/scripts/test-custom-prompt-key.ts +145 -0
- package/src/scripts/test-handoff-input.ts +170 -0
- package/src/scripts/test-multi-agent-list-handoff.ts +261 -0
- package/src/scripts/test-tools-before-handoff.ts +222 -0
- package/src/scripts/tools.ts +153 -155
- package/src/specs/agent-handoffs.test.ts +889 -0
- package/src/specs/anthropic.simple.test.ts +320 -317
- package/src/specs/azure.simple.test.ts +325 -316
- package/src/specs/openai.simple.test.ts +311 -316
- package/src/specs/openrouter.simple.test.ts +107 -0
- package/src/specs/prune.test.ts +758 -763
- package/src/specs/reasoning.test.ts +201 -165
- package/src/specs/thinking-prune.test.ts +769 -703
- package/src/specs/token-memoization.test.ts +39 -0
- package/src/stream.ts +664 -651
- package/src/tools/Calculator.test.ts +278 -0
- package/src/tools/Calculator.ts +25 -0
- package/src/tools/CodeExecutor.ts +220 -220
- package/src/tools/ToolNode.ts +170 -170
- package/src/tools/handlers.ts +341 -336
- package/src/types/graph.ts +372 -185
- package/src/types/llm.ts +141 -140
- package/src/types/messages.ts +4 -0
- package/src/types/run.ts +128 -89
- package/src/types/stream.ts +401 -400
- package/src/utils/events.ts +32 -0
- package/src/utils/handlers.ts +107 -0
- package/src/utils/index.ts +6 -5
- package/src/utils/llmConfig.ts +183 -183
- package/src/utils/tokens.ts +129 -70
- package/dist/types/scripts/abort.d.ts +0 -1
- package/dist/types/scripts/ant_web_search.d.ts +0 -1
- package/dist/types/scripts/args.d.ts +0 -7
- package/dist/types/scripts/caching.d.ts +0 -1
- package/dist/types/scripts/cli.d.ts +0 -1
- package/dist/types/scripts/cli2.d.ts +0 -1
- package/dist/types/scripts/cli3.d.ts +0 -1
- package/dist/types/scripts/cli4.d.ts +0 -1
- package/dist/types/scripts/cli5.d.ts +0 -1
- package/dist/types/scripts/code_exec.d.ts +0 -1
- package/dist/types/scripts/code_exec_files.d.ts +0 -1
- package/dist/types/scripts/code_exec_simple.d.ts +0 -1
- package/dist/types/scripts/content.d.ts +0 -1
- package/dist/types/scripts/empty_input.d.ts +0 -1
- package/dist/types/scripts/image.d.ts +0 -1
- package/dist/types/scripts/memory.d.ts +0 -1
- package/dist/types/scripts/search.d.ts +0 -1
- package/dist/types/scripts/simple.d.ts +0 -1
- package/dist/types/scripts/stream.d.ts +0 -1
- package/dist/types/scripts/thinking.d.ts +0 -1
- package/dist/types/scripts/tools.d.ts +0 -1
- package/dist/types/specs/spec.utils.d.ts +0 -1
- package/dist/types/tools/example.d.ts +0 -78
- package/src/tools/example.ts +0 -129
package/src/llm/google/index.ts
CHANGED
|
@@ -1,222 +1,222 @@
|
|
|
1
|
-
/* eslint-disable @typescript-eslint/ban-ts-comment */
|
|
2
|
-
import { AIMessageChunk } from '@langchain/core/messages';
|
|
3
|
-
import { ChatGenerationChunk } from '@langchain/core/outputs';
|
|
4
|
-
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
|
|
5
|
-
import { getEnvironmentVariable } from '@langchain/core/utils/env';
|
|
6
|
-
import { GoogleGenerativeAI as GenerativeAI } from '@google/generative-ai';
|
|
7
|
-
import type {
|
|
8
|
-
GenerateContentRequest,
|
|
9
|
-
SafetySetting,
|
|
10
|
-
} from '@google/generative-ai';
|
|
11
|
-
import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
|
|
12
|
-
import type { BaseMessage, UsageMetadata } from '@langchain/core/messages';
|
|
13
|
-
import type { GeminiGenerationConfig } from '@langchain/google-common';
|
|
14
|
-
import type { GeminiApiUsageMetadata } from './types';
|
|
15
|
-
import type { GoogleClientOptions } from '@/types';
|
|
16
|
-
import {
|
|
17
|
-
convertResponseContentToChatGenerationChunk,
|
|
18
|
-
convertBaseMessagesToContent,
|
|
19
|
-
} from './utils/common';
|
|
20
|
-
|
|
21
|
-
export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
|
|
22
|
-
thinkingConfig?: GeminiGenerationConfig['thinkingConfig'];
|
|
23
|
-
constructor(fields: GoogleClientOptions) {
|
|
24
|
-
super(fields);
|
|
25
|
-
|
|
26
|
-
this.model = fields.model.replace(/^models\//, '');
|
|
27
|
-
|
|
28
|
-
this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;
|
|
29
|
-
|
|
30
|
-
if (this.maxOutputTokens != null && this.maxOutputTokens < 0) {
|
|
31
|
-
throw new Error('`maxOutputTokens` must be a positive integer');
|
|
32
|
-
}
|
|
33
|
-
|
|
34
|
-
this.temperature = fields.temperature ?? this.temperature;
|
|
35
|
-
if (
|
|
36
|
-
this.temperature != null &&
|
|
37
|
-
(this.temperature < 0 || this.temperature > 2)
|
|
38
|
-
) {
|
|
39
|
-
throw new Error('`temperature` must be in the range of [0.0,2.0]');
|
|
40
|
-
}
|
|
41
|
-
|
|
42
|
-
this.topP = fields.topP ?? this.topP;
|
|
43
|
-
if (this.topP != null && this.topP < 0) {
|
|
44
|
-
throw new Error('`topP` must be a positive integer');
|
|
45
|
-
}
|
|
46
|
-
|
|
47
|
-
if (this.topP != null && this.topP > 1) {
|
|
48
|
-
throw new Error('`topP` must be below 1.');
|
|
49
|
-
}
|
|
50
|
-
|
|
51
|
-
this.topK = fields.topK ?? this.topK;
|
|
52
|
-
if (this.topK != null && this.topK < 0) {
|
|
53
|
-
throw new Error('`topK` must be a positive integer');
|
|
54
|
-
}
|
|
55
|
-
|
|
56
|
-
this.stopSequences = fields.stopSequences ?? this.stopSequences;
|
|
57
|
-
|
|
58
|
-
this.apiKey = fields.apiKey ?? getEnvironmentVariable('GOOGLE_API_KEY');
|
|
59
|
-
if (this.apiKey == null || this.apiKey === '') {
|
|
60
|
-
throw new Error(
|
|
61
|
-
'Please set an API key for Google GenerativeAI ' +
|
|
62
|
-
'in the environment variable GOOGLE_API_KEY ' +
|
|
63
|
-
'or in the `apiKey` field of the ' +
|
|
64
|
-
'ChatGoogleGenerativeAI constructor'
|
|
65
|
-
);
|
|
66
|
-
}
|
|
67
|
-
|
|
68
|
-
this.safetySettings = fields.safetySettings ?? this.safetySettings;
|
|
69
|
-
if (this.safetySettings && this.safetySettings.length > 0) {
|
|
70
|
-
const safetySettingsSet = new Set(
|
|
71
|
-
this.safetySettings.map((s) => s.category)
|
|
72
|
-
);
|
|
73
|
-
if (safetySettingsSet.size !== this.safetySettings.length) {
|
|
74
|
-
throw new Error(
|
|
75
|
-
'The categories in `safetySettings` array must be unique'
|
|
76
|
-
);
|
|
77
|
-
}
|
|
78
|
-
}
|
|
79
|
-
|
|
80
|
-
this.thinkingConfig = fields.thinkingConfig ?? this.thinkingConfig;
|
|
81
|
-
|
|
82
|
-
this.streaming = fields.streaming ?? this.streaming;
|
|
83
|
-
this.json = fields.json;
|
|
84
|
-
|
|
85
|
-
// @ts-ignore - Accessing private property from parent class
|
|
86
|
-
this.client = new GenerativeAI(this.apiKey).getGenerativeModel(
|
|
87
|
-
{
|
|
88
|
-
model: this.model,
|
|
89
|
-
safetySettings: this.safetySettings as SafetySetting[],
|
|
90
|
-
generationConfig: {
|
|
91
|
-
stopSequences: this.stopSequences,
|
|
92
|
-
maxOutputTokens: this.maxOutputTokens,
|
|
93
|
-
temperature: this.temperature,
|
|
94
|
-
topP: this.topP,
|
|
95
|
-
topK: this.topK,
|
|
96
|
-
...(this.json != null
|
|
97
|
-
? { responseMimeType: 'application/json' }
|
|
98
|
-
: {}),
|
|
99
|
-
},
|
|
100
|
-
},
|
|
101
|
-
{
|
|
102
|
-
apiVersion: fields.apiVersion,
|
|
103
|
-
baseUrl: fields.baseUrl,
|
|
104
|
-
customHeaders: fields.customHeaders,
|
|
105
|
-
}
|
|
106
|
-
);
|
|
107
|
-
this.streamUsage = fields.streamUsage ?? this.streamUsage;
|
|
108
|
-
}
|
|
109
|
-
|
|
110
|
-
static lc_name(): 'IllumaGoogleGenerativeAI' {
|
|
111
|
-
return 'IllumaGoogleGenerativeAI';
|
|
112
|
-
}
|
|
113
|
-
|
|
114
|
-
invocationParams(
|
|
115
|
-
options?: this['ParsedCallOptions']
|
|
116
|
-
): Omit<GenerateContentRequest, 'contents'> {
|
|
117
|
-
const params = super.invocationParams(options);
|
|
118
|
-
if (this.thinkingConfig) {
|
|
119
|
-
/** @ts-ignore */
|
|
120
|
-
this.client.generationConfig = {
|
|
121
|
-
/** @ts-ignore */
|
|
122
|
-
...this.client.generationConfig,
|
|
123
|
-
/** @ts-ignore */
|
|
124
|
-
thinkingConfig: this.thinkingConfig,
|
|
125
|
-
};
|
|
126
|
-
}
|
|
127
|
-
return params;
|
|
128
|
-
}
|
|
129
|
-
|
|
130
|
-
async *_streamResponseChunks(
|
|
131
|
-
messages: BaseMessage[],
|
|
132
|
-
options: this['ParsedCallOptions'],
|
|
133
|
-
runManager?: CallbackManagerForLLMRun
|
|
134
|
-
): AsyncGenerator<ChatGenerationChunk> {
|
|
135
|
-
const prompt = convertBaseMessagesToContent(
|
|
136
|
-
messages,
|
|
137
|
-
this._isMultimodalModel,
|
|
138
|
-
this.useSystemInstruction
|
|
139
|
-
);
|
|
140
|
-
let actualPrompt = prompt;
|
|
141
|
-
if (prompt?.[0].role === 'system') {
|
|
142
|
-
const [systemInstruction] = prompt;
|
|
143
|
-
/** @ts-ignore */
|
|
144
|
-
this.client.systemInstruction = systemInstruction;
|
|
145
|
-
actualPrompt = prompt.slice(1);
|
|
146
|
-
}
|
|
147
|
-
const parameters = this.invocationParams(options);
|
|
148
|
-
const request = {
|
|
149
|
-
...parameters,
|
|
150
|
-
contents: actualPrompt,
|
|
151
|
-
};
|
|
152
|
-
const stream = await this.caller.callWithOptions(
|
|
153
|
-
{ signal: options.signal },
|
|
154
|
-
async () => {
|
|
155
|
-
/** @ts-ignore */
|
|
156
|
-
const { stream } = await this.client.generateContentStream(request);
|
|
157
|
-
return stream;
|
|
158
|
-
}
|
|
159
|
-
);
|
|
160
|
-
|
|
161
|
-
let index = 0;
|
|
162
|
-
let lastUsageMetadata: UsageMetadata | undefined;
|
|
163
|
-
for await (const response of stream) {
|
|
164
|
-
if (
|
|
165
|
-
'usageMetadata' in response &&
|
|
166
|
-
this.streamUsage !== false &&
|
|
167
|
-
options.streamUsage !== false
|
|
168
|
-
) {
|
|
169
|
-
const genAIUsageMetadata = response.usageMetadata as
|
|
170
|
-
| GeminiApiUsageMetadata
|
|
171
|
-
| undefined;
|
|
172
|
-
|
|
173
|
-
const output_tokens =
|
|
174
|
-
(genAIUsageMetadata?.candidatesTokenCount ?? 0) +
|
|
175
|
-
(genAIUsageMetadata?.thoughtsTokenCount ?? 0);
|
|
176
|
-
lastUsageMetadata = {
|
|
177
|
-
input_tokens: genAIUsageMetadata?.promptTokenCount ?? 0,
|
|
178
|
-
output_tokens,
|
|
179
|
-
total_tokens: genAIUsageMetadata?.totalTokenCount ?? 0,
|
|
180
|
-
};
|
|
181
|
-
}
|
|
182
|
-
|
|
183
|
-
const chunk = convertResponseContentToChatGenerationChunk(response, {
|
|
184
|
-
usageMetadata: undefined,
|
|
185
|
-
index,
|
|
186
|
-
});
|
|
187
|
-
index += 1;
|
|
188
|
-
if (!chunk) {
|
|
189
|
-
continue;
|
|
190
|
-
}
|
|
191
|
-
|
|
192
|
-
yield chunk;
|
|
193
|
-
await runManager?.handleLLMNewToken(
|
|
194
|
-
chunk.text || '',
|
|
195
|
-
undefined,
|
|
196
|
-
undefined,
|
|
197
|
-
undefined,
|
|
198
|
-
undefined,
|
|
199
|
-
{ chunk }
|
|
200
|
-
);
|
|
201
|
-
}
|
|
202
|
-
|
|
203
|
-
if (lastUsageMetadata) {
|
|
204
|
-
const finalChunk = new ChatGenerationChunk({
|
|
205
|
-
text: '',
|
|
206
|
-
message: new AIMessageChunk({
|
|
207
|
-
content: '',
|
|
208
|
-
usage_metadata: lastUsageMetadata,
|
|
209
|
-
}),
|
|
210
|
-
});
|
|
211
|
-
yield finalChunk;
|
|
212
|
-
await runManager?.handleLLMNewToken(
|
|
213
|
-
finalChunk.text || '',
|
|
214
|
-
undefined,
|
|
215
|
-
undefined,
|
|
216
|
-
undefined,
|
|
217
|
-
undefined,
|
|
218
|
-
{ chunk: finalChunk }
|
|
219
|
-
);
|
|
220
|
-
}
|
|
221
|
-
}
|
|
222
|
-
}
|
|
1
|
+
/* eslint-disable @typescript-eslint/ban-ts-comment */
|
|
2
|
+
import { AIMessageChunk } from '@langchain/core/messages';
|
|
3
|
+
import { ChatGenerationChunk } from '@langchain/core/outputs';
|
|
4
|
+
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
|
|
5
|
+
import { getEnvironmentVariable } from '@langchain/core/utils/env';
|
|
6
|
+
import { GoogleGenerativeAI as GenerativeAI } from '@google/generative-ai';
|
|
7
|
+
import type {
|
|
8
|
+
GenerateContentRequest,
|
|
9
|
+
SafetySetting,
|
|
10
|
+
} from '@google/generative-ai';
|
|
11
|
+
import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
|
|
12
|
+
import type { BaseMessage, UsageMetadata } from '@langchain/core/messages';
|
|
13
|
+
import type { GeminiGenerationConfig } from '@langchain/google-common';
|
|
14
|
+
import type { GeminiApiUsageMetadata } from './types';
|
|
15
|
+
import type { GoogleClientOptions } from '@/types';
|
|
16
|
+
import {
|
|
17
|
+
convertResponseContentToChatGenerationChunk,
|
|
18
|
+
convertBaseMessagesToContent,
|
|
19
|
+
} from './utils/common';
|
|
20
|
+
|
|
21
|
+
/**
 * Chat-model wrapper around LangChain's `ChatGoogleGenerativeAI` that
 * re-validates the constructor fields, rebuilds the underlying
 * `@google/generative-ai` client (so `baseUrl`/`customHeaders`/`apiVersion`
 * take effect), supports a Gemini `thinkingConfig`, and emits a final
 * streaming chunk carrying usage metadata.
 */
export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
  // Gemini "thinking" options; merged into the client's generationConfig
  // lazily in `invocationParams`.
  thinkingConfig?: GeminiGenerationConfig['thinkingConfig'];

  /**
   * Validates sampling parameters and credentials, then replaces the parent's
   * private client with one built from `fields`.
   *
   * @param fields - provider options; a leading `models/` prefix on
   *   `fields.model` is stripped.
   * @throws Error when a sampling parameter is out of range, the API key is
   *   missing, or `safetySettings` contains duplicate categories.
   */
  constructor(fields: GoogleClientOptions) {
    super(fields);

    // Accept both "models/gemini-..." and bare "gemini-..." model ids.
    this.model = fields.model.replace(/^models\//, '');

    this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;

    if (this.maxOutputTokens != null && this.maxOutputTokens < 0) {
      throw new Error('`maxOutputTokens` must be a positive integer');
    }

    this.temperature = fields.temperature ?? this.temperature;
    if (
      this.temperature != null &&
      (this.temperature < 0 || this.temperature > 2)
    ) {
      throw new Error('`temperature` must be in the range of [0.0,2.0]');
    }

    this.topP = fields.topP ?? this.topP;
    if (this.topP != null && this.topP < 0) {
      throw new Error('`topP` must be a positive integer');
    }

    if (this.topP != null && this.topP > 1) {
      throw new Error('`topP` must be below 1.');
    }

    this.topK = fields.topK ?? this.topK;
    if (this.topK != null && this.topK < 0) {
      throw new Error('`topK` must be a positive integer');
    }

    this.stopSequences = fields.stopSequences ?? this.stopSequences;

    // Fall back to the conventional environment variable when no key given.
    this.apiKey = fields.apiKey ?? getEnvironmentVariable('GOOGLE_API_KEY');
    if (this.apiKey == null || this.apiKey === '') {
      throw new Error(
        'Please set an API key for Google GenerativeAI ' +
          'in the environment variable GOOGLE_API_KEY ' +
          'or in the `apiKey` field of the ' +
          'ChatGoogleGenerativeAI constructor'
      );
    }

    this.safetySettings = fields.safetySettings ?? this.safetySettings;
    if (this.safetySettings && this.safetySettings.length > 0) {
      // Each safety category may appear at most once.
      const safetySettingsSet = new Set(
        this.safetySettings.map((s) => s.category)
      );
      if (safetySettingsSet.size !== this.safetySettings.length) {
        throw new Error(
          'The categories in `safetySettings` array must be unique'
        );
      }
    }

    this.thinkingConfig = fields.thinkingConfig ?? this.thinkingConfig;

    this.streaming = fields.streaming ?? this.streaming;
    this.json = fields.json;

    // Rebuild the SDK model handle so apiVersion/baseUrl/customHeaders and the
    // validated generation config are actually applied.
    // @ts-ignore - Accessing private property from parent class
    this.client = new GenerativeAI(this.apiKey).getGenerativeModel(
      {
        model: this.model,
        safetySettings: this.safetySettings as SafetySetting[],
        generationConfig: {
          stopSequences: this.stopSequences,
          maxOutputTokens: this.maxOutputTokens,
          temperature: this.temperature,
          topP: this.topP,
          topK: this.topK,
          // JSON mode is requested via the response MIME type.
          ...(this.json != null
            ? { responseMimeType: 'application/json' }
            : {}),
        },
      },
      {
        apiVersion: fields.apiVersion,
        baseUrl: fields.baseUrl,
        customHeaders: fields.customHeaders,
      }
    );
    this.streamUsage = fields.streamUsage ?? this.streamUsage;
  }

  /** LangChain serialization name for this class. */
  static lc_name(): 'IllumaGoogleGenerativeAI' {
    return 'IllumaGoogleGenerativeAI';
  }

  /**
   * Returns the parent's invocation params; as a side effect, merges
   * `thinkingConfig` into the private client's generationConfig so it is
   * sent with the next request. NOTE(review): this mutates client state on
   * every call rather than the returned params — confirm this is intended.
   */
  invocationParams(
    options?: this['ParsedCallOptions']
  ): Omit<GenerateContentRequest, 'contents'> {
    const params = super.invocationParams(options);
    if (this.thinkingConfig) {
      /** @ts-ignore */
      this.client.generationConfig = {
        /** @ts-ignore */
        ...this.client.generationConfig,
        /** @ts-ignore */
        thinkingConfig: this.thinkingConfig,
      };
    }
    return params;
  }

  /**
   * Streams generation chunks for `messages`. A leading system message is
   * hoisted onto the client as `systemInstruction`; per-response usage
   * metadata is accumulated and emitted once as a trailing empty chunk.
   *
   * @param messages - conversation history to send
   * @param options - parsed call options (signal, streamUsage, ...)
   * @param runManager - optional callback manager notified per token
   */
  async *_streamResponseChunks(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    const prompt = convertBaseMessagesToContent(
      messages,
      this._isMultimodalModel,
      this.useSystemInstruction
    );
    let actualPrompt = prompt;
    // NOTE(review): `prompt?.[0].role` throws if the conversion returns an
    // empty array — `prompt?.[0]?.role` would be safer; confirm inputs.
    if (prompt?.[0].role === 'system') {
      const [systemInstruction] = prompt;
      /** @ts-ignore */
      this.client.systemInstruction = systemInstruction;
      actualPrompt = prompt.slice(1);
    }
    const parameters = this.invocationParams(options);
    const request = {
      ...parameters,
      contents: actualPrompt,
    };
    // The caller wraps the SDK call for retry/abort handling.
    const stream = await this.caller.callWithOptions(
      { signal: options.signal },
      async () => {
        /** @ts-ignore */
        const { stream } = await this.client.generateContentStream(request);
        return stream;
      }
    );

    let index = 0;
    let lastUsageMetadata: UsageMetadata | undefined;
    for await (const response of stream) {
      if (
        'usageMetadata' in response &&
        this.streamUsage !== false &&
        options.streamUsage !== false
      ) {
        const genAIUsageMetadata = response.usageMetadata as
          | GeminiApiUsageMetadata
          | undefined;

        // Output tokens include Gemini "thinking" tokens.
        const output_tokens =
          (genAIUsageMetadata?.candidatesTokenCount ?? 0) +
          (genAIUsageMetadata?.thoughtsTokenCount ?? 0);
        lastUsageMetadata = {
          input_tokens: genAIUsageMetadata?.promptTokenCount ?? 0,
          output_tokens,
          total_tokens: genAIUsageMetadata?.totalTokenCount ?? 0,
        };
      }

      // Per-chunk usage is suppressed; totals are emitted once at the end.
      const chunk = convertResponseContentToChatGenerationChunk(response, {
        usageMetadata: undefined,
        index,
      });
      index += 1;
      if (!chunk) {
        continue;
      }

      yield chunk;
      await runManager?.handleLLMNewToken(
        chunk.text || '',
        undefined,
        undefined,
        undefined,
        undefined,
        { chunk }
      );
    }

    // Trailing empty chunk carries the accumulated usage metadata.
    if (lastUsageMetadata) {
      const finalChunk = new ChatGenerationChunk({
        text: '',
        message: new AIMessageChunk({
          content: '',
          usage_metadata: lastUsageMetadata,
        }),
      });
      yield finalChunk;
      await runManager?.handleLLMNewToken(
        finalChunk.text || '',
        undefined,
        undefined,
        undefined,
        undefined,
        { chunk: finalChunk }
      );
    }
  }
}
|
|
@@ -1,88 +1,86 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
obj
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
if (
|
|
46
|
-
newObj[key] = newObj[key]
|
|
47
|
-
}
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
schema
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
schema
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
return rest as GenerativeAIFunctionDeclarationSchema;
|
|
88
|
-
}
|
|
1
|
+
import {
|
|
2
|
+
type FunctionDeclarationSchema as GenerativeAIFunctionDeclarationSchema,
|
|
3
|
+
type SchemaType as FunctionDeclarationSchemaType,
|
|
4
|
+
} from '@google/generative-ai';
|
|
5
|
+
import {
|
|
6
|
+
InteropZodType,
|
|
7
|
+
isInteropZodSchema,
|
|
8
|
+
} from '@langchain/core/utils/types';
|
|
9
|
+
import {
|
|
10
|
+
type JsonSchema7Type,
|
|
11
|
+
toJsonSchema,
|
|
12
|
+
} from '@langchain/core/utils/json_schema';
|
|
13
|
+
|
|
14
|
+
/**
 * JSON-Schema-like shape accepted by Google GenerativeAI function
 * declarations; `type` uses the SDK's `SchemaType` rather than a plain
 * JSON Schema type string.
 */
export interface GenerativeAIJsonSchema extends Record<string, unknown> {
  properties?: Record<string, GenerativeAIJsonSchema>;
  type: FunctionDeclarationSchemaType;
}

/**
 * A not-yet-sanitized schema that may still carry `additionalProperties`,
 * which `removeAdditionalProperties` strips before sending to the API.
 */
export interface GenerativeAIJsonSchemaDirty extends GenerativeAIJsonSchema {
  properties?: Record<string, GenerativeAIJsonSchemaDirty>;
  additionalProperties?: boolean;
}
|
|
23
|
+
|
|
24
|
+
export function removeAdditionalProperties(
|
|
25
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
26
|
+
obj: Record<string, any>
|
|
27
|
+
): GenerativeAIJsonSchema {
|
|
28
|
+
if (typeof obj === 'object' && obj !== null) {
|
|
29
|
+
const newObj = { ...obj };
|
|
30
|
+
|
|
31
|
+
if ('additionalProperties' in newObj) {
|
|
32
|
+
delete newObj.additionalProperties;
|
|
33
|
+
}
|
|
34
|
+
if ('$schema' in newObj) {
|
|
35
|
+
delete newObj.$schema;
|
|
36
|
+
}
|
|
37
|
+
if ('strict' in newObj) {
|
|
38
|
+
delete newObj.strict;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
for (const key in newObj) {
|
|
42
|
+
if (key in newObj) {
|
|
43
|
+
if (Array.isArray(newObj[key])) {
|
|
44
|
+
newObj[key] = newObj[key].map(removeAdditionalProperties);
|
|
45
|
+
} else if (typeof newObj[key] === 'object' && newObj[key] !== null) {
|
|
46
|
+
newObj[key] = removeAdditionalProperties(newObj[key]);
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
return newObj as GenerativeAIJsonSchema;
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
return obj as GenerativeAIJsonSchema;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
export function schemaToGenerativeAIParameters<
|
|
58
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
59
|
+
RunOutput extends Record<string, any> = Record<string, any>,
|
|
60
|
+
>(
|
|
61
|
+
schema: InteropZodType<RunOutput> | JsonSchema7Type
|
|
62
|
+
): GenerativeAIFunctionDeclarationSchema {
|
|
63
|
+
// GenerativeAI doesn't accept either the $schema or additionalProperties
|
|
64
|
+
// attributes, so we need to explicitly remove them.
|
|
65
|
+
const jsonSchema = removeAdditionalProperties(
|
|
66
|
+
isInteropZodSchema(schema) ? toJsonSchema(schema) : schema
|
|
67
|
+
);
|
|
68
|
+
const { $schema: _s, ...rest } = jsonSchema;
|
|
69
|
+
|
|
70
|
+
return rest as GenerativeAIFunctionDeclarationSchema;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
export function jsonSchemaToGeminiParameters(
|
|
74
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
75
|
+
schema: Record<string, any>
|
|
76
|
+
): GenerativeAIFunctionDeclarationSchema {
|
|
77
|
+
// Gemini doesn't accept either the $schema or additionalProperties
|
|
78
|
+
// attributes, so we need to explicitly remove them.
|
|
79
|
+
|
|
80
|
+
const jsonSchema = removeAdditionalProperties(
|
|
81
|
+
schema as GenerativeAIJsonSchemaDirty
|
|
82
|
+
);
|
|
83
|
+
const { $schema: _s, ...rest } = jsonSchema;
|
|
84
|
+
|
|
85
|
+
return rest as GenerativeAIFunctionDeclarationSchema;
|
|
86
|
+
}
|