@illuma-ai/agents 1.1.21 → 1.1.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/graphs/Graph.cjs +12 -1
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs +85 -1
- package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
- package/dist/cjs/run.cjs +20 -9
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +12 -1
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs +85 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
- package/dist/esm/run.mjs +20 -9
- package/dist/esm/run.mjs.map +1 -1
- package/dist/types/graphs/MultiAgentGraph.d.ts +17 -0
- package/package.json +1 -1
- package/src/graphs/Graph.ts +12 -1
- package/src/graphs/MultiAgentGraph.ts +105 -1
- package/src/graphs/__tests__/multi-agent-delegate.test.ts +191 -0
- package/src/run.ts +20 -11
- package/src/scripts/test-bedrock-handoff-autonomous.ts +231 -0
- package/src/agents/AgentContext.js +0 -782
- package/src/agents/AgentContext.test.js +0 -421
- package/src/agents/__tests__/AgentContext.test.js +0 -678
- package/src/agents/__tests__/resolveStructuredOutputMode.test.js +0 -117
- package/src/common/enum.js +0 -192
- package/src/common/index.js +0 -3
- package/src/events.js +0 -166
- package/src/graphs/Graph.js +0 -1857
- package/src/graphs/MultiAgentGraph.js +0 -1092
- package/src/graphs/__tests__/structured-output.integration.test.js +0 -624
- package/src/graphs/__tests__/structured-output.test.js +0 -144
- package/src/graphs/contextManagement.e2e.test.js +0 -718
- package/src/graphs/contextManagement.test.js +0 -485
- package/src/graphs/handoffValidation.test.js +0 -276
- package/src/graphs/index.js +0 -3
- package/src/index.js +0 -28
- package/src/instrumentation.js +0 -21
- package/src/llm/anthropic/index.js +0 -319
- package/src/llm/anthropic/types.js +0 -46
- package/src/llm/anthropic/utils/message_inputs.js +0 -627
- package/src/llm/anthropic/utils/message_outputs.js +0 -290
- package/src/llm/anthropic/utils/output_parsers.js +0 -89
- package/src/llm/anthropic/utils/tools.js +0 -25
- package/src/llm/bedrock/__tests__/bedrock-caching.test.js +0 -392
- package/src/llm/bedrock/index.js +0 -303
- package/src/llm/bedrock/types.js +0 -2
- package/src/llm/bedrock/utils/index.js +0 -6
- package/src/llm/bedrock/utils/message_inputs.js +0 -463
- package/src/llm/bedrock/utils/message_outputs.js +0 -269
- package/src/llm/fake.js +0 -92
- package/src/llm/google/index.js +0 -215
- package/src/llm/google/types.js +0 -12
- package/src/llm/google/utils/common.js +0 -670
- package/src/llm/google/utils/tools.js +0 -111
- package/src/llm/google/utils/zod_to_genai_parameters.js +0 -47
- package/src/llm/openai/index.js +0 -1033
- package/src/llm/openai/types.js +0 -2
- package/src/llm/openai/utils/index.js +0 -756
- package/src/llm/openai/utils/isReasoningModel.test.js +0 -79
- package/src/llm/openrouter/index.js +0 -261
- package/src/llm/openrouter/reasoning.test.js +0 -181
- package/src/llm/providers.js +0 -36
- package/src/llm/text.js +0 -65
- package/src/llm/vertexai/index.js +0 -402
- package/src/messages/__tests__/tools.test.js +0 -392
- package/src/messages/cache.js +0 -404
- package/src/messages/cache.test.js +0 -1167
- package/src/messages/content.js +0 -48
- package/src/messages/content.test.js +0 -314
- package/src/messages/core.js +0 -359
- package/src/messages/ensureThinkingBlock.test.js +0 -997
- package/src/messages/format.js +0 -973
- package/src/messages/formatAgentMessages.test.js +0 -2278
- package/src/messages/formatAgentMessages.tools.test.js +0 -362
- package/src/messages/formatMessage.test.js +0 -608
- package/src/messages/ids.js +0 -18
- package/src/messages/index.js +0 -9
- package/src/messages/labelContentByAgent.test.js +0 -725
- package/src/messages/prune.js +0 -438
- package/src/messages/reducer.js +0 -60
- package/src/messages/shiftIndexTokenCountMap.test.js +0 -63
- package/src/messages/summarize.js +0 -146
- package/src/messages/summarize.test.js +0 -332
- package/src/messages/tools.js +0 -90
- package/src/mockStream.js +0 -81
- package/src/prompts/collab.js +0 -7
- package/src/prompts/index.js +0 -3
- package/src/prompts/taskmanager.js +0 -58
- package/src/run.js +0 -427
- package/src/schemas/index.js +0 -3
- package/src/schemas/schema-preparation.test.js +0 -370
- package/src/schemas/validate.js +0 -314
- package/src/schemas/validate.test.js +0 -264
- package/src/scripts/abort.js +0 -127
- package/src/scripts/ant_web_search.js +0 -130
- package/src/scripts/ant_web_search_edge_case.js +0 -133
- package/src/scripts/ant_web_search_error_edge_case.js +0 -119
- package/src/scripts/args.js +0 -41
- package/src/scripts/bedrock-cache-debug.js +0 -186
- package/src/scripts/bedrock-content-aggregation-test.js +0 -195
- package/src/scripts/bedrock-merge-test.js +0 -80
- package/src/scripts/bedrock-parallel-tools-test.js +0 -150
- package/src/scripts/caching.js +0 -106
- package/src/scripts/cli.js +0 -152
- package/src/scripts/cli2.js +0 -119
- package/src/scripts/cli3.js +0 -163
- package/src/scripts/cli4.js +0 -165
- package/src/scripts/cli5.js +0 -165
- package/src/scripts/code_exec.js +0 -171
- package/src/scripts/code_exec_files.js +0 -180
- package/src/scripts/code_exec_multi_session.js +0 -185
- package/src/scripts/code_exec_ptc.js +0 -265
- package/src/scripts/code_exec_session.js +0 -217
- package/src/scripts/code_exec_simple.js +0 -120
- package/src/scripts/content.js +0 -111
- package/src/scripts/empty_input.js +0 -125
- package/src/scripts/handoff-test.js +0 -96
- package/src/scripts/image.js +0 -138
- package/src/scripts/memory.js +0 -83
- package/src/scripts/multi-agent-chain.js +0 -271
- package/src/scripts/multi-agent-conditional.js +0 -185
- package/src/scripts/multi-agent-document-review-chain.js +0 -171
- package/src/scripts/multi-agent-hybrid-flow.js +0 -264
- package/src/scripts/multi-agent-parallel-start.js +0 -214
- package/src/scripts/multi-agent-parallel.js +0 -346
- package/src/scripts/multi-agent-sequence.js +0 -184
- package/src/scripts/multi-agent-supervisor.js +0 -324
- package/src/scripts/multi-agent-test.js +0 -147
- package/src/scripts/parallel-asymmetric-tools-test.js +0 -202
- package/src/scripts/parallel-full-metadata-test.js +0 -176
- package/src/scripts/parallel-tools-test.js +0 -256
- package/src/scripts/programmatic_exec.js +0 -277
- package/src/scripts/programmatic_exec_agent.js +0 -168
- package/src/scripts/search.js +0 -118
- package/src/scripts/sequential-full-metadata-test.js +0 -143
- package/src/scripts/simple.js +0 -174
- package/src/scripts/single-agent-metadata-test.js +0 -152
- package/src/scripts/stream.js +0 -113
- package/src/scripts/test-custom-prompt-key.js +0 -132
- package/src/scripts/test-handoff-input.js +0 -143
- package/src/scripts/test-handoff-preamble.js +0 -227
- package/src/scripts/test-handoff-steering.js +0 -353
- package/src/scripts/test-multi-agent-list-handoff.js +0 -318
- package/src/scripts/test-parallel-agent-labeling.js +0 -253
- package/src/scripts/test-parallel-handoffs.js +0 -229
- package/src/scripts/test-thinking-handoff-bedrock.js +0 -132
- package/src/scripts/test-thinking-handoff.js +0 -132
- package/src/scripts/test-thinking-to-thinking-handoff-bedrock.js +0 -140
- package/src/scripts/test-tool-before-handoff-role-order.js +0 -223
- package/src/scripts/test-tools-before-handoff.js +0 -187
- package/src/scripts/test_code_api.js +0 -263
- package/src/scripts/thinking-bedrock.js +0 -128
- package/src/scripts/thinking-vertexai.js +0 -130
- package/src/scripts/thinking.js +0 -134
- package/src/scripts/tool_search.js +0 -114
- package/src/scripts/tools.js +0 -125
- package/src/specs/agent-handoffs-bedrock.integration.test.js +0 -280
- package/src/specs/agent-handoffs.test.js +0 -924
- package/src/specs/anthropic.simple.test.js +0 -287
- package/src/specs/azure.simple.test.js +0 -381
- package/src/specs/cache.simple.test.js +0 -282
- package/src/specs/custom-event-await.test.js +0 -148
- package/src/specs/deepseek.simple.test.js +0 -189
- package/src/specs/emergency-prune.test.js +0 -308
- package/src/specs/moonshot.simple.test.js +0 -237
- package/src/specs/observability.integration.test.js +0 -1337
- package/src/specs/openai.simple.test.js +0 -233
- package/src/specs/openrouter.simple.test.js +0 -202
- package/src/specs/prune.test.js +0 -733
- package/src/specs/reasoning.test.js +0 -144
- package/src/specs/spec.utils.js +0 -4
- package/src/specs/thinking-handoff.test.js +0 -486
- package/src/specs/thinking-prune.test.js +0 -600
- package/src/specs/token-distribution-edge-case.test.js +0 -246
- package/src/specs/token-memoization.test.js +0 -32
- package/src/specs/tokens.test.js +0 -49
- package/src/specs/tool-error.test.js +0 -139
- package/src/splitStream.js +0 -204
- package/src/splitStream.test.js +0 -504
- package/src/stream.js +0 -650
- package/src/stream.test.js +0 -225
- package/src/test/mockTools.js +0 -340
- package/src/tools/BrowserTools.js +0 -245
- package/src/tools/Calculator.js +0 -38
- package/src/tools/Calculator.test.js +0 -225
- package/src/tools/CodeExecutor.js +0 -233
- package/src/tools/ProgrammaticToolCalling.js +0 -602
- package/src/tools/StreamingToolCallBuffer.js +0 -179
- package/src/tools/ToolNode.js +0 -930
- package/src/tools/ToolSearch.js +0 -904
- package/src/tools/__tests__/BrowserTools.test.js +0 -306
- package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.js +0 -276
- package/src/tools/__tests__/ProgrammaticToolCalling.test.js +0 -807
- package/src/tools/__tests__/StreamingToolCallBuffer.test.js +0 -175
- package/src/tools/__tests__/ToolApproval.test.js +0 -675
- package/src/tools/__tests__/ToolNode.recovery.test.js +0 -200
- package/src/tools/__tests__/ToolNode.session.test.js +0 -319
- package/src/tools/__tests__/ToolSearch.integration.test.js +0 -125
- package/src/tools/__tests__/ToolSearch.test.js +0 -812
- package/src/tools/__tests__/handlers.test.js +0 -799
- package/src/tools/__tests__/truncation-recovery.integration.test.js +0 -362
- package/src/tools/handlers.js +0 -306
- package/src/tools/schema.js +0 -25
- package/src/tools/search/anthropic.js +0 -34
- package/src/tools/search/content.js +0 -116
- package/src/tools/search/content.test.js +0 -133
- package/src/tools/search/firecrawl.js +0 -173
- package/src/tools/search/format.js +0 -198
- package/src/tools/search/highlights.js +0 -241
- package/src/tools/search/index.js +0 -3
- package/src/tools/search/jina-reranker.test.js +0 -106
- package/src/tools/search/rerankers.js +0 -165
- package/src/tools/search/schema.js +0 -102
- package/src/tools/search/search.js +0 -561
- package/src/tools/search/serper-scraper.js +0 -126
- package/src/tools/search/test.js +0 -129
- package/src/tools/search/tool.js +0 -453
- package/src/tools/search/types.js +0 -2
- package/src/tools/search/utils.js +0 -59
- package/src/types/graph.js +0 -24
- package/src/types/graph.test.js +0 -192
- package/src/types/index.js +0 -7
- package/src/types/llm.js +0 -2
- package/src/types/messages.js +0 -2
- package/src/types/run.js +0 -2
- package/src/types/stream.js +0 -2
- package/src/types/tools.js +0 -2
- package/src/utils/contextAnalytics.js +0 -79
- package/src/utils/contextAnalytics.test.js +0 -166
- package/src/utils/events.js +0 -26
- package/src/utils/graph.js +0 -11
- package/src/utils/handlers.js +0 -65
- package/src/utils/index.js +0 -10
- package/src/utils/llm.js +0 -21
- package/src/utils/llmConfig.js +0 -205
- package/src/utils/logging.js +0 -37
- package/src/utils/misc.js +0 -51
- package/src/utils/run.js +0 -69
- package/src/utils/schema.js +0 -21
- package/src/utils/title.js +0 -119
- package/src/utils/tokens.js +0 -92
- package/src/utils/toonFormat.js +0 -379

package/src/llm/bedrock/utils/message_outputs.js
DELETED
@@ -1,269 +0,0 @@
-/**
- * Utility functions for converting Bedrock Converse responses to LangChain messages.
- * Ported from @langchain/aws common.js
- */
-import { ChatGenerationChunk } from '@langchain/core/outputs';
-import { AIMessage, AIMessageChunk } from '@langchain/core/messages';
-/**
- * Convert a Bedrock reasoning block delta to a LangChain partial reasoning block.
- */
-export function bedrockReasoningDeltaToLangchainPartialReasoningBlock(reasoningContent) {
-    const { text, redactedContent, signature } = reasoningContent;
-    if (typeof text === 'string') {
-        return {
-            type: 'reasoning_content',
-            reasoningText: { text },
-        };
-    }
-    if (signature != null) {
-        return {
-            type: 'reasoning_content',
-            reasoningText: { signature },
-        };
-    }
-    if (redactedContent != null) {
-        return {
-            type: 'reasoning_content',
-            redactedContent: Buffer.from(redactedContent).toString('base64'),
-        };
-    }
-    throw new Error('Invalid reasoning content');
-}
-/**
- * Convert a Bedrock reasoning block to a LangChain reasoning block.
- */
-export function bedrockReasoningBlockToLangchainReasoningBlock(reasoningContent) {
-    const { reasoningText, redactedContent } = reasoningContent;
-    if (reasoningText != null) {
-        return {
-            type: 'reasoning_content',
-            reasoningText: reasoningText,
-        };
-    }
-    if (redactedContent != null) {
-        return {
-            type: 'reasoning_content',
-            redactedContent: Buffer.from(redactedContent).toString('base64'),
-        };
-    }
-    throw new Error('Invalid reasoning content');
-}
-/**
- * Convert a Bedrock Converse message to a LangChain message.
- */
-export function convertConverseMessageToLangChainMessage(message, responseMetadata) {
-    if (message.content == null) {
-        throw new Error('No message content found in response.');
-    }
-    if (message.role !== 'assistant') {
-        throw new Error(`Unsupported message role received in ChatBedrockConverse response: ${message.role}`);
-    }
-    let requestId;
-    if ('$metadata' in responseMetadata &&
-        responseMetadata.$metadata != null &&
-        typeof responseMetadata.$metadata === 'object' &&
-        'requestId' in responseMetadata.$metadata) {
-        requestId = responseMetadata.$metadata.requestId;
-    }
-    let tokenUsage;
-    if (responseMetadata.usage != null) {
-        const usage = responseMetadata.usage;
-        const input_tokens = usage.inputTokens ?? 0;
-        const output_tokens = usage.outputTokens ?? 0;
-        const cacheRead = usage.cacheReadInputTokens;
-        const cacheWrite = usage.cacheWriteInputTokens;
-        tokenUsage = {
-            input_tokens,
-            output_tokens,
-            total_tokens: usage.totalTokens ?? input_tokens + output_tokens,
-        };
-        if (cacheRead != null || cacheWrite != null) {
-            tokenUsage.input_token_details = {
-                cache_read: cacheRead ?? 0,
-                cache_creation: cacheWrite ?? 0,
-            };
-        }
-    }
-    if (message.content.length === 1 &&
-        'text' in message.content[0] &&
-        typeof message.content[0].text === 'string') {
-        return new AIMessage({
-            content: message.content[0].text,
-            response_metadata: responseMetadata,
-            usage_metadata: tokenUsage,
-            id: requestId,
-        });
-    }
-    else {
-        const toolCalls = [];
-        // eslint-disable-next-line @typescript-eslint/no-explicit-any
-        const content = [];
-        message.content.forEach((c) => {
-            if ('toolUse' in c &&
-                c.toolUse != null &&
-                c.toolUse.name != null &&
-                c.toolUse.name !== '' &&
-                c.toolUse.input != null &&
-                typeof c.toolUse.input === 'object') {
-                toolCalls.push({
-                    id: c.toolUse.toolUseId,
-                    name: c.toolUse.name,
-                    args: c.toolUse.input,
-                    type: 'tool_call',
-                });
-            }
-            else if ('text' in c && typeof c.text === 'string') {
-                content.push({ type: 'text', text: c.text });
-            }
-            else if ('reasoningContent' in c && c.reasoningContent != null) {
-                content.push(bedrockReasoningBlockToLangchainReasoningBlock(c.reasoningContent));
-            }
-            else {
-                content.push(c);
-            }
-        });
-        return new AIMessage({
-            content: content.length ? content : '',
-            tool_calls: toolCalls.length ? toolCalls : undefined,
-            response_metadata: responseMetadata,
-            usage_metadata: tokenUsage,
-            id: requestId,
-        });
-    }
-}
-/**
- * Handle a content block delta event from Bedrock Converse stream.
- */
-export function handleConverseStreamContentBlockDelta(contentBlockDelta) {
-    if (contentBlockDelta.delta == null) {
-        throw new Error('No delta found in content block.');
-    }
-    if (typeof contentBlockDelta.delta.text === 'string') {
-        return new ChatGenerationChunk({
-            text: contentBlockDelta.delta.text,
-            message: new AIMessageChunk({
-                content: contentBlockDelta.delta.text,
-                response_metadata: {
-                    contentBlockIndex: contentBlockDelta.contentBlockIndex,
-                },
-            }),
-        });
-    }
-    else if (contentBlockDelta.delta.toolUse != null) {
-        const index = contentBlockDelta.contentBlockIndex;
-        return new ChatGenerationChunk({
-            text: '',
-            message: new AIMessageChunk({
-                content: '',
-                tool_call_chunks: [
-                    {
-                        args: contentBlockDelta.delta.toolUse.input,
-                        index,
-                        type: 'tool_call_chunk',
-                    },
-                ],
-                response_metadata: {
-                    contentBlockIndex: contentBlockDelta.contentBlockIndex,
-                },
-            }),
-        });
-    }
-    else if (contentBlockDelta.delta.reasoningContent != null) {
-        const reasoningBlock = bedrockReasoningDeltaToLangchainPartialReasoningBlock(contentBlockDelta.delta.reasoningContent);
-        let reasoningText = '';
-        if ('reasoningText' in reasoningBlock) {
-            reasoningText = reasoningBlock.reasoningText.text ?? '';
-        }
-        else if ('redactedContent' in reasoningBlock) {
-            reasoningText = reasoningBlock.redactedContent;
-        }
-        return new ChatGenerationChunk({
-            text: '',
-            message: new AIMessageChunk({
-                content: [reasoningBlock],
-                additional_kwargs: {
-                    // Set reasoning_content for stream handler to detect reasoning mode
-                    reasoning_content: reasoningText,
-                },
-                response_metadata: {
-                    contentBlockIndex: contentBlockDelta.contentBlockIndex,
-                },
-            }),
-        });
-    }
-    else {
-        // Silent healing: unknown content block types are logged and skipped
-        console.warn(`[Bedrock:SilentHeal] Unknown content block delta type — skipping: ${JSON.stringify(contentBlockDelta.delta, null, 2)}`);
-        return new ChatGenerationChunk({
-            text: '',
-            message: new AIMessageChunk({
-                content: '',
-                response_metadata: {
-                    contentBlockIndex: contentBlockDelta.contentBlockIndex,
-                },
-            }),
-        });
-    }
-}
-/**
- * Handle a content block start event from Bedrock Converse stream.
- */
-export function handleConverseStreamContentBlockStart(contentBlockStart) {
-    const index = contentBlockStart.contentBlockIndex;
-    if (contentBlockStart.start?.toolUse != null) {
-        return new ChatGenerationChunk({
-            text: '',
-            message: new AIMessageChunk({
-                content: '',
-                tool_call_chunks: [
-                    {
-                        name: contentBlockStart.start.toolUse.name,
-                        id: contentBlockStart.start.toolUse.toolUseId,
-                        index,
-                        type: 'tool_call_chunk',
-                    },
-                ],
-                response_metadata: {
-                    contentBlockIndex: index,
-                },
-            }),
-        });
-    }
-    // Return null for non-tool content block starts (text blocks don't need special handling)
-    return null;
-}
-/**
- * Handle a metadata event from Bedrock Converse stream.
- */
-export function handleConverseStreamMetadata(metadata, extra) {
-    const usage = metadata.usage;
-    const inputTokens = usage?.inputTokens ?? 0;
-    const outputTokens = usage?.outputTokens ?? 0;
-    const cacheRead = usage?.cacheReadInputTokens;
-    const cacheWrite = usage?.cacheWriteInputTokens;
-    const usage_metadata = {
-        input_tokens: inputTokens,
-        output_tokens: outputTokens,
-        total_tokens: usage?.totalTokens ?? inputTokens + outputTokens,
-    };
-    if (cacheRead != null || cacheWrite != null) {
-        usage_metadata.input_token_details = {
-            cache_read: cacheRead ?? 0,
-            cache_creation: cacheWrite ?? 0,
-        };
-    }
-    return new ChatGenerationChunk({
-        text: '',
-        message: new AIMessageChunk({
-            content: '',
-            usage_metadata: extra.streamUsage
-                ? usage_metadata
-                : undefined,
-            response_metadata: {
-                // Use the same key as returned from the Converse API
-                metadata,
-            },
-        }),
-    });
-}
-//# sourceMappingURL=message_outputs.js.map

package/src/llm/fake.js
DELETED
@@ -1,92 +0,0 @@
-import { ChatGenerationChunk } from '@langchain/core/outputs';
-import { AIMessageChunk } from '@langchain/core/messages';
-import { FakeListChatModel } from '@langchain/core/utils/testing';
-export class FakeChatModel extends FakeListChatModel {
-    splitStrategy;
-    toolCalls = [];
-    addedToolCalls = false;
-    constructor({ responses, sleep, emitCustomEvent, splitStrategy = { type: 'regex', value: /(?<=\s+)|(?=\s+)/ }, toolCalls = [] }) {
-        super({ responses, sleep, emitCustomEvent });
-        this.splitStrategy = splitStrategy;
-        this.toolCalls = toolCalls;
-    }
-    splitText(text) {
-        if (this.splitStrategy.type === 'regex') {
-            return text.split(this.splitStrategy.value);
-        }
-        else {
-            const chunkSize = this.splitStrategy.value;
-            const chunks = [];
-            for (let i = 0; i < text.length; i += chunkSize) {
-                chunks.push(text.slice(i, i + chunkSize));
-            }
-            return chunks;
-        }
-    }
-    _createResponseChunk(text, tool_call_chunks) {
-        return new ChatGenerationChunk({
-            text,
-            generationInfo: {},
-            message: new AIMessageChunk({
-                content: text,
-                tool_call_chunks,
-                additional_kwargs: tool_call_chunks ? {
-                    tool_calls: tool_call_chunks.map((toolCall) => ({
-                        index: toolCall.index ?? 0,
-                        id: toolCall.id ?? '',
-                        type: 'function',
-                        function: {
-                            name: toolCall.name ?? '',
-                            arguments: toolCall.args ?? '',
-                        },
-                    })),
-                } : undefined,
-            })
-        });
-    }
-    async *_streamResponseChunks(_messages, options, runManager) {
-        const response = this._currentResponse();
-        this._incrementResponse();
-        if (this.emitCustomEvent) {
-            await runManager?.handleCustomEvent('some_test_event', {
-                someval: true,
-            });
-        }
-        const chunks = this.splitText(response);
-        for await (const chunk of chunks) {
-            await this._sleepIfRequested();
-            if (options.thrownErrorString != null && options.thrownErrorString) {
-                throw new Error(options.thrownErrorString);
-            }
-            const responseChunk = super._createResponseChunk(chunk);
-            yield responseChunk;
-            void runManager?.handleLLMNewToken(chunk);
-        }
-        await this._sleepIfRequested();
-        if (this.toolCalls.length > 0 && !this.addedToolCalls) {
-            this.addedToolCalls = true;
-            const toolCallChunks = this.toolCalls.map((toolCall) => {
-                ;
-                return {
-                    name: toolCall.name,
-                    args: JSON.stringify(toolCall.args),
-                    id: toolCall.id,
-                    type: 'tool_call_chunk',
-                };
-            });
-            const responseChunk = this._createResponseChunk('', toolCallChunks);
-            yield responseChunk;
-            void runManager?.handleLLMNewToken('');
-        }
-    }
-}
-export function createFakeStreamingLLM({ responses, sleep, splitStrategy, toolCalls, }) {
-    return new FakeChatModel({
-        sleep,
-        responses,
-        emitCustomEvent: true,
-        splitStrategy,
-        toolCalls,
-    });
-}
-//# sourceMappingURL=fake.js.map

package/src/llm/google/index.js
DELETED
@@ -1,215 +0,0 @@
-/* eslint-disable @typescript-eslint/ban-ts-comment */
-import { AIMessageChunk } from '@langchain/core/messages';
-import { ChatGenerationChunk } from '@langchain/core/outputs';
-import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
-import { getEnvironmentVariable } from '@langchain/core/utils/env';
-import { GoogleGenerativeAI as GenerativeAI } from '@google/generative-ai';
-import { convertResponseContentToChatGenerationChunk, convertBaseMessagesToContent, mapGenerateContentResultToChatResult, } from './utils/common';
-export class CustomChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
-    thinkingConfig;
-    /**
-     * Override to add gemini-3 model support for multimodal and function calling thought signatures
-     */
-    get _isMultimodalModel() {
-        return (this.model.startsWith('gemini-1.5') ||
-            this.model.startsWith('gemini-2') ||
-            (this.model.startsWith('gemma-3-') &&
-                !this.model.startsWith('gemma-3-1b')) ||
-            this.model.startsWith('gemini-3'));
-    }
-    constructor(fields) {
-        super(fields);
-        this.model = fields.model.replace(/^models\//, '');
-        this.maxOutputTokens = fields.maxOutputTokens ?? this.maxOutputTokens;
-        if (this.maxOutputTokens != null && this.maxOutputTokens < 0) {
-            throw new Error('`maxOutputTokens` must be a positive integer');
-        }
-        this.temperature = fields.temperature ?? this.temperature;
-        if (this.temperature != null &&
-            (this.temperature < 0 || this.temperature > 2)) {
-            throw new Error('`temperature` must be in the range of [0.0,2.0]');
-        }
-        this.topP = fields.topP ?? this.topP;
-        if (this.topP != null && this.topP < 0) {
-            throw new Error('`topP` must be a positive integer');
-        }
-        if (this.topP != null && this.topP > 1) {
-            throw new Error('`topP` must be below 1.');
-        }
-        this.topK = fields.topK ?? this.topK;
-        if (this.topK != null && this.topK < 0) {
-            throw new Error('`topK` must be a positive integer');
-        }
-        this.stopSequences = fields.stopSequences ?? this.stopSequences;
-        this.apiKey = fields.apiKey ?? getEnvironmentVariable('GOOGLE_API_KEY');
-        if (this.apiKey == null || this.apiKey === '') {
-            throw new Error('Please set an API key for Google GenerativeAI ' +
-                'in the environment variable GOOGLE_API_KEY ' +
-                'or in the `apiKey` field of the ' +
-                'ChatGoogleGenerativeAI constructor');
-        }
-        this.safetySettings = fields.safetySettings ?? this.safetySettings;
-        if (this.safetySettings && this.safetySettings.length > 0) {
-            const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category));
-            if (safetySettingsSet.size !== this.safetySettings.length) {
-                throw new Error('The categories in `safetySettings` array must be unique');
-            }
-        }
-        this.thinkingConfig = fields.thinkingConfig ?? this.thinkingConfig;
-        this.streaming = fields.streaming ?? this.streaming;
-        this.json = fields.json;
-        // @ts-ignore - Accessing private property from parent class
-        this.client = new GenerativeAI(this.apiKey).getGenerativeModel({
-            model: this.model,
-            safetySettings: this.safetySettings,
-            generationConfig: {
-                stopSequences: this.stopSequences,
-                maxOutputTokens: this.maxOutputTokens,
-                temperature: this.temperature,
-                topP: this.topP,
-                topK: this.topK,
-                ...(this.json != null
-                    ? { responseMimeType: 'application/json' }
-                    : {}),
-            },
-        }, {
-            apiVersion: fields.apiVersion,
-            baseUrl: fields.baseUrl,
-            customHeaders: fields.customHeaders,
-        });
-        this.streamUsage = fields.streamUsage ?? this.streamUsage;
-    }
-    static lc_name() {
-        return 'IllumaGoogleGenerativeAI';
-    }
-    /**
-     * Helper function to convert Gemini API usage metadata to LangChain format
-     * Includes support for cached tokens and tier-based tracking for gemini-3-pro-preview
-     */
-    _convertToUsageMetadata(usageMetadata, model) {
-        if (!usageMetadata) {
-            return undefined;
-        }
-        const output = {
-            input_tokens: usageMetadata.promptTokenCount ?? 0,
-            output_tokens: (usageMetadata.candidatesTokenCount ?? 0) +
-                (usageMetadata.thoughtsTokenCount ?? 0),
-            total_tokens: usageMetadata.totalTokenCount ?? 0,
-        };
-        if (usageMetadata.cachedContentTokenCount) {
-            output.input_token_details ??= {};
-            output.input_token_details.cache_read =
-                usageMetadata.cachedContentTokenCount;
-        }
-        // gemini-3-pro-preview has bracket based tracking of tokens per request
-        if (model === 'gemini-3-pro-preview') {
-            const over200k = Math.max(0, (usageMetadata.promptTokenCount ?? 0) - 200000);
-            const cachedOver200k = Math.max(0, (usageMetadata.cachedContentTokenCount ?? 0) - 200000);
-            if (over200k) {
-                output.input_token_details = {
-                    ...output.input_token_details,
-                    over_200k: over200k,
-                };
-            }
-            if (cachedOver200k) {
-                output.input_token_details = {
-                    ...output.input_token_details,
-                    cache_read_over_200k: cachedOver200k,
-                };
-            }
-        }
-        return output;
-    }
-    invocationParams(options) {
-        const params = super.invocationParams(options);
-        if (this.thinkingConfig) {
-            /** @ts-ignore */
-            this.client.generationConfig = {
-                /** @ts-ignore */
-                ...this.client.generationConfig,
-                /** @ts-ignore */
-                thinkingConfig: this.thinkingConfig,
-            };
-        }
-        return params;
-    }
-    async _generate(messages, options, runManager) {
-        const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel, this.useSystemInstruction, this.model);
-        let actualPrompt = prompt;
-        if (prompt?.[0].role === 'system') {
-            const [systemInstruction] = prompt;
-            /** @ts-ignore */
-            this.client.systemInstruction = systemInstruction;
-            actualPrompt = prompt.slice(1);
-        }
-        const parameters = this.invocationParams(options);
-        const request = {
-            ...parameters,
-            contents: actualPrompt,
-        };
-        const res = await this.caller.callWithOptions({ signal: options.signal }, async () =>
-        /** @ts-ignore */
-        this.client.generateContent(request));
-        const response = res.response;
-        const usageMetadata = this._convertToUsageMetadata(
-        /** @ts-ignore */
-        response.usageMetadata, this.model);
-        /** @ts-ignore */
-        const generationResult = mapGenerateContentResultToChatResult(response, {
-            usageMetadata,
-        });
-        await runManager?.handleLLMNewToken(generationResult.generations[0].text || '', undefined, undefined, undefined, undefined, undefined);
-        return generationResult;
-    }
-    async *_streamResponseChunks(messages, options, runManager) {
-        const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel, this.useSystemInstruction, this.model);
-        let actualPrompt = prompt;
-        if (prompt?.[0].role === 'system') {
-            const [systemInstruction] = prompt;
-            /** @ts-ignore */
-            this.client.systemInstruction = systemInstruction;
-            actualPrompt = prompt.slice(1);
-        }
-        const parameters = this.invocationParams(options);
-        const request = {
-            ...parameters,
-            contents: actualPrompt,
-        };
-        const stream = await this.caller.callWithOptions({ signal: options.signal }, async () => {
-            /** @ts-ignore */
-            const { stream } = await this.client.generateContentStream(request);
-            return stream;
-        });
-        let index = 0;
-        let lastUsageMetadata;
-        for await (const response of stream) {
-            if ('usageMetadata' in response &&
-                this.streamUsage !== false &&
-                options.streamUsage !== false) {
-                lastUsageMetadata = this._convertToUsageMetadata(response.usageMetadata, this.model);
-            }
-            const chunk = convertResponseContentToChatGenerationChunk(response, {
-                usageMetadata: undefined,
-                index,
-            });
-            index += 1;
-            if (!chunk) {
-                continue;
-            }
-            yield chunk;
-            await runManager?.handleLLMNewToken(chunk.text || '', undefined, undefined, undefined, undefined, { chunk });
-        }
-        if (lastUsageMetadata) {
-            const finalChunk = new ChatGenerationChunk({
-                text: '',
-                message: new AIMessageChunk({
-                    content: '',
-                    usage_metadata: lastUsageMetadata,
-                }),
-            });
-            yield finalChunk;
-            await runManager?.handleLLMNewToken(finalChunk.text || '', undefined, undefined, undefined, undefined, { chunk: finalChunk });
-        }
-    }
-}
-//# sourceMappingURL=index.js.map

package/src/llm/google/types.js
DELETED
@@ -1,12 +0,0 @@
-/** Enum for content modality types */
-var Modality;
-(function (Modality) {
-    Modality["MODALITY_UNSPECIFIED"] = "MODALITY_UNSPECIFIED";
-    Modality["TEXT"] = "TEXT";
-    Modality["IMAGE"] = "IMAGE";
-    Modality["VIDEO"] = "VIDEO";
-    Modality["AUDIO"] = "AUDIO";
-    Modality["DOCUMENT"] = "DOCUMENT";
-})(Modality || (Modality = {}));
-export {};
-//# sourceMappingURL=types.js.map