@librechat/agents 3.1.57 → 3.1.61
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/agents/AgentContext.cjs +326 -62
- package/dist/cjs/agents/AgentContext.cjs.map +1 -1
- package/dist/cjs/common/enum.cjs +13 -0
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/events.cjs +7 -27
- package/dist/cjs/events.cjs.map +1 -1
- package/dist/cjs/graphs/Graph.cjs +303 -222
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +4 -4
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/bedrock/utils/message_inputs.cjs +6 -2
- package/dist/cjs/llm/bedrock/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/init.cjs +60 -0
- package/dist/cjs/llm/init.cjs.map +1 -0
- package/dist/cjs/llm/invoke.cjs +90 -0
- package/dist/cjs/llm/invoke.cjs.map +1 -0
- package/dist/cjs/llm/openai/index.cjs +2 -0
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/cjs/llm/request.cjs +41 -0
- package/dist/cjs/llm/request.cjs.map +1 -0
- package/dist/cjs/main.cjs +40 -0
- package/dist/cjs/main.cjs.map +1 -1
- package/dist/cjs/messages/cache.cjs +76 -89
- package/dist/cjs/messages/cache.cjs.map +1 -1
- package/dist/cjs/messages/contextPruning.cjs +156 -0
- package/dist/cjs/messages/contextPruning.cjs.map +1 -0
- package/dist/cjs/messages/contextPruningSettings.cjs +53 -0
- package/dist/cjs/messages/contextPruningSettings.cjs.map +1 -0
- package/dist/cjs/messages/core.cjs +23 -37
- package/dist/cjs/messages/core.cjs.map +1 -1
- package/dist/cjs/messages/format.cjs +156 -11
- package/dist/cjs/messages/format.cjs.map +1 -1
- package/dist/cjs/messages/prune.cjs +1161 -49
- package/dist/cjs/messages/prune.cjs.map +1 -1
- package/dist/cjs/messages/reducer.cjs +87 -0
- package/dist/cjs/messages/reducer.cjs.map +1 -0
- package/dist/cjs/run.cjs +81 -42
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/cjs/stream.cjs +54 -7
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/summarization/index.cjs +75 -0
- package/dist/cjs/summarization/index.cjs.map +1 -0
- package/dist/cjs/summarization/node.cjs +663 -0
- package/dist/cjs/summarization/node.cjs.map +1 -0
- package/dist/cjs/tools/ToolNode.cjs +16 -8
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/cjs/tools/handlers.cjs +2 -0
- package/dist/cjs/tools/handlers.cjs.map +1 -1
- package/dist/cjs/utils/errors.cjs +115 -0
- package/dist/cjs/utils/errors.cjs.map +1 -0
- package/dist/cjs/utils/events.cjs +17 -0
- package/dist/cjs/utils/events.cjs.map +1 -1
- package/dist/cjs/utils/handlers.cjs +16 -0
- package/dist/cjs/utils/handlers.cjs.map +1 -1
- package/dist/cjs/utils/llm.cjs +10 -0
- package/dist/cjs/utils/llm.cjs.map +1 -1
- package/dist/cjs/utils/tokens.cjs +247 -14
- package/dist/cjs/utils/tokens.cjs.map +1 -1
- package/dist/cjs/utils/truncation.cjs +107 -0
- package/dist/cjs/utils/truncation.cjs.map +1 -0
- package/dist/esm/agents/AgentContext.mjs +325 -61
- package/dist/esm/agents/AgentContext.mjs.map +1 -1
- package/dist/esm/common/enum.mjs +13 -0
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/events.mjs +8 -28
- package/dist/esm/events.mjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +307 -226
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs +4 -4
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/bedrock/utils/message_inputs.mjs +6 -2
- package/dist/esm/llm/bedrock/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/init.mjs +58 -0
- package/dist/esm/llm/init.mjs.map +1 -0
- package/dist/esm/llm/invoke.mjs +87 -0
- package/dist/esm/llm/invoke.mjs.map +1 -0
- package/dist/esm/llm/openai/index.mjs +2 -0
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/esm/llm/request.mjs +38 -0
- package/dist/esm/llm/request.mjs.map +1 -0
- package/dist/esm/main.mjs +13 -3
- package/dist/esm/main.mjs.map +1 -1
- package/dist/esm/messages/cache.mjs +76 -89
- package/dist/esm/messages/cache.mjs.map +1 -1
- package/dist/esm/messages/contextPruning.mjs +154 -0
- package/dist/esm/messages/contextPruning.mjs.map +1 -0
- package/dist/esm/messages/contextPruningSettings.mjs +50 -0
- package/dist/esm/messages/contextPruningSettings.mjs.map +1 -0
- package/dist/esm/messages/core.mjs +23 -37
- package/dist/esm/messages/core.mjs.map +1 -1
- package/dist/esm/messages/format.mjs +156 -11
- package/dist/esm/messages/format.mjs.map +1 -1
- package/dist/esm/messages/prune.mjs +1158 -52
- package/dist/esm/messages/prune.mjs.map +1 -1
- package/dist/esm/messages/reducer.mjs +83 -0
- package/dist/esm/messages/reducer.mjs.map +1 -0
- package/dist/esm/run.mjs +82 -43
- package/dist/esm/run.mjs.map +1 -1
- package/dist/esm/stream.mjs +54 -7
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/summarization/index.mjs +73 -0
- package/dist/esm/summarization/index.mjs.map +1 -0
- package/dist/esm/summarization/node.mjs +659 -0
- package/dist/esm/summarization/node.mjs.map +1 -0
- package/dist/esm/tools/ToolNode.mjs +16 -8
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/esm/tools/handlers.mjs +2 -0
- package/dist/esm/tools/handlers.mjs.map +1 -1
- package/dist/esm/utils/errors.mjs +111 -0
- package/dist/esm/utils/errors.mjs.map +1 -0
- package/dist/esm/utils/events.mjs +17 -1
- package/dist/esm/utils/events.mjs.map +1 -1
- package/dist/esm/utils/handlers.mjs +16 -0
- package/dist/esm/utils/handlers.mjs.map +1 -1
- package/dist/esm/utils/llm.mjs +10 -1
- package/dist/esm/utils/llm.mjs.map +1 -1
- package/dist/esm/utils/tokens.mjs +245 -15
- package/dist/esm/utils/tokens.mjs.map +1 -1
- package/dist/esm/utils/truncation.mjs +102 -0
- package/dist/esm/utils/truncation.mjs.map +1 -0
- package/dist/types/agents/AgentContext.d.ts +124 -6
- package/dist/types/common/enum.d.ts +14 -1
- package/dist/types/graphs/Graph.d.ts +22 -27
- package/dist/types/index.d.ts +5 -0
- package/dist/types/llm/init.d.ts +18 -0
- package/dist/types/llm/invoke.d.ts +48 -0
- package/dist/types/llm/request.d.ts +14 -0
- package/dist/types/messages/contextPruning.d.ts +42 -0
- package/dist/types/messages/contextPruningSettings.d.ts +44 -0
- package/dist/types/messages/core.d.ts +1 -1
- package/dist/types/messages/format.d.ts +17 -1
- package/dist/types/messages/index.d.ts +3 -0
- package/dist/types/messages/prune.d.ts +162 -1
- package/dist/types/messages/reducer.d.ts +18 -0
- package/dist/types/run.d.ts +12 -1
- package/dist/types/summarization/index.d.ts +20 -0
- package/dist/types/summarization/node.d.ts +29 -0
- package/dist/types/tools/ToolNode.d.ts +3 -1
- package/dist/types/types/graph.d.ts +44 -6
- package/dist/types/types/index.d.ts +1 -0
- package/dist/types/types/run.d.ts +30 -0
- package/dist/types/types/stream.d.ts +31 -4
- package/dist/types/types/summarize.d.ts +47 -0
- package/dist/types/types/tools.d.ts +7 -0
- package/dist/types/utils/errors.d.ts +28 -0
- package/dist/types/utils/events.d.ts +13 -0
- package/dist/types/utils/index.d.ts +2 -0
- package/dist/types/utils/llm.d.ts +4 -0
- package/dist/types/utils/tokens.d.ts +14 -1
- package/dist/types/utils/truncation.d.ts +49 -0
- package/package.json +3 -3
- package/src/agents/AgentContext.ts +388 -58
- package/src/agents/__tests__/AgentContext.test.ts +265 -5
- package/src/common/enum.ts +13 -0
- package/src/events.ts +9 -39
- package/src/graphs/Graph.ts +468 -331
- package/src/index.ts +7 -0
- package/src/llm/anthropic/llm.spec.ts +3 -3
- package/src/llm/anthropic/utils/message_inputs.ts +6 -4
- package/src/llm/bedrock/llm.spec.ts +1 -1
- package/src/llm/bedrock/utils/message_inputs.ts +6 -2
- package/src/llm/init.ts +63 -0
- package/src/llm/invoke.ts +144 -0
- package/src/llm/request.ts +55 -0
- package/src/messages/__tests__/observationMasking.test.ts +221 -0
- package/src/messages/cache.ts +77 -102
- package/src/messages/contextPruning.ts +191 -0
- package/src/messages/contextPruningSettings.ts +90 -0
- package/src/messages/core.ts +32 -53
- package/src/messages/ensureThinkingBlock.test.ts +39 -39
- package/src/messages/format.ts +227 -15
- package/src/messages/formatAgentMessages.test.ts +511 -1
- package/src/messages/index.ts +3 -0
- package/src/messages/prune.ts +1548 -62
- package/src/messages/reducer.ts +22 -0
- package/src/run.ts +104 -51
- package/src/scripts/bedrock-merge-test.ts +1 -1
- package/src/scripts/test-thinking-handoff-bedrock.ts +1 -1
- package/src/scripts/test-thinking-handoff.ts +1 -1
- package/src/scripts/thinking-bedrock.ts +1 -1
- package/src/scripts/thinking.ts +1 -1
- package/src/specs/anthropic.simple.test.ts +1 -1
- package/src/specs/multi-agent-summarization.test.ts +396 -0
- package/src/specs/prune.test.ts +1196 -23
- package/src/specs/summarization-unit.test.ts +868 -0
- package/src/specs/summarization.test.ts +3827 -0
- package/src/specs/summarize-prune.test.ts +376 -0
- package/src/specs/thinking-handoff.test.ts +10 -10
- package/src/specs/thinking-prune.test.ts +7 -4
- package/src/specs/token-accounting-e2e.test.ts +1034 -0
- package/src/specs/token-accounting-pipeline.test.ts +882 -0
- package/src/specs/token-distribution-edge-case.test.ts +25 -26
- package/src/splitStream.test.ts +42 -33
- package/src/stream.ts +64 -11
- package/src/summarization/__tests__/aggregator.test.ts +153 -0
- package/src/summarization/__tests__/node.test.ts +708 -0
- package/src/summarization/__tests__/trigger.test.ts +50 -0
- package/src/summarization/index.ts +102 -0
- package/src/summarization/node.ts +982 -0
- package/src/tools/ToolNode.ts +25 -3
- package/src/types/graph.ts +62 -7
- package/src/types/index.ts +1 -0
- package/src/types/run.ts +32 -0
- package/src/types/stream.ts +45 -5
- package/src/types/summarize.ts +58 -0
- package/src/types/tools.ts +7 -0
- package/src/utils/errors.ts +117 -0
- package/src/utils/events.ts +31 -0
- package/src/utils/handlers.ts +18 -0
- package/src/utils/index.ts +2 -0
- package/src/utils/llm.ts +12 -0
- package/src/utils/tokens.ts +336 -18
- package/src/utils/truncation.ts +124 -0
- package/src/scripts/image.ts +0 -180
package/src/utils/tokens.ts
CHANGED
|
@@ -4,6 +4,289 @@ import { ContentTypes } from '@/common/enum';
|
|
|
4
4
|
|
|
5
5
|
export type EncodingName = 'o200k_base' | 'claude';
|
|
6
6
|
|
|
7
|
+
/** Anthropic minimum image token cost. */
|
|
8
|
+
const ANTHROPIC_IMAGE_MIN_TOKENS = 1024;
|
|
9
|
+
/** Anthropic divisor: tokens = width × height / 750. */
|
|
10
|
+
const ANTHROPIC_IMAGE_DIVISOR = 750;
|
|
11
|
+
/** OpenAI low-detail fixed cost. */
|
|
12
|
+
const OPENAI_IMAGE_LOW_TOKENS = 85;
|
|
13
|
+
/** OpenAI high-detail tile size. */
|
|
14
|
+
const OPENAI_IMAGE_TILE_SIZE = 512;
|
|
15
|
+
/** OpenAI high-detail tokens per tile. */
|
|
16
|
+
const OPENAI_IMAGE_TOKENS_PER_TILE = 170;
|
|
17
|
+
/** Google Gemini fixed per-image cost. */
|
|
18
|
+
const _GEMINI_IMAGE_TOKENS = 258;
|
|
19
|
+
/** Safety margin for image and document token estimates (5% overestimate). */
|
|
20
|
+
const IMAGE_TOKEN_SAFETY_MARGIN = 1.05;
|
|
21
|
+
|
|
22
|
+
/**
|
|
23
|
+
* Anthropic PDF: each page costs image tokens + text tokens.
|
|
24
|
+
* Typical range is 1500-3000 tokens/page. Using 2000 as midpoint.
|
|
25
|
+
*/
|
|
26
|
+
const ANTHROPIC_PDF_TOKENS_PER_PAGE = 2000;
|
|
27
|
+
/** OpenAI PDF: each page rendered as high-detail image. ~1500 tokens typical. */
|
|
28
|
+
const OPENAI_PDF_TOKENS_PER_PAGE = 1500;
|
|
29
|
+
/** Gemini PDF: fixed 258 tokens per page. */
|
|
30
|
+
const _GEMINI_PDF_TOKENS_PER_PAGE = 258;
|
|
31
|
+
/** Approximate base64 bytes per PDF page for page count estimation. */
|
|
32
|
+
const BASE64_BYTES_PER_PDF_PAGE = 75_000;
|
|
33
|
+
/** Fallback token cost for URL-referenced documents without local data. */
|
|
34
|
+
const URL_DOCUMENT_FALLBACK_TOKENS = 2000;
|
|
35
|
+
|
|
36
|
+
/**
|
|
37
|
+
* Extracts image dimensions from the first bytes of a base64-encoded
|
|
38
|
+
* PNG, JPEG, GIF, or WebP without decoding the full image.
|
|
39
|
+
* Returns null if the format is unrecognized or data is too short.
|
|
40
|
+
*/
|
|
41
|
+
export function extractImageDimensions(
|
|
42
|
+
base64Data: string
|
|
43
|
+
): { width: number; height: number } | null {
|
|
44
|
+
const raw = base64Data.startsWith('data:')
|
|
45
|
+
? base64Data.slice(base64Data.indexOf(',') + 1)
|
|
46
|
+
: base64Data;
|
|
47
|
+
|
|
48
|
+
if (raw.length < 32) {
|
|
49
|
+
return null;
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
const bytes = new Uint8Array(Buffer.from(raw.slice(0, 80), 'base64'));
|
|
53
|
+
|
|
54
|
+
if (bytes[0] === 0x89 && bytes[1] === 0x50) {
|
|
55
|
+
// PNG: width at bytes 16-19, height at 20-23 (big-endian)
|
|
56
|
+
const width =
|
|
57
|
+
(bytes[16] << 24) | (bytes[17] << 16) | (bytes[18] << 8) | bytes[19];
|
|
58
|
+
const height =
|
|
59
|
+
(bytes[20] << 24) | (bytes[21] << 16) | (bytes[22] << 8) | bytes[23];
|
|
60
|
+
return { width, height };
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
if (bytes[0] === 0xff && bytes[1] === 0xd8) {
|
|
64
|
+
// JPEG: scan for SOF0 (0xFFC0) or SOF2 (0xFFC2) marker
|
|
65
|
+
for (let i = 2; i < bytes.length - 9; i++) {
|
|
66
|
+
if (
|
|
67
|
+
bytes[i] === 0xff &&
|
|
68
|
+
(bytes[i + 1] === 0xc0 || bytes[i + 1] === 0xc2)
|
|
69
|
+
) {
|
|
70
|
+
const height = (bytes[i + 5] << 8) | bytes[i + 6];
|
|
71
|
+
const width = (bytes[i + 7] << 8) | bytes[i + 8];
|
|
72
|
+
return { width, height };
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
return null;
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
if (bytes[0] === 0x47 && bytes[1] === 0x49 && bytes[2] === 0x46) {
|
|
79
|
+
// GIF: width at bytes 6-7, height at 8-9 (little-endian)
|
|
80
|
+
const width = bytes[6] | (bytes[7] << 8);
|
|
81
|
+
const height = bytes[8] | (bytes[9] << 8);
|
|
82
|
+
return { width, height };
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
if (
|
|
86
|
+
bytes[0] === 0x52 &&
|
|
87
|
+
bytes[1] === 0x49 &&
|
|
88
|
+
bytes[2] === 0x46 &&
|
|
89
|
+
bytes[3] === 0x46 &&
|
|
90
|
+
bytes[8] === 0x57 &&
|
|
91
|
+
bytes[9] === 0x45 &&
|
|
92
|
+
bytes[10] === 0x42 &&
|
|
93
|
+
bytes[11] === 0x50
|
|
94
|
+
) {
|
|
95
|
+
// WebP VP8: width at bytes 26-27, height at 28-29
|
|
96
|
+
if (bytes.length > 29) {
|
|
97
|
+
const width = (bytes[26] | (bytes[27] << 8)) & 0x3fff;
|
|
98
|
+
const height = (bytes[28] | (bytes[29] << 8)) & 0x3fff;
|
|
99
|
+
return { width, height };
|
|
100
|
+
}
|
|
101
|
+
return null;
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
return null;
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
/** Estimates image token cost for Anthropic/Bedrock (Claude). */
|
|
108
|
+
export function estimateAnthropicImageTokens(
|
|
109
|
+
width: number,
|
|
110
|
+
height: number
|
|
111
|
+
): number {
|
|
112
|
+
return Math.max(
|
|
113
|
+
ANTHROPIC_IMAGE_MIN_TOKENS,
|
|
114
|
+
Math.ceil((width * height) / ANTHROPIC_IMAGE_DIVISOR)
|
|
115
|
+
);
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
/** Estimates image token cost for OpenAI (high detail). */
|
|
119
|
+
export function estimateOpenAIImageTokens(
|
|
120
|
+
width: number,
|
|
121
|
+
height: number,
|
|
122
|
+
detail: string = 'high'
|
|
123
|
+
): number {
|
|
124
|
+
if (detail === 'low') {
|
|
125
|
+
return OPENAI_IMAGE_LOW_TOKENS;
|
|
126
|
+
}
|
|
127
|
+
const tiles =
|
|
128
|
+
Math.ceil(width / OPENAI_IMAGE_TILE_SIZE) *
|
|
129
|
+
Math.ceil(height / OPENAI_IMAGE_TILE_SIZE);
|
|
130
|
+
return OPENAI_IMAGE_LOW_TOKENS + tiles * OPENAI_IMAGE_TOKENS_PER_TILE;
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
/**
|
|
134
|
+
* Estimates token cost for an image content block.
|
|
135
|
+
* Extracts dimensions from base64 header when available.
|
|
136
|
+
* Falls back to Anthropic minimum (1024) when dimensions can't be determined.
|
|
137
|
+
*/
|
|
138
|
+
function estimateImageBlockTokens(
|
|
139
|
+
block: Record<string, unknown>,
|
|
140
|
+
encoding: EncodingName
|
|
141
|
+
): number {
|
|
142
|
+
let base64Data: string | undefined;
|
|
143
|
+
|
|
144
|
+
if (block.type === ContentTypes.IMAGE_URL || block.type === 'image_url') {
|
|
145
|
+
const imageUrl = block.image_url as string | { url?: string } | undefined;
|
|
146
|
+
const url = typeof imageUrl === 'string' ? imageUrl : imageUrl?.url;
|
|
147
|
+
if (typeof url === 'string' && url.startsWith('data:')) {
|
|
148
|
+
base64Data = url;
|
|
149
|
+
} else {
|
|
150
|
+
return ANTHROPIC_IMAGE_MIN_TOKENS;
|
|
151
|
+
}
|
|
152
|
+
} else if (block.type === 'image') {
|
|
153
|
+
const source = block.source as { type?: string; data?: string } | undefined;
|
|
154
|
+
if (source?.type === 'base64' && typeof source.data === 'string') {
|
|
155
|
+
base64Data = source.data;
|
|
156
|
+
} else {
|
|
157
|
+
return ANTHROPIC_IMAGE_MIN_TOKENS;
|
|
158
|
+
}
|
|
159
|
+
} else {
|
|
160
|
+
return ANTHROPIC_IMAGE_MIN_TOKENS;
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
const dims = extractImageDimensions(base64Data);
|
|
164
|
+
if (dims == null) {
|
|
165
|
+
return ANTHROPIC_IMAGE_MIN_TOKENS;
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
if (encoding === 'claude') {
|
|
169
|
+
return estimateAnthropicImageTokens(dims.width, dims.height);
|
|
170
|
+
}
|
|
171
|
+
return estimateOpenAIImageTokens(dims.width, dims.height);
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
/**
|
|
175
|
+
* Estimates token cost for a document/file content block.
|
|
176
|
+
* Handles both LangChain standard format (`type: 'file'` with `source_type`)
|
|
177
|
+
* and Anthropic format (`type: 'document'` with `source`).
|
|
178
|
+
*
|
|
179
|
+
* - Plain text: tokenized directly via `getTokenCount`.
|
|
180
|
+
* - Base64 PDF: page count estimated from base64 length × per-page cost.
|
|
181
|
+
* - URL reference: conservative flat estimate.
|
|
182
|
+
*/
|
|
183
|
+
function estimateDocumentBlockTokens(
|
|
184
|
+
block: Record<string, unknown>,
|
|
185
|
+
encoding: EncodingName,
|
|
186
|
+
getTokenCount: (text: string) => number
|
|
187
|
+
): number {
|
|
188
|
+
const pdfTokensPerPage =
|
|
189
|
+
encoding === 'claude'
|
|
190
|
+
? ANTHROPIC_PDF_TOKENS_PER_PAGE
|
|
191
|
+
: OPENAI_PDF_TOKENS_PER_PAGE;
|
|
192
|
+
|
|
193
|
+
// LangChain standard format: type='file', source_type, data/text/url, mime_type
|
|
194
|
+
const sourceType = block.source_type as string | undefined;
|
|
195
|
+
if (typeof sourceType === 'string') {
|
|
196
|
+
const mimeType = ((block.mime_type as string | undefined) ?? '').split(
|
|
197
|
+
';'
|
|
198
|
+
)[0];
|
|
199
|
+
|
|
200
|
+
if (sourceType === 'text' && typeof block.text === 'string') {
|
|
201
|
+
return getTokenCount(block.text as string);
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
if (sourceType === 'base64' && typeof block.data === 'string') {
|
|
205
|
+
if (mimeType === 'application/pdf' || mimeType === '') {
|
|
206
|
+
const pageEstimate = Math.max(
|
|
207
|
+
1,
|
|
208
|
+
Math.ceil((block.data as string).length / BASE64_BYTES_PER_PDF_PAGE)
|
|
209
|
+
);
|
|
210
|
+
return pageEstimate * pdfTokensPerPage;
|
|
211
|
+
}
|
|
212
|
+
// Image inside a file block — delegate to image estimation
|
|
213
|
+
if (mimeType.startsWith('image/')) {
|
|
214
|
+
return estimateImageBlockTokens(
|
|
215
|
+
{
|
|
216
|
+
...block,
|
|
217
|
+
type: 'image',
|
|
218
|
+
source: { type: 'base64', data: block.data },
|
|
219
|
+
},
|
|
220
|
+
encoding
|
|
221
|
+
);
|
|
222
|
+
}
|
|
223
|
+
return getTokenCount(block.data as string);
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
if (sourceType === 'url') {
|
|
227
|
+
return URL_DOCUMENT_FALLBACK_TOKENS;
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
return URL_DOCUMENT_FALLBACK_TOKENS;
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
// Anthropic format: type='document', source: { type, data, media_type }
|
|
234
|
+
const source = block.source as
|
|
235
|
+
| {
|
|
236
|
+
type?: string;
|
|
237
|
+
data?: string;
|
|
238
|
+
media_type?: string;
|
|
239
|
+
content?: unknown[];
|
|
240
|
+
}
|
|
241
|
+
| undefined;
|
|
242
|
+
|
|
243
|
+
if (source == null) {
|
|
244
|
+
return URL_DOCUMENT_FALLBACK_TOKENS;
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
if (source.type === 'text' && typeof source.data === 'string') {
|
|
248
|
+
return getTokenCount(source.data);
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
if (source.type === 'base64' && typeof source.data === 'string') {
|
|
252
|
+
const mediaType = (source.media_type ?? '').split(';')[0];
|
|
253
|
+
if (mediaType === 'application/pdf' || mediaType === '') {
|
|
254
|
+
const pageEstimate = Math.max(
|
|
255
|
+
1,
|
|
256
|
+
Math.ceil(source.data.length / BASE64_BYTES_PER_PDF_PAGE)
|
|
257
|
+
);
|
|
258
|
+
return pageEstimate * pdfTokensPerPage;
|
|
259
|
+
}
|
|
260
|
+
if (mediaType.startsWith('image/')) {
|
|
261
|
+
return estimateImageBlockTokens(
|
|
262
|
+
{ type: 'image', source: { type: 'base64', data: source.data } },
|
|
263
|
+
encoding
|
|
264
|
+
);
|
|
265
|
+
}
|
|
266
|
+
return getTokenCount(source.data);
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
if (source.type === 'url') {
|
|
270
|
+
return URL_DOCUMENT_FALLBACK_TOKENS;
|
|
271
|
+
}
|
|
272
|
+
|
|
273
|
+
// content-type source (wraps other blocks like images)
|
|
274
|
+
if (source.type === 'content' && Array.isArray(source.content)) {
|
|
275
|
+
let total = 0;
|
|
276
|
+
for (const inner of source.content) {
|
|
277
|
+
if (inner != null && typeof inner === 'object' && 'type' in inner) {
|
|
278
|
+
const innerBlock = inner as Record<string, unknown>;
|
|
279
|
+
if (innerBlock.type === 'image') {
|
|
280
|
+
total += estimateImageBlockTokens(innerBlock, encoding);
|
|
281
|
+
}
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
return total > 0 ? total : URL_DOCUMENT_FALLBACK_TOKENS;
|
|
285
|
+
}
|
|
286
|
+
|
|
287
|
+
return URL_DOCUMENT_FALLBACK_TOKENS;
|
|
288
|
+
}
|
|
289
|
+
|
|
7
290
|
const tokenizers: Partial<Record<EncodingName, Tokenizer>> = {};
|
|
8
291
|
|
|
9
292
|
async function getTokenizer(
|
|
@@ -31,43 +314,68 @@ export function encodingForModel(model: string): EncodingName {
|
|
|
31
314
|
|
|
32
315
|
export function getTokenCountForMessage(
|
|
33
316
|
message: BaseMessage,
|
|
34
|
-
getTokenCount: (text: string) => number
|
|
317
|
+
getTokenCount: (text: string) => number,
|
|
318
|
+
encoding: EncodingName = 'o200k_base'
|
|
35
319
|
): number {
|
|
36
320
|
const tokensPerMessage = 3;
|
|
37
321
|
|
|
322
|
+
type ContentBlock = Record<string, unknown> & {
|
|
323
|
+
type?: string;
|
|
324
|
+
tool_call?: { name?: string; args?: string; output?: string };
|
|
325
|
+
};
|
|
326
|
+
|
|
38
327
|
const processValue = (value: unknown): void => {
|
|
39
328
|
if (Array.isArray(value)) {
|
|
40
|
-
for (const
|
|
329
|
+
for (const raw of value) {
|
|
330
|
+
const item = raw as ContentBlock | null | undefined;
|
|
331
|
+
if (item == null || typeof item.type !== 'string') {
|
|
332
|
+
continue;
|
|
333
|
+
}
|
|
334
|
+
if (item.type === ContentTypes.ERROR) {
|
|
335
|
+
continue;
|
|
336
|
+
}
|
|
337
|
+
|
|
41
338
|
if (
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
item.type ===
|
|
45
|
-
item.type === ContentTypes.IMAGE_URL
|
|
339
|
+
item.type === ContentTypes.IMAGE_URL ||
|
|
340
|
+
item.type === 'image_url' ||
|
|
341
|
+
item.type === 'image'
|
|
46
342
|
) {
|
|
343
|
+
numTokens += Math.ceil(
|
|
344
|
+
estimateImageBlockTokens(item, encoding) * IMAGE_TOKEN_SAFETY_MARGIN
|
|
345
|
+
);
|
|
346
|
+
continue;
|
|
347
|
+
}
|
|
348
|
+
|
|
349
|
+
if (
|
|
350
|
+
item.type === 'document' ||
|
|
351
|
+
item.type === 'file' ||
|
|
352
|
+
item.type === ContentTypes.IMAGE_FILE
|
|
353
|
+
) {
|
|
354
|
+
numTokens += Math.ceil(
|
|
355
|
+
estimateDocumentBlockTokens(item, encoding, getTokenCount) *
|
|
356
|
+
IMAGE_TOKEN_SAFETY_MARGIN
|
|
357
|
+
);
|
|
47
358
|
continue;
|
|
48
359
|
}
|
|
49
360
|
|
|
50
361
|
if (item.type === ContentTypes.TOOL_CALL && item.tool_call != null) {
|
|
51
|
-
const toolName = item.tool_call
|
|
52
|
-
if (toolName
|
|
362
|
+
const toolName = item.tool_call.name;
|
|
363
|
+
if (typeof toolName === 'string' && toolName.length > 0) {
|
|
53
364
|
numTokens += getTokenCount(toolName);
|
|
54
365
|
}
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
if (args != null && args && typeof args === 'string') {
|
|
366
|
+
const args = item.tool_call.args;
|
|
367
|
+
if (typeof args === 'string' && args.length > 0) {
|
|
58
368
|
numTokens += getTokenCount(args);
|
|
59
369
|
}
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
if (output != null && output && typeof output === 'string') {
|
|
370
|
+
const output = item.tool_call.output;
|
|
371
|
+
if (typeof output === 'string' && output.length > 0) {
|
|
63
372
|
numTokens += getTokenCount(output);
|
|
64
373
|
}
|
|
65
374
|
continue;
|
|
66
375
|
}
|
|
67
376
|
|
|
68
377
|
const nestedValue = item[item.type];
|
|
69
|
-
|
|
70
|
-
if (!nestedValue) {
|
|
378
|
+
if (nestedValue == null) {
|
|
71
379
|
continue;
|
|
72
380
|
}
|
|
73
381
|
|
|
@@ -87,6 +395,13 @@ export function getTokenCountForMessage(
|
|
|
87
395
|
return numTokens;
|
|
88
396
|
}
|
|
89
397
|
|
|
398
|
+
/**
|
|
399
|
+
* Anthropic's API consistently reports ~10% more tokens than the local
|
|
400
|
+
* claude tokenizer due to internal message framing and content encoding.
|
|
401
|
+
* Verified empirically across content types via the count_tokens endpoint.
|
|
402
|
+
*/
|
|
403
|
+
const CLAUDE_TOKEN_CORRECTION = 1.1;
|
|
404
|
+
|
|
90
405
|
/**
|
|
91
406
|
* Creates a token counter function using the specified encoding.
|
|
92
407
|
* Lazily loads the encoding data on first use via dynamic import.
|
|
@@ -96,8 +411,11 @@ export const createTokenCounter = async (
|
|
|
96
411
|
): Promise<(message: BaseMessage) => number> => {
|
|
97
412
|
const tok = await getTokenizer(encoding);
|
|
98
413
|
const countTokens = (text: string): number => tok.count(text);
|
|
99
|
-
|
|
100
|
-
|
|
414
|
+
const isClaude = encoding === 'claude';
|
|
415
|
+
return (message: BaseMessage): number => {
|
|
416
|
+
const count = getTokenCountForMessage(message, countTokens, encoding);
|
|
417
|
+
return isClaude ? Math.ceil(count * CLAUDE_TOKEN_CORRECTION) : count;
|
|
418
|
+
};
|
|
101
419
|
};
|
|
102
420
|
|
|
103
421
|
/** Utility to manage the token encoder lifecycle explicitly. */
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Ingestion-time and pre-flight truncation utilities for tool results.
|
|
3
|
+
*
|
|
4
|
+
* Prevents oversized tool outputs from entering the message array and
|
|
5
|
+
* consuming the entire context window.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
/**
|
|
9
|
+
* Absolute hard cap on tool result length (characters).
|
|
10
|
+
* Even if the model has a 1M-token context, a single tool result
|
|
11
|
+
* larger than this is almost certainly a bug (e.g., dumping a binary file).
|
|
12
|
+
*/
|
|
13
|
+
export const HARD_MAX_TOOL_RESULT_CHARS = 400_000;
|
|
14
|
+
|
|
15
|
+
/**
|
|
16
|
+
* Computes the dynamic max tool result size based on the model's context window.
|
|
17
|
+
* Uses 30% of the context window (in estimated characters, ~4 chars/token)
|
|
18
|
+
* capped at HARD_MAX_TOOL_RESULT_CHARS.
|
|
19
|
+
*
|
|
20
|
+
* @param contextWindowTokens - The model's max context tokens (optional).
|
|
21
|
+
* @returns Maximum allowed characters for a single tool result.
|
|
22
|
+
*/
|
|
23
|
+
export function calculateMaxToolResultChars(
|
|
24
|
+
contextWindowTokens?: number
|
|
25
|
+
): number {
|
|
26
|
+
if (contextWindowTokens == null || contextWindowTokens <= 0) {
|
|
27
|
+
return HARD_MAX_TOOL_RESULT_CHARS;
|
|
28
|
+
}
|
|
29
|
+
return Math.min(
|
|
30
|
+
Math.floor(contextWindowTokens * 0.3) * 4,
|
|
31
|
+
HARD_MAX_TOOL_RESULT_CHARS
|
|
32
|
+
);
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
/**
|
|
36
|
+
* Truncates a tool-call input (the arguments/payload of a tool_use block)
|
|
37
|
+
* using head+tail strategy. Returns an object with `_truncated` (the
|
|
38
|
+
* truncated string) and `_originalChars` (for diagnostics).
|
|
39
|
+
*
|
|
40
|
+
* Accepts any type — objects are JSON-serialized before truncation.
|
|
41
|
+
*
|
|
42
|
+
* @param input - The tool input (string, object, etc.).
|
|
43
|
+
* @param maxChars - Maximum allowed characters.
|
|
44
|
+
*/
|
|
45
|
+
export function truncateToolInput(
|
|
46
|
+
input: unknown,
|
|
47
|
+
maxChars: number
|
|
48
|
+
): { _truncated: string; _originalChars: number } {
|
|
49
|
+
const serialized = typeof input === 'string' ? input : JSON.stringify(input);
|
|
50
|
+
if (serialized.length <= maxChars) {
|
|
51
|
+
return { _truncated: serialized, _originalChars: serialized.length };
|
|
52
|
+
}
|
|
53
|
+
const indicator = `\n… [truncated: ${serialized.length} chars exceeded ${maxChars} limit] …\n`;
|
|
54
|
+
const available = maxChars - indicator.length;
|
|
55
|
+
|
|
56
|
+
if (available < 100) {
|
|
57
|
+
return {
|
|
58
|
+
_truncated: serialized.slice(0, maxChars) + indicator.trimEnd(),
|
|
59
|
+
_originalChars: serialized.length,
|
|
60
|
+
};
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
const headSize = Math.ceil(available * 0.7);
|
|
64
|
+
const tailSize = available - headSize;
|
|
65
|
+
|
|
66
|
+
return {
|
|
67
|
+
_truncated:
|
|
68
|
+
serialized.slice(0, headSize) +
|
|
69
|
+
indicator +
|
|
70
|
+
serialized.slice(serialized.length - tailSize),
|
|
71
|
+
_originalChars: serialized.length,
|
|
72
|
+
};
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
/**
|
|
76
|
+
* Truncates tool result content that exceeds `maxChars` using a head+tail
|
|
77
|
+
* strategy. Keeps the beginning (structure/headers) and end (return value /
|
|
78
|
+
* conclusion) of the content so the model retains both the opening context
|
|
79
|
+
* and the final outcome.
|
|
80
|
+
*
|
|
81
|
+
* Head gets ~70% of the budget, tail gets ~30%. Falls back to head-only
|
|
82
|
+
* when the budget is too small for a meaningful tail.
|
|
83
|
+
*
|
|
84
|
+
* @param content - The tool result string content.
|
|
85
|
+
* @param maxChars - Maximum allowed characters.
|
|
86
|
+
* @returns The (possibly truncated) content string.
|
|
87
|
+
*/
|
|
88
|
+
export function truncateToolResultContent(
|
|
89
|
+
content: string,
|
|
90
|
+
maxChars: number
|
|
91
|
+
): string {
|
|
92
|
+
if (content.length <= maxChars) {
|
|
93
|
+
return content;
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
const indicator = `\n\n… [truncated: ${content.length} chars exceeded ${maxChars} limit] …\n\n`;
|
|
97
|
+
const available = maxChars - indicator.length;
|
|
98
|
+
if (available <= 0) {
|
|
99
|
+
return content.slice(0, maxChars);
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
// When budget is too small for a meaningful tail, fall back to head-only
|
|
103
|
+
if (available < 200) {
|
|
104
|
+
return content.slice(0, available) + indicator.trimEnd();
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
const headSize = Math.ceil(available * 0.7);
|
|
108
|
+
const tailSize = available - headSize;
|
|
109
|
+
|
|
110
|
+
// Try to break at newline boundaries for cleaner output
|
|
111
|
+
let headEnd = headSize;
|
|
112
|
+
const headNewline = content.lastIndexOf('\n', headSize);
|
|
113
|
+
if (headNewline > headSize - 200 && headNewline > 0) {
|
|
114
|
+
headEnd = headNewline;
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
let tailStart = content.length - tailSize;
|
|
118
|
+
const tailNewline = content.indexOf('\n', tailStart);
|
|
119
|
+
if (tailNewline > 0 && tailNewline < tailStart + 200) {
|
|
120
|
+
tailStart = tailNewline + 1;
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
return content.slice(0, headEnd) + indicator + content.slice(tailStart);
|
|
124
|
+
}
|
package/src/scripts/image.ts
DELETED
|
@@ -1,180 +0,0 @@
|
|
|
1
|
-
// src/scripts/image.ts
|
|
2
|
-
import { config } from 'dotenv';
|
|
3
|
-
config();
|
|
4
|
-
import { HumanMessage, AIMessage, BaseMessage } from '@langchain/core/messages';
|
|
5
|
-
import type { RunnableConfig } from '@langchain/core/runnables';
|
|
6
|
-
import type * as t from '@/types';
|
|
7
|
-
import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
|
|
8
|
-
import {
|
|
9
|
-
ToolEndHandler,
|
|
10
|
-
ModelEndHandler,
|
|
11
|
-
createMetadataAggregator,
|
|
12
|
-
} from '@/events';
|
|
13
|
-
// @ts-expect-error — example module not in current codebase
|
|
14
|
-
import { fetchRandomImageTool, fetchRandomImageURL } from '@/tools/example';
|
|
15
|
-
import { getLLMConfig } from '@/utils/llmConfig';
|
|
16
|
-
import { getArgs } from '@/scripts/args';
|
|
17
|
-
import { GraphEvents } from '@/common';
|
|
18
|
-
import { Run } from '@/run';
|
|
19
|
-
|
|
20
|
-
// Accumulated multi-turn transcript: user messages are pushed before each
// run and the run's output messages appended after; also dumped on failure.
const conversationHistory: BaseMessage[] = [];
|
|
21
|
-
|
|
22
|
-
/**
 * Demo driver: runs a two-turn conversation against a graph equipped with an
 * image-fetching tool, logging every stream event, then generates a title
 * for the conversation.
 *
 * Side effects: mutates the module-level `conversationHistory` and writes
 * extensively to the console. Exits only via the callers' error handlers.
 */
async function testCodeExecution(): Promise<void> {
  // NOTE(review): `currentDate` is destructured but never used below.
  const { userName, location, provider, currentDate } = await getArgs();
  // `contentParts` is filled incrementally by `aggregateContent` as events
  // arrive; it is logged after each turn and fed to title generation.
  const { contentParts, aggregateContent } = createContentAggregator();
  // Event handlers: each logs the raw event payload, and most forward it to
  // the content aggregator with the event-specific payload cast.
  const customHandlers = {
    [GraphEvents.TOOL_END]: new ToolEndHandler(),
    [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
    [GraphEvents.ON_RUN_STEP_COMPLETED]: {
      handle: (
        event: GraphEvents.ON_RUN_STEP_COMPLETED,
        data: t.StreamEventData
      ): void => {
        console.log('====== ON_RUN_STEP_COMPLETED ======');
        console.dir(data, { depth: null });
        aggregateContent({
          event,
          data: data as unknown as { result: t.ToolEndEvent },
        });
      },
    },
    [GraphEvents.ON_RUN_STEP]: {
      handle: (
        event: GraphEvents.ON_RUN_STEP,
        data: t.StreamEventData
      ): void => {
        console.log('====== ON_RUN_STEP ======');
        console.dir(data, { depth: null });
        aggregateContent({ event, data: data as t.RunStep });
      },
    },
    [GraphEvents.ON_RUN_STEP_DELTA]: {
      handle: (
        event: GraphEvents.ON_RUN_STEP_DELTA,
        data: t.StreamEventData
      ): void => {
        console.log('====== ON_RUN_STEP_DELTA ======');
        console.dir(data, { depth: null });
        aggregateContent({ event, data: data as t.RunStepDeltaEvent });
      },
    },
    [GraphEvents.ON_MESSAGE_DELTA]: {
      handle: (
        event: GraphEvents.ON_MESSAGE_DELTA,
        data: t.StreamEventData
      ): void => {
        console.log('====== ON_MESSAGE_DELTA ======');
        console.dir(data, { depth: null });
        aggregateContent({ event, data: data as t.MessageDeltaEvent });
      },
    },
    // TOOL_START is log-only: it does not feed the aggregator, and its
    // `metadata` parameter is accepted but unused.
    [GraphEvents.TOOL_START]: {
      handle: (
        _event: string,
        data: t.StreamEventData,
        metadata?: Record<string, unknown>
      ): void => {
        console.log('====== TOOL_START ======');
        console.dir(data, { depth: null });
      },
    },
  };

  const llmConfig = getLLMConfig(provider);

  // Build the run with the image tool attached and the handlers above.
  const run = await Run.create<t.IState>({
    runId: 'message-num-1',
    graphConfig: {
      type: 'standard',
      llmConfig,
      tools: [fetchRandomImageTool],
      // tools: [fetchRandomImageURL],
      instructions:
        'You are a friendly AI assistant with internet capabilities. Always address the user by their name.',
      additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
    },
    returnContent: true,
    skipCleanup: true,
    customHandlers,
  });

  // Stream configuration shared by both turns; the thread_id keeps them in
  // the same conversation.
  const config: Partial<RunnableConfig> & {
    version: 'v1' | 'v2';
    run_id?: string;
    streamMode: string;
  } = {
    configurable: {
      provider,
      thread_id: 'conversation-num-1',
    },
    streamMode: 'values',
    version: 'v2' as const,
  };

  console.log('Fetch Random Image');

  // Turn 1: ask for two images, expecting tool calls plus descriptions.
  const userMessage1 = `Hi ${userName} here. Please get me 2 random images. Describe them after you receive them.`;

  conversationHistory.push(new HumanMessage(userMessage1));

  let inputs = {
    messages: conversationHistory,
  };
  // NOTE(review): `finalContentParts1` is captured but never used.
  const finalContentParts1 = await run.processStream(inputs, config);
  const finalMessages1 = run.getRunMessages();
  if (finalMessages1) {
    conversationHistory.push(...finalMessages1);
  }
  console.log('\n\n====================\n\n');
  console.dir(contentParts, { depth: null });

  console.log('Test 2: Follow up with another message');

  // Turn 2: a follow-up on the same thread; `keepContent` preserves the
  // aggregated content from turn 1.
  const userMessage2 = `thanks, you're the best!`;

  conversationHistory.push(new HumanMessage(userMessage2));

  inputs = {
    messages: conversationHistory,
  };
  // NOTE(review): `finalContentParts2` is captured but never used.
  const finalContentParts2 = await run.processStream(inputs, config, {
    keepContent: true,
  });
  const finalMessages2 = run.getRunMessages();
  if (finalMessages2) {
    conversationHistory.push(...finalMessages2);
  }
  console.log('\n\n====================\n\n');
  console.dir(contentParts, { depth: null });

  // Finally, generate a conversation title, collecting LLM-end metadata via
  // the callback aggregator.
  const { handleLLMEnd, collected } = createMetadataAggregator();
  const titleResult = await run.generateTitle({
    provider,
    inputText: userMessage2,
    contentParts,
    chainOptions: {
      callbacks: [
        {
          handleLLMEnd,
        },
      ],
    },
  });
  console.log('Generated Title:', titleResult);
  console.log('Collected metadata:', collected);
}
|
|
167
|
-
|
|
168
|
-
process.on('unhandledRejection', (reason, promise) => {
|
|
169
|
-
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
|
170
|
-
console.log('Conversation history:');
|
|
171
|
-
console.dir(conversationHistory, { depth: null });
|
|
172
|
-
process.exit(1);
|
|
173
|
-
});
|
|
174
|
-
|
|
175
|
-
testCodeExecution().catch((err) => {
|
|
176
|
-
console.error(err);
|
|
177
|
-
console.log('Conversation history:');
|
|
178
|
-
console.dir(conversationHistory, { depth: null });
|
|
179
|
-
process.exit(1);
|
|
180
|
-
});
|