illuma-agents 1.0.8 → 1.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +1 -5
- package/dist/cjs/common/enum.cjs +1 -2
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/events.cjs +11 -0
- package/dist/cjs/events.cjs.map +1 -1
- package/dist/cjs/graphs/Graph.cjs +2 -1
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/instrumentation.cjs +3 -1
- package/dist/cjs/instrumentation.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +79 -2
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -1
- package/dist/cjs/llm/bedrock/index.cjs +99 -0
- package/dist/cjs/llm/bedrock/index.cjs.map +1 -0
- package/dist/cjs/llm/fake.cjs.map +1 -1
- package/dist/cjs/llm/openai/index.cjs +102 -0
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/cjs/llm/openai/utils/index.cjs +87 -1
- package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
- package/dist/cjs/llm/openrouter/index.cjs +175 -1
- package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
- package/dist/cjs/llm/providers.cjs +13 -16
- package/dist/cjs/llm/providers.cjs.map +1 -1
- package/dist/cjs/llm/text.cjs.map +1 -1
- package/dist/cjs/messages/core.cjs +14 -14
- package/dist/cjs/messages/core.cjs.map +1 -1
- package/dist/cjs/messages/ids.cjs.map +1 -1
- package/dist/cjs/messages/prune.cjs.map +1 -1
- package/dist/cjs/run.cjs +18 -1
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/cjs/splitStream.cjs.map +1 -1
- package/dist/cjs/stream.cjs +24 -1
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/tools/ToolNode.cjs +20 -1
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/cjs/tools/handlers.cjs +29 -25
- package/dist/cjs/tools/handlers.cjs.map +1 -1
- package/dist/cjs/tools/search/anthropic.cjs.map +1 -1
- package/dist/cjs/tools/search/content.cjs.map +1 -1
- package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
- package/dist/cjs/tools/search/format.cjs.map +1 -1
- package/dist/cjs/tools/search/highlights.cjs.map +1 -1
- package/dist/cjs/tools/search/rerankers.cjs.map +1 -1
- package/dist/cjs/tools/search/schema.cjs +27 -25
- package/dist/cjs/tools/search/schema.cjs.map +1 -1
- package/dist/cjs/tools/search/search.cjs +6 -1
- package/dist/cjs/tools/search/search.cjs.map +1 -1
- package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -1
- package/dist/cjs/tools/search/tool.cjs +182 -35
- package/dist/cjs/tools/search/tool.cjs.map +1 -1
- package/dist/cjs/tools/search/utils.cjs.map +1 -1
- package/dist/cjs/utils/graph.cjs.map +1 -1
- package/dist/cjs/utils/llm.cjs +0 -1
- package/dist/cjs/utils/llm.cjs.map +1 -1
- package/dist/cjs/utils/misc.cjs.map +1 -1
- package/dist/cjs/utils/run.cjs.map +1 -1
- package/dist/cjs/utils/title.cjs +7 -7
- package/dist/cjs/utils/title.cjs.map +1 -1
- package/dist/esm/common/enum.mjs +1 -2
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/events.mjs +11 -0
- package/dist/esm/events.mjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +2 -1
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/instrumentation.mjs +3 -1
- package/dist/esm/instrumentation.mjs.map +1 -1
- package/dist/esm/llm/anthropic/types.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs +79 -2
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -1
- package/dist/esm/llm/bedrock/index.mjs +97 -0
- package/dist/esm/llm/bedrock/index.mjs.map +1 -0
- package/dist/esm/llm/fake.mjs.map +1 -1
- package/dist/esm/llm/openai/index.mjs +103 -1
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/esm/llm/openai/utils/index.mjs +88 -2
- package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
- package/dist/esm/llm/openrouter/index.mjs +175 -1
- package/dist/esm/llm/openrouter/index.mjs.map +1 -1
- package/dist/esm/llm/providers.mjs +2 -5
- package/dist/esm/llm/providers.mjs.map +1 -1
- package/dist/esm/llm/text.mjs.map +1 -1
- package/dist/esm/messages/core.mjs +14 -14
- package/dist/esm/messages/core.mjs.map +1 -1
- package/dist/esm/messages/ids.mjs.map +1 -1
- package/dist/esm/messages/prune.mjs.map +1 -1
- package/dist/esm/run.mjs +18 -1
- package/dist/esm/run.mjs.map +1 -1
- package/dist/esm/splitStream.mjs.map +1 -1
- package/dist/esm/stream.mjs +24 -1
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/tools/ToolNode.mjs +20 -1
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/esm/tools/handlers.mjs +30 -26
- package/dist/esm/tools/handlers.mjs.map +1 -1
- package/dist/esm/tools/search/anthropic.mjs.map +1 -1
- package/dist/esm/tools/search/content.mjs.map +1 -1
- package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
- package/dist/esm/tools/search/format.mjs.map +1 -1
- package/dist/esm/tools/search/highlights.mjs.map +1 -1
- package/dist/esm/tools/search/rerankers.mjs.map +1 -1
- package/dist/esm/tools/search/schema.mjs +27 -25
- package/dist/esm/tools/search/schema.mjs.map +1 -1
- package/dist/esm/tools/search/search.mjs +6 -1
- package/dist/esm/tools/search/search.mjs.map +1 -1
- package/dist/esm/tools/search/serper-scraper.mjs.map +1 -1
- package/dist/esm/tools/search/tool.mjs +182 -35
- package/dist/esm/tools/search/tool.mjs.map +1 -1
- package/dist/esm/tools/search/utils.mjs.map +1 -1
- package/dist/esm/utils/graph.mjs.map +1 -1
- package/dist/esm/utils/llm.mjs +0 -1
- package/dist/esm/utils/llm.mjs.map +1 -1
- package/dist/esm/utils/misc.mjs.map +1 -1
- package/dist/esm/utils/run.mjs.map +1 -1
- package/dist/esm/utils/title.mjs +7 -7
- package/dist/esm/utils/title.mjs.map +1 -1
- package/dist/types/common/enum.d.ts +1 -2
- package/dist/types/llm/bedrock/index.d.ts +36 -0
- package/dist/types/llm/openai/index.d.ts +1 -0
- package/dist/types/llm/openai/utils/index.d.ts +10 -1
- package/dist/types/llm/openrouter/index.d.ts +4 -1
- package/dist/types/tools/search/types.d.ts +2 -0
- package/dist/types/types/llm.d.ts +3 -8
- package/package.json +16 -12
- package/src/common/enum.ts +1 -2
- package/src/common/index.ts +1 -1
- package/src/events.ts +11 -0
- package/src/graphs/Graph.ts +2 -1
- package/src/instrumentation.ts +25 -22
- package/src/llm/anthropic/llm.spec.ts +1442 -1442
- package/src/llm/anthropic/types.ts +140 -140
- package/src/llm/anthropic/utils/message_inputs.ts +757 -660
- package/src/llm/anthropic/utils/output_parsers.ts +133 -133
- package/src/llm/anthropic/utils/tools.ts +29 -29
- package/src/llm/bedrock/index.ts +128 -0
- package/src/llm/fake.ts +133 -133
- package/src/llm/google/llm.spec.ts +3 -1
- package/src/llm/google/utils/tools.ts +160 -160
- package/src/llm/openai/index.ts +126 -0
- package/src/llm/openai/types.ts +24 -24
- package/src/llm/openai/utils/index.ts +116 -1
- package/src/llm/openai/utils/isReasoningModel.test.ts +90 -90
- package/src/llm/openrouter/index.ts +222 -1
- package/src/llm/providers.ts +2 -7
- package/src/llm/text.ts +94 -94
- package/src/messages/core.ts +463 -463
- package/src/messages/formatAgentMessages.tools.test.ts +400 -400
- package/src/messages/formatMessage.test.ts +693 -693
- package/src/messages/ids.ts +26 -26
- package/src/messages/prune.ts +567 -567
- package/src/messages/shiftIndexTokenCountMap.test.ts +81 -81
- package/src/mockStream.ts +98 -98
- package/src/prompts/collab.ts +5 -5
- package/src/prompts/index.ts +1 -1
- package/src/prompts/taskmanager.ts +61 -61
- package/src/run.ts +22 -4
- package/src/scripts/ant_web_search_edge_case.ts +162 -0
- package/src/scripts/ant_web_search_error_edge_case.ts +148 -0
- package/src/scripts/args.ts +48 -48
- package/src/scripts/caching.ts +123 -123
- package/src/scripts/code_exec_files.ts +193 -193
- package/src/scripts/empty_input.ts +137 -137
- package/src/scripts/memory.ts +97 -97
- package/src/scripts/test-tools-before-handoff.ts +1 -5
- package/src/scripts/thinking.ts +149 -149
- package/src/scripts/tools.ts +1 -4
- package/src/specs/anthropic.simple.test.ts +67 -0
- package/src/specs/spec.utils.ts +3 -3
- package/src/specs/token-distribution-edge-case.test.ts +316 -316
- package/src/specs/tool-error.test.ts +193 -193
- package/src/splitStream.test.ts +691 -691
- package/src/splitStream.ts +234 -234
- package/src/stream.test.ts +94 -94
- package/src/stream.ts +30 -1
- package/src/tools/ToolNode.ts +24 -1
- package/src/tools/handlers.ts +32 -28
- package/src/tools/search/anthropic.ts +51 -51
- package/src/tools/search/content.test.ts +173 -173
- package/src/tools/search/content.ts +147 -147
- package/src/tools/search/direct-url.test.ts +530 -0
- package/src/tools/search/firecrawl.ts +210 -210
- package/src/tools/search/format.ts +250 -250
- package/src/tools/search/highlights.ts +320 -320
- package/src/tools/search/index.ts +2 -2
- package/src/tools/search/jina-reranker.test.ts +126 -126
- package/src/tools/search/output.md +2775 -2775
- package/src/tools/search/rerankers.ts +242 -242
- package/src/tools/search/schema.ts +65 -63
- package/src/tools/search/search.ts +766 -759
- package/src/tools/search/serper-scraper.ts +155 -155
- package/src/tools/search/test.html +883 -883
- package/src/tools/search/test.md +642 -642
- package/src/tools/search/test.ts +159 -159
- package/src/tools/search/tool.ts +641 -471
- package/src/tools/search/types.ts +689 -687
- package/src/tools/search/utils.ts +79 -79
- package/src/types/index.ts +6 -6
- package/src/types/llm.ts +2 -8
- package/src/utils/graph.ts +10 -10
- package/src/utils/llm.ts +26 -27
- package/src/utils/llmConfig.ts +13 -5
- package/src/utils/logging.ts +48 -48
- package/src/utils/misc.ts +57 -57
- package/src/utils/run.ts +100 -100
- package/src/utils/title.ts +165 -165
- package/dist/cjs/llm/ollama/index.cjs +0 -70
- package/dist/cjs/llm/ollama/index.cjs.map +0 -1
- package/dist/cjs/llm/ollama/utils.cjs +0 -158
- package/dist/cjs/llm/ollama/utils.cjs.map +0 -1
- package/dist/esm/llm/ollama/index.mjs +0 -68
- package/dist/esm/llm/ollama/index.mjs.map +0 -1
- package/dist/esm/llm/ollama/utils.mjs +0 -155
- package/dist/esm/llm/ollama/utils.mjs.map +0 -1
- package/dist/types/llm/ollama/index.d.ts +0 -8
- package/dist/types/llm/ollama/utils.d.ts +0 -7
- package/src/llm/ollama/index.ts +0 -92
- package/src/llm/ollama/utils.ts +0 -193
- package/src/proto/CollabGraph.ts +0 -269
- package/src/proto/TaskManager.ts +0 -243
- package/src/proto/collab.ts +0 -200
- package/src/proto/collab_design.ts +0 -184
- package/src/proto/collab_design_v2.ts +0 -224
- package/src/proto/collab_design_v3.ts +0 -255
- package/src/proto/collab_design_v4.ts +0 -220
- package/src/proto/collab_design_v5.ts +0 -251
- package/src/proto/collab_graph.ts +0 -181
- package/src/proto/collab_original.ts +0 -123
- package/src/proto/example.ts +0 -93
- package/src/proto/example_new.ts +0 -68
- package/src/proto/example_old.ts +0 -201
- package/src/proto/example_test.ts +0 -152
- package/src/proto/example_test_anthropic.ts +0 -100
- package/src/proto/log_stream.ts +0 -202
- package/src/proto/main_collab_community_event.ts +0 -133
- package/src/proto/main_collab_design_v2.ts +0 -96
- package/src/proto/main_collab_design_v4.ts +0 -100
- package/src/proto/main_collab_design_v5.ts +0 -135
- package/src/proto/main_collab_global_analysis.ts +0 -122
- package/src/proto/main_collab_hackathon_event.ts +0 -153
- package/src/proto/main_collab_space_mission.ts +0 -153
- package/src/proto/main_philosophy.ts +0 -210
- package/src/proto/original_script.ts +0 -126
- package/src/proto/standard.ts +0 -100
- package/src/proto/stream.ts +0 -56
- package/src/proto/tasks.ts +0 -118
- package/src/proto/tools/global_analysis_tools.ts +0 -86
- package/src/proto/tools/space_mission_tools.ts +0 -60
- package/src/proto/vertexai.ts +0 -54
- package/src/scripts/image.ts +0 -178
package/src/specs/token-distribution-edge-case.test.ts
@@ -1,316 +1,316 @@
Both sides of this hunk are identical: all 316 lines of the file were removed and re-added unchanged. The file content, shown once:

// src/specs/token-distribution-edge-case.test.ts
import {
  HumanMessage,
  AIMessage,
  SystemMessage,
  BaseMessage,
} from '@langchain/core/messages';
import type { UsageMetadata } from '@langchain/core/messages';
import type * as t from '@/types';
import { createPruneMessages } from '@/messages/prune';

// Create a simple token counter for testing
const createTestTokenCounter = (): t.TokenCounter => {
  // This simple token counter just counts characters as tokens for predictable testing
  return (message: BaseMessage): number => {
    // Use type assertion to help TypeScript understand the type
    const content = message.content as
      | string
      | Array<t.MessageContentComplex | string>
      | undefined;

    // Handle string content
    if (typeof content === 'string') {
      return content.length;
    }

    // Handle array content
    if (Array.isArray(content)) {
      let totalLength = 0;

      for (const item of content) {
        if (typeof item === 'string') {
          totalLength += item.length;
        } else if (typeof item === 'object') {
          if ('text' in item && typeof item.text === 'string') {
            totalLength += item.text.length;
          }
        }
      }

      return totalLength;
    }

    // Default case - if content is null, undefined, or any other type
    return 0;
  };
};

describe('Token Distribution Edge Case Tests', () => {
  it('should only distribute tokens to messages that remain in the context after pruning', () => {
    // Create a token counter
    const tokenCounter = createTestTokenCounter();

    // Create messages
    const messages = [
      new SystemMessage('System instruction'), // Will always be included
      new HumanMessage('Message 1'), // Will be pruned
      new AIMessage('Response 1'), // Will be pruned
      new HumanMessage('Message 2'), // Will remain
      new AIMessage('Response 2'), // Will remain
    ];

    // Calculate initial token counts for each message
    const indexTokenCountMap: Record<string, number> = {
      0: 17, // "System instruction"
      1: 9, // "Message 1"
      2: 10, // "Response 1"
      3: 9, // "Message 2"
      4: 10, // "Response 2"
    };

    // Set a token limit that will force pruning of the first two messages after the system message
    const pruneMessages = createPruneMessages({
      maxTokens: 40, // Only enough for system message + last two messages
      startIndex: 0,
      tokenCounter,
      indexTokenCountMap: { ...indexTokenCountMap },
    });

    // First call to establish lastCutOffIndex
    const initialResult = pruneMessages({ messages });

    // Verify initial pruning
    expect(initialResult.context.length).toBe(3);
    expect(initialResult.context[0].content).toBe('System instruction');
    expect(initialResult.context[1].content).toBe('Message 2');
    expect(initialResult.context[2].content).toBe('Response 2');

    // Now provide usage metadata with a different total token count
    const usageMetadata: Partial<UsageMetadata> = {
      input_tokens: 30,
      output_tokens: 20,
      total_tokens: 50, // Different from the sum of our initial token counts
    };

    // Call pruneMessages again with the usage metadata
    const result = pruneMessages({
      messages,
      usageMetadata,
    });

    // The token distribution should only affect messages that remain in the context
    // Messages at indices 0, 3, and 4 should have their token counts adjusted
    // Messages at indices 1 and 2 should remain unchanged since they're pruned

    // The token distribution should only affect messages that remain in the context
    // Messages at indices 0, 3, and 4 should have their token counts adjusted
    // Messages at indices 1 and 2 should remain unchanged since they're pruned

    // Check that at least one of the pruned messages' token counts was not adjusted
    // We're testing the principle that pruned messages don't get token redistribution
    const atLeastOnePrunedMessageUnchanged =
      result.indexTokenCountMap[1] === indexTokenCountMap[1] ||
      result.indexTokenCountMap[2] === indexTokenCountMap[2];

    expect(atLeastOnePrunedMessageUnchanged).toBe(true);

    // Verify that the sum of tokens for messages in the context is close to the total_tokens from usageMetadata
    // There might be small rounding differences or implementation details that affect the exact sum
    const totalContextTokens =
      (result.indexTokenCountMap[0] ?? 0) +
      (result.indexTokenCountMap[3] ?? 0) +
      (result.indexTokenCountMap[4] ?? 0);
    expect(totalContextTokens).toBeGreaterThan(0);

    // The key thing we're testing is that the token distribution happens for messages in the context
    // and that the sum is reasonably close to the expected total
    const tokenDifference = Math.abs(totalContextTokens - 50);
    expect(tokenDifference).toBeLessThan(20); // Allow for some difference due to implementation details
  });

  it('should handle the case when all messages fit within the token limit', () => {
    // Create a token counter
    const tokenCounter = createTestTokenCounter();

    // Create messages
    const messages = [
      new SystemMessage('System instruction'),
      new HumanMessage('Message 1'),
      new AIMessage('Response 1'),
    ];

    // Calculate initial token counts for each message
    const indexTokenCountMap: Record<string, number> = {
      0: 17, // "System instruction"
      1: 9, // "Message 1"
      2: 10, // "Response 1"
    };

    // Set a token limit that will allow all messages to fit
    const pruneMessages = createPruneMessages({
      maxTokens: 100,
      startIndex: 0,
      tokenCounter,
      indexTokenCountMap: { ...indexTokenCountMap },
    });

    // First call to establish lastCutOffIndex (should be 0 since no pruning occurs)
    const initialResult = pruneMessages({ messages });

    // Verify no pruning occurred
    expect(initialResult.context.length).toBe(3);

    // Now provide usage metadata with a different total token count
    const usageMetadata: Partial<UsageMetadata> = {
      input_tokens: 20,
      output_tokens: 10,
      total_tokens: 30, // Different from the sum of our initial token counts
    };

    // Call pruneMessages again with the usage metadata
    const result = pruneMessages({
      messages,
      usageMetadata,
    });

    // Since all messages fit, all token counts should be adjusted
    const initialTotalTokens =
      indexTokenCountMap[0] + indexTokenCountMap[1] + indexTokenCountMap[2];
    const expectedRatio = 30 / initialTotalTokens;

    // Check that all token counts were adjusted
    expect(result.indexTokenCountMap[0]).toBe(
      Math.round(indexTokenCountMap[0] * expectedRatio)
    );
    expect(result.indexTokenCountMap[1]).toBe(
      Math.round(indexTokenCountMap[1] * expectedRatio)
    );
    expect(result.indexTokenCountMap[2]).toBe(
      Math.round(indexTokenCountMap[2] * expectedRatio)
    );

    // Verify that the sum of all tokens equals the total_tokens from usageMetadata
    const totalTokens =
      (result.indexTokenCountMap[0] ?? 0) +
      (result.indexTokenCountMap[1] ?? 0) +
      (result.indexTokenCountMap[2] ?? 0);
    expect(totalTokens).toBe(30);
  });

  it('should handle multiple pruning operations with token redistribution', () => {
    // Create a token counter
    const tokenCounter = createTestTokenCounter();

    // Create a longer sequence of messages
    const messages = [
      new SystemMessage('System instruction'), // Will always be included
      new HumanMessage('Message 1'), // Will be pruned in first round
      new AIMessage('Response 1'), // Will be pruned in first round
      new HumanMessage('Message 2'), // Will be pruned in second round
      new AIMessage('Response 2'), // Will be pruned in second round
      new HumanMessage('Message 3'), // Will remain
      new AIMessage('Response 3'), // Will remain
    ];

    // Calculate initial token counts for each message
    const indexTokenCountMap: Record<string, number> = {
      0: 17, // "System instruction"
      1: 9, // "Message 1"
      2: 10, // "Response 1"
      3: 9, // "Message 2"
      4: 10, // "Response 2"
      5: 9, // "Message 3"
      6: 10, // "Response 3"
    };

    // Set a token limit that will force pruning
    const pruneMessages = createPruneMessages({
      maxTokens: 40, // Only enough for system message + last two messages
      startIndex: 0,
      tokenCounter,
      indexTokenCountMap: { ...indexTokenCountMap },
    });

    // First pruning operation
    const firstResult = pruneMessages({ messages });

    // Verify first pruning
    expect(firstResult.context.length).toBe(3);
    expect(firstResult.context[0].content).toBe('System instruction');
    expect(firstResult.context[1].content).toBe('Message 3');
    expect(firstResult.context[2].content).toBe('Response 3');

    // First usage metadata update
    const firstUsageMetadata: Partial<UsageMetadata> = {
      input_tokens: 30,
      output_tokens: 20,
      total_tokens: 50,
    };

    // Apply first usage metadata
    const secondResult = pruneMessages({
      messages,
      usageMetadata: firstUsageMetadata,
    });

    // Add two more messages
    messages.push(new HumanMessage('Message 4'));
    const extendedMessages = [...messages, new AIMessage('Response 4')];

    // Second usage metadata update
    const secondUsageMetadata: Partial<UsageMetadata> = {
      input_tokens: 30,
      output_tokens: 20,
      total_tokens: 50,
    };

    // Apply second usage metadata with extended messages
    const thirdResult = pruneMessages({
      messages: extendedMessages,
      usageMetadata: secondUsageMetadata,
    });

    // The context should include the system message and some of the latest messages
    expect(thirdResult.context.length).toBeGreaterThan(0);
    expect(thirdResult.context[0].content).toBe('System instruction');
    expect(thirdResult.context[1].content).toBe('Response 4');

    // Find which messages are in the final context
    const contextMessageIndices = thirdResult.context.map((msg) => {
      // Find the index of this message in the original array
      return extendedMessages.findIndex((m) => m.content === msg.content);
    });

    // Get the sum of token counts for messages in the context
    let totalContextTokens = 0;
    for (const idx of contextMessageIndices) {
      totalContextTokens += thirdResult.indexTokenCountMap[idx] ?? 0;
    }

    // Verify that the sum of tokens for messages in the context is close to the total_tokens from usageMetadata
    // There might be small rounding differences or implementation details that affect the exact sum
    expect(totalContextTokens).toBeGreaterThan(0);

    // The key thing we're testing is that the token distribution happens for messages in the context
    // and that the sum is reasonably close to the expected total
    const tokenDifference = Math.abs(totalContextTokens - 70);
    expect(tokenDifference).toBeLessThan(50); // Allow for some difference due to implementation details

    // Verify that messages not in the context have their original token counts or previously adjusted values
    for (let i = 0; i < extendedMessages.length; i++) {
      if (!contextMessageIndices.includes(i)) {
        const expectedValue =
          i < messages.length
            ? (secondResult.indexTokenCountMap[i] ?? 0) || indexTokenCountMap[i]
            : ((indexTokenCountMap as Record<string, number | undefined>)[i] ??
              0);

        const difference = Math.abs(
          (thirdResult.indexTokenCountMap[i] ?? 0) - expectedValue
        );
        expect(difference).toBe(0);
      }
    }
  });
});
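For orientation, a minimal usage sketch of the pruning API these tests exercise, inferred entirely from the calls above (the real types live in @/messages/prune and @/types; the shapes shown here are an assumption, not the library's authoritative signatures):

import { SystemMessage, HumanMessage, AIMessage } from '@langchain/core/messages';
import type { BaseMessage } from '@langchain/core/messages';
import { createPruneMessages } from '@/messages/prune';

// Character-count token counter, the same trick as createTestTokenCounter above.
const tokenCounter = (message: BaseMessage): number =>
  typeof message.content === 'string' ? message.content.length : 0;

// The factory closes over the budget and the per-index token counts,
// returning a pruner that can be called repeatedly as the conversation grows.
const pruneMessages = createPruneMessages({
  maxTokens: 40, // budget; older turns beyond it are cut (system message kept)
  startIndex: 0,
  tokenCounter,
  indexTokenCountMap: { 0: 17, 1: 9, 2: 10 }, // token count per message index
});

// Each call returns the surviving context plus the (possibly redistributed) counts.
// Passing usageMetadata rescales the counts of surviving messages so their sum
// tracks total_tokens, as the edge-case tests above verify.
const { context, indexTokenCountMap } = pruneMessages({
  messages: [
    new SystemMessage('System instruction'),
    new HumanMessage('Message 1'),
    new AIMessage('Response 1'),
  ],
  usageMetadata: { input_tokens: 20, output_tokens: 10, total_tokens: 30 },
});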