graphlit-client 1.0.20250610005 → 1.0.20250610007
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client.js +63 -1
- package/dist/streaming/chunk-buffer.d.ts +27 -12
- package/dist/streaming/chunk-buffer.js +130 -117
- package/dist/streaming/providers.js +51 -5
- package/package.json +1 -1
package/dist/client.js
CHANGED
@@ -1713,7 +1713,69 @@ class Graphlit {
                     continue;
                 }
                 try {
-
+                    let args;
+                    try {
+                        args = JSON.parse(toolCall.arguments);
+                    }
+                    catch (parseError) {
+                        console.error(`Failed to parse tool arguments for ${toolCall.name}:`);
+                        console.error(`Arguments (${toolCall.arguments.length} chars):`, toolCall.arguments);
+                        console.error(`Parse error:`, parseError);
+                        // Check for common truncation patterns
+                        const lastChars = toolCall.arguments.slice(-20);
+                        let isTruncated = false;
+                        if (!toolCall.arguments.includes('}') || !lastChars.includes('}')) {
+                            console.error(`Possible truncation detected - arguments don't end with '}': ...${lastChars}`);
+                            isTruncated = true;
+                        }
+                        // Try to fix truncated JSON by adding missing closing braces
+                        if (isTruncated) {
+                            let fixedJson = toolCall.arguments;
+                            // Count open braces vs close braces to determine how many we need
+                            const openBraces = (fixedJson.match(/\{/g) || []).length;
+                            const closeBraces = (fixedJson.match(/\}/g) || []).length;
+                            const missingBraces = openBraces - closeBraces;
+                            if (missingBraces > 0) {
+                                // Add missing closing quote if the string ends with an unfinished string
+                                if (fixedJson.endsWith('"') === false && fixedJson.includes('"')) {
+                                    const lastQuoteIndex = fixedJson.lastIndexOf('"');
+                                    const afterLastQuote = fixedJson.slice(lastQuoteIndex + 1);
+                                    if (!afterLastQuote.includes('"')) {
+                                        fixedJson += '"';
+                                    }
+                                }
+                                // Add missing closing braces
+                                fixedJson += '}'.repeat(missingBraces);
+                                console.log(`Attempting to fix truncated JSON by adding ${missingBraces} closing brace(s):`);
+                                console.log(fixedJson);
+                                try {
+                                    args = JSON.parse(fixedJson);
+                                    console.log(`✅ Successfully fixed truncated JSON for ${toolCall.name}`);
+                                }
+                                catch (fixError) {
+                                    console.error(`❌ Failed to fix truncated JSON: ${fixError}`);
+                                    // Fall through to error handling below
+                                }
+                            }
+                        }
+                        // If we couldn't parse or fix the JSON, log details and continue
+                        if (!args) {
+                            // Log position mentioned in error if available
+                            const errorMsg = parseError instanceof Error ? parseError.message : '';
+                            const posMatch = errorMsg.match(/position (\d+)/);
+                            if (posMatch) {
+                                const pos = parseInt(posMatch[1]);
+                                const context = toolCall.arguments.slice(Math.max(0, pos - 20), pos + 20);
+                                console.error(`Error context around position ${pos}: ...${context}...`);
+                            }
+                            // Update UI with error - use StreamEvent error type
+                            uiAdapter.handleEvent({
+                                type: "error",
+                                error: `Tool ${toolCall.name} failed: Invalid JSON arguments: ${parseError instanceof Error ? parseError.message : 'Unknown error'}`,
+                            });
+                            continue;
+                        }
+                    }
                     // Update UI
                     uiAdapter.handleEvent({
                         type: "tool_call_start",
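Note on the client.js change: the repair pass above is a heuristic. When JSON.parse fails, it closes a dangling string quote, appends one '}' per unmatched '{', retries the parse, and otherwise emits an "error" stream event and skips the tool call. A simplified standalone sketch of that heuristic for reference (the helper name and shape are illustrative only, not something graphlit-client exports):

    // Illustrative sketch of the truncation-repair heuristic above; not part of the package API.
    function repairTruncatedToolJson(raw: string): unknown | undefined {
        try {
            return JSON.parse(raw);
        }
        catch {
            let fixed = raw;
            // Close a dangling string literal (mirrors the lastIndexOf('"') check in the diff).
            if (!fixed.endsWith('"') && fixed.includes('"')) {
                fixed += '"';
            }
            // Append one '}' per unmatched '{'.
            const missing = (fixed.match(/\{/g) || []).length - (fixed.match(/\}/g) || []).length;
            if (missing > 0) {
                fixed += "}".repeat(missing);
            }
            try {
                return JSON.parse(fixed);
            }
            catch {
                return undefined; // caller falls back to emitting an "error" event and skipping the tool call
            }
        }
    }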
package/dist/streaming/chunk-buffer.d.ts
CHANGED
@@ -1,25 +1,40 @@
+/**
+ * Breaks an LLM’s streaming token deltas into character, word, or sentence
+ * chunks – or lets you plug in your own chunker.
+ *
+ * Usage
+ * -----
+ *   const buf = new ChunkBuffer('sentence');
+ *   stream.on('delta', d => buf.addToken(d).forEach(pushToUI));
+ *   stream.on('end', () => buf.flush().forEach(pushToUI));
+ */
 export type ChunkingStrategy = "character" | "word" | "sentence" | ((text: string) => {
     chunks: string[];
     remainder: string;
 });
+export interface ChunkerOpts {
+    /** Flush “words” longer than this (default = 50 chars). */
+    maxWordLen?: number;
+    /** Force a break after this many chars with no whitespace (default = 400). */
+    maxBufferNoBreak?: number;
+}
 export declare class ChunkBuffer {
-
-
-    private static readonly MAX_BUFFER_NO_BREAK;
-    private readonly graphemeSeg;
-    private readonly wordSeg;
-    private readonly sentenceSeg;
-    private readonly customChunker?;
-    private readonly strategy;
-    constructor(strategy: ChunkingStrategy);
-    /** Feed one LLM token, receive zero-or-more flushed chunks. */
+    constructor(strategy: ChunkingStrategy, opts?: ChunkerOpts);
+    /** Feed one LLM delta; receive zero‑or‑more flushed chunks. */
     addToken(token: string): string[];
-    /**
+    /** Call when the stream closes to emit the final remainder. */
     flush(): string[];
+    private buffer;
+    private readonly strategy;
+    private readonly customChunker?;
+    private readonly MAX_WORD_LEN;
+    private readonly MAX_BUFFER_NO_BREAK;
+    private readonly graphemeSeg?;
+    private readonly wordSeg?;
+    private readonly sentenceSeg?;
    private flushGraphemes;
    private flushWords;
    private flushSentences;
-    /** Fallback guard to break up very long runs of text with no natural breaks. */
    private flushLongRuns;
    private flushCustom;
 }
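Besides the new ChunkerOpts thresholds, the ChunkingStrategy union shows that a plain function can be passed in place of a named strategy. A hypothetical usage sketch against these declarations (the line-based chunker and the import path are illustrative, not part of the published API surface):

    // Import path assumed from the dist layout above; the package may expose this differently.
    import { ChunkBuffer } from "graphlit-client/dist/streaming/chunk-buffer.js";

    // Custom strategy: emit complete lines, keep the partial last line as the remainder.
    const byLine = (text: string) => {
        const parts = text.split("\n");
        const remainder = parts.pop() ?? "";
        return { chunks: parts.map((p) => p + "\n"), remainder };
    };

    const lineBuffer = new ChunkBuffer(byLine);
    const wordBuffer = new ChunkBuffer("word", { maxWordLen: 80, maxBufferNoBreak: 600 });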
package/dist/streaming/chunk-buffer.js
CHANGED
@@ -1,15 +1,19 @@
+/**
+ * Breaks an LLM’s streaming token deltas into character, word, or sentence
+ * chunks – or lets you plug in your own chunker.
+ *
+ * Usage
+ * -----
+ *   const buf = new ChunkBuffer('sentence');
+ *   stream.on('delta', d => buf.addToken(d).forEach(pushToUI));
+ *   stream.on('end', () => buf.flush().forEach(pushToUI));
+ */
+const hasSegmenter = typeof Intl !== "undefined" && "Segmenter" in Intl;
 export class ChunkBuffer {
-
-    //
-
-
-    // --------------------------------
-    graphemeSeg;
-    wordSeg;
-    sentenceSeg;
-    customChunker;
-    strategy;
-    constructor(strategy) {
+    // ────────────────────────────────────────────────────────────────────
+    // public API
+    // ────────────────────────────────────────────────────────────────────
+    constructor(strategy, opts = {}) {
         if (typeof strategy === "function") {
             this.customChunker = strategy;
             this.strategy = "custom";
@@ -17,143 +21,152 @@ export class ChunkBuffer {
         else {
             this.strategy = strategy;
         }
-        this.
-
-
-
-
-
-
+        this.MAX_WORD_LEN = opts.maxWordLen ?? 50;
+        this.MAX_BUFFER_NO_BREAK = opts.maxBufferNoBreak ?? 400;
+        if (hasSegmenter) {
+            this.graphemeSeg = new Intl.Segmenter(undefined, {
+                granularity: "grapheme",
+            });
+            this.wordSeg = new Intl.Segmenter(undefined, { granularity: "word" });
+            this.sentenceSeg = new Intl.Segmenter(undefined, {
+                granularity: "sentence",
+            });
+        }
     }
-    /** Feed one LLM
+    /** Feed one LLM delta; receive zero‑or‑more flushed chunks. */
     addToken(token) {
         this.buffer += token;
-        if (this.customChunker)
+        if (this.customChunker)
             return this.flushCustom();
-
-
-        const
-
-
-
-
-
-            case "word":
-                newChunks = this.flushWords();
-                break;
-            case "sentence":
-                newChunks = this.flushSentences();
-                break;
-        }
-        return [...longRunChunks, ...newChunks];
+        // emergency bailout for giant uninterrupted text
+        const forced = this.flushLongRuns();
+        const fresh = this.strategy === "character"
+            ? this.flushGraphemes()
+            : this.strategy === "word"
+                ? this.flushWords()
+                : this.flushSentences();
+        return forced.concat(fresh);
     }
-    /**
+    /** Call when the stream closes to emit the final remainder. */
     flush() {
-        if (!this.buffer)
+        if (!this.buffer.length)
             return [];
-        let finalChunks = [];
         if (this.customChunker) {
-            // For custom chunkers, flush everything by treating the whole buffer as input.
             const { chunks, remainder } = this.customChunker(this.buffer);
-
-
-            finalChunks.push(remainder);
-        }
+            this.buffer = "";
+            return [...chunks, remainder].filter(Boolean);
         }
-
-
-
+        // Re‑use the normal strategy until nothing more flushes.
+        const out = [];
+        while (true) {
+            const next = this.strategy === "character"
+                ? this.flushGraphemes()
+                : this.strategy === "word"
+                    ? this.flushWords()
+                    : this.flushSentences();
+            if (!next.length)
+                break;
+            out.push(...next);
         }
+        if (this.buffer)
+            out.push(this.buffer);
         this.buffer = "";
-
-        return finalChunks.filter((c) => c.length > 0);
+        return out;
     }
-    //
-    //
-    //
+    // ────────────────────────────────────────────────────────────────────
+    // internals
+    // ────────────────────────────────────────────────────────────────────
+    buffer = "";
+    strategy;
+    customChunker;
+    MAX_WORD_LEN;
+    MAX_BUFFER_NO_BREAK;
+    // These are only defined when Intl.Segmenter exists.
+    graphemeSeg;
+    wordSeg;
+    sentenceSeg;
+    // -- character ------------------------------------------------------
     flushGraphemes() {
-
-
-
+        if (!hasSegmenter)
+            return []; // unreachable on modern runtimes
+        const segs = Array.from(this.graphemeSeg.segment(this.buffer)).map((s) => s.segment);
+        /* Strategy: always keep exactly one segment in the buffer.
+           If we only have one segment so far, we don’t know whether it’s
+           complete (could be half a surrogate pair). Wait for more. */
+        if (segs.length <= 1)
             return [];
-
-
-
-        this.buffer = segments[segments.length - 1];
-        return chunksToFlush;
+        const emit = segs.slice(0, -1);
+        this.buffer = segs[segs.length - 1];
+        return emit;
     }
+    // -- word -----------------------------------------------------------
     flushWords() {
+        if (!hasSegmenter)
+            return []; // unreachable on modern runtimes
         const chunks = [];
-        let
-        let
-
-        const
-
-
-
-
-
-
-
-
-
-
+        let leadNonWord = "";
+        let word = "";
+        let tailNonWord = "";
+        for (const s of this.wordSeg.segment(this.buffer)) {
+            if (s.isWordLike) {
+                if (word && tailNonWord) {
+                    // previous word finished
+                    chunks.push(word + tailNonWord);
+                    word = tailNonWord = "";
+                }
+                word += s.segment;
+                if (word.length > this.MAX_WORD_LEN) {
+                    // force‑break huge “word”
+                    chunks.push(word + tailNonWord);
+                    word = tailNonWord = "";
                }
-                currentWord += part.segment;
            }
            else {
-                //
-
-
-
-
-
-
-                currentWord = "";
-                currentNonWord = "";
+                // non‑word segment (space / punctuation)
+                if (!word) {
+                    leadNonWord += s.segment; // leading whitespace
+                }
+                else {
+                    tailNonWord += s.segment; // trailing whitespace
+                }
            }
        }
-        //
-
-
-
-
+        // flush leading non‑word if present and some word followed
+        if (leadNonWord && word) {
+            chunks.push(leadNonWord);
+            leadNonWord = "";
+        }
+        this.buffer = leadNonWord + word + tailNonWord;
+        return chunks.filter(Boolean);
    }
+    // -- sentence -------------------------------------------------------
    flushSentences() {
-
-
-        //
-        const
-        let
-
-
-
-        }
-        if (lastMatchIndex === -1) {
-            // No definitive sentence boundary found yet.
+        if (!hasSegmenter)
+            return []; // unreachable on modern runtimes
+        // find last confirmed boundary with regex (includes CJK punctuation)
+        const boundary = /.*?[.?!。!?](\s+|$)/g; // negative‑look‑behind ellipsis left out for perf
+        let last = -1, m;
+        while ((m = boundary.exec(this.buffer)))
+            last = boundary.lastIndex;
+        if (last === -1)
            return [];
-
-
-
-        this.buffer = this.buffer.substring(lastMatchIndex);
-        // 3. Now, use Intl.Segmenter on the confirmed text to correctly split it.
-        // This handles cases where `textToFlush` contains multiple sentences.
-        return Array.from(this.sentenceSeg.segment(textToFlush))
+        const slice = this.buffer.slice(0, last);
+        this.buffer = this.buffer.slice(last);
+        return Array.from(this.sentenceSeg.segment(slice))
            .map((s) => s.segment)
-            .filter(
+            .filter(Boolean);
    }
-
+    // -- long‑run bailout ----------------------------------------------
    flushLongRuns() {
-
-        // If the buffer is very long and contains no spaces (e.g., a single long word/URL),
-        // force a break to prevent excessive buffering.
-        if (this.buffer.length > ChunkBuffer.MAX_BUFFER_NO_BREAK &&
+        if (this.buffer.length > this.MAX_BUFFER_NO_BREAK &&
            !/\s/.test(this.buffer)) {
-
-            this.buffer = this.buffer.slice(
+            const head = this.buffer.slice(0, this.MAX_BUFFER_NO_BREAK);
+            this.buffer = this.buffer.slice(this.MAX_BUFFER_NO_BREAK);
+            return [head];
        }
-        return
+        return [];
    }
+    // -- custom ---------------------------------------------------------
    flushCustom() {
        try {
            const { chunks, remainder } = this.customChunker(this.buffer);
@@ -161,7 +174,7 @@ export class ChunkBuffer {
            return chunks;
        }
        catch (err) {
-            console.error("Custom chunker failed
+            console.error("Custom chunker failed – flushing whole buffer to avoid data loss", err);
            const all = this.buffer;
            this.buffer = "";
            return [all];
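Behavioural note on the rewritten addToken/flush pair: with the 'sentence' strategy, nothing is emitted until a confirmed boundary (punctuation followed by whitespace or end of input) appears, and flush() now drains the remainder instead of filtering it away. A small walk-through, with outputs inferred from the implementation above rather than taken from the package's tests:

    // Assumes the ChunkBuffer compiled above and an Intl.Segmenter-capable runtime (Node 16+).
    const buf = new ChunkBuffer("sentence");
    buf.addToken("Hello world");    // => []                 (no confirmed boundary yet)
    buf.addToken(". How are you");  // => ["Hello world. "]  (boundary found at ". ")
    buf.flush();                    // => ["How are you"]    (remainder emitted when the stream closes)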
package/dist/streaming/providers.js
CHANGED
@@ -1,4 +1,16 @@
 import { getModelName } from "../model-mapping.js";
+/**
+ * Helper to check if a string is valid JSON
+ */
+function isValidJSON(str) {
+    try {
+        JSON.parse(str);
+        return true;
+    }
+    catch {
+        return false;
+    }
+}
 /**
  * Stream with OpenAI SDK
  */
@@ -17,8 +29,12 @@ onEvent, onComplete) {
         stream: true,
         temperature: specification.openAI?.temperature,
         //top_p: specification.openAI?.probability,
-        max_completion_tokens: specification.openAI?.completionTokenLimit,
     };
+    // Only add max_completion_tokens if it's defined
+    if (specification.openAI?.completionTokenLimit) {
+        streamConfig.max_completion_tokens =
+            specification.openAI.completionTokenLimit;
+    }
     // Add tools if provided
     if (tools && tools.length > 0) {
         streamConfig.tools = tools.map((tool) => ({
@@ -111,7 +127,7 @@ onEvent, onComplete) {
         stream: true,
         temperature: specification.anthropic?.temperature,
         //top_p: specification.anthropic?.probability,
-        max_tokens: specification.anthropic?.completionTokenLimit,
+        max_tokens: specification.anthropic?.completionTokenLimit || 8192, // required
     };
     if (systemPrompt) {
         streamConfig.system = systemPrompt;
@@ -156,6 +172,11 @@ onEvent, onComplete) {
                 const currentTool = toolCalls[toolCalls.length - 1];
                 if (currentTool) {
                     currentTool.arguments += chunk.delta.partial_json;
+                    // Debug logging for partial JSON accumulation
+                    if (process.env.DEBUG_STREAMING) {
+                        console.log(`[Anthropic] Tool ${currentTool.name} - Partial JSON chunk: "${chunk.delta.partial_json}"`);
+                        console.log(`[Anthropic] Tool ${currentTool.name} - Total accumulated: ${currentTool.arguments.length} chars`);
+                    }
                     onEvent({
                         type: "tool_call_delta",
                         toolCallId: currentTool.id,
@@ -168,6 +189,28 @@ onEvent, onComplete) {
                 // Tool call complete
                 const currentTool = toolCalls[toolCalls.length - 1];
                 if (currentTool) {
+                    // Log the final JSON for debugging
+                    if (process.env.DEBUG_STREAMING ||
+                        !isValidJSON(currentTool.arguments)) {
+                        console.log(`[Anthropic] Tool ${currentTool.name} complete with arguments (${currentTool.arguments.length} chars):`);
+                        console.log(currentTool.arguments);
+                        // Check if JSON appears truncated
+                        const lastChars = currentTool.arguments.slice(-10);
+                        if (!lastChars.includes("}") &&
+                            currentTool.arguments.length > 100) {
+                            console.warn(`[Anthropic] WARNING: JSON may be truncated - doesn't end with '}': ...${lastChars}`);
+                        }
+                        // Validate JSON
+                        try {
+                            JSON.parse(currentTool.arguments);
+                            if (process.env.DEBUG_STREAMING) {
+                                console.log(`[Anthropic] ✅ Valid JSON for ${currentTool.name}`);
+                            }
+                        }
+                        catch (e) {
+                            console.error(`[Anthropic] ❌ Invalid JSON for ${currentTool.name}: ${e}`);
+                        }
+                    }
                     onEvent({
                         type: "tool_call_complete",
                         toolCall: {
@@ -207,8 +250,11 @@ onEvent, onComplete) {
         stream: true,
         temperature: specification.google?.temperature,
         //top_p: specification.google?.probability,
-        max_tokens: specification.google?.completionTokenLimit,
     };
+    // Only add max_tokens if it's defined
+    if (specification.google?.completionTokenLimit) {
+        streamConfig.max_tokens = specification.google.completionTokenLimit;
+    }
     if (systemPrompt) {
         streamConfig.system = systemPrompt;
     }
@@ -235,8 +281,8 @@ onEvent, onComplete) {
     const model = googleClient.getGenerativeModel({
         model: modelName,
         generationConfig: {
-            temperature: streamConfig.temperature
-            maxOutputTokens: streamConfig.max_tokens
+            temperature: streamConfig.temperature,
+            maxOutputTokens: streamConfig.max_tokens,
        },
        tools: googleTools,
    });
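The provider changes share one pattern: omit token-limit keys that the specification leaves undefined, except for Anthropic, where max_tokens is required and now falls back to 8192. The extra Anthropic logging is gated behind the DEBUG_STREAMING environment variable, apart from the always-on isValidJSON() check on completed tool calls. A reduced sketch of the config pattern (spec is a stand-in for the specification object used in the diff):

    // Reduced sketch; `spec` stands in for the specification argument in providers.js.
    function tokenLimitConfigs(spec: {
        openAI?: { completionTokenLimit?: number };
        anthropic?: { completionTokenLimit?: number };
    }) {
        const openAIConfig: Record<string, unknown> = { stream: true };
        // OpenAI (and Google) now only send a limit when one is configured.
        if (spec.openAI?.completionTokenLimit) {
            openAIConfig.max_completion_tokens = spec.openAI.completionTokenLimit;
        }
        // Anthropic requires max_tokens, so a default is supplied.
        const anthropicConfig = {
            stream: true,
            max_tokens: spec.anthropic?.completionTokenLimit || 8192,
        };
        return { openAIConfig, anthropicConfig };
    }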