plugin-custom-llm 1.3.0 → 1.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
|
@@ -66,6 +66,11 @@ function getChatOpenAICompletions() {
|
|
|
66
66
|
}
|
|
67
67
|
return _ChatOpenAICompletions;
|
|
68
68
|
}
|
|
69
|
+
// Strip the `__thought__<base64>` suffix that Gemini models append to tool
// call IDs during streaming; everything before the marker is the real ID.
// Non-string or empty inputs are returned untouched.
function sanitizeToolCallId(id) {
  if (typeof id !== "string" || id.length === 0) return id;
  const marker = "__thought__";
  const cut = id.indexOf(marker);
  return cut === -1 ? id : id.slice(0, cut);
}
|
|
69
74
|
function getToolCallsKey(toolCalls = []) {
|
|
70
75
|
return toolCalls.map((tc) => {
|
|
71
76
|
var _a;
|
|
@@ -537,6 +542,52 @@ function fixEmptyToolProperties(model) {
|
|
|
537
542
|
};
|
|
538
543
|
return model;
|
|
539
544
|
}
|
|
545
|
+
// Normalize tool call IDs on every generation of a ChatResult
// (applied after `_generate`). Mutates in place and returns the result.
function sanitizeGenerateResult(result) {
  if (!result) return result;
  const generations = result.generations ?? [];
  for (const generation of generations) {
    const toolCalls = generation?.message?.tool_calls;
    if (!toolCalls) continue;
    for (const call of toolCalls) {
      call.id = sanitizeToolCallId(call.id);
    }
  }
  return result;
}
|
|
557
|
+
// Normalize tool call IDs on a streamed chunk. Both the incremental
// `tool_call_chunks` and the aggregated `tool_calls` arrays may carry IDs,
// so both are rewritten. Mutates in place and returns the chunk.
function sanitizeStreamChunk(chunk) {
  const message = chunk?.message;
  if (message) {
    for (const list of [message.tool_call_chunks, message.tool_calls]) {
      if (!list) continue;
      for (const call of list) {
        call.id = sanitizeToolCallId(call.id);
      }
    }
  }
  return chunk;
}
|
|
571
|
+
// Create a subclass of the given chat model class whose outputs always carry
// sanitized tool call IDs (strips Gemini's `__thought__<base64>` suffix).
// Class-level overrides (rather than instance patching) keep the sanitization
// in place across bindTools()/RunnableBinding wrappers.
function createSanitizedChatClass(BaseClass) {
  return class SanitizedChatModel extends BaseClass {
    // Sanitize the full ChatResult produced by the base implementation.
    async _generate(messages, options, runManager) {
      const result = await super._generate(messages, options, runManager);
      return sanitizeGenerateResult(result);
    }
    // Sanitize every chunk emitted by the base streaming implementation.
    async *_streamResponseChunks(messages, options, runManager) {
      for await (const chunk of super._streamResponseChunks(messages, options, runManager)) {
        yield sanitizeStreamChunk(chunk);
      }
    }
    // Some base classes also expose `_stream`; wrap it when present.
    // Fix: previously this yielded nothing when the base class had no
    // `_stream`, silently swallowing all output — fall back to the
    // (already sanitized) `_streamResponseChunks` path instead.
    async *_stream(messages, options, runManager) {
      if (typeof super._stream === "function") {
        for await (const chunk of super._stream(messages, options, runManager)) {
          yield sanitizeStreamChunk(chunk);
        }
      } else {
        yield* this._streamResponseChunks(messages, options, runManager);
      }
    }
  };
}
|
|
540
591
|
class CustomLLMProvider extends import_plugin_ai.LLMProvider {
|
|
541
592
|
get baseURL() {
|
|
542
593
|
return null;
|
|
@@ -574,7 +625,8 @@ class CustomLLMProvider extends import_plugin_ai.LLMProvider {
|
|
|
574
625
|
if (reqConfig.extraBody && typeof reqConfig.extraBody === "object") {
|
|
575
626
|
Object.assign(modelKwargs, reqConfig.extraBody);
|
|
576
627
|
}
|
|
577
|
-
const
|
|
628
|
+
const BaseChatClass = enableReasoning ? createReasoningChatClass() : getChatOpenAI();
|
|
629
|
+
const ChatClass = createSanitizedChatClass(BaseChatClass);
|
|
578
630
|
const config = {
|
|
579
631
|
apiKey,
|
|
580
632
|
...this.modelOptions,
|
|
@@ -627,7 +679,7 @@ class CustomLLMProvider extends import_plugin_ai.LLMProvider {
|
|
|
627
679
|
workContext
|
|
628
680
|
};
|
|
629
681
|
if (toolCalls) {
|
|
630
|
-
content.tool_calls = toolCalls;
|
|
682
|
+
content.tool_calls = Array.isArray(toolCalls) ? toolCalls.map((tc) => ({ ...tc, id: sanitizeToolCallId(tc.id) })) : toolCalls;
|
|
631
683
|
}
|
|
632
684
|
if (Array.isArray(content.content)) {
|
|
633
685
|
const textBlocks = content.content.filter((block) => block.type === "text");
|
package/package.json
CHANGED
|
@@ -3,7 +3,7 @@
|
|
|
3
3
|
"displayName": "AI LLM: Custom (OpenAI Compatible)",
|
|
4
4
|
"displayName.zh-CN": "AI LLM:自定义(OpenAI 兼容)",
|
|
5
5
|
"description": "OpenAI-compatible LLM provider with auto response format detection for external LLM services.",
|
|
6
|
-
"version": "1.3.
|
|
6
|
+
"version": "1.3.2",
|
|
7
7
|
"main": "dist/server/index.js",
|
|
8
8
|
"files": [
|
|
9
9
|
"dist",
|
|
@@ -50,6 +50,17 @@ function getChatOpenAICompletions() {
|
|
|
50
50
|
return _ChatOpenAICompletions;
|
|
51
51
|
}
|
|
52
52
|
|
|
53
|
+
/**
|
|
54
|
+
* Sanitize a tool call ID by stripping the `__thought__<base64>` suffix
|
|
55
|
+
* that Gemini models append during streaming. The suffix is excessively
|
|
56
|
+
* long and causes errors when langgraph reads messages back from history.
|
|
57
|
+
*/
|
|
58
|
+
function sanitizeToolCallId(id: string | undefined): string | undefined {
|
|
59
|
+
if (!id || typeof id !== 'string') return id;
|
|
60
|
+
const idx = id.indexOf('__thought__');
|
|
61
|
+
return idx !== -1 ? id.substring(0, idx) : id;
|
|
62
|
+
}
|
|
63
|
+
|
|
53
64
|
/**
|
|
54
65
|
* Build tool_calls key for reasoning content map lookup.
|
|
55
66
|
*/
|
|
@@ -715,6 +726,69 @@ function fixEmptyToolProperties(model: any) {
|
|
|
715
726
|
return model;
|
|
716
727
|
}
|
|
717
728
|
|
|
729
|
+
/**
|
|
730
|
+
* Sanitize all tool call IDs in a ChatResult (used after _generate).
|
|
731
|
+
*/
|
|
732
|
+
function sanitizeGenerateResult(result: any): any {
|
|
733
|
+
if (!result) return result;
|
|
734
|
+
for (const gen of result?.generations ?? []) {
|
|
735
|
+
const msg = gen?.message;
|
|
736
|
+
if (msg?.tool_calls) {
|
|
737
|
+
for (const tc of msg.tool_calls) {
|
|
738
|
+
tc.id = sanitizeToolCallId(tc.id);
|
|
739
|
+
}
|
|
740
|
+
}
|
|
741
|
+
}
|
|
742
|
+
return result;
|
|
743
|
+
}
|
|
744
|
+
|
|
745
|
+
/**
|
|
746
|
+
* Sanitize tool call IDs in a streaming chunk.
|
|
747
|
+
*/
|
|
748
|
+
function sanitizeStreamChunk(chunk: any): any {
|
|
749
|
+
const msg = chunk?.message;
|
|
750
|
+
if (msg?.tool_call_chunks) {
|
|
751
|
+
for (const tc of msg.tool_call_chunks) {
|
|
752
|
+
tc.id = sanitizeToolCallId(tc.id);
|
|
753
|
+
}
|
|
754
|
+
}
|
|
755
|
+
if (msg?.tool_calls) {
|
|
756
|
+
for (const tc of msg.tool_calls) {
|
|
757
|
+
tc.id = sanitizeToolCallId(tc.id);
|
|
758
|
+
}
|
|
759
|
+
}
|
|
760
|
+
return chunk;
|
|
761
|
+
}
|
|
762
|
+
|
|
763
|
+
/**
|
|
764
|
+
* Create a subclass of the given ChatModel class that sanitizes tool call IDs
|
|
765
|
+
* in all outputs. Gemini models return IDs like `call_xxx__thought__<long_base64>`
|
|
766
|
+
* which are too long for langgraph. Using class-level overrides (instead of
|
|
767
|
+
* instance patching) ensures the sanitization survives bindTools/RunnableBinding.
|
|
768
|
+
*/
|
|
769
|
+
function createSanitizedChatClass(BaseClass: any) {
|
|
770
|
+
return class SanitizedChatModel extends BaseClass {
|
|
771
|
+
async _generate(messages: any[], options: any, runManager?: any) {
|
|
772
|
+
const result = await super._generate(messages, options, runManager);
|
|
773
|
+
return sanitizeGenerateResult(result);
|
|
774
|
+
}
|
|
775
|
+
|
|
776
|
+
async *_streamResponseChunks(messages: any[], options: any, runManager?: any) {
|
|
777
|
+
for await (const chunk of super._streamResponseChunks(messages, options, runManager)) {
|
|
778
|
+
yield sanitizeStreamChunk(chunk);
|
|
779
|
+
}
|
|
780
|
+
}
|
|
781
|
+
|
|
782
|
+
async *_stream(messages: any[], options: any, runManager?: any) {
|
|
783
|
+
if (typeof super._stream === 'function') {
|
|
784
|
+
for await (const chunk of super._stream(messages, options, runManager)) {
|
|
785
|
+
yield sanitizeStreamChunk(chunk);
|
|
786
|
+
}
|
|
787
|
+
}
|
|
788
|
+
}
|
|
789
|
+
};
|
|
790
|
+
}
|
|
791
|
+
|
|
718
792
|
export class CustomLLMProvider extends LLMProvider {
|
|
719
793
|
get baseURL() {
|
|
720
794
|
return null;
|
|
@@ -755,7 +829,11 @@ export class CustomLLMProvider extends LLMProvider {
|
|
|
755
829
|
// Issue #4: Use ReasoningChatOpenAI when enableReasoning is set.
|
|
756
830
|
// This ensures reasoning_content is preserved and patched back into
|
|
757
831
|
// assistant messages during tool call round-trips (required by DeepSeek-R1, etc.)
|
|
758
|
-
|
|
832
|
+
// Wrap with tool call ID sanitizer at the class level — ensures
|
|
833
|
+
// __thought__<base64> suffixes from Gemini are stripped in all code paths
|
|
834
|
+
// (invoke, stream, bindTools bindings) via prototype chain.
|
|
835
|
+
const BaseChatClass = enableReasoning ? createReasoningChatClass() : getChatOpenAI();
|
|
836
|
+
const ChatClass = createSanitizedChatClass(BaseChatClass);
|
|
759
837
|
const config: Record<string, any> = {
|
|
760
838
|
apiKey,
|
|
761
839
|
...this.modelOptions,
|
|
@@ -830,7 +908,9 @@ export class CustomLLMProvider extends LLMProvider {
|
|
|
830
908
|
};
|
|
831
909
|
|
|
832
910
|
if (toolCalls) {
|
|
833
|
-
content.tool_calls = toolCalls
|
|
911
|
+
content.tool_calls = Array.isArray(toolCalls)
|
|
912
|
+
? toolCalls.map((tc: any) => ({ ...tc, id: sanitizeToolCallId(tc.id) }))
|
|
913
|
+
: toolCalls;
|
|
834
914
|
}
|
|
835
915
|
|
|
836
916
|
if (Array.isArray(content.content)) {
|