@workglow/ai-provider 0.1.1 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/common/ToolCallParsers.d.ts +253 -0
- package/dist/common/ToolCallParsers.d.ts.map +1 -0
- package/dist/provider-anthropic/AnthropicProvider.d.ts +3 -2
- package/dist/provider-anthropic/AnthropicProvider.d.ts.map +1 -1
- package/dist/provider-anthropic/AnthropicQueuedProvider.d.ts +3 -2
- package/dist/provider-anthropic/AnthropicQueuedProvider.d.ts.map +1 -1
- package/dist/provider-anthropic/common/Anthropic_Client.d.ts.map +1 -1
- package/dist/provider-anthropic/common/Anthropic_JobRunFns.d.ts.map +1 -1
- package/dist/provider-anthropic/common/Anthropic_ModelSchema.d.ts.map +1 -1
- package/dist/provider-anthropic/common/Anthropic_ToolCalling.d.ts +10 -0
- package/dist/provider-anthropic/common/Anthropic_ToolCalling.d.ts.map +1 -0
- package/dist/provider-anthropic/index.js +3 -4
- package/dist/provider-anthropic/index.js.map +3 -3
- package/dist/provider-anthropic/runtime.js +225 -8
- package/dist/provider-anthropic/runtime.js.map +9 -8
- package/dist/provider-chrome/WebBrowserProvider.d.ts +2 -1
- package/dist/provider-chrome/WebBrowserProvider.d.ts.map +1 -1
- package/dist/provider-chrome/WebBrowserQueuedProvider.d.ts +2 -1
- package/dist/provider-chrome/WebBrowserQueuedProvider.d.ts.map +1 -1
- package/dist/provider-chrome/common/WebBrowser_Constants.d.ts.map +1 -1
- package/dist/provider-chrome/common/WebBrowser_ModelSchema.d.ts.map +1 -1
- package/dist/provider-chrome/index.js +2 -4
- package/dist/provider-chrome/index.js.map +3 -3
- package/dist/provider-chrome/runtime.js +3 -7
- package/dist/provider-chrome/runtime.js.map +4 -4
- package/dist/provider-gemini/GoogleGeminiProvider.d.ts +3 -2
- package/dist/provider-gemini/GoogleGeminiProvider.d.ts.map +1 -1
- package/dist/provider-gemini/GoogleGeminiQueuedProvider.d.ts +3 -2
- package/dist/provider-gemini/GoogleGeminiQueuedProvider.d.ts.map +1 -1
- package/dist/provider-gemini/common/Gemini_JobRunFns.d.ts.map +1 -1
- package/dist/provider-gemini/common/Gemini_ModelSchema.d.ts.map +1 -1
- package/dist/provider-gemini/common/Gemini_ToolCalling.d.ts +10 -0
- package/dist/provider-gemini/common/Gemini_ToolCalling.d.ts.map +1 -0
- package/dist/provider-gemini/index.js +3 -4
- package/dist/provider-gemini/index.js.map +3 -3
- package/dist/provider-gemini/runtime.js +188 -8
- package/dist/provider-gemini/runtime.js.map +7 -6
- package/dist/provider-hf-inference/HfInferenceProvider.d.ts +3 -2
- package/dist/provider-hf-inference/HfInferenceProvider.d.ts.map +1 -1
- package/dist/provider-hf-inference/HfInferenceQueuedProvider.d.ts +3 -2
- package/dist/provider-hf-inference/HfInferenceQueuedProvider.d.ts.map +1 -1
- package/dist/provider-hf-inference/common/HFI_JobRunFns.d.ts.map +1 -1
- package/dist/provider-hf-inference/common/HFI_ModelSchema.d.ts.map +1 -1
- package/dist/provider-hf-inference/common/HFI_ToolCalling.d.ts +10 -0
- package/dist/provider-hf-inference/common/HFI_ToolCalling.d.ts.map +1 -0
- package/dist/provider-hf-inference/index.js +3 -4
- package/dist/provider-hf-inference/index.js.map +3 -3
- package/dist/provider-hf-inference/runtime.js +157 -8
- package/dist/provider-hf-inference/runtime.js.map +7 -6
- package/dist/provider-hf-transformers/HuggingFaceTransformersProvider.d.ts +3 -2
- package/dist/provider-hf-transformers/HuggingFaceTransformersProvider.d.ts.map +1 -1
- package/dist/provider-hf-transformers/HuggingFaceTransformersQueuedProvider.d.ts +3 -3
- package/dist/provider-hf-transformers/HuggingFaceTransformersQueuedProvider.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_Constants.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_JobRunFns.d.ts +52 -0
- package/dist/provider-hf-transformers/common/HFT_JobRunFns.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_ModelSchema.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_OnnxDtypes.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_Pipeline.d.ts +1 -1
- package/dist/provider-hf-transformers/common/HFT_Pipeline.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_StructuredGeneration.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_TextClassification.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_TextQuestionAnswer.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_TextSummary.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_TextTranslation.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_ToolCalling.d.ts +10 -0
- package/dist/provider-hf-transformers/common/HFT_ToolCalling.d.ts.map +1 -0
- package/dist/provider-hf-transformers/common/HFT_ToolMarkup.d.ts +20 -0
- package/dist/provider-hf-transformers/common/HFT_ToolMarkup.d.ts.map +1 -0
- package/dist/provider-hf-transformers/common/HFT_ToolParser.d.ts +67 -0
- package/dist/provider-hf-transformers/common/HFT_ToolParser.d.ts.map +1 -0
- package/dist/provider-hf-transformers/index.d.ts +1 -0
- package/dist/provider-hf-transformers/index.d.ts.map +1 -1
- package/dist/provider-hf-transformers/index.js +71 -7
- package/dist/provider-hf-transformers/index.js.map +7 -6
- package/dist/provider-hf-transformers/runtime.d.ts +1 -0
- package/dist/provider-hf-transformers/runtime.d.ts.map +1 -1
- package/dist/provider-hf-transformers/runtime.js +1244 -13
- package/dist/provider-hf-transformers/runtime.js.map +18 -15
- package/dist/provider-llamacpp/LlamaCppProvider.d.ts +3 -2
- package/dist/provider-llamacpp/LlamaCppProvider.d.ts.map +1 -1
- package/dist/provider-llamacpp/LlamaCppQueuedProvider.d.ts +3 -2
- package/dist/provider-llamacpp/LlamaCppQueuedProvider.d.ts.map +1 -1
- package/dist/provider-llamacpp/common/LlamaCpp_JobRunFns.d.ts.map +1 -1
- package/dist/provider-llamacpp/common/LlamaCpp_ModelSchema.d.ts.map +1 -1
- package/dist/provider-llamacpp/common/LlamaCpp_ToolCalling.d.ts +10 -0
- package/dist/provider-llamacpp/common/LlamaCpp_ToolCalling.d.ts.map +1 -0
- package/dist/provider-llamacpp/common/LlamaCpp_ToolParser.d.ts +35 -0
- package/dist/provider-llamacpp/common/LlamaCpp_ToolParser.d.ts.map +1 -0
- package/dist/provider-llamacpp/index.js +3 -4
- package/dist/provider-llamacpp/index.js.map +3 -3
- package/dist/provider-llamacpp/runtime.js +1394 -9
- package/dist/provider-llamacpp/runtime.js.map +11 -8
- package/dist/provider-ollama/OllamaProvider.d.ts +3 -2
- package/dist/provider-ollama/OllamaProvider.d.ts.map +1 -1
- package/dist/provider-ollama/OllamaQueuedProvider.d.ts +3 -2
- package/dist/provider-ollama/OllamaQueuedProvider.d.ts.map +1 -1
- package/dist/provider-ollama/common/Ollama_JobRunFns.browser.d.ts +47 -1
- package/dist/provider-ollama/common/Ollama_JobRunFns.browser.d.ts.map +1 -1
- package/dist/provider-ollama/common/Ollama_JobRunFns.d.ts +46 -0
- package/dist/provider-ollama/common/Ollama_JobRunFns.d.ts.map +1 -1
- package/dist/provider-ollama/common/Ollama_ModelSchema.d.ts.map +1 -1
- package/dist/provider-ollama/common/Ollama_ToolCalling.d.ts +16 -0
- package/dist/provider-ollama/common/Ollama_ToolCalling.d.ts.map +1 -0
- package/dist/provider-ollama/index.browser.js +3 -4
- package/dist/provider-ollama/index.browser.js.map +3 -3
- package/dist/provider-ollama/index.js +3 -4
- package/dist/provider-ollama/index.js.map +3 -3
- package/dist/provider-ollama/runtime.browser.js +130 -8
- package/dist/provider-ollama/runtime.browser.js.map +8 -7
- package/dist/provider-ollama/runtime.js +125 -8
- package/dist/provider-ollama/runtime.js.map +8 -7
- package/dist/provider-openai/OpenAiProvider.d.ts +3 -2
- package/dist/provider-openai/OpenAiProvider.d.ts.map +1 -1
- package/dist/provider-openai/OpenAiQueuedProvider.d.ts +3 -2
- package/dist/provider-openai/OpenAiQueuedProvider.d.ts.map +1 -1
- package/dist/provider-openai/common/OpenAI_Client.d.ts.map +1 -1
- package/dist/provider-openai/common/OpenAI_JobRunFns.browser.d.ts.map +1 -1
- package/dist/provider-openai/common/OpenAI_JobRunFns.d.ts.map +1 -1
- package/dist/provider-openai/common/OpenAI_ModelSchema.d.ts.map +1 -1
- package/dist/provider-openai/common/OpenAI_ToolCalling.d.ts +10 -0
- package/dist/provider-openai/common/OpenAI_ToolCalling.d.ts.map +1 -0
- package/dist/provider-openai/index.browser.js +3 -4
- package/dist/provider-openai/index.browser.js.map +3 -3
- package/dist/provider-openai/index.js +3 -4
- package/dist/provider-openai/index.js.map +3 -3
- package/dist/provider-openai/runtime.browser.js +138 -8
- package/dist/provider-openai/runtime.browser.js.map +8 -7
- package/dist/provider-openai/runtime.js +138 -8
- package/dist/provider-openai/runtime.js.map +8 -7
- package/dist/provider-tf-mediapipe/TensorFlowMediaPipeProvider.d.ts +2 -1
- package/dist/provider-tf-mediapipe/TensorFlowMediaPipeProvider.d.ts.map +1 -1
- package/dist/provider-tf-mediapipe/TensorFlowMediaPipeQueuedProvider.d.ts +2 -1
- package/dist/provider-tf-mediapipe/TensorFlowMediaPipeQueuedProvider.d.ts.map +1 -1
- package/dist/provider-tf-mediapipe/common/TFMP_Constants.d.ts.map +1 -1
- package/dist/provider-tf-mediapipe/common/TFMP_Download.d.ts.map +1 -1
- package/dist/provider-tf-mediapipe/common/TFMP_ImageClassification.d.ts.map +1 -1
- package/dist/provider-tf-mediapipe/common/TFMP_JobRunFns.d.ts.map +1 -1
- package/dist/provider-tf-mediapipe/common/TFMP_ModelSchema.d.ts.map +1 -1
- package/dist/provider-tf-mediapipe/common/TFMP_Runtime.d.ts +3 -16
- package/dist/provider-tf-mediapipe/common/TFMP_Runtime.d.ts.map +1 -1
- package/dist/provider-tf-mediapipe/index.js.map +2 -2
- package/dist/provider-tf-mediapipe/runtime.js +2 -4
- package/dist/provider-tf-mediapipe/runtime.js.map +13 -13
- package/dist/test.d.ts +7 -0
- package/dist/test.d.ts.map +1 -0
- package/dist/test.js +913 -0
- package/dist/test.js.map +10 -0
- package/package.json +24 -17
|
@@ -644,6 +644,1391 @@ var LlamaCpp_TextSummary_Stream = async function* (input, model, signal) {
|
|
|
644
644
|
}
|
|
645
645
|
};
|
|
646
646
|
|
|
647
|
+
// src/provider-llamacpp/common/LlamaCpp_ToolCalling.ts
|
|
648
|
+
import { filterValidToolCalls } from "@workglow/ai/worker";
|
|
649
|
+
|
|
650
|
+
// src/common/ToolCallParsers.ts
|
|
651
|
+
function stripModelArtifacts(text) {
|
|
652
|
+
return text.replace(/<think>(?:[^<]|<(?!\/think>))*<\/think>/g, "").replace(/<\|[a-z_]+\|>/g, "").trim();
|
|
653
|
+
}
|
|
654
|
+
function makeToolCall(name, args, id = null) {
|
|
655
|
+
return { name, arguments: args, id };
|
|
656
|
+
}
|
|
657
|
+
function tryParseJson(text) {
|
|
658
|
+
try {
|
|
659
|
+
return JSON.parse(text);
|
|
660
|
+
} catch {
|
|
661
|
+
return;
|
|
662
|
+
}
|
|
663
|
+
}
|
|
664
|
+
function findBalancedBlocks(source, openChar, closeChar, startFrom = 0) {
|
|
665
|
+
const results = [];
|
|
666
|
+
const length = source.length;
|
|
667
|
+
let i = startFrom;
|
|
668
|
+
while (i < length) {
|
|
669
|
+
if (source[i] !== openChar) {
|
|
670
|
+
i++;
|
|
671
|
+
continue;
|
|
672
|
+
}
|
|
673
|
+
let depth = 1;
|
|
674
|
+
let j = i + 1;
|
|
675
|
+
let inString = false;
|
|
676
|
+
let escape = false;
|
|
677
|
+
while (j < length && depth > 0) {
|
|
678
|
+
const ch = source[j];
|
|
679
|
+
if (inString) {
|
|
680
|
+
if (escape) {
|
|
681
|
+
escape = false;
|
|
682
|
+
} else if (ch === "\\") {
|
|
683
|
+
escape = true;
|
|
684
|
+
} else if (ch === '"') {
|
|
685
|
+
inString = false;
|
|
686
|
+
}
|
|
687
|
+
} else {
|
|
688
|
+
if (ch === '"') {
|
|
689
|
+
inString = true;
|
|
690
|
+
} else if (ch === openChar) {
|
|
691
|
+
depth++;
|
|
692
|
+
} else if (ch === closeChar) {
|
|
693
|
+
depth--;
|
|
694
|
+
}
|
|
695
|
+
}
|
|
696
|
+
j++;
|
|
697
|
+
}
|
|
698
|
+
if (depth === 0) {
|
|
699
|
+
results.push({ text: source.slice(i, j), start: i, end: j });
|
|
700
|
+
i = j;
|
|
701
|
+
} else {
|
|
702
|
+
break;
|
|
703
|
+
}
|
|
704
|
+
}
|
|
705
|
+
return results;
|
|
706
|
+
}
|
|
707
|
+
function parseJsonToolCallArray(jsonStr, nameKey = "name", argsKeys = ["arguments", "parameters"]) {
|
|
708
|
+
const parsed = tryParseJson(jsonStr.trim());
|
|
709
|
+
if (!parsed)
|
|
710
|
+
return;
|
|
711
|
+
const arr = Array.isArray(parsed) ? parsed : [parsed];
|
|
712
|
+
const calls = arr.filter((c) => !!c && typeof c === "object" && !!c[nameKey]).map((c) => {
|
|
713
|
+
const args = argsKeys.reduce((found, key) => found ?? c[key], undefined);
|
|
714
|
+
return makeToolCall(c[nameKey], args ?? {}, c.id ?? null);
|
|
715
|
+
});
|
|
716
|
+
return calls.length > 0 ? calls : undefined;
|
|
717
|
+
}
|
|
718
|
+
function parseKeyValueArgs(argsStr) {
|
|
719
|
+
const args = {};
|
|
720
|
+
if (!argsStr)
|
|
721
|
+
return args;
|
|
722
|
+
const argRegex = /(?<!\w)(\w+)\s*=\s*(?:"([^"]*)"|'([^']*)'|([^\s,]+))/g;
|
|
723
|
+
let match;
|
|
724
|
+
while ((match = argRegex.exec(argsStr)) !== null) {
|
|
725
|
+
const key = match[1];
|
|
726
|
+
const value = match[2] ?? match[3] ?? match[4];
|
|
727
|
+
args[key] = coerceArgValue(value);
|
|
728
|
+
}
|
|
729
|
+
return args;
|
|
730
|
+
}
|
|
731
|
+
function coerceArgValue(value) {
|
|
732
|
+
if (value === "true")
|
|
733
|
+
return true;
|
|
734
|
+
if (value === "false")
|
|
735
|
+
return false;
|
|
736
|
+
if (value !== "" && !isNaN(Number(value)))
|
|
737
|
+
return Number(value);
|
|
738
|
+
return value;
|
|
739
|
+
}
|
|
740
|
+
function parseFunctionGemmaArgumentValue(rawValue) {
|
|
741
|
+
const trimmed = rawValue.trim();
|
|
742
|
+
if (trimmed.length === 0)
|
|
743
|
+
return "";
|
|
744
|
+
if (trimmed === "true")
|
|
745
|
+
return true;
|
|
746
|
+
if (trimmed === "false")
|
|
747
|
+
return false;
|
|
748
|
+
if (trimmed === "null")
|
|
749
|
+
return null;
|
|
750
|
+
const numeric = Number(trimmed);
|
|
751
|
+
if (!Number.isNaN(numeric) && /^-?\d+(?:\.\d+)?$/.test(trimmed)) {
|
|
752
|
+
return numeric;
|
|
753
|
+
}
|
|
754
|
+
if (trimmed.startsWith('"') && trimmed.endsWith('"') || trimmed.startsWith("{") && trimmed.endsWith("}") || trimmed.startsWith("[") && trimmed.endsWith("]")) {
|
|
755
|
+
try {
|
|
756
|
+
return JSON.parse(trimmed);
|
|
757
|
+
} catch {}
|
|
758
|
+
}
|
|
759
|
+
return trimmed;
|
|
760
|
+
}
|
|
761
|
+
function parseFunctionGemmaLooseObject(text) {
|
|
762
|
+
const trimmed = text.trim();
|
|
763
|
+
if (!trimmed.startsWith("{") || !trimmed.endsWith("}")) {
|
|
764
|
+
return;
|
|
765
|
+
}
|
|
766
|
+
const inner = trimmed.slice(1, -1).trim();
|
|
767
|
+
if (inner.length === 0) {
|
|
768
|
+
return {};
|
|
769
|
+
}
|
|
770
|
+
const result = {};
|
|
771
|
+
const pairs = inner.matchAll(/([A-Za-z0-9_]+)\s*:\s*('[^']*'|"[^"]*"|[^,}]+)/g);
|
|
772
|
+
for (const [_, rawKey, rawValue] of pairs) {
|
|
773
|
+
const key = rawKey.trim();
|
|
774
|
+
const valueText = rawValue.trim().replace(/^'([^']*)'$/, '"$1"');
|
|
775
|
+
result[key] = parseFunctionGemmaArgumentValue(valueText);
|
|
776
|
+
}
|
|
777
|
+
return Object.keys(result).length > 0 ? result : undefined;
|
|
778
|
+
}
|
|
779
|
+
var parseLlama = (text) => {
|
|
780
|
+
const calls = [];
|
|
781
|
+
let content = text;
|
|
782
|
+
const pythonTagMatch = text.match(/<\|python_tag\|>((?:[^<]|<(?!\|eot_id\|>|\|eom_id\|>))*)(?:<\|eot_id\|>|<\|eom_id\|>|$)/);
|
|
783
|
+
if (pythonTagMatch) {
|
|
784
|
+
content = text.slice(0, text.indexOf("<|python_tag|>")).trim();
|
|
785
|
+
const jsonSection = pythonTagMatch[1].trim();
|
|
786
|
+
for (const line of jsonSection.split(`
|
|
787
|
+
`)) {
|
|
788
|
+
const trimmed = line.trim();
|
|
789
|
+
if (!trimmed)
|
|
790
|
+
continue;
|
|
791
|
+
const parsed = tryParseJson(trimmed);
|
|
792
|
+
if (parsed?.name) {
|
|
793
|
+
calls.push(makeToolCall(parsed.name, parsed.parameters ?? parsed.arguments ?? {}, parsed.id ?? null));
|
|
794
|
+
}
|
|
795
|
+
}
|
|
796
|
+
}
|
|
797
|
+
if (calls.length === 0) {
|
|
798
|
+
const funcTagRegex = /<function=(\w+)>((?:[^<]|<(?!\/function>))*)<\/function>/g;
|
|
799
|
+
let funcMatch;
|
|
800
|
+
while ((funcMatch = funcTagRegex.exec(text)) !== null) {
|
|
801
|
+
const args = tryParseJson(funcMatch[2].trim());
|
|
802
|
+
if (args) {
|
|
803
|
+
calls.push(makeToolCall(funcMatch[1], args));
|
|
804
|
+
}
|
|
805
|
+
}
|
|
806
|
+
if (calls.length > 0) {
|
|
807
|
+
content = text.replace(/<function=\w+>(?:[^<]|<(?!\/function>))*<\/function>/g, "").trim();
|
|
808
|
+
}
|
|
809
|
+
}
|
|
810
|
+
if (calls.length === 0) {
|
|
811
|
+
const blocks = findBalancedBlocks(text, "{", "}");
|
|
812
|
+
for (const block of blocks) {
|
|
813
|
+
const parsed = tryParseJson(block.text);
|
|
814
|
+
if (parsed?.name && (parsed.parameters !== undefined || parsed.arguments !== undefined)) {
|
|
815
|
+
calls.push(makeToolCall(parsed.name, parsed.parameters ?? parsed.arguments ?? {}, parsed.id ?? null));
|
|
816
|
+
}
|
|
817
|
+
}
|
|
818
|
+
if (calls.length > 0) {
|
|
819
|
+
content = text.slice(0, text.indexOf(calls[0].name) - '{"name": "'.length).trim();
|
|
820
|
+
}
|
|
821
|
+
}
|
|
822
|
+
return calls.length > 0 ? { tool_calls: calls, content, parser: "llama" } : null;
|
|
823
|
+
};
|
|
824
|
+
var parseMistral = (text) => {
|
|
825
|
+
const marker = "[TOOL_CALLS]";
|
|
826
|
+
const idx = text.indexOf(marker);
|
|
827
|
+
if (idx === -1)
|
|
828
|
+
return null;
|
|
829
|
+
const content = text.slice(0, idx).trim();
|
|
830
|
+
const jsonStr = text.slice(idx + marker.length).trim();
|
|
831
|
+
const calls = parseJsonToolCallArray(jsonStr);
|
|
832
|
+
return calls ? { tool_calls: calls, content, parser: "mistral" } : null;
|
|
833
|
+
};
|
|
834
|
+
var parseHermes = (text) => {
|
|
835
|
+
const regex = /<tool_call>((?:[^<]|<(?!\/tool_call>))*)<\/tool_call>/g;
|
|
836
|
+
const calls = [];
|
|
837
|
+
let match;
|
|
838
|
+
while ((match = regex.exec(text)) !== null) {
|
|
839
|
+
const parsed = tryParseJson(match[1].trim());
|
|
840
|
+
if (parsed) {
|
|
841
|
+
calls.push(makeToolCall(parsed.name ?? "", parsed.arguments ?? parsed.parameters ?? {}, parsed.id ?? null));
|
|
842
|
+
}
|
|
843
|
+
}
|
|
844
|
+
if (calls.length === 0)
|
|
845
|
+
return null;
|
|
846
|
+
const content = text.replace(/<tool_call>(?:[^<]|<(?!\/tool_call>))*<\/tool_call>/g, "").trim();
|
|
847
|
+
return { tool_calls: calls, content, parser: "hermes" };
|
|
848
|
+
};
|
|
849
|
+
var parseCohere = (text) => {
|
|
850
|
+
const blockMatch = text.match(/Action:\s*```(?:json)?\n?((?:[^`]|`(?!``))*)\n?```/);
|
|
851
|
+
let inlineJsonStr;
|
|
852
|
+
if (!blockMatch) {
|
|
853
|
+
const actionIdx2 = text.indexOf("Action:");
|
|
854
|
+
if (actionIdx2 !== -1) {
|
|
855
|
+
const afterAction = text.slice(actionIdx2 + "Action:".length).trimStart();
|
|
856
|
+
if (afterAction.startsWith("[")) {
|
|
857
|
+
const blocks = findBalancedBlocks(afterAction, "[", "]");
|
|
858
|
+
if (blocks.length > 0) {
|
|
859
|
+
inlineJsonStr = blocks[0].text;
|
|
860
|
+
}
|
|
861
|
+
}
|
|
862
|
+
}
|
|
863
|
+
}
|
|
864
|
+
const jsonStr = blockMatch?.[1] ?? inlineJsonStr;
|
|
865
|
+
if (!jsonStr)
|
|
866
|
+
return null;
|
|
867
|
+
const calls = parseJsonToolCallArray(jsonStr, "tool_name", ["parameters", "arguments"]);
|
|
868
|
+
if (!calls) {
|
|
869
|
+
const fallbackCalls = parseJsonToolCallArray(jsonStr);
|
|
870
|
+
if (!fallbackCalls)
|
|
871
|
+
return null;
|
|
872
|
+
const actionIdx2 = text.indexOf("Action:");
|
|
873
|
+
const content2 = text.slice(0, actionIdx2).trim();
|
|
874
|
+
return { tool_calls: fallbackCalls, content: content2, parser: "cohere" };
|
|
875
|
+
}
|
|
876
|
+
const actionIdx = text.indexOf("Action:");
|
|
877
|
+
const content = text.slice(0, actionIdx).trim();
|
|
878
|
+
return { tool_calls: calls, content, parser: "cohere" };
|
|
879
|
+
};
|
|
880
|
+
var parseDeepSeek = (text) => {
|
|
881
|
+
const calls = [];
|
|
882
|
+
const bar = "(?:||\\|)";
|
|
883
|
+
const sep = "[\\s▁]";
|
|
884
|
+
const v31Regex = new RegExp(`<${bar}tool${sep}call${sep}begin${bar}>\\s*(\\w+)\\s*<${bar}tool${sep}sep${bar}>\\s*([^<]*(?:<(?!${bar}tool${sep}call${sep}end${bar}>)[^<]*)*)\\s*<${bar}tool${sep}call${sep}end${bar}>`, "g");
|
|
885
|
+
let match;
|
|
886
|
+
while ((match = v31Regex.exec(text)) !== null) {
|
|
887
|
+
const args = tryParseJson(match[2].trim());
|
|
888
|
+
if (args) {
|
|
889
|
+
calls.push(makeToolCall(match[1], args));
|
|
890
|
+
}
|
|
891
|
+
}
|
|
892
|
+
if (calls.length === 0) {
|
|
893
|
+
const v2Regex = new RegExp(`<${bar}tool${sep}call${sep}begin${bar}>\\s*(\\w+)\\s*\\n\`\`\`(?:json)?\\n([^\`]*(?:\`(?!\`\`)[^\`]*)*)\\n\`\`\`\\s*<${bar}tool${sep}call${sep}end${bar}>`, "g");
|
|
894
|
+
while ((match = v2Regex.exec(text)) !== null) {
|
|
895
|
+
const args = tryParseJson(match[2].trim());
|
|
896
|
+
if (args) {
|
|
897
|
+
calls.push(makeToolCall(match[1], args));
|
|
898
|
+
}
|
|
899
|
+
}
|
|
900
|
+
}
|
|
901
|
+
if (calls.length === 0)
|
|
902
|
+
return null;
|
|
903
|
+
const content = text.replace(new RegExp(`<${bar}tool${sep}calls?${sep}(?:begin|end)${bar}>`, "g"), "").replace(new RegExp(`<${bar}tool${sep}call${sep}(?:begin|end)${bar}>[^<]*(?:<(?!${bar}tool${sep}call${sep}end${bar}>)[^<]*)*<${bar}tool${sep}call${sep}end${bar}>`, "g"), "").replace(new RegExp(`<${bar}tool${sep}sep${bar}>`, "g"), "").trim();
|
|
904
|
+
return { tool_calls: calls, content, parser: "deepseek" };
|
|
905
|
+
};
|
|
906
|
+
var parsePhi = (text) => {
|
|
907
|
+
const match = text.match(/<\|tool_calls\|>((?:[^<]|<(?!\|\/tool_calls\|>))*)<\|\/tool_calls\|>/);
|
|
908
|
+
if (!match)
|
|
909
|
+
return null;
|
|
910
|
+
const calls = parseJsonToolCallArray(match[1]);
|
|
911
|
+
if (!calls)
|
|
912
|
+
return null;
|
|
913
|
+
const content = text.slice(0, text.indexOf("<|tool_calls|>")).trim();
|
|
914
|
+
return { tool_calls: calls, content, parser: "phi" };
|
|
915
|
+
};
|
|
916
|
+
var parsePhiFunctools = (text) => {
|
|
917
|
+
const idx = text.indexOf("functools");
|
|
918
|
+
if (idx === -1)
|
|
919
|
+
return null;
|
|
920
|
+
let start = idx + "functools".length;
|
|
921
|
+
while (start < text.length && /\s/.test(text[start]))
|
|
922
|
+
start++;
|
|
923
|
+
if (start >= text.length || text[start] !== "[")
|
|
924
|
+
return null;
|
|
925
|
+
const blocks = findBalancedBlocks(text, "[", "]", start);
|
|
926
|
+
if (blocks.length === 0)
|
|
927
|
+
return null;
|
|
928
|
+
const calls = parseJsonToolCallArray(blocks[0].text);
|
|
929
|
+
if (!calls)
|
|
930
|
+
return null;
|
|
931
|
+
const content = text.slice(0, idx).trim();
|
|
932
|
+
return { tool_calls: calls, content, parser: "phi_functools" };
|
|
933
|
+
};
|
|
934
|
+
var parseInternLM = (text) => {
|
|
935
|
+
const regex = /<\|action_start\|>\s*<\|plugin\|>((?:[^<]|<(?!\|action_end\|>))*)<\|action_end\|>/g;
|
|
936
|
+
const calls = [];
|
|
937
|
+
let match;
|
|
938
|
+
while ((match = regex.exec(text)) !== null) {
|
|
939
|
+
const parsed = tryParseJson(match[1].trim());
|
|
940
|
+
if (parsed) {
|
|
941
|
+
calls.push(makeToolCall(parsed.name ?? "", parsed.parameters ?? parsed.arguments ?? {}, parsed.id ?? null));
|
|
942
|
+
}
|
|
943
|
+
}
|
|
944
|
+
if (calls.length === 0)
|
|
945
|
+
return null;
|
|
946
|
+
const content = text.replace(/<\|action_start\|>\s*<\|plugin\|>(?:[^<]|<(?!\|action_end\|>))*<\|action_end\|>/g, "").trim();
|
|
947
|
+
return { tool_calls: calls, content, parser: "internlm" };
|
|
948
|
+
};
|
|
949
|
+
var parseChatGLM = (text) => {
|
|
950
|
+
const match = text.match(/^(\w+)\n(\{[\s\S]*\})\s*$/m);
|
|
951
|
+
if (!match)
|
|
952
|
+
return null;
|
|
953
|
+
const args = tryParseJson(match[2].trim());
|
|
954
|
+
if (!args)
|
|
955
|
+
return null;
|
|
956
|
+
return {
|
|
957
|
+
tool_calls: [makeToolCall(match[1], args)],
|
|
958
|
+
content: "",
|
|
959
|
+
parser: "chatglm"
|
|
960
|
+
};
|
|
961
|
+
};
|
|
962
|
+
var parseFunctionary = (text) => {
|
|
963
|
+
const regex = />>>\s*(\w+)\s*\n((?:(?!>>>)[\s\S])*)/g;
|
|
964
|
+
const calls = [];
|
|
965
|
+
let content = "";
|
|
966
|
+
let match;
|
|
967
|
+
while ((match = regex.exec(text)) !== null) {
|
|
968
|
+
const funcName = match[1].trim();
|
|
969
|
+
const body = match[2].trim();
|
|
970
|
+
if (funcName === "all") {
|
|
971
|
+
content += body;
|
|
972
|
+
continue;
|
|
973
|
+
}
|
|
974
|
+
const args = tryParseJson(body);
|
|
975
|
+
calls.push(makeToolCall(funcName, args ?? { content: body }));
|
|
976
|
+
}
|
|
977
|
+
if (calls.length === 0)
|
|
978
|
+
return null;
|
|
979
|
+
return { tool_calls: calls, content: content.trim(), parser: "functionary" };
|
|
980
|
+
};
|
|
981
|
+
var parseGorilla = (text) => {
|
|
982
|
+
const regex = /<<function>>\s{0,20}(\w+)\(([^)]*)\)/g;
|
|
983
|
+
const calls = [];
|
|
984
|
+
let match;
|
|
985
|
+
while ((match = regex.exec(text)) !== null) {
|
|
986
|
+
calls.push(makeToolCall(match[1], parseKeyValueArgs(match[2].trim())));
|
|
987
|
+
}
|
|
988
|
+
if (calls.length === 0)
|
|
989
|
+
return null;
|
|
990
|
+
const content = text.replace(/<<function>>\s{0,20}\w+\([^)]*\)/g, "").trim();
|
|
991
|
+
return { tool_calls: calls, content, parser: "gorilla" };
|
|
992
|
+
};
|
|
993
|
+
var parseNexusRaven = (text) => {
|
|
994
|
+
const regex = /Call:\s{0,20}(\w+)\(([^)]*)\)/g;
|
|
995
|
+
const calls = [];
|
|
996
|
+
let match;
|
|
997
|
+
while ((match = regex.exec(text)) !== null) {
|
|
998
|
+
calls.push(makeToolCall(match[1], parseKeyValueArgs(match[2].trim())));
|
|
999
|
+
}
|
|
1000
|
+
if (calls.length === 0)
|
|
1001
|
+
return null;
|
|
1002
|
+
const thoughtMatch = text.match(/Thought:\s*((?:(?!Call:)[\s\S])*)/);
|
|
1003
|
+
const content = thoughtMatch?.[1]?.trim() ?? text.replace(/Call:\s{0,20}\w+\([^)]*\)/g, "").trim();
|
|
1004
|
+
return { tool_calls: calls, content, parser: "nexusraven" };
|
|
1005
|
+
};
|
|
1006
|
+
var parseXLAM = (text) => {
|
|
1007
|
+
const codeBlockMatch = text.match(/```(?:json)?\n?((?:[^`]|`(?!``))*)\n?```/);
|
|
1008
|
+
let jsonStr;
|
|
1009
|
+
let isCodeBlock = false;
|
|
1010
|
+
if (codeBlockMatch) {
|
|
1011
|
+
const inner = codeBlockMatch[1].trim();
|
|
1012
|
+
if (inner.startsWith("[")) {
|
|
1013
|
+
jsonStr = inner;
|
|
1014
|
+
isCodeBlock = true;
|
|
1015
|
+
}
|
|
1016
|
+
}
|
|
1017
|
+
if (!jsonStr) {
|
|
1018
|
+
const trimmed = text.trim();
|
|
1019
|
+
if (!trimmed.startsWith("["))
|
|
1020
|
+
return null;
|
|
1021
|
+
jsonStr = trimmed;
|
|
1022
|
+
}
|
|
1023
|
+
const calls = parseJsonToolCallArray(jsonStr);
|
|
1024
|
+
if (!calls)
|
|
1025
|
+
return null;
|
|
1026
|
+
const content = isCodeBlock ? text.slice(0, text.indexOf("```")).trim() : "";
|
|
1027
|
+
return { tool_calls: calls, content, parser: "xlam" };
|
|
1028
|
+
};
|
|
1029
|
+
var parseFireFunction = (text) => {
|
|
1030
|
+
const toolCallsIdx = text.indexOf('"tool_calls"');
|
|
1031
|
+
if (toolCallsIdx === -1)
|
|
1032
|
+
return null;
|
|
1033
|
+
let bracketStart = text.indexOf("[", toolCallsIdx);
|
|
1034
|
+
if (bracketStart === -1)
|
|
1035
|
+
return null;
|
|
1036
|
+
const blocks = findBalancedBlocks(text, "[", "]", bracketStart);
|
|
1037
|
+
if (blocks.length === 0)
|
|
1038
|
+
return null;
|
|
1039
|
+
const parsed = tryParseJson(blocks[0].text);
|
|
1040
|
+
if (!parsed || !Array.isArray(parsed))
|
|
1041
|
+
return null;
|
|
1042
|
+
const calls = [];
|
|
1043
|
+
for (const c of parsed) {
|
|
1044
|
+
const fn = c.function;
|
|
1045
|
+
if (!fn?.name)
|
|
1046
|
+
continue;
|
|
1047
|
+
let args = fn.arguments ?? {};
|
|
1048
|
+
if (typeof args === "string") {
|
|
1049
|
+
args = tryParseJson(args) ?? {};
|
|
1050
|
+
}
|
|
1051
|
+
calls.push(makeToolCall(fn.name, args, c.id ?? null));
|
|
1052
|
+
}
|
|
1053
|
+
return calls.length > 0 ? { tool_calls: calls, content: "", parser: "firefunction" } : null;
|
|
1054
|
+
};
|
|
1055
|
+
var parseGranite = (text) => {
|
|
1056
|
+
const regex = /<\|tool_call\|>((?:[^<]|<(?!\|\/tool_call\|>|\|end_of_text\|>))*?)(?:<\|\/tool_call\|>|<\|end_of_text\|>|$)/g;
|
|
1057
|
+
const calls = [];
|
|
1058
|
+
let match;
|
|
1059
|
+
while ((match = regex.exec(text)) !== null) {
|
|
1060
|
+
const parsed = tryParseJson(match[1].trim());
|
|
1061
|
+
if (parsed) {
|
|
1062
|
+
calls.push(makeToolCall(parsed.name ?? "", parsed.arguments ?? parsed.parameters ?? {}, parsed.id ?? null));
|
|
1063
|
+
}
|
|
1064
|
+
}
|
|
1065
|
+
if (calls.length === 0)
|
|
1066
|
+
return null;
|
|
1067
|
+
const content = text.replace(/<\|tool_call\|>(?:[^<]|<(?!\|\/tool_call\|>|\|end_of_text\|>))*(?:<\|\/tool_call\|>|$)/g, "").trim();
|
|
1068
|
+
return { tool_calls: calls, content, parser: "granite" };
|
|
1069
|
+
};
|
|
1070
|
+
var parseGemma = (text) => {
|
|
1071
|
+
const openMarker = "```tool_code";
|
|
1072
|
+
const openIdx = text.indexOf(openMarker);
|
|
1073
|
+
if (openIdx === -1)
|
|
1074
|
+
return null;
|
|
1075
|
+
const lineStart = text.indexOf(`
|
|
1076
|
+
`, openIdx + openMarker.length);
|
|
1077
|
+
if (lineStart === -1)
|
|
1078
|
+
return null;
|
|
1079
|
+
let closeIdx = -1;
|
|
1080
|
+
let searchFrom = lineStart + 1;
|
|
1081
|
+
while (searchFrom < text.length) {
|
|
1082
|
+
const candidate = text.indexOf("```", searchFrom);
|
|
1083
|
+
if (candidate === -1)
|
|
1084
|
+
break;
|
|
1085
|
+
const lineBegin = text.lastIndexOf(`
|
|
1086
|
+
`, candidate - 1);
|
|
1087
|
+
if (lineBegin >= lineStart && text.slice(lineBegin + 1, candidate).trim() === "") {
|
|
1088
|
+
closeIdx = candidate;
|
|
1089
|
+
break;
|
|
1090
|
+
}
|
|
1091
|
+
searchFrom = candidate + 3;
|
|
1092
|
+
}
|
|
1093
|
+
if (closeIdx === -1)
|
|
1094
|
+
return null;
|
|
1095
|
+
const rawCode = text.slice(lineStart + 1, closeIdx).replace(/\n[ \t]*$/, "");
|
|
1096
|
+
const code = rawCode.trim();
|
|
1097
|
+
const funcMatch = code.match(/^(\w+)\(([\s\S]*)\)$/);
|
|
1098
|
+
if (!funcMatch)
|
|
1099
|
+
return null;
|
|
1100
|
+
const blockEnd = closeIdx + 3;
|
|
1101
|
+
const content = (text.slice(0, openIdx) + text.slice(blockEnd)).trim();
|
|
1102
|
+
return {
|
|
1103
|
+
tool_calls: [makeToolCall(funcMatch[1], parseKeyValueArgs(funcMatch[2].trim()))],
|
|
1104
|
+
content,
|
|
1105
|
+
parser: "gemma"
|
|
1106
|
+
};
|
|
1107
|
+
};
|
|
1108
|
+
function parseFunctionGemmaArgs(argsStr) {
|
|
1109
|
+
const args = {};
|
|
1110
|
+
if (!argsStr.trim())
|
|
1111
|
+
return args;
|
|
1112
|
+
const escapeRegex = /(?<![A-Za-z0-9_])([A-Za-z0-9_]+)\s*:\s*<escape>((?:[^<]|<(?!escape>))*)<escape>/g;
|
|
1113
|
+
let escapeMatch;
|
|
1114
|
+
while ((escapeMatch = escapeRegex.exec(argsStr)) !== null) {
|
|
1115
|
+
args[escapeMatch[1]] = coerceArgValue(escapeMatch[2]);
|
|
1116
|
+
}
|
|
1117
|
+
if (Object.keys(args).length > 0)
|
|
1118
|
+
return args;
|
|
1119
|
+
const plainRegex = /(?<![A-Za-z0-9_])(?=([A-Za-z0-9_]+))\1\s*:\s*(?:'([^']*)'|"([^"]*)"|([^,}]+))/g;
|
|
1120
|
+
let plainMatch;
|
|
1121
|
+
while ((plainMatch = plainRegex.exec(argsStr)) !== null) {
|
|
1122
|
+
const key = plainMatch[1].trim();
|
|
1123
|
+
const value = (plainMatch[2] ?? plainMatch[3] ?? plainMatch[4] ?? "").replace(/<escape>/g, "").trim();
|
|
1124
|
+
args[key] = parseFunctionGemmaArgumentValue(value);
|
|
1125
|
+
}
|
|
1126
|
+
if (Object.keys(args).length > 0)
|
|
1127
|
+
return args;
|
|
1128
|
+
const jsonResult = tryParseJson(`{${argsStr}}`);
|
|
1129
|
+
if (jsonResult && typeof jsonResult === "object")
|
|
1130
|
+
return jsonResult;
|
|
1131
|
+
return args;
|
|
1132
|
+
}
|
|
1133
|
+
// Parse FunctionGemma "call:name{...}" tool-call syntax, optionally wrapped in
// <start_function_call>/<end_function_call> markers. Returns null when no call
// is found; otherwise strips the call text from `content`.
var parseFunctionGemma = (text) => {
  // Lookahead + backreference keeps the dotted function name atomic.
  const regex = /(?:<start_function_call>\s*)?call:(?=([\w.]+))\1\s*\{([^}]*)\}(?:\s*<end_function_call>)?/g;
  const calls = [];
  let match;
  while ((match = regex.exec(text)) !== null) {
    calls.push(makeToolCall(match[1].trim(), parseFunctionGemmaArgs(match[2])));
  }
  if (calls.length === 0) {
    // Fallback: models sometimes drop the "call" keyword and emit ":name{...}" alone.
    const fallbackRegex = /^:([A-Za-z_]\w*)\s*\{([^}]*)\}$/;
    const fallbackMatch = text.trim().match(fallbackRegex);
    if (fallbackMatch) {
      calls.push(makeToolCall(fallbackMatch[1].trim(), parseFunctionGemmaArgs(fallbackMatch[2])));
    }
  }
  if (calls.length === 0)
    return null;
  // Remove every call expression (with or without the "call" keyword) from the
  // text that remains visible to the user.
  const content = text.replace(/(?:<start_function_call>\s*)?(?:call)?:(?=([\w.]+))\1\s*\{[^}]*\}(?:\s*<end_function_call>)?/g, "").trim();
  return { tool_calls: calls, content, parser: "functiongemma" };
};
|
|
1152
|
+
// Parse the argument text of a Liquid-style pythonic call. Recognizes a
// `params={...}` wrapper, a bare object literal with unquoted keys, and
// finally falls back to loose key/value scanning.
function parseLiquidArgs(argsStr) {
  const trimmed = argsStr.trim();
  // `params={...}` form: single-quoted pseudo-JSON; swap quotes and parse.
  const paramsMatch = trimmed.match(/^params\s*=\s*(\{[\s\S]*\})$/);
  if (paramsMatch) {
    const parsed = tryParseJson(paramsMatch[1].replace(/'/g, '"'));
    if (parsed && typeof parsed === "object") {
      return parsed;
    }
  }
  // Bare object literal: quote the unquoted keys, then parse as JSON.
  if (trimmed.startsWith("{") && trimmed.endsWith("}")) {
    const parsed = tryParseJson(trimmed.replace(/([{,]\s*)(\w+)\s*:/g, '$1"$2":'));
    if (parsed && typeof parsed === "object") {
      return parsed;
    }
  }
  // Last resort: loose key/value extraction.
  return parseKeyValueArgs(argsStr);
}
|
|
1171
|
+
// Extract Python-style calls like `name(arg1=..., arg2=...)` from free text.
// Uses manual parenthesis-depth counting so nested parens inside arguments work.
function extractPythonicCalls(text) {
  const calls = [];
  const startRegex = /(?<!\w)(\w+)\(/g;
  let startMatch;
  while ((startMatch = startRegex.exec(text)) !== null) {
    const funcName = startMatch[1];
    const argsStart = startMatch.index + startMatch[0].length;
    let depth = 1;
    let i = argsStart;
    // Walk forward until the opening paren is balanced (or text runs out).
    while (i < text.length && depth > 0) {
      if (text[i] === "(")
        depth++;
      else if (text[i] === ")")
        depth--;
      i++;
    }
    if (depth === 0) {
      const argsStr = text.slice(argsStart, i - 1);
      calls.push(makeToolCall(funcName, parseLiquidArgs(argsStr)));
      // Resume scanning after the closing paren so names inside the argument
      // text are not matched as new calls.
      startRegex.lastIndex = i;
    }
  }
  return calls;
}
|
|
1195
|
+
// Parse LiquidAI/LFM-style tool calls. Three syntaxes are tried in order:
//   1. <|tool_call_start|>[name(args)]<|tool_call_end|> special tokens,
//   2. bare bracketed pythonic calls: [name(args), ...],
//   3. a "Call: name(args)" prefix (optionally preceded by pipes).
// Returns null when no call is found.
var parseLiquid = (text) => {
  // 1. Special-token form; the alternation allows "<" unless it starts the end token.
  const specialMatch = text.match(/<\|tool_call_start\|>((?:[^<]|<(?!\|tool_call_end\|>))*)<\|tool_call_end\|>/);
  if (specialMatch) {
    const inner = specialMatch[1].trim();
    const unwrapped = inner.startsWith("[") && inner.endsWith("]") ? inner.slice(1, -1) : inner;
    const calls = extractPythonicCalls(unwrapped);
    if (calls.length > 0) {
      const content = stripModelArtifacts(text.replace(/<\|tool_call_start\|>(?:[^<]|<(?!\|tool_call_end\|>))*<\|tool_call_end\|>/g, ""));
      return { tool_calls: calls, content, parser: "liquid" };
    }
  }
  // 2. Bracketed form: find "[" immediately followed by a call, then scan for
  //    the matching ")]" via paren-depth counting; spans are recorded so the
  //    matched regions can be removed from the content afterwards.
  const bracketCalls = [];
  const bracketSpans = [];
  {
    const bracketOpenRegex = /\[(?=\w+\()/g;
    let bm;
    while ((bm = bracketOpenRegex.exec(text)) !== null) {
      const innerStart = bm.index + 1;
      let depth = 0;
      let i = innerStart;
      let foundClose = false;
      while (i < text.length) {
        const ch = text[i];
        if (ch === "(")
          depth++;
        else if (ch === ")") {
          depth--;
          if (depth === 0 && i + 1 < text.length && text[i + 1] === "]") {
            const inner = text.slice(innerStart, i + 1);
            const calls = extractPythonicCalls(inner);
            bracketCalls.push(...calls);
            bracketSpans.push([bm.index, i + 2]);
            bracketOpenRegex.lastIndex = i + 2;
            foundClose = true;
            break;
          }
        }
        i++;
      }
      if (!foundClose)
        break;
    }
  }
  if (bracketCalls.length > 0) {
    // Remove matched spans right-to-left so earlier offsets stay valid.
    let content = text;
    for (let k = bracketSpans.length - 1;k >= 0; k--) {
      content = content.slice(0, bracketSpans[k][0]) + content.slice(bracketSpans[k][1]);
    }
    return { tool_calls: bracketCalls, content: stripModelArtifacts(content), parser: "liquid" };
  }
  // 3. "Call:" prefix form; only the first call after each prefix is kept.
  const callPrefixRegex = /\|?\|?Call:\s*/g;
  let callPrefixMatch;
  const callCalls = [];
  while ((callPrefixMatch = callPrefixRegex.exec(text)) !== null) {
    const afterPrefix = text.slice(callPrefixMatch.index + callPrefixMatch[0].length);
    const calls = extractPythonicCalls(afterPrefix);
    if (calls.length > 0) {
      callCalls.push(calls[0]);
    }
  }
  if (callCalls.length > 0) {
    const content = stripModelArtifacts(text.replace(/\|?\|?Call:\s{0,20}\w+\([^)]*\)/g, ""));
    return { tool_calls: callCalls, content, parser: "liquid" };
  }
  return null;
};
|
|
1261
|
+
// Parse AI21 Jamba tool calls: a JSON object or array inside <tool_calls> tags.
// Falls through to the FireFunction parser when the tag form is absent or empty.
var parseJamba = (text) => {
  const tagMatch = text.match(/<tool_calls>((?:[^<]|<(?!\/tool_calls>))*)<\/tool_calls>/);
  if (tagMatch) {
    const parsed = tryParseJson(tagMatch[1].trim());
    if (parsed) {
      const entries = Array.isArray(parsed) ? parsed : [parsed];
      const toolCalls = [];
      for (const entry of entries) {
        if (!entry.name)
          continue;
        let callArgs = entry.arguments ?? entry.parameters ?? {};
        if (typeof callArgs === "string") {
          // Arguments occasionally arrive as a JSON string; decode them.
          callArgs = tryParseJson(callArgs) ?? {};
        }
        toolCalls.push(makeToolCall(entry.name, callArgs, entry.id ?? null));
      }
      if (toolCalls.length > 0) {
        const content = text.slice(0, text.indexOf("<tool_calls>")).trim();
        return { tool_calls: toolCalls, content, parser: "jamba" };
      }
    }
  }
  return parseFireFunction(text);
};
|
|
1285
|
+
// Parse Qwen 3.5 XML-style tool calls:
//   <tool_call><function=NAME><parameter=KEY>VALUE</parameter>...</function></tool_call>
// A "params" parameter holding a JSON object is flattened into the argument map.
var parseQwen35Xml = (text) => {
  const toolCallMatches = text.matchAll(/<tool_call>((?:[^<]|<(?!\/tool_call>))*)<\/tool_call>/g);
  const calls = [];
  for (const [_, toolCallBody] of toolCallMatches) {
    const functionMatch = toolCallBody.trim().match(/<function=([^>\n<]+)>((?:[^<]|<(?!\/function>))*)<\/function>/);
    if (!functionMatch) {
      continue;
    }
    const [, rawName, functionBody] = functionMatch;
    const parsedInput = {};
    const parameterMatches = functionBody.matchAll(/<parameter=([^>\n<]+)>((?:[^<]|<(?!\/parameter>))*)<\/parameter>/g);
    for (const [__, rawParamName, rawValue] of parameterMatches) {
      const paramName = rawParamName.trim();
      const valueText = rawValue.trim();
      if (paramName === "params") {
        // A JSON object under "params" is spread into the top-level arguments.
        try {
          const parsedValue = JSON.parse(valueText);
          if (parsedValue && typeof parsedValue === "object" && !Array.isArray(parsedValue)) {
            Object.assign(parsedInput, parsedValue);
            continue;
          }
        } catch {}
      }
      // All other parameter values are kept as raw strings.
      parsedInput[paramName] = valueText;
    }
    calls.push(makeToolCall(rawName.trim(), parsedInput));
  }
  if (calls.length === 0)
    return null;
  const content = text.replace(/<tool_call>(?:[^<]|<(?!\/tool_call>))*<\/tool_call>/g, "").trim();
  return { tool_calls: calls, content, parser: "qwen35xml" };
};
|
|
1317
|
+
// Ordered parser chains keyed by model family. The first parser in a chain
// that returns a non-null result wins. Keys are matched as substrings of the
// model name by detectModelFamily, in this insertion order.
var MODEL_PARSERS = {
  llama: [parseLlama, parseHermes],
  mistral: [parseMistral, parseHermes],
  mixtral: [parseMistral, parseHermes],
  qwen: [parseHermes, parseLlama],
  qwen2: [parseHermes, parseLlama],
  qwen3: [parseHermes, parseQwen35Xml, parseLlama],
  qwen35: [parseQwen35Xml, parseHermes, parseLlama],
  cohere: [parseCohere, parseHermes],
  command: [parseCohere, parseHermes],
  deepseek: [parseDeepSeek, parseHermes],
  hermes: [parseHermes],
  phi: [parsePhi, parsePhiFunctools, parseHermes],
  internlm: [parseInternLM, parseHermes],
  chatglm: [parseChatGLM],
  glm: [parseChatGLM],
  functiongemma: [parseFunctionGemma, parseGemma, parseHermes],
  gemma: [parseFunctionGemma, parseGemma, parseHermes],
  functionary: [parseFunctionary],
  gorilla: [parseGorilla],
  nexusraven: [parseNexusRaven],
  xlam: [parseXLAM],
  firefunction: [parseFireFunction, parsePhiFunctools],
  granite: [parseGranite, parseHermes],
  solar: [parseHermes],
  jamba: [parseJamba, parseHermes],
  liquid: [parseLiquid, parseHermes],
  lfm: [parseLiquid, parseHermes],
  yi: [parseHermes, parseLlama],
  falcon: [parseHermes, parseLlama]
};
// Fallback chain used when no model family is detected: parsers with the most
// distinctive markers run before the more permissive ones.
var DEFAULT_PARSER_CHAIN = [
  parsePhi,
  parseMistral,
  parseDeepSeek,
  parseInternLM,
  parseGranite,
  parseFunctionGemma,
  parseQwen35Xml,
  parseHermes,
  parseCohere,
  parseFunctionary,
  parseGorilla,
  parseNexusRaven,
  parseFireFunction,
  parsePhiFunctools,
  parseLiquid,
  parseLlama,
  parseGemma,
  parseXLAM
];
|
|
1368
|
+
// Infer the model family from a model name string or a tokenizer object, by
// substring-matching against the MODEL_PARSERS keys (in insertion order).
// Returns null when nothing matches.
function detectModelFamily(tokenizerOrName) {
  let candidate = "";
  if (typeof tokenizerOrName === "string") {
    candidate = tokenizerOrName.toLowerCase();
  } else if (tokenizerOrName) {
    // Tokenizer objects expose the model path under a few different keys.
    const config = tokenizerOrName.config ?? {};
    candidate = (config.name_or_path ?? config._name_or_path ?? config.model_type ?? tokenizerOrName.name_or_path ?? "").toLowerCase();
  }
  if (!candidate) {
    return null;
  }
  const family = Object.keys(MODEL_PARSERS).find((key) => candidate.includes(key));
  return family ?? null;
}
|
|
1385
|
+
// Parse tool calls from model output text.
// Resolution order for the parser chain: an explicitly named parser, then the
// family detected from tokenizer/model, then the generic default chain.
// Throws when an explicitly requested parser name is unknown.
function parseToolCalls(text, { tokenizer = null, model = null, parser = null } = {}) {
  if (!text || typeof text !== "string") {
    return { tool_calls: [], content: text ?? "", parser: "none" };
  }
  let chain;
  if (parser) {
    chain = MODEL_PARSERS[parser.toLowerCase()];
    if (!chain) {
      throw new Error(`Unknown parser "${parser}". Available parsers: ${Object.keys(MODEL_PARSERS).join(", ")}`);
    }
  } else {
    const family = detectModelFamily(tokenizer ?? model ?? null);
    chain = family ? MODEL_PARSERS[family] : DEFAULT_PARSER_CHAIN;
  }
  // First parser with a non-null result wins.
  for (const tryParse of chain) {
    const parsed = tryParse(text);
    if (parsed) {
      return parsed;
    }
  }
  return { tool_calls: [], content: text, parser: "none" };
}
|
|
1408
|
+
// Fast pre-check: does the text contain any known tool-call marker?
// Intentionally permissive — parsers make the final decision.
function hasToolCalls(text) {
  if (!text) {
    return false;
  }
  const markers = [
    "<tool_call>", "[TOOL_CALLS]", "<|python_tag|>", "<function=",
    "<|tool_calls|>", "<tool_calls>", "<|action_start|>", "<<function>>",
    ">>>", "Call:", "Action:", "functools", "<start_function_call>",
    "<|tool_call|>", "<|tool_call_start|>"
  ];
  if (markers.some((marker) => text.includes(marker))) {
    return true;
  }
  // DeepSeek-style markers use whitespace or the "▁" sentencepiece char.
  return /tool[\s\u2581]call[\s\u2581]begin/.test(text);
}
|
|
1413
|
+
// List the model-family parser names accepted by parseToolCalls({ parser }).
function getAvailableParsers() {
  const names = Object.keys(MODEL_PARSERS);
  return names;
}
|
|
1416
|
+
// Text to pre-seed generation with for families that need a call prefix.
// Only FunctionGemma uses one; every other family returns undefined.
function getGenerationPrefix(family, forcedToolName) {
  if (family === "functiongemma") {
    return forcedToolName
      ? `<start_function_call>call:${forcedToolName}{`
      : "<start_function_call>call:";
  }
  return undefined;
}
|
|
1426
|
+
// Extract tool calls from raw model text, returning { text, toolCalls } with
// the call syntax removed from `text`. Strategies, in order: FunctionGemma
// syntax, a bare loose argument object (forced-call case — note the empty
// name, resolved by the caller), Hermes <tool_call> JSON, and finally any
// balanced top-level JSON object shaped like a call.
function parseToolCallsFromText(responseText) {
  const functionGemmaResult = parseFunctionGemma(responseText);
  if (functionGemmaResult && functionGemmaResult.tool_calls.length > 0) {
    return {
      text: functionGemmaResult.content,
      toolCalls: functionGemmaResult.tool_calls.map((call, index) => ({
        id: call.id ?? `call_${index}`,
        name: call.name,
        input: call.arguments
      }))
    };
  }
  // Bare argument object with no call wrapper: name is left empty for the
  // caller to fill in from the forced tool choice.
  const looseObject = parseFunctionGemmaLooseObject(responseText);
  if (looseObject) {
    return {
      text: "",
      toolCalls: [{ id: "call_0", name: "", input: looseObject }]
    };
  }
  const hermesResult = parseHermes(responseText);
  if (hermesResult && hermesResult.tool_calls.length > 0) {
    return {
      text: hermesResult.content,
      toolCalls: hermesResult.tool_calls.map((call, index) => ({
        id: call.id ?? `call_${index}`,
        name: call.name,
        input: call.arguments
      }))
    };
  }
  // Fallback: scan every balanced {...} block for {name, arguments|parameters}
  // or OpenAI-style {function: {name, arguments}} shapes.
  const toolCalls = [];
  let callIndex = 0;
  const jsonCandidates = findBalancedBlocks(responseText, "{", "}");
  const matchedRanges = [];
  for (const candidate of jsonCandidates) {
    try {
      const parsed = JSON.parse(candidate.text);
      if (parsed.name && (parsed.arguments !== undefined || parsed.parameters !== undefined)) {
        const id = `call_${callIndex++}`;
        toolCalls.push({
          id,
          name: parsed.name,
          input: parsed.arguments ?? parsed.parameters ?? {}
        });
        matchedRanges.push({ start: candidate.start, end: candidate.end });
      } else if (parsed.function?.name) {
        let functionArgs = parsed.function.arguments ?? {};
        if (typeof functionArgs === "string") {
          // OpenAI-style arguments are often a JSON string; decode or drop.
          try {
            functionArgs = JSON.parse(functionArgs);
          } catch {
            functionArgs = {};
          }
        }
        const id = `call_${callIndex++}`;
        toolCalls.push({
          id,
          name: parsed.function.name,
          input: functionArgs ?? {}
        });
        matchedRanges.push({ start: candidate.start, end: candidate.end });
      }
    } catch {}
  }
  // Stitch the text back together with all matched JSON ranges removed.
  let cleanedText = responseText;
  if (toolCalls.length > 0) {
    let result = "";
    let lastIndex = 0;
    for (const range of matchedRanges) {
      result += responseText.slice(lastIndex, range.start);
      lastIndex = range.end;
    }
    result += responseText.slice(lastIndex);
    cleanedText = result.trim();
  }
  return { text: cleanedText, toolCalls };
}
|
|
1503
|
+
|
|
1504
|
+
// src/provider-llamacpp/common/LlamaCpp_ToolParser.ts
|
|
1505
|
+
// Collect the lowercase, non-empty text fields of a model config that can be
// used for substring-based model detection.
function getModelTextCandidates(model) {
  const fields = [
    model.model_id,
    model.title,
    model.description,
    model.provider_config.model_url,
    model.provider_config.model_path
  ];
  const candidates = [];
  for (const field of fields) {
    if (typeof field === "string" && field.length > 0) {
      candidates.push(field.toLowerCase());
    }
  }
  return candidates;
}
|
|
1514
|
+
// True when the tool choice demands a call: "required", or any value other
// than undefined/"auto"/"none" (i.e. a specific tool name).
function toolChoiceForcesToolCall(toolChoice) {
  if (toolChoice === undefined) {
    return false;
  }
  return toolChoice !== "auto" && toolChoice !== "none";
}
|
|
1517
|
+
// Return the explicit tool name embedded in the tool choice, or undefined for
// the reserved keywords ("auto" / "none" / "required") and non-string values.
function forcedToolChoiceName(toolChoice) {
  const reserved = toolChoice === "auto" || toolChoice === "none" || toolChoice === "required";
  if (typeof toolChoice === "string" && !reserved) {
    return toolChoice;
  }
  return undefined;
}
|
|
1523
|
+
// Decide which tool, if any, the input forces: an explicitly named tool wins;
// otherwise "required" with exactly one tool available implies that tool.
function forcedToolSelection(input) {
  const explicitName = forcedToolChoiceName(input.toolChoice);
  if (explicitName !== undefined) {
    return explicitName;
  }
  const onlyOption = input.toolChoice === "required" && input.tools.length === 1
    ? input.tools[0]
    : undefined;
  return onlyOption?.name;
}
|
|
1533
|
+
// Map a parsed tool name to a known tool: keep it if it matches a declared
// tool, otherwise substitute the forced selection (if any), else keep as-is.
function resolveParsedToolName(name, input) {
  const isKnown = input.tools.some((tool) => tool.name === name);
  if (isKnown) {
    return name;
  }
  return forcedToolSelection(input) ?? name;
}
|
|
1539
|
+
// True when any of the model's text fields mention "functiongemma".
function detectFunctionGemmaModel(model) {
  for (const candidate of getModelTextCandidates(model)) {
    if (candidate.includes("functiongemma")) {
      return true;
    }
  }
  return false;
}
|
|
1542
|
+
// Render a JSON-schema fragment in FunctionGemma's pseudo-schema declaration
// syntax (uppercase types, unquoted keys). Missing schema → bare OBJECT.
function functionGemmaDeclarationSchema(schema) {
  if (!schema) {
    return "{type: OBJECT}";
  }
  const type = typeof schema.type === "string" ? schema.type.toUpperCase() : "OBJECT";
  const description = typeof schema.description === "string" ? `description: ${schema.description} ,` : "";
  if (type !== "OBJECT") {
    // Scalar schemas render as a flat description/type pair.
    return `{${description}type: ${type}}`;
  }
  let properties = "";
  if (schema.properties && typeof schema.properties === "object") {
    const rendered = [];
    for (const [key, value] of Object.entries(schema.properties)) {
      const property = value ?? {};
      const propertyType = typeof property.type === "string" ? property.type.toUpperCase() : "STRING";
      const propertyDescription = typeof property.description === "string" ? `description: ${property.description} ,` : "";
      rendered.push(`${key}:{${propertyDescription}type: ${propertyType}}`);
    }
    properties = rendered.join(",");
  }
  const required = Array.isArray(schema.required) ? schema.required.join(",") : "";
  return `{${description}parameters:{properties:{${properties}},required:[${required}],type: OBJECT}}`;
}
|
|
1560
|
+
// Render all tool declarations in FunctionGemma's "declaration:" prompt
// format, one declaration per line pair.
function buildFunctionGemmaDeclarations(tools) {
  const declarations = tools.map((tool) => {
    const description = tool.description?.trim() ?? "";
    // Re-wrap the schema body so description and parameters share one object.
    const schemaBody = functionGemmaDeclarationSchema(tool.inputSchema).slice(1, -1);
    return `declaration:${tool.name}\n{description: ${description} ,parameters:${schemaBody}}`;
  });
  return declarations.join("\n");
}
|
|
1568
|
+
// Compose the developer-turn prompt for FunctionGemma: optional base system
// prompt, then the fixed function-calling instructions (with an extra line
// when a call is mandatory).
function buildFunctionGemmaDeveloperPrompt(baseSystemPrompt, required) {
  const lines = [];
  if (typeof baseSystemPrompt === "string" && baseSystemPrompt.trim().length > 0) {
    lines.push(baseSystemPrompt);
  }
  lines.push("You are a model that can do function calling with the following functions");
  if (required) {
    lines.push("You must call at least one function from the provided list.");
  }
  lines.push("If you call a function, output only the function call and nothing else.");
  lines.push("If tool results are already available and no further function call is needed, answer the user normally.");
  return lines.join("\n");
}
|
|
1579
|
+
// Flatten message content into a plain string: strings pass through, block
// arrays contribute only their text blocks, anything else is stringified
// (null/undefined become "").
function extractMessageText(content) {
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return String(content ?? "");
  }
  let text = "";
  for (const block of content) {
    if (block && typeof block === "object" && block.type === "text") {
      text += String(block.text ?? "");
    }
  }
  return text;
}
|
|
1588
|
+
// Serialize one argument value for a FunctionGemma call. JSON-serializable
// values go through JSON.stringify directly; anything else (e.g. undefined)
// is coerced to its string form first so the output is always a JSON token.
function serializeFunctionGemmaValue(value) {
  const kind = typeof value;
  const jsonNative =
    kind === "string" ||
    kind === "number" ||
    kind === "boolean" ||
    value === null ||
    Array.isArray(value) ||
    kind === "object";
  if (jsonNative) {
    return JSON.stringify(value);
  }
  return JSON.stringify(String(value));
}
|
|
1597
|
+
// Render a tool call in FunctionGemma syntax: call:name{key:value,...}.
function serializeFunctionGemmaToolCall(name, input) {
  const serializedArgs = [];
  for (const [key, value] of Object.entries(input)) {
    serializedArgs.push(`${key}:${serializeFunctionGemmaValue(value)}`);
  }
  return `call:${name}{${serializedArgs.join(",")}}`;
}
|
|
1601
|
+
// Serialize a message history into FunctionGemma's raw turn format
// (alternating role-marker lines and content). Assistant tool_use blocks are
// re-serialized as call:... syntax; tool results become user turns that warn
// the model not to repeat the same call.
function buildFunctionGemmaConversationPrompt(input) {
  if (!input.messages || input.messages.length === 0) {
    return String(input.prompt);
  }
  const turns = [];
  // Maps tool_use ids to tool names so tool-result blocks can be attributed.
  const toolNamesById = new Map;
  for (const message of input.messages) {
    if (message.role === "user") {
      turns.push("user", extractMessageText(message.content));
      continue;
    }
    if (message.role === "assistant" && Array.isArray(message.content)) {
      const toolUses = message.content.filter((block) => block.type === "tool_use");
      const serializedCalls = toolUses.map((block) => {
        toolNamesById.set(block.id, block.name);
        return serializeFunctionGemmaToolCall(block.name, block.input);
      });
      const text = message.content.filter((block) => block.type === "text").map((block) => block.text).join("").trim();
      const serializedCallText = serializedCalls.join(`
`).trim();
      // Only emit the text turn when it is not just an echo of the calls.
      if (text && text !== serializedCallText && !serializedCalls.some((call) => text.includes(call))) {
        turns.push("model", text);
      }
      if (serializedCalls.length > 0) {
        turns.push("model", serializedCalls.join(`
`));
      }
      continue;
    }
    if (message.role === "tool" && Array.isArray(message.content)) {
      for (const block of message.content) {
        const toolName = toolNamesById.get(block.tool_use_id) ?? "tool";
        const resultText = extractMessageText(block.content);
        // Phrase the result as a user turn with anti-repetition guidance.
        turns.push("user", `The function ${toolName} already returned this result: ${resultText}
` + `Do not call ${toolName} again just to repeat the same lookup.
` + `If this result answers the user's request, reply with the final answer.
` + `Only call another function if a different function is still needed to complete the request.`);
      }
    }
  }
  return turns.join(`
`);
}
|
|
1644
|
+
// Assemble the full raw-completion prompt for FunctionGemma: developer turn
// (instructions + declarations), the conversation (or a single user turn),
// and a trailing "model" marker to cue generation.
function buildFunctionGemmaRawPrompt(input, systemPrompt) {
  const hasMessages = Boolean(input.messages && input.messages.length > 0);
  const userPrompt = buildFunctionGemmaConversationPrompt(input);
  const parts = [
    "developer",
    buildFunctionGemmaDeveloperPrompt(systemPrompt, input.toolChoice === "required"),
    buildFunctionGemmaDeclarations(input.tools)
  ];
  if (hasMessages) {
    // The conversation already carries its own role markers.
    parts.push(userPrompt);
  } else {
    parts.push("user", userPrompt);
  }
  parts.push("model");
  return parts.join("\n");
}
|
|
1661
|
+
// Return a raw-completion prompt only for FunctionGemma models with tools
// enabled; all other models use the chat path (undefined).
function buildRawCompletionPrompt(input, model, systemPrompt) {
  const toolsEnabled = input.toolChoice !== "none";
  if (toolsEnabled && detectFunctionGemmaModel(model)) {
    return buildFunctionGemmaRawPrompt(input, systemPrompt);
  }
  return undefined;
}
|
|
1667
|
+
// Native llama.cpp function calling applies unless tools are disabled or the
// model is a FunctionGemma variant (which uses the raw prompt path instead).
function supportsNativeFunctions(input, model) {
  if (input.toolChoice === "none") {
    return false;
  }
  return !detectFunctionGemmaModel(model);
}
|
|
1670
|
+
// Cut raw-completion output at the first hallucinated turn marker
// ("\nuser\n" or "\ndeveloper\n") and trim the result.
function truncateAtTurnBoundary(text) {
  const markers = ["\nuser\n", "\ndeveloper\n"];
  const positions = markers
    .map((marker) => text.indexOf(marker))
    .filter((idx) => idx !== -1);
  const cutAt = positions.length > 0 ? Math.min(...positions) : text.length;
  return text.slice(0, cutAt).trim();
}
|
|
1685
|
+
// Convert a parser result's tool_calls into the provider call shape,
// resolving unknown tool names against the declared/forced tools.
function adaptParserResult(result, input) {
  const adapted = [];
  result.tool_calls.forEach((call, index) => {
    adapted.push({
      id: call.id ?? `call_${index}`,
      name: resolveParsedToolName(call.name, input),
      input: call.arguments
    });
  });
  return adapted;
}
|
|
1692
|
+
// Extract provider-shaped tool calls from completion text. FunctionGemma
// models get their dedicated syntax (plus a loose-object fallback when a tool
// is forced); then the generic parsers are tried in fixed order.
function extractToolCallsFromText(text, input, model) {
  if (detectFunctionGemmaModel(model)) {
    const gemmaResult = parseFunctionGemma(text);
    if (gemmaResult && gemmaResult.tool_calls.length > 0) {
      return adaptParserResult(gemmaResult, input);
    }
    // When exactly one tool is forced, a bare argument object is enough.
    const forcedToolName = forcedToolSelection(input);
    if (forcedToolName) {
      const looseObject = parseFunctionGemmaLooseObject(text);
      if (looseObject) {
        return [{ id: "call_0", name: forcedToolName, input: looseObject }];
      }
    }
  }
  for (const parserFn of [parseLiquid, parseHermes, parseQwen35Xml, parseLlama]) {
    const parsed = parserFn(text);
    if (parsed && parsed.tool_calls.length > 0) {
      return adaptParserResult(parsed, input);
    }
  }
  return [];
}
|
|
1722
|
+
|
|
1723
|
+
// src/provider-llamacpp/common/LlamaCpp_ToolCalling.ts
|
|
1724
|
+
import { getLogger } from "@workglow/util/worker";
|
|
1725
|
+
// Derive the effective system prompt: when tool choice is "required", append
// (or emit alone) the must-call instruction; otherwise pass the base through,
// mapping empty/falsy to undefined.
function buildSystemPrompt(input) {
  const base = input.systemPrompt;
  if (input.toolChoice !== "required") {
    return base || undefined;
  }
  const instruction = "You must call at least one tool from the provided tool list when answering.";
  return base ? base + "\n\n" + instruction : instruction;
}
|
|
1735
|
+
// Flatten message content to a string: pass strings through, join the text
// blocks of an array, stringify anything else (null/undefined → "").
function extractTextFromContent(content) {
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return String(content ?? "");
  }
  const pieces = [];
  for (const block of content) {
    if (block?.type === "text") {
      pieces.push(String(block.text ?? ""));
    }
  }
  return pieces.join("");
}
|
|
1742
|
+
// Convert provider messages into node-llama-cpp chat history items.
// Assistant tool_use blocks become functionCall entries tagged with a
// temporary _toolUseId; later tool messages use that tag to attach results to
// the matching call. The tags are stripped before returning.
function convertMessagesToChatHistory(messages, prompt, systemPrompt) {
  const history = [];
  if (systemPrompt) {
    history.push({ type: "system", text: systemPrompt });
  }
  if (!messages || messages.length === 0) {
    // No history: the bare prompt becomes a single user turn.
    const promptText = typeof prompt === "string" ? prompt : String(prompt ?? "");
    history.push({ type: "user", text: promptText });
    return history;
  }
  for (const msg of messages) {
    if (msg.role === "user") {
      const text = extractTextFromContent(msg.content);
      history.push({ type: "user", text });
      continue;
    }
    if (msg.role === "assistant" && Array.isArray(msg.content)) {
      const response = [];
      for (const block of msg.content) {
        if (block.type === "text" && block.text) {
          response.push(block.text);
        } else if (block.type === "tool_use") {
          response.push({
            type: "functionCall",
            name: block.name,
            description: undefined,
            params: block.input ?? {},
            result: undefined,
            // Temporary tag for matching tool results; removed below.
            _toolUseId: block.id
          });
        }
      }
      history.push({ type: "model", response });
      continue;
    }
    if (msg.role === "tool" && Array.isArray(msg.content)) {
      // Attach results to the most recent model turn's pending calls.
      let lastModel;
      for (let i = history.length - 1;i >= 0; i--) {
        if (history[i].type === "model") {
          lastModel = history[i];
          break;
        }
      }
      if (!lastModel)
        continue;
      for (const block of msg.content) {
        const toolUseId = block.tool_use_id;
        if (!toolUseId)
          continue;
        // Match by id and only fill calls that have no result yet.
        const fnCall = lastModel.response.find((item) => typeof item === "object" && item !== null && item.type === "functionCall" && item._toolUseId === toolUseId && item.result === undefined);
        if (fnCall) {
          fnCall.result = typeof block.content === "string" ? block.content : JSON.stringify(block.content);
        }
      }
      continue;
    }
  }
  // Strip the temporary _toolUseId tags before handing history to the SDK.
  for (const item of history) {
    if (item.type === "model" && Array.isArray(item.response)) {
      for (const entry of item.response) {
        if (typeof entry === "object" && entry !== null && "_toolUseId" in entry) {
          delete entry._toolUseId;
        }
      }
    }
  }
  return history;
}
|
|
1810
|
+
// Build the node-llama-cpp `functions` map from provider tool definitions,
// including description/params only when present.
function buildChatModelFunctions(tools) {
  const functions = {};
  for (const tool of tools) {
    const definition = {};
    if (tool.description) {
      definition.description = tool.description;
    }
    if (tool.inputSchema) {
      definition.params = tool.inputSchema;
    }
    functions[tool.name] = definition;
  }
  return functions;
}
|
|
1820
|
+
// Build llama.cpp completion options from the input, layered over the model's
// seed/prompt defaults. When no temperature is given but a tool call is
// forced, a low temperature (0.2) is used for more deterministic call syntax.
function llamaCppRawCompletionOptions(input, model) {
  const options = { ...llamaCppSeedPromptSpread(model.provider_config) };
  if (input.maxTokens !== undefined) {
    options.maxTokens = input.maxTokens;
  }
  if (input.temperature !== undefined) {
    options.temperature = input.temperature;
  } else if (toolChoiceForcesToolCall(input.toolChoice)) {
    options.temperature = 0.2;
  }
  return options;
}
|
|
1834
|
+
// Build llama.cpp chat-generation options.
// The derivation is intentionally identical to llamaCppRawCompletionOptions
// (seed/prompt spread, optional maxTokens, explicit temperature, or a 0.2
// default when a tool call is forced); delegate to it so the two code paths
// cannot drift apart.
function llamaCppChatGenerateOptions(input, model) {
  return llamaCppRawCompletionOptions(input, model);
}
|
|
1848
|
+
// Convert node-llama-cpp native function-call records into the provider call
// shape, assigning sequential synthetic ids.
function extractNativeFunctionCalls(functionCalls) {
  const calls = functionCalls ?? [];
  return calls.map((fc, index) => ({
    id: `call_${index}`,
    name: fc.functionName,
    input: fc.params ?? {}
  }));
}
|
|
1857
|
+
var LlamaCpp_ToolCalling = async (input, model, update_progress, signal) => {
|
|
1858
|
+
if (!model)
|
|
1859
|
+
throw new Error("Model config is required for ToolCallingTask.");
|
|
1860
|
+
await loadSdk();
|
|
1861
|
+
update_progress(0, "Loading model");
|
|
1862
|
+
const context = await getOrCreateTextContext(model);
|
|
1863
|
+
update_progress(10, "Running tool calling");
|
|
1864
|
+
const sequence = context.getSequence();
|
|
1865
|
+
const { LlamaChat, LlamaCompletion } = getLlamaCppSdk();
|
|
1866
|
+
const systemPrompt = buildSystemPrompt(input);
|
|
1867
|
+
const rawPrompt = buildRawCompletionPrompt(input, model, systemPrompt);
|
|
1868
|
+
getLogger().debug("LlamaCpp_ToolCalling", { rawPrompt, systemPrompt });
|
|
1869
|
+
if (rawPrompt !== undefined) {
|
|
1870
|
+
const completion = new LlamaCompletion({ contextSequence: sequence });
|
|
1871
|
+
try {
|
|
1872
|
+
const rawText = await completion.generateCompletion(rawPrompt, {
|
|
1873
|
+
signal,
|
|
1874
|
+
...llamaCppRawCompletionOptions(input, model)
|
|
1875
|
+
});
|
|
1876
|
+
const text = truncateAtTurnBoundary(rawText);
|
|
1877
|
+
getLogger().debug("LlamaCpp_ToolCalling LlamaCompletion", { rawText, text });
|
|
1878
|
+
const toolCalls = filterValidToolCalls(extractToolCallsFromText(text, input, model), input.tools);
|
|
1879
|
+
getLogger().debug("LlamaCpp_ToolCalling LlamaCompletion", { toolCalls });
|
|
1880
|
+
update_progress(100, "Tool calling complete");
|
|
1881
|
+
return { text, toolCalls };
|
|
1882
|
+
} finally {
|
|
1883
|
+
completion.dispose({ disposeSequence: false });
|
|
1884
|
+
sequence.dispose();
|
|
1885
|
+
}
|
|
1886
|
+
}
|
|
1887
|
+
const llamaChat = new LlamaChat({
|
|
1888
|
+
contextSequence: sequence,
|
|
1889
|
+
...llamaCppChatSessionConstructorSpread(model)
|
|
1890
|
+
});
|
|
1891
|
+
const promptText = typeof input.prompt === "string" ? input.prompt : extractTextFromContent(input.prompt);
|
|
1892
|
+
const chatHistory = convertMessagesToChatHistory(input.messages, promptText, systemPrompt);
|
|
1893
|
+
const functions = supportsNativeFunctions(input, model) ? buildChatModelFunctions(input.tools) : undefined;
|
|
1894
|
+
getLogger().debug("LlamaCpp_ToolCalling LlamaChat", { chatHistory, functions });
|
|
1895
|
+
try {
|
|
1896
|
+
const res = await llamaChat.generateResponse(chatHistory, {
|
|
1897
|
+
signal,
|
|
1898
|
+
...llamaCppChatGenerateOptions(input, model),
|
|
1899
|
+
...functions && {
|
|
1900
|
+
functions,
|
|
1901
|
+
...toolChoiceForcesToolCall(input.toolChoice) && { documentFunctionParams: true }
|
|
1902
|
+
}
|
|
1903
|
+
});
|
|
1904
|
+
const text = res.response;
|
|
1905
|
+
const toolCalls = extractNativeFunctionCalls(res.functionCalls);
|
|
1906
|
+
if (toolCalls.length === 0 && input.tools.length > 0 && input.toolChoice !== "none") {
|
|
1907
|
+
toolCalls.push(...extractToolCallsFromText(text, input, model));
|
|
1908
|
+
}
|
|
1909
|
+
update_progress(100, "Tool calling complete");
|
|
1910
|
+
return { text, toolCalls: filterValidToolCalls(toolCalls, input.tools) };
|
|
1911
|
+
} finally {
|
|
1912
|
+
llamaChat.dispose({ disposeSequence: false });
|
|
1913
|
+
sequence.dispose();
|
|
1914
|
+
}
|
|
1915
|
+
};
|
|
1916
|
+
async function* streamTextChunks(startGeneration, signal, cleanup) {
|
|
1917
|
+
const queue = [];
|
|
1918
|
+
let isComplete = false;
|
|
1919
|
+
let completionError;
|
|
1920
|
+
let resolveWait = null;
|
|
1921
|
+
let accumulatedText = "";
|
|
1922
|
+
let result;
|
|
1923
|
+
const notifyWaiter = () => {
|
|
1924
|
+
resolveWait?.();
|
|
1925
|
+
resolveWait = null;
|
|
1926
|
+
};
|
|
1927
|
+
const generationPromise = startGeneration((chunk) => {
|
|
1928
|
+
queue.push(chunk);
|
|
1929
|
+
notifyWaiter();
|
|
1930
|
+
}).then((res) => {
|
|
1931
|
+
result = res;
|
|
1932
|
+
isComplete = true;
|
|
1933
|
+
notifyWaiter();
|
|
1934
|
+
}).catch((err) => {
|
|
1935
|
+
completionError = err;
|
|
1936
|
+
isComplete = true;
|
|
1937
|
+
notifyWaiter();
|
|
1938
|
+
});
|
|
1939
|
+
try {
|
|
1940
|
+
while (true) {
|
|
1941
|
+
while (queue.length > 0) {
|
|
1942
|
+
const chunk = queue.shift();
|
|
1943
|
+
accumulatedText += chunk;
|
|
1944
|
+
yield { type: "text-delta", port: "text", textDelta: chunk };
|
|
1945
|
+
}
|
|
1946
|
+
if (isComplete)
|
|
1947
|
+
break;
|
|
1948
|
+
await new Promise((r) => {
|
|
1949
|
+
resolveWait = r;
|
|
1950
|
+
});
|
|
1951
|
+
}
|
|
1952
|
+
while (queue.length > 0) {
|
|
1953
|
+
const chunk = queue.shift();
|
|
1954
|
+
accumulatedText += chunk;
|
|
1955
|
+
yield { type: "text-delta", port: "text", textDelta: chunk };
|
|
1956
|
+
}
|
|
1957
|
+
} finally {
|
|
1958
|
+
await generationPromise.catch(() => {});
|
|
1959
|
+
cleanup();
|
|
1960
|
+
}
|
|
1961
|
+
if (completionError) {
|
|
1962
|
+
throw completionError;
|
|
1963
|
+
}
|
|
1964
|
+
if (signal.aborted) {
|
|
1965
|
+
throw signal.reason ?? new Error("The operation was aborted");
|
|
1966
|
+
}
|
|
1967
|
+
return { text: accumulatedText, result };
|
|
1968
|
+
}
|
|
1969
|
+
var LlamaCpp_ToolCalling_Stream = async function* (input, model, signal) {
|
|
1970
|
+
if (!model)
|
|
1971
|
+
throw new Error("Model config is required for ToolCallingTask.");
|
|
1972
|
+
await loadSdk();
|
|
1973
|
+
const context = await getOrCreateTextContext(model);
|
|
1974
|
+
const sequence = context.getSequence();
|
|
1975
|
+
const { LlamaChat, LlamaCompletion } = getLlamaCppSdk();
|
|
1976
|
+
const systemPrompt = buildSystemPrompt(input);
|
|
1977
|
+
const rawPrompt = buildRawCompletionPrompt(input, model, systemPrompt);
|
|
1978
|
+
if (rawPrompt !== undefined) {
|
|
1979
|
+
const completion = new LlamaCompletion({ contextSequence: sequence });
|
|
1980
|
+
const { text: rawText } = yield* streamTextChunks((onTextChunk) => completion.generateCompletion(rawPrompt, {
|
|
1981
|
+
signal,
|
|
1982
|
+
...llamaCppRawCompletionOptions(input, model),
|
|
1983
|
+
onTextChunk
|
|
1984
|
+
}), signal, () => {
|
|
1985
|
+
completion.dispose({ disposeSequence: false });
|
|
1986
|
+
sequence.dispose();
|
|
1987
|
+
});
|
|
1988
|
+
const text = truncateAtTurnBoundary(rawText);
|
|
1989
|
+
const validToolCalls2 = filterValidToolCalls(extractToolCallsFromText(text, input, model), input.tools);
|
|
1990
|
+
if (validToolCalls2.length > 0) {
|
|
1991
|
+
yield { type: "object-delta", port: "toolCalls", objectDelta: [...validToolCalls2] };
|
|
1992
|
+
}
|
|
1993
|
+
yield {
|
|
1994
|
+
type: "finish",
|
|
1995
|
+
data: { text, toolCalls: validToolCalls2 }
|
|
1996
|
+
};
|
|
1997
|
+
return;
|
|
1998
|
+
}
|
|
1999
|
+
const llamaChat = new LlamaChat({
|
|
2000
|
+
contextSequence: sequence,
|
|
2001
|
+
...llamaCppChatSessionConstructorSpread(model)
|
|
2002
|
+
});
|
|
2003
|
+
const promptText = typeof input.prompt === "string" ? input.prompt : extractTextFromContent(input.prompt);
|
|
2004
|
+
const chatHistory = convertMessagesToChatHistory(input.messages, promptText, systemPrompt);
|
|
2005
|
+
const functions = supportsNativeFunctions(input, model) ? buildChatModelFunctions(input.tools) : undefined;
|
|
2006
|
+
const { text: accumulatedText, result: chatResponse } = yield* streamTextChunks((onTextChunk) => llamaChat.generateResponse(chatHistory, {
|
|
2007
|
+
signal,
|
|
2008
|
+
...llamaCppChatGenerateOptions(input, model),
|
|
2009
|
+
...functions && {
|
|
2010
|
+
functions,
|
|
2011
|
+
...toolChoiceForcesToolCall(input.toolChoice) && { documentFunctionParams: true }
|
|
2012
|
+
},
|
|
2013
|
+
onTextChunk
|
|
2014
|
+
}), signal, () => {
|
|
2015
|
+
llamaChat.dispose({ disposeSequence: false });
|
|
2016
|
+
sequence.dispose();
|
|
2017
|
+
});
|
|
2018
|
+
const toolCalls = extractNativeFunctionCalls(chatResponse?.functionCalls);
|
|
2019
|
+
if (toolCalls.length === 0 && input.tools.length > 0 && input.toolChoice !== "none") {
|
|
2020
|
+
toolCalls.push(...extractToolCallsFromText(accumulatedText, input, model));
|
|
2021
|
+
}
|
|
2022
|
+
const validToolCalls = filterValidToolCalls(toolCalls, input.tools);
|
|
2023
|
+
if (validToolCalls.length > 0) {
|
|
2024
|
+
yield { type: "object-delta", port: "toolCalls", objectDelta: [...validToolCalls] };
|
|
2025
|
+
}
|
|
2026
|
+
yield {
|
|
2027
|
+
type: "finish",
|
|
2028
|
+
data: { text: accumulatedText, toolCalls: validToolCalls }
|
|
2029
|
+
};
|
|
2030
|
+
};
|
|
2031
|
+
|
|
647
2032
|
// src/provider-llamacpp/common/LlamaCpp_Unload.ts
|
|
648
2033
|
var LlamaCpp_Unload = async (input, model, update_progress, _signal) => {
|
|
649
2034
|
if (!model)
|
|
@@ -682,6 +2067,7 @@ var LLAMACPP_TASKS = {
|
|
|
682
2067
|
TextEmbeddingTask: LlamaCpp_TextEmbedding,
|
|
683
2068
|
TextRewriterTask: LlamaCpp_TextRewriter,
|
|
684
2069
|
TextSummaryTask: LlamaCpp_TextSummary,
|
|
2070
|
+
ToolCallingTask: LlamaCpp_ToolCalling,
|
|
685
2071
|
StructuredGenerationTask: LlamaCpp_StructuredGeneration,
|
|
686
2072
|
ModelSearchTask: LlamaCpp_ModelSearch
|
|
687
2073
|
};
|
|
@@ -689,6 +2075,7 @@ var LLAMACPP_STREAM_TASKS = {
|
|
|
689
2075
|
TextGenerationTask: LlamaCpp_TextGeneration_Stream,
|
|
690
2076
|
TextRewriterTask: LlamaCpp_TextRewriter_Stream,
|
|
691
2077
|
TextSummaryTask: LlamaCpp_TextSummary_Stream,
|
|
2078
|
+
ToolCallingTask: LlamaCpp_ToolCalling_Stream,
|
|
692
2079
|
StructuredGenerationTask: LlamaCpp_StructuredGeneration_Stream
|
|
693
2080
|
};
|
|
694
2081
|
var LLAMACPP_REACTIVE_TASKS = {
|
|
@@ -696,9 +2083,7 @@ var LLAMACPP_REACTIVE_TASKS = {
|
|
|
696
2083
|
};
|
|
697
2084
|
|
|
698
2085
|
// src/provider-llamacpp/LlamaCppQueuedProvider.ts
|
|
699
|
-
import {
|
|
700
|
-
QueuedAiProvider
|
|
701
|
-
} from "@workglow/ai";
|
|
2086
|
+
import { QueuedAiProvider } from "@workglow/ai";
|
|
702
2087
|
class LlamaCppQueuedProvider extends QueuedAiProvider {
|
|
703
2088
|
name = LOCAL_LLAMACPP;
|
|
704
2089
|
displayName = "Local llama.cpp";
|
|
@@ -713,6 +2098,7 @@ class LlamaCppQueuedProvider extends QueuedAiProvider {
|
|
|
713
2098
|
"TextEmbeddingTask",
|
|
714
2099
|
"TextRewriterTask",
|
|
715
2100
|
"TextSummaryTask",
|
|
2101
|
+
"ToolCallingTask",
|
|
716
2102
|
"ModelSearchTask"
|
|
717
2103
|
];
|
|
718
2104
|
constructor(tasks, streamTasks, reactiveTasks) {
|
|
@@ -725,12 +2111,10 @@ async function registerLlamaCppInline(options) {
|
|
|
725
2111
|
await new LlamaCppQueuedProvider(LLAMACPP_TASKS, LLAMACPP_STREAM_TASKS, LLAMACPP_REACTIVE_TASKS).register(options ?? {});
|
|
726
2112
|
}
|
|
727
2113
|
// src/provider-llamacpp/registerLlamaCppWorker.ts
|
|
728
|
-
import { getLogger, globalServiceRegistry, WORKER_SERVER } from "@workglow/util/worker";
|
|
2114
|
+
import { getLogger as getLogger2, globalServiceRegistry, WORKER_SERVER } from "@workglow/util/worker";
|
|
729
2115
|
|
|
730
2116
|
// src/provider-llamacpp/LlamaCppProvider.ts
|
|
731
|
-
import {
|
|
732
|
-
AiProvider
|
|
733
|
-
} from "@workglow/ai/worker";
|
|
2117
|
+
import { AiProvider } from "@workglow/ai/worker";
|
|
734
2118
|
class LlamaCppProvider extends AiProvider {
|
|
735
2119
|
name = LOCAL_LLAMACPP;
|
|
736
2120
|
displayName = "Local llama.cpp";
|
|
@@ -745,6 +2129,7 @@ class LlamaCppProvider extends AiProvider {
|
|
|
745
2129
|
"TextEmbeddingTask",
|
|
746
2130
|
"TextRewriterTask",
|
|
747
2131
|
"TextSummaryTask",
|
|
2132
|
+
"ToolCallingTask",
|
|
748
2133
|
"ModelSearchTask"
|
|
749
2134
|
];
|
|
750
2135
|
constructor(tasks, streamTasks, reactiveTasks) {
|
|
@@ -757,7 +2142,7 @@ async function registerLlamaCppWorker() {
|
|
|
757
2142
|
const workerServer = globalServiceRegistry.get(WORKER_SERVER);
|
|
758
2143
|
new LlamaCppProvider(LLAMACPP_TASKS, LLAMACPP_STREAM_TASKS, LLAMACPP_REACTIVE_TASKS).registerOnWorkerServer(workerServer);
|
|
759
2144
|
workerServer.sendReady();
|
|
760
|
-
|
|
2145
|
+
getLogger2().info("LlamaCpp worker job run functions registered");
|
|
761
2146
|
}
|
|
762
2147
|
export {
|
|
763
2148
|
streamFromSession,
|
|
@@ -780,4 +2165,4 @@ export {
|
|
|
780
2165
|
disposeLlamaCppResources
|
|
781
2166
|
};
|
|
782
2167
|
|
|
783
|
-
//# debugId=
|
|
2168
|
+
//# debugId=86AE88EC6A1801D664756E2164756E21
|