@console-agent/agent 1.2.2 → 1.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +56 -2
- package/dist/index.cjs +127 -1
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +5 -3
- package/dist/index.d.ts +5 -3
- package/dist/index.js +127 -1
- package/dist/index.js.map +1 -1
- package/package.json +2 -1
package/dist/index.js
CHANGED
|
@@ -792,6 +792,130 @@ function parseResponse(text) {
|
|
|
792
792
|
}
|
|
793
793
|
}
|
|
794
794
|
|
|
795
|
+
// src/providers/ollama.ts

// Appended to the persona system prompt when the caller did NOT request a
// custom output schema: instructs the model to emit a bare JSON object in
// the AgentResult shape that parseResponse2 expects. The leading blank
// lines are intentional — they separate this block from the persona text.
var JSON_RESPONSE_INSTRUCTION2 = `

IMPORTANT: You MUST respond with ONLY a valid JSON object (no markdown, no code fences, no extra text).
Use this exact format:
{"success": true, "summary": "one-line conclusion", "reasoning": "your thought process", "data": {"result": "primary finding"}, "actions": ["tools/steps used"], "confidence": 0.95}`;
|
|
801
|
+
// Builds the single-user-message array sent to Ollama. The prompt, an
// optional "--- Context ---" section, and an optional formatted source file
// each become a separate text part; non-text file attachments are not
// supported by this provider, so a nonempty `_files` only logs a warning.
function buildMessages2(prompt, context, sourceFile, _files) {
  const content = [{ type: "text", text: prompt }];
  if (context) {
    content.push({ type: "text", text: `\n--- Context ---\n${context}` });
  }
  if (sourceFile) {
    content.push({ type: "text", text: `\n${formatSourceForContext(sourceFile)}` });
  }
  if (_files?.length) {
    logDebug("WARNING: File attachments have limited support with Ollama. Only text-based files included as context.");
  }
  return [{ role: "user", content }];
}
|
|
819
|
+
// Parses model output into an AgentResult-shaped object. Attempts, in
// order: the raw text as JSON, JSON inside a ``` / ```json fence, and the
// widest {...} span in the text. If every parse fails, returns a permissive
// fallback that preserves the raw text under data.raw with low confidence.
function parseResponse2(text) {
  const tryParse = (candidate) => {
    try {
      return JSON.parse(candidate);
    } catch {
      return undefined; // JSON.parse never yields undefined, so this is unambiguous
    }
  };
  const direct = tryParse(text);
  if (direct !== undefined) return direct;
  const fence = text.match(/```(?:json)?\s*\n?([\s\S]*?)\n?\s*```/);
  if (fence) {
    const fromFence = tryParse(fence[1]);
    if (fromFence !== undefined) return fromFence;
  }
  const braces = text.match(/\{[\s\S]*\}/);
  if (braces) {
    const fromBraces = tryParse(braces[0]);
    if (fromBraces !== undefined) return fromBraces;
  }
  return {
    success: true,
    summary: text.substring(0, 200),
    data: { raw: text },
    actions: [],
    confidence: 0.5
  };
}
|
|
846
|
+
/**
 * Runs one agent call against a local Ollama server via the Vercel AI SDK
 * (`generateText`) and normalizes the reply into an AgentResult object.
 *
 * @param prompt      user prompt text
 * @param context     optional extra context appended by buildMessages2
 * @param persona     persona object; `.systemPrompt` and `.name` are read
 * @param config2     agent config; reads `.model`, `.ollamaHost`,
 *                    `.budget.maxTokensPerCall`, `.timeout`
 * @param options     optional overrides; `.model`, `.tools`, `.thinking`,
 *                    `.schema`, `.responseFormat` are consulted
 * @param sourceFile  optional source file forwarded to buildMessages2
 * @param files       optional attachments (limited support — warning only)
 * @returns AgentResult with `metadata` {model, tokensUsed, latencyMs, ...}
 */
async function callOllama(prompt, context, persona, config2, options, sourceFile, files) {
  const startTime = Date.now();
  // Per-call model override wins over configured model.
  let modelName = options?.model ?? config2.model;
  // The configured default is a Gemini model (Google provider); remap it to
  // a model Ollama can actually serve.
  if (modelName.startsWith("gemini")) {
    modelName = "llama3.2";
    logDebug(`Ollama provider: defaulting model to ${modelName}`);
  }
  logDebug(`Using model: ${modelName}`);
  logDebug(`Persona: ${persona.name}`);
  // Host resolution order: explicit config, OLLAMA_HOST env var, localhost.
  const host = config2.ollamaHost || process.env.OLLAMA_HOST || "http://localhost:11434";
  // Tools and thinking config are Google-provider features; warn and ignore.
  if (options?.tools && options.tools.length > 0) {
    logDebug('WARNING: Tools are not supported with the Ollama provider. Tools will be ignored. Use provider="google" for tool support.');
  }
  if (options?.thinking) {
    logDebug("WARNING: Thinking config is not supported with the Ollama provider. It will be ignored.");
  }
  logDebug(`Ollama host: ${host}`);
  // Lazy dynamic imports so the Ollama SDK is only loaded when this
  // provider is actually selected.
  const { createOllama } = await import('ai-sdk-ollama');
  const { generateText: generateText2 } = await import('ai');
  const ollama = createOllama({ baseURL: host });
  // With a custom schema the model returns bare structured data; otherwise
  // we append the AgentResult JSON-format instruction to the persona prompt.
  const useCustomSchema = !!(options?.schema || options?.responseFormat);
  const systemPrompt = useCustomSchema ? `${persona.systemPrompt}

IMPORTANT: You must respond with structured data matching the requested output schema. Do not include AgentResult wrapper fields \u2014 just return the data matching the schema.` : persona.systemPrompt + JSON_RESPONSE_INSTRUCTION2;
  const messages = buildMessages2(prompt, context, sourceFile, files);
  // NOTE(review): this abort timeout is redundant with the caller's
  // Promise.race(createTimeout(...)) in executeAgent — presumably belt and
  // braces; confirm both use the same config.timeout.
  const result = await generateText2({
    model: ollama(modelName),
    system: systemPrompt,
    messages,
    maxOutputTokens: config2.budget.maxTokensPerCall,
    abortSignal: AbortSignal.timeout(config2.timeout)
  });
  const latencyMs = Date.now() - startTime;
  const tokensUsed = result.usage?.totalTokens ?? 0;
  // Always empty here — tools are unsupported on this provider (see warning
  // above); kept so metadata.toolCalls matches the Google provider's shape.
  const collectedToolCalls = [];
  logDebug(`Response received: ${latencyMs}ms, ${tokensUsed} tokens`);
  if (useCustomSchema) {
    const parsed2 = parseResponse2(result.text);
    // NOTE(review): parseResponse2's unparseable-text fallback stores the
    // raw text under `.data.raw`, not `.raw`, so `!parsed2.raw` looks like
    // it can never detect the fallback — confirm intended behavior.
    const customData = parsed2 && !parsed2.raw ? parsed2 : { result: result.text };
    logDebug("Custom schema output received, wrapping in AgentResult");
    return {
      success: true,
      summary: `Structured output returned (${Object.keys(customData).length} fields)`,
      data: customData,
      actions: [],
      confidence: 1,
      metadata: {
        model: modelName,
        tokensUsed,
        latencyMs,
        toolCalls: collectedToolCalls,
        cached: false
      }
    };
  }
  // No custom schema: the model was asked for AgentResult JSON directly;
  // fill any missing fields with conservative defaults.
  const parsed = parseResponse2(result.text);
  return {
    success: parsed?.success ?? true,
    summary: parsed?.summary ?? result.text.substring(0, 200),
    reasoning: parsed?.reasoning,
    data: parsed?.data ?? { raw: result.text },
    actions: parsed?.actions ?? [],
    confidence: parsed?.confidence ?? 0.5,
    metadata: {
      model: modelName,
      tokensUsed,
      latencyMs,
      toolCalls: collectedToolCalls,
      cached: false
    }
  };
}
|
|
918
|
+
|
|
795
919
|
// src/utils/anonymize.ts
|
|
796
920
|
var patterns = {
|
|
797
921
|
// API keys and tokens (long alphanumeric strings near sensitive keywords)
|
|
@@ -988,6 +1112,7 @@ var BudgetTracker = class {
|
|
|
988
1112
|
var DEFAULT_CONFIG = {
|
|
989
1113
|
provider: "google",
|
|
990
1114
|
model: "gemini-2.5-flash-lite",
|
|
1115
|
+
ollamaHost: "http://localhost:11434",
|
|
991
1116
|
persona: "general",
|
|
992
1117
|
budget: {
|
|
993
1118
|
maxCallsPerDay: 100,
|
|
@@ -1074,8 +1199,9 @@ async function executeAgent(prompt, context, options) {
|
|
|
1074
1199
|
const files = options?.files;
|
|
1075
1200
|
const spinner = startSpinner(persona, processedPrompt, verbose);
|
|
1076
1201
|
try {
|
|
1202
|
+
const providerCall = config.provider === "ollama" ? callOllama(processedPrompt, contextStr, persona, config, options, sourceFile, files) : callGoogle(processedPrompt, contextStr, persona, config, options, sourceFile, files);
|
|
1077
1203
|
const result = await Promise.race([
|
|
1078
|
-
|
|
1204
|
+
providerCall,
|
|
1079
1205
|
createTimeout(config.timeout)
|
|
1080
1206
|
]);
|
|
1081
1207
|
budgetTracker.recordUsage(
|