@console-agent/agent 1.2.2 → 1.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,5 +1,8 @@
- # @console-agent/agent
-
+ ```
+ █▀▀ █▀█ █▄ █ █▀ █▀█ █ █▀▀
+ █▄▄ █▄█ █ ▀█ ▄█ █▄█ █▄▄ ██▄
+ .agent
+ ```
  > Drop `console.agent(...)` anywhere in your code to execute agentic workflows — as easy as `console.log()`

  [![npm](https://img.shields.io/npm/v/@console-agent/agent)](https://www.npmjs.com/package/@console-agent/agent)
@@ -39,14 +42,65 @@ console.agent.debug("why is this slow?", { duration, query });
  console.agent.architect("review this API design", endpoint);
  ```

+ ## 🔌 Providers
+
+ ### Google Gemini (default)
+
+ Cloud-hosted, full tool support, API key required.
+
+ ```typescript
+ import { init } from '@console-agent/agent';
+
+ init({
+   provider: 'google', // default
+   apiKey: process.env.GEMINI_API_KEY,
+   model: 'gemini-2.5-flash-lite',
+ });
+ ```
+
+ ### Ollama (Local Models)
+
+ Run models locally with [Ollama](https://ollama.com). Free, private, no API key needed.
+
+ ```bash
+ # 1. Install Ollama: https://ollama.com
+ # 2. Pull a model
+ ollama pull llama3.2
+ ```
+
+ ```typescript
+ import { init } from '@console-agent/agent';
+
+ init({
+   provider: 'ollama',
+   model: 'llama3.2', // any model from `ollama list`
+   ollamaHost: 'http://localhost:11434', // default
+ });
+ ```
+
+ ### Provider Comparison
+
+ | | Google Gemini | Ollama |
+ |---|---|---|
+ | Setup | `GEMINI_API_KEY` env var | Install Ollama + pull model |
+ | Config | `provider: 'google'` | `provider: 'ollama'` |
+ | Models | `gemini-2.5-flash-lite`, etc. | `llama3.2`, any `ollama list` model |
+ | Tools | ✅ google_search, code_execution, url_context | ❌ Not supported |
+ | Thinking | ✅ Supported | ❌ Not supported |
+ | File attachments | ✅ Full support | ⚠️ Text-only |
+ | Cost | Pay per token (very cheap) | Free (local) |
+ | Privacy | Cloud (with anonymization) | 100% local |
+
  ## Configuration

  ```typescript
  import { init } from '@console-agent/agent';

  init({
+   provider: 'google', // 'google' | 'ollama'
    apiKey: process.env.GEMINI_API_KEY, // Or set GEMINI_API_KEY env var
    model: 'gemini-2.5-flash-lite', // Default (fast & cheap)
+   ollamaHost: 'http://localhost:11434', // Ollama host (when provider='ollama')
    persona: 'general', // 'debugger' | 'security' | 'architect' | 'general'
    mode: 'fire-and-forget', // 'fire-and-forget' | 'blocking'
    timeout: 10000, // ms
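Read together, the new README sections make the trade-off concrete: Gemini buys tools, thinking, and file attachments; Ollama buys a zero-credential, fully local loop. A minimal end-to-end sketch of the local path, assuming only the `init` options documented above and the `console.agent(...)` call shape from the package's tagline and persona examples:

```typescript
import { init } from '@console-agent/agent';

// Local-only setup: no API key, nothing leaves the machine.
init({
  provider: 'ollama',
  model: 'llama3.2', // must already be pulled: `ollama pull llama3.2`
  mode: 'blocking',  // wait for the answer instead of fire-and-forget
});

// Drop-in usage, per the tagline: as easy as console.log().
console.agent('why did this request fail?', new Error('ECONNREFUSED 127.0.0.1:5432'));
```

Note the comparison table's caveats apply on this path: with `provider: 'ollama'`, any `tools` or `thinking` options are ignored and file attachments degrade to text-only.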
package/dist/index.cjs CHANGED
@@ -799,6 +799,130 @@ function parseResponse(text) {
    }
  }

+ // src/providers/ollama.ts
+ var JSON_RESPONSE_INSTRUCTION2 = `
+
+ IMPORTANT: You MUST respond with ONLY a valid JSON object (no markdown, no code fences, no extra text).
+ Use this exact format:
+ {"success": true, "summary": "one-line conclusion", "reasoning": "your thought process", "data": {"result": "primary finding"}, "actions": ["tools/steps used"], "confidence": 0.95}`;
+ function buildMessages2(prompt, context, sourceFile, _files) {
+   const parts = [];
+   parts.push({ type: "text", text: prompt });
+   if (context) {
+     parts.push({ type: "text", text: `
+ --- Context ---
+ ${context}` });
+   }
+   if (sourceFile) {
+     const formatted = formatSourceForContext(sourceFile);
+     parts.push({ type: "text", text: `
+ ${formatted}` });
+   }
+   if (_files && _files.length > 0) {
+     logDebug("WARNING: File attachments have limited support with Ollama. Only text-based files included as context.");
+   }
+   return [{ role: "user", content: parts }];
+ }
+ function parseResponse2(text) {
+   try {
+     return JSON.parse(text);
+   } catch {
+     const jsonMatch = text.match(/```(?:json)?\s*\n?([\s\S]*?)\n?\s*```/);
+     if (jsonMatch) {
+       try {
+         return JSON.parse(jsonMatch[1]);
+       } catch {
+       }
+     }
+     const objectMatch = text.match(/\{[\s\S]*\}/);
+     if (objectMatch) {
+       try {
+         return JSON.parse(objectMatch[0]);
+       } catch {
+       }
+     }
+     return {
+       success: true,
+       summary: text.substring(0, 200),
+       data: { raw: text },
+       actions: [],
+       confidence: 0.5
+     };
+   }
+ }
+ async function callOllama(prompt, context, persona, config2, options, sourceFile, files) {
+   const startTime = Date.now();
+   let modelName = options?.model ?? config2.model;
+   if (modelName.startsWith("gemini")) {
+     modelName = "llama3.2";
+     logDebug(`Ollama provider: defaulting model to ${modelName}`);
+   }
+   logDebug(`Using model: ${modelName}`);
+   logDebug(`Persona: ${persona.name}`);
+   const host = config2.ollamaHost || process.env.OLLAMA_HOST || "http://localhost:11434";
+   if (options?.tools && options.tools.length > 0) {
+     logDebug('WARNING: Tools are not supported with the Ollama provider. Tools will be ignored. Use provider="google" for tool support.');
+   }
+   if (options?.thinking) {
+     logDebug("WARNING: Thinking config is not supported with the Ollama provider. It will be ignored.");
+   }
+   logDebug(`Ollama host: ${host}`);
+   const { createOllama } = await import('ai-sdk-ollama');
+   const { generateText: generateText2 } = await import('ai');
+   const ollama = createOllama({ baseURL: host });
+   const useCustomSchema = !!(options?.schema || options?.responseFormat);
+   const systemPrompt = useCustomSchema ? `${persona.systemPrompt}
+
+ IMPORTANT: You must respond with structured data matching the requested output schema. Do not include AgentResult wrapper fields \u2014 just return the data matching the schema.` : persona.systemPrompt + JSON_RESPONSE_INSTRUCTION2;
+   const messages = buildMessages2(prompt, context, sourceFile, files);
+   const result = await generateText2({
+     model: ollama(modelName),
+     system: systemPrompt,
+     messages,
+     maxOutputTokens: config2.budget.maxTokensPerCall,
+     abortSignal: AbortSignal.timeout(config2.timeout)
+   });
+   const latencyMs = Date.now() - startTime;
+   const tokensUsed = result.usage?.totalTokens ?? 0;
+   const collectedToolCalls = [];
+   logDebug(`Response received: ${latencyMs}ms, ${tokensUsed} tokens`);
+   if (useCustomSchema) {
+     const parsed2 = parseResponse2(result.text);
+     const customData = parsed2 && !parsed2.raw ? parsed2 : { result: result.text };
+     logDebug("Custom schema output received, wrapping in AgentResult");
+     return {
+       success: true,
+       summary: `Structured output returned (${Object.keys(customData).length} fields)`,
+       data: customData,
+       actions: [],
+       confidence: 1,
+       metadata: {
+         model: modelName,
+         tokensUsed,
+         latencyMs,
+         toolCalls: collectedToolCalls,
+         cached: false
+       }
+     };
+   }
+   const parsed = parseResponse2(result.text);
+   return {
+     success: parsed?.success ?? true,
+     summary: parsed?.summary ?? result.text.substring(0, 200),
+     reasoning: parsed?.reasoning,
+     data: parsed?.data ?? { raw: result.text },
+     actions: parsed?.actions ?? [],
+     confidence: parsed?.confidence ?? 0.5,
+     metadata: {
+       model: modelName,
+       tokensUsed,
+       latencyMs,
+       toolCalls: collectedToolCalls,
+       cached: false
+     }
+   };
+ }
+
  // src/utils/anonymize.ts
  var patterns = {
    // API keys and tokens (long alphanumeric strings near sensitive keywords)
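The heart of this hunk is `parseResponse2`: since Ollama models are not forced into a JSON output mode here, the provider prompts for bare JSON and then degrades gracefully: strict `JSON.parse` first, then a fenced `json` code block, then the first-to-last brace span, and finally a wrapper around the raw text so a malformed reply never throws. (One quirk worth flagging: the custom-schema branch tests `parsed2.raw`, but the fallback stores the raw text under `data.raw`, so a completely unparseable reply would pass that check and the fallback wrapper itself would be returned as the structured data.) A standalone sketch of the cascade, simplified from the dist code above:

```typescript
// Simplified mirror of parseResponse2 from the diff above.
function parseLoosely(text: string): unknown {
  // Stage 1: the model obeyed and returned bare JSON.
  try { return JSON.parse(text); } catch { /* keep going */ }
  // Stage 2: JSON wrapped in a markdown code fence.
  const fenced = text.match(/```(?:json)?\s*\n?([\s\S]*?)\n?\s*```/);
  if (fenced) { try { return JSON.parse(fenced[1]); } catch { /* keep going */ } }
  // Stage 3: first "{" to last "}" (greedy, so nested objects survive).
  const braces = text.match(/\{[\s\S]*\}/);
  if (braces) { try { return JSON.parse(braces[0]); } catch { /* keep going */ } }
  // Stage 4: give up parsing, but never throw.
  return { success: true, summary: text.slice(0, 200), data: { raw: text }, confidence: 0.5 };
}

parseLoosely('{"ok": 1}');                                // stage 1
parseLoosely('Sure!\n```json\n{"ok": 2}\n```');           // stage 2
parseLoosely('The result is {"ok": 3}, hope that helps'); // stage 3
parseLoosely('I cannot answer that.');                    // stage 4 wrapper
```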
@@ -995,6 +1119,7 @@ var BudgetTracker = class {
  var DEFAULT_CONFIG = {
    provider: "google",
    model: "gemini-2.5-flash-lite",
+   ollamaHost: "http://localhost:11434",
    persona: "general",
    budget: {
      maxCallsPerDay: 100,
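One side effect of the new `ollamaHost` default is worth checking: `callOllama` resolves the host as `config2.ollamaHost || process.env.OLLAMA_HOST || "http://localhost:11434"`, so if `DEFAULT_CONFIG` is merged into every resolved config, the env-var branch can never win. A short illustration of the short-circuit, assuming a plain shallow merge (the package's actual merge logic is not shown in this diff):

```typescript
// Hypothetical shallow merge; how the package applies DEFAULT_CONFIG is not in this diff.
const DEFAULT_CONFIG = { ollamaHost: 'http://localhost:11434' };
const userOptions: { ollamaHost?: string } = {};

const config = { ...DEFAULT_CONFIG, ...userOptions }; // ollamaHost is always truthy here
const host = config.ollamaHost || process.env.OLLAMA_HOST || 'http://localhost:11434';
console.log(host); // the default wins even when OLLAMA_HOST is exported
```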
@@ -1081,8 +1206,9 @@ async function executeAgent(prompt, context, options) {
    const files = options?.files;
    const spinner = startSpinner(persona, processedPrompt, verbose);
    try {
+     const providerCall = config.provider === "ollama" ? callOllama(processedPrompt, contextStr, persona, config, options, sourceFile, files) : callGoogle(processedPrompt, contextStr, persona, config, options, sourceFile, files);
      const result = await Promise.race([
-       callGoogle(processedPrompt, contextStr, persona, config, options, sourceFile, files),
+       providerCall,
        createTimeout(config.timeout)
      ]);
      budgetTracker.recordUsage(
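This last hunk is the actual wiring: `executeAgent` now picks the provider call up front and races it against the same timeout as before, so a hung local Ollama server fails the call rather than stalling it. A generic sketch of that dispatch-plus-race shape; `createTimeout` only appears as a call site in the diff, so its body here is a guess, and the provider stubs are stand-ins rather than the package's internals:

```typescript
interface ProviderResult { success: boolean; summary: string }

// Stand-in provider calls; the real package dispatches to its Gemini and Ollama clients.
const callGoogle = async (prompt: string): Promise<ProviderResult> =>
  ({ success: true, summary: `gemini: ${prompt}` });
const callOllama = async (prompt: string): Promise<ProviderResult> =>
  ({ success: true, summary: `llama3.2: ${prompt}` });

// Assumed shape of the package's internal helper; only its call site is visible above.
function createTimeout(ms: number): Promise<never> {
  return new Promise((_, reject) =>
    setTimeout(() => reject(new Error(`agent call timed out after ${ms}ms`)), ms)
  );
}

async function execute(provider: 'google' | 'ollama', prompt: string, timeoutMs: number) {
  // Select the provider first so both arms share one race/budget/error path.
  const providerCall = provider === 'ollama' ? callOllama(prompt) : callGoogle(prompt);
  return Promise.race([providerCall, createTimeout(timeoutMs)]);
}
```

Note that `callOllama` in the dist code also passes `AbortSignal.timeout(config2.timeout)` to `generateText`, so when the race is lost the underlying HTTP request is cancelled rather than abandoned.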