nc1709: 1.15.4-py3-none-any.whl → 1.18.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nc1709/__init__.py +1 -1
- nc1709/agent/core.py +172 -19
- nc1709/agent/permissions.py +2 -2
- nc1709/agent/tools/bash_tool.py +295 -8
- nc1709/cli.py +435 -19
- nc1709/cli_ui.py +137 -52
- nc1709/conversation_logger.py +416 -0
- nc1709/llm_adapter.py +62 -4
- nc1709/plugins/agents/database_agent.py +695 -0
- nc1709/plugins/agents/django_agent.py +11 -4
- nc1709/plugins/agents/docker_agent.py +11 -4
- nc1709/plugins/agents/fastapi_agent.py +11 -4
- nc1709/plugins/agents/git_agent.py +11 -4
- nc1709/plugins/agents/nextjs_agent.py +11 -4
- nc1709/plugins/agents/ollama_agent.py +574 -0
- nc1709/plugins/agents/test_agent.py +702 -0
- nc1709/prompts/unified_prompt.py +156 -14
- nc1709/requirements_tracker.py +526 -0
- nc1709/thinking_messages.py +337 -0
- nc1709/version_check.py +6 -2
- nc1709/web/server.py +63 -3
- nc1709/web/templates/index.html +819 -140
- {nc1709-1.15.4.dist-info → nc1709-1.18.8.dist-info}/METADATA +10 -7
- {nc1709-1.15.4.dist-info → nc1709-1.18.8.dist-info}/RECORD +28 -22
- {nc1709-1.15.4.dist-info → nc1709-1.18.8.dist-info}/WHEEL +0 -0
- {nc1709-1.15.4.dist-info → nc1709-1.18.8.dist-info}/entry_points.txt +0 -0
- {nc1709-1.15.4.dist-info → nc1709-1.18.8.dist-info}/licenses/LICENSE +0 -0
- {nc1709-1.15.4.dist-info → nc1709-1.18.8.dist-info}/top_level.txt +0 -0

nc1709/plugins/agents/django_agent.py
@@ -7,10 +7,17 @@ import re
 from pathlib import Path
 from typing import Dict, Any, Optional, List
 
-from ..base import (
-    Plugin, PluginMetadata, PluginCapability,
-    ActionResult
-)
+try:
+    from ..base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
+except ImportError:
+    # When loaded dynamically via importlib
+    from nc1709.plugins.base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
 
 
 class DjangoAgent(Plugin):
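The same change lands in all five agent modules in this release (django, docker, fastapi, git, nextjs): the unconditional relative import is wrapped in try/except so the plugin still imports when loaded outside its package. A minimal sketch of the failure mode it guards against, assuming a file-path loader along these lines (hypothetical, not shown in this diff):

    import importlib.util

    # Loading the module under a bare name gives it no parent package, so the
    # relative "from ..base import ..." inside it raises ImportError; the new
    # fallback then retries the absolute "from nc1709.plugins.base import ...".
    spec = importlib.util.spec_from_file_location(
        "django_agent", "nc1709/plugins/agents/django_agent.py")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)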

nc1709/plugins/agents/docker_agent.py
@@ -9,10 +9,17 @@ from pathlib import Path
 from typing import Dict, Any, Optional, List
 from dataclasses import dataclass
 
-from ..base import (
-    Plugin, PluginMetadata, PluginCapability,
-    ActionResult
-)
+try:
+    from ..base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
+except ImportError:
+    # When loaded dynamically via importlib
+    from nc1709.plugins.base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
 
 
 @dataclass

nc1709/plugins/agents/fastapi_agent.py
@@ -8,10 +8,17 @@ from pathlib import Path
 from typing import Dict, Any, Optional, List
 from dataclasses import dataclass
 
-from ..base import (
-    Plugin, PluginMetadata, PluginCapability,
-    ActionResult
-)
+try:
+    from ..base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
+except ImportError:
+    # When loaded dynamically via importlib
+    from nc1709.plugins.base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
 
 
 @dataclass

nc1709/plugins/agents/git_agent.py
@@ -8,10 +8,17 @@ from pathlib import Path
 from typing import Dict, Any, Optional, List
 from dataclasses import dataclass
 
-from ..base import (
-    Plugin, PluginMetadata, PluginCapability,
-    ActionResult
-)
+try:
+    from ..base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
+except ImportError:
+    # When loaded dynamically via importlib
+    from nc1709.plugins.base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
 
 
 @dataclass

nc1709/plugins/agents/nextjs_agent.py
@@ -7,10 +7,17 @@ import json
 from pathlib import Path
 from typing import Dict, Any, Optional, List
 
-from ..base import (
-    Plugin, PluginMetadata, PluginCapability,
-    ActionResult
-)
+try:
+    from ..base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
+except ImportError:
+    # When loaded dynamically via importlib
+    from nc1709.plugins.base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
 
 
 class NextJSAgent(Plugin):

nc1709/plugins/agents/ollama_agent.py
@@ -0,0 +1,574 @@
+"""
+Ollama Agent for NC1709
+Handles Ollama LLM model management operations
+"""
+import subprocess
+import json
+import re
+from pathlib import Path
+from typing import Dict, Any, Optional, List
+from dataclasses import dataclass
+
+try:
+    from ..base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
+except ImportError:
+    # When loaded dynamically via importlib
+    from nc1709.plugins.base import (
+        Plugin, PluginMetadata, PluginCapability,
+        ActionResult
+    )
+
+
+@dataclass
+class ModelInfo:
+    """Represents an Ollama model"""
+    name: str
+    size: str
+    modified: str
+    digest: str = ""
+
+    @property
+    def base_name(self) -> str:
+        """Get the model name without tag"""
+        return self.name.split(":")[0] if ":" in self.name else self.name
+
+    @property
+    def tag(self) -> str:
+        """Get the model tag"""
+        return self.name.split(":")[1] if ":" in self.name else "latest"
+
+
+@dataclass
+class RunningModel:
+    """Represents a running Ollama model"""
+    name: str
+    size: str
+    processor: str
+    until: str
+
+
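For reference, how the two ModelInfo properties defined above behave (values illustrative):

    info = ModelInfo(name="codellama:7b", size="3.8 GB", modified="2 days ago")
    assert info.base_name == "codellama"
    assert info.tag == "7b"

    untagged = ModelInfo(name="mistral", size="4.1 GB", modified="5 weeks ago")
    assert untagged.tag == "latest"  # a name with no tag defaults to "latest"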
+class OllamaAgent(Plugin):
+    """
+    Ollama model management agent.
+
+    Provides Ollama operations:
+    - Model management (list, pull, remove, show)
+    - Model running (run, stop)
+    - Model information (show details)
+    """
+
+    METADATA = PluginMetadata(
+        name="ollama",
+        version="1.0.0",
+        description="Ollama LLM model management",
+        author="NC1709 Team",
+        capabilities=[
+            PluginCapability.COMMAND_EXECUTION
+        ],
+        keywords=[
+            "ollama", "model", "llm", "llama", "mistral", "codellama",
+            "pull", "download", "run", "local", "ai", "chat",
+            "qwen", "phi", "gemma", "deepseek", "starcoder"
+        ],
+        config_schema={
+            "default_model": {"type": "string", "default": "llama3.2"},
+            "host": {"type": "string", "default": "http://localhost:11434"}
+        }
+    )
+
+    # Popular models with descriptions
+    POPULAR_MODELS = {
+        "llama3.2": "Meta's latest Llama 3.2 (1B/3B parameters)",
+        "llama3.1": "Meta's Llama 3.1 (8B/70B/405B parameters)",
+        "codellama": "Code-specialized Llama model",
+        "mistral": "Mistral 7B - fast and capable",
+        "mixtral": "Mistral's MoE model (8x7B)",
+        "phi3": "Microsoft's Phi-3 (3.8B parameters)",
+        "gemma2": "Google's Gemma 2 model",
+        "qwen2.5": "Alibaba's Qwen 2.5 model",
+        "qwen2.5-coder": "Qwen 2.5 optimized for coding",
+        "deepseek-coder-v2": "DeepSeek's coding model",
+        "starcoder2": "BigCode's StarCoder 2",
+        "nomic-embed-text": "Text embedding model",
+    }
+
+    @property
+    def metadata(self) -> PluginMetadata:
+        return self.METADATA
+
+    def __init__(self, config: Optional[Dict[str, Any]] = None):
+        super().__init__(config)
+        self._ollama_available = False
+        self._ollama_version = ""
+
+    def initialize(self) -> bool:
+        """Initialize the Ollama agent"""
+        try:
+            result = subprocess.run(
+                ["ollama", "--version"],
+                capture_output=True,
+                text=True
+            )
+            if result.returncode == 0:
+                self._ollama_available = True
+                # Parse version
+                output = result.stdout.strip() or result.stderr.strip()
+                if "version" in output.lower():
+                    self._ollama_version = output
+                return True
+        except FileNotFoundError:
+            self._error = "Ollama is not installed. Install from https://ollama.ai"
+            return False
+        except Exception as e:
+            self._error = f"Error checking Ollama: {e}"
+            return False
+
+        return False
+
+    def cleanup(self) -> None:
+        """Cleanup resources"""
+        pass
+
+    def _register_actions(self) -> None:
+        """Register Ollama actions"""
+        # Model listing
+        self.register_action(
+            "list",
+            self.list_models,
+            "List installed models"
+        )
+
+        self.register_action(
+            "ps",
+            self.list_running,
+            "List running models"
+        )
+
+        # Model management
+        self.register_action(
+            "pull",
+            self.pull_model,
+            "Download/update a model",
+            parameters={"model": {"type": "string", "required": True}}
+        )
+
+        self.register_action(
+            "rm",
+            self.remove_model,
+            "Remove a model",
+            parameters={"model": {"type": "string", "required": True}},
+            requires_confirmation=True,
+            dangerous=True
+        )
+
+        self.register_action(
+            "show",
+            self.show_model,
+            "Show model details",
+            parameters={"model": {"type": "string", "required": True}}
+        )
+
+        self.register_action(
+            "cp",
+            self.copy_model,
+            "Copy a model to a new name",
+            parameters={
+                "source": {"type": "string", "required": True},
+                "destination": {"type": "string", "required": True}
+            }
+        )
+
+        # Model running
+        self.register_action(
+            "run",
+            self.run_model,
+            "Run a model (start it for inference)",
+            parameters={
+                "model": {"type": "string", "required": True},
+                "prompt": {"type": "string", "optional": True}
+            }
+        )
+
+        self.register_action(
+            "stop",
+            self.stop_model,
+            "Stop a running model",
+            parameters={"model": {"type": "string", "required": True}}
+        )
+
+        # Utility
+        self.register_action(
+            "search",
+            self.search_models,
+            "Search for available models",
+            parameters={"query": {"type": "string", "optional": True}}
+        )
+
+        self.register_action(
+            "recommend",
+            self.recommend_model,
+            "Get model recommendations based on use case",
+            parameters={"use_case": {"type": "string", "required": True}}
+        )
+
+    def _run_ollama(self, *args, timeout: int = 60) -> subprocess.CompletedProcess:
+        """Run an ollama command"""
+        cmd = ["ollama"] + list(args)
+        return subprocess.run(
+            cmd,
+            capture_output=True,
+            text=True,
+            timeout=timeout
+        )
+
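One behavior worth noting before the command wrappers that follow: subprocess.run raises subprocess.TimeoutExpired when the timeout elapses rather than returning a CompletedProcess, and _run_ollama does not catch it. A caller-side guard might look like this (hypothetical helper, not part of the diff):

    import subprocess

    def pull_with_guard(agent: "OllamaAgent", model: str):
        """Hypothetical wrapper: converts a timed-out pull into a None result."""
        try:
            return agent.pull_model(model)  # uses an 1800s timeout internally
        except subprocess.TimeoutExpired:
            return None  # download did not finish within pull_model's budget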
+    def list_models(self) -> ActionResult:
+        """List installed Ollama models
+
+        Returns:
+            ActionResult with model list
+        """
+        result = self._run_ollama("list")
+
+        if result.returncode != 0:
+            return ActionResult.fail(result.stderr or "Failed to list models")
+
+        models = []
+        lines = result.stdout.strip().split("\n")
+
+        # Skip header line
+        for line in lines[1:]:
+            if not line.strip():
+                continue
+            # Parse: NAME ID SIZE MODIFIED
+            parts = line.split()
+            if len(parts) >= 4:
+                models.append(ModelInfo(
+                    name=parts[0],
+                    digest=parts[1] if len(parts) > 1 else "",
+                    size=parts[2] if len(parts) > 2 else "",
+                    modified=" ".join(parts[3:]) if len(parts) > 3 else ""
+                ))
+
+        if not models:
+            return ActionResult.ok(
+                message="No models installed. Use 'ollama pull <model>' to download one.",
+                data=[]
+            )
+
+        return ActionResult.ok(
+            message=f"{len(models)} models installed",
+            data=models
+        )
+
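A note on the whitespace split above, traced against a representative `ollama list` row (output format hedged; it can vary across Ollama releases):

    line = "llama3.2:3b    a80c4f17acd5    2.0 GB    3 weeks ago"
    parts = line.split()
    print(parts[0])             # name     -> "llama3.2:3b"
    print(parts[1])             # digest   -> "a80c4f17acd5"
    print(parts[2])             # size     -> "2.0" (the "GB" unit token is
                                #             not consumed by the size field)
    print(" ".join(parts[3:]))  # modified -> "GB 3 weeks ago"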
+    def list_running(self) -> ActionResult:
+        """List currently running models
+
+        Returns:
+            ActionResult with running model list
+        """
+        result = self._run_ollama("ps")
+
+        if result.returncode != 0:
+            return ActionResult.fail(result.stderr or "Failed to list running models")
+
+        models = []
+        lines = result.stdout.strip().split("\n")
+
+        # Skip header line
+        for line in lines[1:]:
+            if not line.strip():
+                continue
+            parts = line.split()
+            if len(parts) >= 4:
+                models.append(RunningModel(
+                    name=parts[0],
+                    size=parts[2] if len(parts) > 2 else "",
+                    processor=parts[3] if len(parts) > 3 else "",
+                    until=" ".join(parts[4:]) if len(parts) > 4 else ""
+                ))
+
+        if not models:
+            return ActionResult.ok(
+                message="No models currently running",
+                data=[]
+            )
+
+        return ActionResult.ok(
+            message=f"{len(models)} models running",
+            data=models
+        )
+
+    def pull_model(self, model: str) -> ActionResult:
+        """Download or update a model
+
+        Args:
+            model: Model name (e.g., "llama3.2", "codellama:7b")
+
+        Returns:
+            ActionResult with download status
+        """
+        # Extended timeout for large model downloads (30 minutes)
+        result = self._run_ollama("pull", model, timeout=1800)
+
+        if result.returncode != 0:
+            error_msg = result.stderr or "Failed to pull model"
+            if "not found" in error_msg.lower():
+                return ActionResult.fail(
+                    f"Model '{model}' not found. Check https://ollama.ai/library for available models."
+                )
+            return ActionResult.fail(error_msg)
+
+        return ActionResult.ok(
+            message=f"Successfully pulled model: {model}",
+            data=result.stdout
+        )
+
+    def remove_model(self, model: str) -> ActionResult:
+        """Remove a model
+
+        Args:
+            model: Model name to remove
+
+        Returns:
+            ActionResult with removal status
+        """
+        result = self._run_ollama("rm", model)
+
+        if result.returncode != 0:
+            return ActionResult.fail(result.stderr or f"Failed to remove model: {model}")
+
+        return ActionResult.ok(f"Removed model: {model}")
+
+    def show_model(self, model: str) -> ActionResult:
+        """Show model details
+
+        Args:
+            model: Model name
+
+        Returns:
+            ActionResult with model details
+        """
+        result = self._run_ollama("show", model)
+
+        if result.returncode != 0:
+            return ActionResult.fail(result.stderr or f"Model '{model}' not found")
+
+        return ActionResult.ok(
+            message=f"Details for {model}",
+            data=result.stdout
+        )
+
+    def copy_model(self, source: str, destination: str) -> ActionResult:
+        """Copy a model to a new name
+
+        Args:
+            source: Source model name
+            destination: New model name
+
+        Returns:
+            ActionResult with copy status
+        """
+        result = self._run_ollama("cp", source, destination)
+
+        if result.returncode != 0:
+            return ActionResult.fail(result.stderr or "Failed to copy model")
+
+        return ActionResult.ok(f"Copied {source} to {destination}")
+
+    def run_model(self, model: str, prompt: Optional[str] = None) -> ActionResult:
+        """Start a model for inference
+
+        Args:
+            model: Model name
+            prompt: Optional prompt to send
+
+        Returns:
+            ActionResult with run status
+        """
+        if prompt:
+            # Run with prompt
+            result = self._run_ollama("run", model, prompt, timeout=300)
+        else:
+            # Just load the model
+            result = self._run_ollama("run", model, "--", timeout=60)
+
+        if result.returncode != 0:
+            error_msg = result.stderr or "Failed to run model"
+            if "not found" in error_msg.lower():
+                return ActionResult.fail(
+                    f"Model '{model}' not found locally. Pull it first with: ollama pull {model}"
+                )
+            return ActionResult.fail(error_msg)
+
+        return ActionResult.ok(
+            message=f"Model {model} response",
+            data=result.stdout
+        )
+
+    def stop_model(self, model: str) -> ActionResult:
+        """Stop a running model
+
+        Args:
+            model: Model name to stop
+
+        Returns:
+            ActionResult with stop status
+        """
+        result = self._run_ollama("stop", model)
+
+        if result.returncode != 0:
+            return ActionResult.fail(result.stderr or f"Failed to stop model: {model}")
+
+        return ActionResult.ok(f"Stopped model: {model}")
+
+    def search_models(self, query: Optional[str] = None) -> ActionResult:
+        """Search for available models
+
+        Args:
+            query: Optional search query
+
+        Returns:
+            ActionResult with matching models
+        """
+        # Return popular models (since ollama doesn't have a search API)
+        if query:
+            query_lower = query.lower()
+            matches = {
+                name: desc for name, desc in self.POPULAR_MODELS.items()
+                if query_lower in name.lower() or query_lower in desc.lower()
+            }
+        else:
+            matches = self.POPULAR_MODELS
+
+        if not matches:
+            return ActionResult.ok(
+                message=f"No models found matching '{query}'. Visit https://ollama.ai/library for full list.",
+                data=[]
+            )
+
+        return ActionResult.ok(
+            message=f"Found {len(matches)} popular models" + (f" matching '{query}'" if query else ""),
+            data=matches
+        )
+
+    def recommend_model(self, use_case: str) -> ActionResult:
+        """Recommend models based on use case
+
+        Args:
+            use_case: Description of intended use (e.g., "coding", "chat", "embeddings")
+
+        Returns:
+            ActionResult with recommendations
+        """
+        use_case_lower = use_case.lower()
+
+        recommendations = {}
+
+        if any(kw in use_case_lower for kw in ["code", "programming", "developer", "coding"]):
+            recommendations = {
+                "qwen2.5-coder": "Best for coding tasks, code completion, and review",
+                "codellama": "Meta's code-specialized model",
+                "deepseek-coder-v2": "Excellent for code generation",
+                "starcoder2": "Trained on code repositories",
+            }
+        elif any(kw in use_case_lower for kw in ["chat", "conversation", "assistant", "general"]):
+            recommendations = {
+                "llama3.2": "Fast, compact, great for general chat",
+                "llama3.1": "More capable, slower",
+                "mistral": "Fast and capable for conversations",
+                "qwen2.5": "Strong multilingual support",
+            }
+        elif any(kw in use_case_lower for kw in ["embed", "search", "rag", "vector"]):
+            recommendations = {
+                "nomic-embed-text": "Best for text embeddings and RAG",
+            }
+        elif any(kw in use_case_lower for kw in ["small", "fast", "light", "edge"]):
+            recommendations = {
+                "llama3.2:1b": "1B parameters, very fast",
+                "phi3:mini": "Small but capable",
+                "gemma2:2b": "Google's small model",
+            }
+        elif any(kw in use_case_lower for kw in ["large", "powerful", "best"]):
+            recommendations = {
+                "llama3.1:70b": "70B parameters, very capable",
+                "mixtral": "8x7B MoE, excellent quality",
+                "qwen2.5:72b": "72B parameters, multilingual",
+            }
+        else:
+            recommendations = {
+                "llama3.2": "Good all-around default choice",
+                "mistral": "Fast and capable",
+                "qwen2.5-coder": "If you need coding help",
+            }
+
+        return ActionResult.ok(
+            message=f"Recommendations for: {use_case}",
+            data=recommendations
+        )
+
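A quick trace of the keyword dispatch above (logic copied from recommend_model, illustrative only):

    use_case_lower = "local coding assistant".lower()
    is_coding = any(kw in use_case_lower
                    for kw in ["code", "programming", "developer", "coding"])
    print(is_coding)  # True: recommend_model returns the coder-focused set
                      # (qwen2.5-coder, codellama, deepseek-coder-v2, starcoder2)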
+    def can_handle(self, request: str) -> float:
+        """Check if request is Ollama-related"""
+        request_lower = request.lower()
+
+        # High confidence keywords
+        high_conf = ["ollama", "pull model", "download model", "local llm", "install model"]
+        for kw in high_conf:
+            if kw in request_lower:
+                return 0.95
+
+        # Model names
+        model_names = ["llama", "mistral", "codellama", "mixtral", "phi", "gemma", "qwen", "deepseek", "starcoder"]
+        for model in model_names:
+            if model in request_lower:
+                # Check if it's about running/pulling/installing
+                if any(verb in request_lower for verb in ["pull", "download", "install", "run", "use", "get"]):
+                    return 0.85
+
+        # Medium confidence
+        med_conf = ["local model", "llm model", "ai model"]
+        for kw in med_conf:
+            if kw in request_lower:
+                return 0.6
+
+        return super().can_handle(request)
+
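For reference, the scores these rules produce for a few sample requests, assuming OllamaAgent can be constructed with its default config (the Plugin base is not part of this diff):

    agent = OllamaAgent()
    assert agent.can_handle("ollama list") == 0.95        # high-confidence keyword
    assert agent.can_handle("download llama3.2") == 0.85  # model name plus verb
    assert agent.can_handle("pick an ai model") == 0.6    # medium-confidence phrase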
+    def handle_request(self, request: str, **kwargs) -> Optional[ActionResult]:
+        """Handle a natural language request"""
+        request_lower = request.lower()
+
+        # List models
+        if any(kw in request_lower for kw in ["list models", "show models", "installed models", "what models"]):
+            return self.list_models()
+
+        # Running models
+        if any(kw in request_lower for kw in ["running models", "active models", "loaded models"]):
+            return self.list_running()
+
+        # Pull/download model
+        if any(kw in request_lower for kw in ["pull", "download", "install", "get"]):
+            # Extract model name
+            for model in self.POPULAR_MODELS:
+                if model in request_lower:
+                    return self.pull_model(model)
+            # Check for specific model names like "llama3.2:1b"
+            model_match = re.search(r'(llama\S*|mistral\S*|codellama\S*|qwen\S*|phi\S*|gemma\S*)', request_lower)
+            if model_match:
+                return self.pull_model(model_match.group(1))
+
+        # Recommend
+        if any(kw in request_lower for kw in ["recommend", "suggest", "best model", "which model"]):
+            # Extract use case
+            use_case = request_lower
+            for remove_kw in ["recommend", "suggest", "best model", "which model", "for", "me", "a"]:
+                use_case = use_case.replace(remove_kw, "")
+            return self.recommend_model(use_case.strip() or "general")
+
+        # Search
+        if "search" in request_lower or "find model" in request_lower:
+            query = request_lower.replace("search", "").replace("find model", "").strip()
+            return self.search_models(query or None)
+
+        return None
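Taken together, the new plugin is exercised roughly like this (a sketch; constructor and registration behavior come from the Plugin base, which is outside this diff):

    agent = OllamaAgent()
    if agent.initialize():  # probes `ollama --version` on PATH
        result = agent.handle_request("list models")
        print(result)  # ActionResult from list_models()
        # Requests matching no keyword rule return None, signaling the caller
        # to fall through to another plugin:
        assert agent.handle_request("what's the weather") is None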