mcp-ollama-python 1.0.1 (py3-none-any.whl)
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- mcp_ollama_python/__init__.py +1 -0
- mcp_ollama_python/__main__.py +8 -0
- mcp_ollama_python/autoloader.py +97 -0
- mcp_ollama_python/examples/get_spanish_poem.py +59 -0
- mcp_ollama_python/main.py +214 -0
- mcp_ollama_python/models.py +119 -0
- mcp_ollama_python/ollama_client.py +169 -0
- mcp_ollama_python/response_formatter.py +120 -0
- mcp_ollama_python/server.py +286 -0
- mcp_ollama_python/tools/__init__.py +6 -0
- mcp_ollama_python/tools/chat.py +130 -0
- mcp_ollama_python/tools/delete.py +48 -0
- mcp_ollama_python/tools/embed.py +63 -0
- mcp_ollama_python/tools/execute.py +196 -0
- mcp_ollama_python/tools/generate.py +76 -0
- mcp_ollama_python/tools/list.py +34 -0
- mcp_ollama_python/tools/ps.py +34 -0
- mcp_ollama_python/tools/pull.py +43 -0
- mcp_ollama_python/tools/show.py +48 -0
- mcp_ollama_python-1.0.1.dist-info/METADATA +554 -0
- mcp_ollama_python-1.0.1.dist-info/RECORD +24 -0
- mcp_ollama_python-1.0.1.dist-info/WHEEL +4 -0
- mcp_ollama_python-1.0.1.dist-info/entry_points.txt +3 -0
- mcp_ollama_python-1.0.1.dist-info/licenses/LICENSE +21 -0
mcp_ollama_python/response_formatter.py
@@ -0,0 +1,120 @@
"""
Response formatting utilities
"""

import json
from typing import Any, Dict, List

try:
    from mcp_ollama_python.models import ResponseFormat
except ImportError:
    from .models import ResponseFormat


def format_response(content: Any, format: ResponseFormat) -> str:
    """Format response content based on the specified format"""
    # Handle dict/list input - convert to JSON string first
    if isinstance(content, (dict, list)):
        if format == ResponseFormat.JSON:
            return json.dumps(content, indent=2)
        else:
            # Format as markdown
            return json_to_markdown(content)

    # Handle string input
    if format == ResponseFormat.JSON:
        # For JSON format, validate and potentially wrap errors
        try:
            # Try to parse to validate it's valid JSON
            json.loads(content)
            return content
        except json.JSONDecodeError:
            # If not valid JSON, wrap in error object
            return json.dumps(
                {
                    "error": "Invalid JSON content",
                    "raw_content": content,
                }
            )

    # Format as markdown
    try:
        data = json.loads(content)
        return json_to_markdown(data)
    except json.JSONDecodeError:
        # If not valid JSON, return as-is
        return content


def json_to_markdown(data: Any, indent: str = "") -> str:
    """Convert JSON data to markdown format"""
    # Handle null/undefined
    if data is None:
        return f"{indent}_null_"

    # Handle primitives
    if not isinstance(data, (dict, list)):
        return f"{indent}{str(data)}"

    # Handle arrays
    if isinstance(data, list):
        if len(data) == 0:
            return f"{indent}_empty array_"

        # Check if array of objects with consistent keys (table format)
        if len(data) > 0 and isinstance(data[0], dict) and data[0] is not None:
            return array_to_markdown_table(data, indent)

        # Array of primitives or mixed types
        return "\n".join(f"{indent}- {json_to_markdown(item, '')}" for item in data)

    # Handle objects
    entries = list(data.items())
    if len(entries) == 0:
        return f"{indent}_empty object_"

    return "\n".join(_format_object_entry(key, value, indent) for key, value in entries)


def _format_object_entry(key: str, value: Any, indent: str) -> str:
    """Format a single key-value pair in an object"""
    formatted_key = key.replace("_", " ")
    if isinstance(value, (dict, list)) and value is not None:
        return f"{indent}**{formatted_key}:**\n{json_to_markdown(value, indent + ' ')}"
    return f"{indent}**{formatted_key}:** {value}"


def array_to_markdown_table(data: List[Dict[str, Any]], indent: str = "") -> str:
    """Convert array of objects to markdown table format"""
    if not data or not isinstance(data[0], dict):
        return json_to_markdown(data, indent)

    # Get all unique keys from all objects, preserving insertion order
    all_keys = dict.fromkeys(
        key for item in data if isinstance(item, dict) for key in item
    )

    if not all_keys:
        return f"{indent}_empty array_"

    headers = list(all_keys)
    rows = []

    # Add header row
    header_row = "| " + " | ".join(headers) + " |"
    separator_row = "|" + "|".join("---" for _ in headers) + "|"
    rows.extend([header_row, separator_row])

    # Add data rows
    for item in data:
        if isinstance(item, dict):
            row_values = []
            for header in headers:
                value = item.get(header, "")
                # Truncate long values for table display
                if isinstance(value, str) and len(value) > 50:
                    value = value[:47] + "..."
                row_values.append(str(value))
            rows.append("| " + " | ".join(row_values) + " |")

    return "\n".join(rows)
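For orientation, here is a minimal usage sketch (not part of the package) of the formatter above. The sample model list is invented for illustration; the imports mirror the absolute-import branch of the file's own try/except.

```python
# Hypothetical usage of format_response; the sample payload is invented.
from mcp_ollama_python.models import ResponseFormat
from mcp_ollama_python.response_formatter import format_response

models = [
    {"name": "llama3.2", "parameter_size": "3B"},
    {"name": "nomic-embed-text", "parameter_size": "137M"},
]

print(format_response(models, ResponseFormat.JSON))      # pretty-printed JSON
print(format_response(models, ResponseFormat.MARKDOWN))  # markdown table via array_to_markdown_table
```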
mcp_ollama_python/server.py
@@ -0,0 +1,286 @@
"""
MCP Server implementation for Ollama
"""

import json
from typing import Any, Dict, Optional

try:
    from mcp_ollama_python.ollama_client import OllamaClient
    from mcp_ollama_python.autoloader import discover_tools_with_handlers, ToolRegistry
    from mcp_ollama_python.models import ResponseFormat
except ImportError:
    from .ollama_client import OllamaClient
    from .autoloader import discover_tools_with_handlers, ToolRegistry
    from .models import ResponseFormat


class OllamaMCPServer:
    """MCP Server for Ollama operations"""

    def __init__(self, ollama_client: Optional[OllamaClient] = None):
        self.ollama_client = ollama_client or OllamaClient()
        self.tool_registry: Optional[ToolRegistry] = None
        self._resources: Dict[str, Dict[str, Any]] = {}
        self._prompts: Dict[str, Dict[str, Any]] = {}
        self._initialize_default_resources()
        self._initialize_default_prompts()

    async def handle_list_tools(self) -> Dict[str, Any]:
        """Handle list_tools request"""
        # Discover tools and cache the registry
        if self.tool_registry is None:
            self.tool_registry = await discover_tools_with_handlers()

        return {
            "tools": [
                {
                    "name": tool.name,
                    "description": tool.description,
                    "inputSchema": tool.input_schema,
                }
                for tool in self.tool_registry.tools
            ]
        }

    async def handle_call_tool(self, name: str, args: Dict[str, Any]) -> Dict[str, Any]:
        """Handle call_tool request"""
        try:
            # Ensure tool registry is loaded
            if self.tool_registry is None:
                self.tool_registry = await discover_tools_with_handlers()

            # Get the handler for this tool
            handler = self.tool_registry.get_handler(name)

            if not handler:
                raise ValueError(f"Unknown tool: {name}")

            # Determine format from args
            format_arg = args.get("format", "json")
            response_format = (
                ResponseFormat.MARKDOWN
                if format_arg == "markdown"
                else ResponseFormat.JSON
            )

            # Call the tool handler directly
            result = await handler(self.ollama_client, args, response_format)

            # Try to parse the result as JSON for structured content
            structured_data = None
            try:
                structured_data = json.loads(result)
            except (json.JSONDecodeError, TypeError):
                pass

            return {
                "content": [
                    {
                        "type": "text",
                        "text": result,
                    }
                ],
                "structuredContent": structured_data,
            }

        except Exception as error:
            error_message = str(error) if isinstance(error, Exception) else str(error)
            return {
                "content": [
                    {
                        "type": "text",
                        "text": f"Error: {error_message}",
                    }
                ],
                "isError": True,
            }

    def _initialize_default_resources(self):
        """Initialize default resources for the MCP server"""
        self._resources = {
            "ollama://models": {
                "uri": "ollama://models",
                "name": "Available Models",
                "description": "List of all available Ollama models",
                "mimeType": "application/json",
            },
            "ollama://running": {
                "uri": "ollama://running",
                "name": "Running Models",
                "description": "List of currently running models",
                "mimeType": "application/json",
            },
            "ollama://config": {
                "uri": "ollama://config",
                "name": "Ollama Configuration",
                "description": "Current Ollama server configuration",
                "mimeType": "application/json",
            },
        }

    def _initialize_default_prompts(self):
        """Initialize default prompts for the MCP server"""
        self._prompts = {
            "explain_lora": {
                "name": "explain_lora",
                "description": "Explain LoRA (Low-Rank Adaptation) technique",
                "arguments": [
                    {
                        "name": "detail_level",
                        "description": "Level of detail: basic, intermediate, or advanced",
                        "required": False,
                    }
                ],
            },
            "code_review": {
                "name": "code_review",
                "description": "Review code and provide feedback",
                "arguments": [
                    {
                        "name": "language",
                        "description": "Programming language",
                        "required": True,
                    },
                    {
                        "name": "focus",
                        "description": "Focus areas: security, performance, style, or all",
                        "required": False,
                    },
                ],
            },
            "hello_world": {
                "name": "hello_world",
                "description": "Generate Hello World code in any language",
                "arguments": [
                    {
                        "name": "language",
                        "description": "Programming language",
                        "required": True,
                    }
                ],
            },
        }

    async def handle_list_resources(self) -> Dict[str, Any]:
        """Handle list_resources request"""
        return {
            "resources": [
                {
                    "uri": resource["uri"],
                    "name": resource["name"],
                    "description": resource["description"],
                    "mimeType": resource.get("mimeType", "text/plain"),
                }
                for resource in self._resources.values()
            ]
        }

    async def handle_read_resource(self, uri: str) -> Dict[str, Any]:
        """Handle read_resource request"""
        try:
            if uri not in self._resources:
                raise ValueError(f"Unknown resource: {uri}")

            # Fetch the actual resource data
            if uri == "ollama://models":
                data = await self.ollama_client.list()
                content = json.dumps(data, indent=2)
            elif uri == "ollama://running":
                data = await self.ollama_client.ps()
                content = json.dumps(data, indent=2)
            elif uri == "ollama://config":
                config_data = {
                    "host": self.ollama_client.host,
                    "has_api_key": bool(self.ollama_client.api_key),
                }
                content = json.dumps(config_data, indent=2)
            else:
                content = "Resource data not available"

            return {
                "contents": [
                    {
                        "uri": uri,
                        "mimeType": self._resources[uri].get("mimeType", "text/plain"),
                        "text": content,
                    }
                ]
            }

        except Exception as error:
            error_message = str(error) if isinstance(error, Exception) else str(error)
            return {
                "contents": [
                    {
                        "uri": uri,
                        "mimeType": "text/plain",
                        "text": f"Error reading resource: {error_message}",
                    }
                ],
                "isError": True,
            }

    async def handle_list_prompts(self) -> Dict[str, Any]:
        """Handle list_prompts request"""
        return {
            "prompts": [
                {
                    "name": prompt["name"],
                    "description": prompt["description"],
                    "arguments": prompt.get("arguments", []),
                }
                for prompt in self._prompts.values()
            ]
        }

    async def handle_get_prompt(
        self, name: str, arguments: Optional[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """Handle get_prompt request"""
        try:
            if name not in self._prompts:
                raise ValueError(f"Unknown prompt: {name}")

            prompt_def = self._prompts[name]
            args = arguments or {}

            # Generate prompt based on name
            if name == "explain_lora":
                detail = args.get("detail_level", "basic")
                prompt_text = f"""Explain LoRA (Low-Rank Adaptation) at a {detail} level.
Include:
- What it is and why it's useful
- How it works technically
- Use cases and benefits
- Comparison with full fine-tuning"""
            elif name == "code_review":
                language = args.get("language", "Python")
                focus = args.get("focus", "all")
                prompt_text = f"""Review the following {language} code with focus on {focus}.
Provide:
- Issues found
- Suggestions for improvement
- Best practices recommendations
- Security concerns (if applicable)"""
            elif name == "hello_world":
                language = args.get("language", "Python")
                prompt_text = f"""Write a complete, well-commented Hello World program in {language}.
Include:
- Proper syntax and structure
- Comments explaining each part
- Best practices for the language
- How to run the program"""
            else:
                prompt_text = f"Prompt template for {name}"

            return {
                "description": prompt_def["description"],
                "messages": [
                    {"role": "user", "content": {"type": "text", "text": prompt_text}}
                ],
            }

        except Exception as error:
            error_message = str(error) if isinstance(error, Exception) else str(error)
            raise ValueError(f"Error getting prompt: {error_message}")
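A minimal driver sketch for the server class above, assuming a local Ollama instance is reachable through the default `OllamaClient()`. The tool name `ollama_list` is inferred from the `ollama_*` naming pattern of the other tools and is not confirmed by this diff.

```python
# Hypothetical driver; assumes a local Ollama daemon and that tools/list.py
# registers a tool named "ollama_list" (inferred, not confirmed by this diff).
import asyncio

from mcp_ollama_python.server import OllamaMCPServer


async def main() -> None:
    server = OllamaMCPServer()

    tools = await server.handle_list_tools()
    print("tools:", [t["name"] for t in tools["tools"]])

    result = await server.handle_call_tool("ollama_list", {"format": "markdown"})
    print(result["content"][0]["text"])


asyncio.run(main())
```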
mcp_ollama_python/tools/chat.py
@@ -0,0 +1,130 @@
"""
Ollama chat tool
"""

from typing import Dict, Any
from ..models import (
    ToolDefinition,
    ResponseFormat,
    ChatMessage,
    Tool as OllamaTool,
    GenerationOptions,
    ModelNotFoundError,
)
from ..ollama_client import OllamaClient
from ..response_formatter import format_response


async def chat_handler(
    ollama: OllamaClient, args: Dict[str, Any], format: ResponseFormat
) -> str:
    """Interactive chat with models (supports tools/functions)"""
    model = args.get("model")
    messages = args.get("messages")
    tools = args.get("tools")
    options = args.get("options")

    if not model:
        raise ValueError("Model name is required")
    if not messages:
        raise ValueError("Messages are required")

    try:
        # Convert messages to ChatMessage objects
        chat_messages = []
        for msg in messages:
            chat_messages.append(ChatMessage(**msg))

        # Convert tools if provided
        chat_tools = None
        if tools:
            chat_tools = []
            for tool in tools:
                chat_tools.append(OllamaTool(**tool))

        # Convert options if provided
        gen_options = None
        if options:
            gen_options = GenerationOptions(**options)

        result = await ollama.chat(model, chat_messages, chat_tools, gen_options)
        return format_response(result, format)
    except Exception as e:
        if "model not found" in str(e).lower():
            raise ModelNotFoundError(model)
        raise


# Tool definition
tool_definition = ToolDefinition(
    name="ollama_chat",
    description="Interactive chat with Ollama models. Supports multi-turn conversations, tool calling, and structured outputs.",
    input_schema={
        "type": "object",
        "properties": {
            "model": {
                "type": "string",
                "description": "Name of the model to use for chat",
            },
            "messages": {
                "type": "array",
                "description": "Array of chat messages",
                "items": {
                    "type": "object",
                    "properties": {
                        "role": {
                            "type": "string",
                            "enum": ["system", "user", "assistant"],
                        },
                        "content": {
                            "type": "string",
                        },
                        "images": {
                            "type": "array",
                            "items": {"type": "string"},
                        },
                    },
                    "required": ["role", "content"],
                },
            },
            "tools": {
                "type": "array",
                "description": "Array of tools available to the model",
                "items": {
                    "type": "object",
                    "properties": {
                        "type": {"type": "string"},
                        "function": {
                            "type": "object",
                            "properties": {
                                "name": {"type": "string"},
                                "description": {"type": "string"},
                                "parameters": {"type": "object"},
                            },
                        },
                    },
                },
            },
            "options": {
                "type": "object",
                "description": "Generation options (temperature, top_p, etc.)",
                "properties": {
                    "temperature": {"type": "number", "minimum": 0, "maximum": 2},
                    "top_p": {"type": "number", "minimum": 0, "maximum": 1},
                    "top_k": {"type": "integer", "minimum": 0},
                    "num_predict": {"type": "integer", "minimum": 1},
                    "repeat_penalty": {"type": "number", "minimum": 0},
                    "seed": {"type": "integer"},
                    "stop": {"type": "array", "items": {"type": "string"}},
                },
            },
            "format": {
                "type": "string",
                "enum": ["json", "markdown"],
                "description": "Output format (default: json)",
                "default": "json",
            },
        },
        "required": ["model", "messages"],
    },
)
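A sketch of calling `chat_handler` directly, bypassing the MCP layer. The model name and generation options are placeholders, and `OllamaClient()` is assumed to default to a local server.

```python
# Hypothetical direct call to chat_handler; "llama3.2" is a placeholder model.
import asyncio

from mcp_ollama_python.models import ResponseFormat
from mcp_ollama_python.ollama_client import OllamaClient
from mcp_ollama_python.tools.chat import chat_handler


async def main() -> None:
    args = {
        "model": "llama3.2",
        "messages": [
            {"role": "system", "content": "You are a terse assistant."},
            {"role": "user", "content": "Say hello in Spanish."},
        ],
        "options": {"temperature": 0.2},
    }
    print(await chat_handler(OllamaClient(), args, ResponseFormat.MARKDOWN))


asyncio.run(main())
```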
mcp_ollama_python/tools/delete.py
@@ -0,0 +1,48 @@
"""
Ollama delete model tool
"""

from typing import Dict, Any
from ..models import ToolDefinition, ResponseFormat, ModelNotFoundError
from ..ollama_client import OllamaClient
from ..response_formatter import format_response


async def delete_handler(
    ollama: OllamaClient, args: Dict[str, Any], format: ResponseFormat
) -> str:
    """Remove models from local storage"""
    model = args.get("model")
    if not model:
        raise ValueError("Model name is required")

    try:
        result = await ollama.delete(model)
        return format_response(result, format)
    except Exception as e:
        if "model not found" in str(e).lower() or "no such file" in str(e).lower():
            raise ModelNotFoundError(model)
        raise


# Tool definition
tool_definition = ToolDefinition(
    name="ollama_delete",
    description="Remove models from local Ollama storage to free up disk space.",
    input_schema={
        "type": "object",
        "properties": {
            "model": {
                "type": "string",
                "description": "Name of the model to delete",
            },
            "format": {
                "type": "string",
                "enum": ["json", "markdown"],
                "description": "Output format (default: json)",
                "default": "json",
            },
        },
        "required": ["model"],
    },
)
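A sketch of the delete path, including the `ModelNotFoundError` branch; the model tag is a placeholder.

```python
# Hypothetical deletion call; "old-model:7b" is a placeholder tag.
import asyncio

from mcp_ollama_python.models import ModelNotFoundError, ResponseFormat
from mcp_ollama_python.ollama_client import OllamaClient
from mcp_ollama_python.tools.delete import delete_handler


async def main() -> None:
    try:
        out = await delete_handler(
            OllamaClient(), {"model": "old-model:7b"}, ResponseFormat.JSON
        )
        print(out)
    except ModelNotFoundError as err:
        print(f"Nothing to delete: {err}")


asyncio.run(main())
```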
mcp_ollama_python/tools/embed.py
@@ -0,0 +1,63 @@
"""
Ollama embed tool
"""

from typing import Dict, Any
from ..models import ToolDefinition, ResponseFormat, ModelNotFoundError
from ..ollama_client import OllamaClient
from ..response_formatter import format_response


async def embed_handler(
    ollama: OllamaClient, args: Dict[str, Any], format: ResponseFormat
) -> str:
    """Generate embeddings for text"""
    model = args.get("model")
    input_text = args.get("input")

    if not model:
        raise ValueError("Model name is required")
    if not input_text:
        raise ValueError("Input text is required")

    try:
        result = await ollama.embed(model, input_text)
        return format_response(result, format)
    except Exception as e:
        if "model not found" in str(e).lower():
            raise ModelNotFoundError(model)
        raise


# Tool definition
tool_definition = ToolDefinition(
    name="ollama_embed",
    description="Generate vector embeddings for text using Ollama's embedding models like nomic-embed-text.",
    input_schema={
        "type": "object",
        "properties": {
            "model": {
                "type": "string",
                "description": "Name of the embedding model (e.g., 'nomic-embed-text')",
            },
            "input": {
                "oneOf": [
                    {"type": "string", "description": "Single text string to embed"},
                    {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Array of text strings to embed",
                    },
                ],
                "description": "Text or array of texts to generate embeddings for",
            },
            "format": {
                "type": "string",
                "enum": ["json", "markdown"],
                "description": "Output format (default: json)",
                "default": "json",
            },
        },
        "required": ["model", "input"],
    },
)
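A sketch of both input shapes the embed schema allows (a single string or a list of strings); the embedding model name follows the example given in the schema and is otherwise a placeholder.

```python
# Hypothetical embedding calls; "nomic-embed-text" follows the schema's example.
import asyncio

from mcp_ollama_python.models import ResponseFormat
from mcp_ollama_python.ollama_client import OllamaClient
from mcp_ollama_python.tools.embed import embed_handler


async def main() -> None:
    client = OllamaClient()
    single = await embed_handler(
        client, {"model": "nomic-embed-text", "input": "hello world"}, ResponseFormat.JSON
    )
    batch = await embed_handler(
        client,
        {"model": "nomic-embed-text", "input": ["first text", "second text"]},
        ResponseFormat.JSON,
    )
    print(single)
    print(batch)


asyncio.run(main())
```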