agnt5-0.1.0-cp39-abi3-macosx_11_0_arm64.whl
This diff shows the contents of package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective registries.
- agnt5/__init__.py +307 -0
- agnt5/__pycache__/__init__.cpython-311.pyc +0 -0
- agnt5/__pycache__/agent.cpython-311.pyc +0 -0
- agnt5/__pycache__/context.cpython-311.pyc +0 -0
- agnt5/__pycache__/durable.cpython-311.pyc +0 -0
- agnt5/__pycache__/extraction.cpython-311.pyc +0 -0
- agnt5/__pycache__/memory.cpython-311.pyc +0 -0
- agnt5/__pycache__/reflection.cpython-311.pyc +0 -0
- agnt5/__pycache__/runtime.cpython-311.pyc +0 -0
- agnt5/__pycache__/task.cpython-311.pyc +0 -0
- agnt5/__pycache__/tool.cpython-311.pyc +0 -0
- agnt5/__pycache__/tracing.cpython-311.pyc +0 -0
- agnt5/__pycache__/types.cpython-311.pyc +0 -0
- agnt5/__pycache__/workflow.cpython-311.pyc +0 -0
- agnt5/_core.abi3.so +0 -0
- agnt5/agent.py +1086 -0
- agnt5/context.py +406 -0
- agnt5/durable.py +1050 -0
- agnt5/extraction.py +410 -0
- agnt5/llm/__init__.py +179 -0
- agnt5/llm/__pycache__/__init__.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/anthropic.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/azure.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/base.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/google.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/mistral.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/openai.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/together.cpython-311.pyc +0 -0
- agnt5/llm/anthropic.py +319 -0
- agnt5/llm/azure.py +348 -0
- agnt5/llm/base.py +315 -0
- agnt5/llm/google.py +373 -0
- agnt5/llm/mistral.py +330 -0
- agnt5/llm/model_registry.py +467 -0
- agnt5/llm/models.json +227 -0
- agnt5/llm/openai.py +334 -0
- agnt5/llm/together.py +377 -0
- agnt5/memory.py +746 -0
- agnt5/reflection.py +514 -0
- agnt5/runtime.py +699 -0
- agnt5/task.py +476 -0
- agnt5/testing.py +451 -0
- agnt5/tool.py +516 -0
- agnt5/tracing.py +624 -0
- agnt5/types.py +210 -0
- agnt5/workflow.py +897 -0
- agnt5-0.1.0.dist-info/METADATA +93 -0
- agnt5-0.1.0.dist-info/RECORD +49 -0
- agnt5-0.1.0.dist-info/WHEEL +4 -0
agnt5/llm/mistral.py
ADDED
@@ -0,0 +1,330 @@

"""
Mistral AI integration for AGNT5 SDK.

Provides integration with Mistral's models using OpenAI-compatible API
including proper message conversion, tool calling, and streaming support.
"""

import json
import os
from typing import Any, AsyncIterator, Dict, List, Optional, Union

from .base import (
    LanguageModel,
    LanguageModelResponse,
    LanguageModelType,
    LLMError,
    Message,
    Role,
    TokenUsage,
    ToolCall,
)

try:
    import openai
    OPENAI_AVAILABLE = True
except ImportError:
    OPENAI_AVAILABLE = False


class MistralError(LLMError):
    """Mistral-specific errors."""
    pass


class MistralLanguageModel(LanguageModel):
    """
    Mistral AI language model implementation using OpenAI-compatible API.

    Supports all Mistral models with proper message conversion, tool calling,
    and streaming capabilities.
    """

    def __init__(
        self,
        llm_model: LanguageModelType,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        system_prompt: Optional[str] = None,
        **kwargs
    ):
        if not OPENAI_AVAILABLE:
            raise MistralError("OpenAI library not installed. Install with: pip install openai")

        super().__init__(llm_model, system_prompt, **kwargs)

        # Get API key
        self.api_key = api_key or os.getenv("MISTRAL_API_KEY")
        if not self.api_key:
            raise MistralError("Mistral API key required. Set MISTRAL_API_KEY or pass api_key parameter")

        # Set base URL for Mistral API
        self.base_url = base_url or "https://api.mistral.ai/v1"

        # Initialize client with Mistral endpoint
        self.client = openai.AsyncOpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )

        # Validate model is supported by Mistral
        if not any(self.model_name.startswith(prefix) for prefix in
                   ["mistral", "open-mistral", "open-mixtral", "codestral"]):
            raise MistralError(f"Model {self.model_name} is not a Mistral model")

    async def generate(
        self,
        messages: List[Message],
        tools: Optional[List[Dict[str, Any]]] = None,
        max_tokens: int = 1024,
        temperature: float = 0.7,
        top_p: float = 1.0,
        stream: bool = False,
        **kwargs
    ) -> Union[LanguageModelResponse, AsyncIterator[LanguageModelResponse]]:
        """Generate response using Mistral AI."""
        try:
            # Validate and prepare messages
            self.validate_messages(messages)
            prepared_messages = self.prepare_system_message(messages)

            # Convert to OpenAI format (Mistral API is compatible)
            openai_messages = self.convert_messages_to_provider_format(prepared_messages)

            # Prepare request parameters
            request_params = {
                "model": self.model_name,
                "messages": openai_messages,
                "max_tokens": max_tokens,
                "temperature": temperature,
                "top_p": top_p,
                "stream": stream,
            }

            if tools:
                request_params["tools"] = self.convert_tools_to_provider_format(tools)
                request_params["tool_choice"] = "auto"

            # Add any additional parameters
            request_params.update(kwargs)

            if stream:
                return self._generate_stream(**request_params)
            else:
                return await self._generate_single(**request_params)

        except openai.APIError as e:
            # Handle Mistral-specific API errors
            error_msg = str(e)
            if "authentication" in error_msg.lower():
                raise MistralError(f"Mistral API authentication error: {e}", provider="mistral", model=self.model_name) from e
            elif "rate_limit" in error_msg.lower() or "quota" in error_msg.lower():
                raise MistralError(f"Mistral API rate limit error: {e}", provider="mistral", model=self.model_name) from e
            else:
                raise MistralError(f"Mistral API error: {e}", provider="mistral", model=self.model_name) from e
        except Exception as e:
            raise MistralError(f"Unexpected error: {e}", provider="mistral", model=self.model_name) from e

    async def _generate_single(self, **request_params) -> LanguageModelResponse:
        """Generate a single response."""
        response = await self.client.chat.completions.create(**request_params)

        message = response.choices[0].message

        # Extract text content
        response_text = message.content or ""

        # Extract tool calls
        tool_calls = []
        if message.tool_calls:
            for tool_call in message.tool_calls:
                if tool_call.type == "function":
                    try:
                        arguments = json.loads(tool_call.function.arguments)
                    except json.JSONDecodeError:
                        arguments = {"raw_arguments": tool_call.function.arguments}

                    tool_calls.append(ToolCall(
                        id=tool_call.id,
                        name=tool_call.function.name,
                        arguments=arguments
                    ))

        # Calculate token usage
        usage = TokenUsage()
        if response.usage:
            usage = TokenUsage(
                prompt_tokens=response.usage.prompt_tokens,
                completion_tokens=response.usage.completion_tokens,
                total_tokens=response.usage.total_tokens
            )

        return LanguageModelResponse(
            message=response_text,
            usage=usage,
            tool_calls=tool_calls if tool_calls else None,
            model=response.model,
            finish_reason=response.choices[0].finish_reason,
            metadata={"response_id": response.id}
        )

    async def _generate_stream(self, **request_params) -> AsyncIterator[LanguageModelResponse]:
        """Generate streaming response."""
        stream = await self.client.chat.completions.create(**request_params)

        async for chunk in stream:
            if chunk.choices:
                choice = chunk.choices[0]

                # Handle content delta
                if choice.delta.content:
                    yield LanguageModelResponse(
                        message=choice.delta.content,
                        usage=TokenUsage(),
                        model=chunk.model
                    )

                # Handle tool calls
                if choice.delta.tool_calls:
                    for tool_call_delta in choice.delta.tool_calls:
                        if tool_call_delta.function:
                            # Note: In streaming, tool calls come in pieces.
                            # This is a simplified version - full implementation would
                            # need to accumulate the complete tool call.
                            yield LanguageModelResponse(
                                message="",
                                usage=TokenUsage(),
                                model=chunk.model,
                                metadata={"tool_call_delta": tool_call_delta}
                            )

    def convert_messages_to_provider_format(self, messages: List[Message]) -> List[Dict[str, Any]]:
        """Convert internal messages to OpenAI/Mistral format."""
        openai_messages = []

        for message in messages:
            # Convert role
            if message.role == Role.SYSTEM:
                role = "system"
            elif message.role == Role.USER:
                role = "user"
            elif message.role == Role.ASSISTANT:
                role = "assistant"
            elif message.role == Role.TOOL:
                role = "tool"
            else:
                continue  # Skip unsupported roles

            # Prepare content
            if isinstance(message.content, str):
                content = message.content
            elif isinstance(message.content, list):
                content = self._convert_content_blocks(message.content)
            else:
                content = str(message.content)

            openai_message = {
                "role": role,
                "content": content
            }

            # Add name if present
            if message.name:
                openai_message["name"] = message.name

            # Handle tool calls for assistant messages
            if message.tool_calls and message.role == Role.ASSISTANT:
                openai_message["tool_calls"] = [
                    {
                        "id": tool_call.id,
                        "type": "function",
                        "function": {
                            "name": tool_call.name,
                            "arguments": json.dumps(tool_call.arguments)
                        }
                    }
                    for tool_call in message.tool_calls
                ]

            # Handle tool call ID for tool messages
            if message.tool_call_id:
                openai_message["tool_call_id"] = message.tool_call_id

            openai_messages.append(openai_message)

        return openai_messages

    def _convert_content_blocks(self, content_blocks: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
        """Convert content blocks to OpenAI/Mistral format."""
        # For simple text-only blocks, return as string
        if len(content_blocks) == 1 and content_blocks[0].get("type") == "text":
            return content_blocks[0].get("text", str(content_blocks[0]))

        # For complex content, return as structured blocks
        converted_blocks = []

        for block in content_blocks:
            if isinstance(block, str):
                converted_blocks.append({"type": "text", "text": block})
            elif isinstance(block, dict):
                block_type = block.get("type", "text")

                if block_type == "text":
                    converted_blocks.append({
                        "type": "text",
                        "text": block.get("text", str(block))
                    })
                else:
                    # Convert unknown blocks to text (Mistral doesn't support images yet)
                    converted_blocks.append({
                        "type": "text",
                        "text": str(block)
                    })
            else:
                converted_blocks.append({"type": "text", "text": str(block)})

        return converted_blocks

    def convert_tools_to_provider_format(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Convert tools to OpenAI/Mistral format."""
        openai_tools = []

        for tool in tools:
            if "function" in tool:
                # Already in OpenAI format
                openai_tools.append(tool)
            else:
                # Convert from simple format
                openai_tool = {
                    "type": "function",
                    "function": {
                        "name": tool.get("name", "unknown"),
                        "description": tool.get("description", ""),
                        "parameters": tool.get("parameters", tool.get("input_schema", {}))
                    }
                }
                openai_tools.append(openai_tool)

        return openai_tools

    def extract_tool_calls_from_response(self, response: Any) -> List[ToolCall]:
        """Extract tool calls from Mistral response."""
        tool_calls = []

        if hasattr(response, "choices") and response.choices:
            message = response.choices[0].message
            if hasattr(message, "tool_calls") and message.tool_calls:
                for tool_call in message.tool_calls:
                    if tool_call.type == "function":
                        try:
                            arguments = json.loads(tool_call.function.arguments)
                        except json.JSONDecodeError:
                            arguments = {"raw_arguments": tool_call.function.arguments}

                        tool_calls.append(ToolCall(
                            id=tool_call.id,
                            name=tool_call.function.name,
                            arguments=arguments
                        ))

        return tool_calls
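
Below is a minimal usage sketch of the MistralLanguageModel added in this file. The Message, Role, and LanguageModelType types ship in agnt5/llm/base.py, which this diff does not show, so the specific constructor shapes and the LanguageModelType.MISTRAL_LARGE member are assumptions for illustration, not confirmed API.

import asyncio

from agnt5.llm.base import LanguageModelType, Message, Role
from agnt5.llm.mistral import MistralLanguageModel


async def main() -> None:
    # Hypothetical enum member; the real values live in agnt5/llm/base.py.
    model = MistralLanguageModel(
        llm_model=LanguageModelType.MISTRAL_LARGE,
        api_key="...",  # or set the MISTRAL_API_KEY environment variable
        system_prompt="You are a helpful assistant.",
    )

    # Non-streaming: generate() returns a single LanguageModelResponse.
    response = await model.generate(
        messages=[Message(role=Role.USER, content="Summarize RFC 2119 in one line.")],
        max_tokens=256,
    )
    print(response.message, response.usage)

    # Streaming: with stream=True, awaiting generate() yields the async
    # iterator produced by _generate_stream().
    async for chunk in await model.generate(
        messages=[Message(role=Role.USER, content="Count to five.")],
        stream=True,
    ):
        print(chunk.message, end="", flush=True)


asyncio.run(main())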
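
As the in-code note says, the streaming path forwards raw tool_call_delta objects instead of assembling them. A sketch of how a caller might accumulate those deltas into complete calls, assuming the OpenAI chunk schema this file already relies on (each delta carries an index, an id and function.name that arrive once, and function.arguments that arrives in JSON fragments):

import json
from typing import Any, Dict, List


def accumulate_tool_call_deltas(deltas: List[Any]) -> List[Dict[str, Any]]:
    """Merge OpenAI-style streamed tool-call deltas into complete calls."""
    calls: Dict[int, Dict[str, Any]] = {}
    for delta in deltas:
        # `index` identifies which tool call a fragment belongs to.
        slot = calls.setdefault(delta.index, {"id": None, "name": None, "arguments": ""})
        if delta.id:
            slot["id"] = delta.id
        if delta.function:
            if delta.function.name:
                slot["name"] = delta.function.name
            if delta.function.arguments:
                slot["arguments"] += delta.function.arguments
    # Parse the assembled argument strings once the stream is exhausted.
    return [
        {"id": c["id"], "name": c["name"], "arguments": json.loads(c["arguments"] or "{}")}
        for c in calls.values()
    ]

A caller would collect the chunk.metadata["tool_call_delta"] values yielded by the stream and pass them in after the final chunk arrives.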