agnt5-0.1.0-cp39-abi3-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agnt5/__init__.py +307 -0
- agnt5/__pycache__/__init__.cpython-311.pyc +0 -0
- agnt5/__pycache__/agent.cpython-311.pyc +0 -0
- agnt5/__pycache__/context.cpython-311.pyc +0 -0
- agnt5/__pycache__/durable.cpython-311.pyc +0 -0
- agnt5/__pycache__/extraction.cpython-311.pyc +0 -0
- agnt5/__pycache__/memory.cpython-311.pyc +0 -0
- agnt5/__pycache__/reflection.cpython-311.pyc +0 -0
- agnt5/__pycache__/runtime.cpython-311.pyc +0 -0
- agnt5/__pycache__/task.cpython-311.pyc +0 -0
- agnt5/__pycache__/tool.cpython-311.pyc +0 -0
- agnt5/__pycache__/tracing.cpython-311.pyc +0 -0
- agnt5/__pycache__/types.cpython-311.pyc +0 -0
- agnt5/__pycache__/workflow.cpython-311.pyc +0 -0
- agnt5/_core.abi3.so +0 -0
- agnt5/agent.py +1086 -0
- agnt5/context.py +406 -0
- agnt5/durable.py +1050 -0
- agnt5/extraction.py +410 -0
- agnt5/llm/__init__.py +179 -0
- agnt5/llm/__pycache__/__init__.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/anthropic.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/azure.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/base.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/google.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/mistral.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/openai.cpython-311.pyc +0 -0
- agnt5/llm/__pycache__/together.cpython-311.pyc +0 -0
- agnt5/llm/anthropic.py +319 -0
- agnt5/llm/azure.py +348 -0
- agnt5/llm/base.py +315 -0
- agnt5/llm/google.py +373 -0
- agnt5/llm/mistral.py +330 -0
- agnt5/llm/model_registry.py +467 -0
- agnt5/llm/models.json +227 -0
- agnt5/llm/openai.py +334 -0
- agnt5/llm/together.py +377 -0
- agnt5/memory.py +746 -0
- agnt5/reflection.py +514 -0
- agnt5/runtime.py +699 -0
- agnt5/task.py +476 -0
- agnt5/testing.py +451 -0
- agnt5/tool.py +516 -0
- agnt5/tracing.py +624 -0
- agnt5/types.py +210 -0
- agnt5/workflow.py +897 -0
- agnt5-0.1.0.dist-info/METADATA +93 -0
- agnt5-0.1.0.dist-info/RECORD +49 -0
- agnt5-0.1.0.dist-info/WHEEL +4 -0
agnt5/llm/together.py
ADDED
@@ -0,0 +1,377 @@
+"""
+Together AI integration for AGNT5 SDK.
+
+Provides integration with Together AI's models using OpenAI-compatible API
+for various open-source models including Llama, Mixtral, Qwen, and more.
+"""
+
+import json
+import os
+from typing import Any, AsyncIterator, Dict, List, Optional, Union
+
+from .base import (
+    LanguageModel,
+    LanguageModelResponse,
+    LanguageModelType,
+    LLMError,
+    Message,
+    Role,
+    TokenUsage,
+    ToolCall,
+)
+
+try:
+    import openai
+    OPENAI_AVAILABLE = True
+except ImportError:
+    OPENAI_AVAILABLE = False
+
+
+class TogetherAIError(LLMError):
+    """Together AI-specific errors."""
+    pass
+
+
+class TogetherAILanguageModel(LanguageModel):
+    """
+    Together AI language model implementation using OpenAI-compatible API.
+
+    Supports various open-source models hosted on Together AI including
+    Llama, Mixtral, Qwen, and other popular models.
+    """
+
+    def __init__(
+        self,
+        llm_model: LanguageModelType,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
+        system_prompt: Optional[str] = None,
+        **kwargs
+    ):
+        if not OPENAI_AVAILABLE:
+            raise TogetherAIError("OpenAI library not installed. Install with: pip install openai")
+
+        super().__init__(llm_model, system_prompt, **kwargs)
+
+        # Get API key
+        self.api_key = api_key or os.getenv("TOGETHER_API_KEY")
+        if not self.api_key:
+            raise TogetherAIError("Together AI API key required. Set TOGETHER_API_KEY or pass api_key parameter")
+
+        # Set base URL for Together AI API
+        self.base_url = base_url or "https://api.together.xyz/v1"
+
+        # Initialize client with Together AI endpoint
+        self.client = openai.AsyncOpenAI(
+            api_key=self.api_key,
+            base_url=self.base_url
+        )
+
+        # Validate model is supported by Together AI
+        if not (self.model_name.startswith("meta-llama") or
+                self.model_name.startswith("mistralai") or
+                self.model_name.startswith("Qwen") or
+                "/" in self.model_name):  # Together uses namespace/model format
+            raise TogetherAIError(f"Model {self.model_name} is not a recognized Together AI model")
+
+    @property
+    def provider_name(self) -> str:
+        return "together"
+
+    async def generate(
+        self,
+        messages: List[Message],
+        tools: Optional[List[Dict[str, Any]]] = None,
+        max_tokens: int = 1024,
+        temperature: float = 0.7,
+        top_p: float = 1.0,
+        stream: bool = False,
+        **kwargs
+    ) -> Union[LanguageModelResponse, AsyncIterator[LanguageModelResponse]]:
+        """Generate response using Together AI."""
+        try:
+            # Validate and prepare messages
+            self.validate_messages(messages)
+            prepared_messages = self.prepare_system_message(messages)
+
+            # Convert to OpenAI format (Together AI API is compatible)
+            openai_messages = self.convert_messages_to_provider_format(prepared_messages)
+
+            # Prepare request parameters
+            request_params = {
+                "model": self.model_name,
+                "messages": openai_messages,
+                "max_tokens": max_tokens,
+                "temperature": temperature,
+                "top_p": top_p,
+                "stream": stream,
+            }
+
+            # Together AI supports tools for some models
+            if tools and self._supports_tools():
+                request_params["tools"] = self.convert_tools_to_provider_format(tools)
+                request_params["tool_choice"] = "auto"
+
+            # Add any additional parameters
+            request_params.update(kwargs)
+
+            if stream:
+                return self._generate_stream(**request_params)
+            else:
+                return await self._generate_single(**request_params)
+
+        except openai.APIError as e:
+            # Handle Together AI-specific API errors
+            error_msg = str(e)
+            if "authentication" in error_msg.lower() or "api key" in error_msg.lower():
+                raise TogetherAIError(f"Together AI authentication error: {e}", provider="together", model=self.model_name) from e
+            elif "rate_limit" in error_msg.lower() or "quota" in error_msg.lower():
+                raise TogetherAIError(f"Together AI rate limit error: {e}", provider="together", model=self.model_name) from e
+            elif "model" in error_msg.lower() and "not found" in error_msg.lower():
+                raise TogetherAIError(f"Together AI model not found: {e}. Available models: https://docs.together.ai/docs/inference-models", provider="together", model=self.model_name) from e
+            else:
+                raise TogetherAIError(f"Together AI API error: {e}", provider="together", model=self.model_name) from e
+        except Exception as e:
+            raise TogetherAIError(f"Unexpected error: {e}", provider="together", model=self.model_name) from e
+
+    def _supports_tools(self) -> bool:
+        """Check if the current model supports tool calling."""
+        # Tool support varies by model on Together AI
+        # Generally, newer instruct models support tools
+        tool_supported_models = [
+            "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+            "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+            "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            "mistralai/Mixtral-8x22B-Instruct-v0.1",
+        ]
+
+        return (self.model_name in tool_supported_models or
+                "instruct" in self.model_name.lower())
+
+    async def _generate_single(self, **request_params) -> LanguageModelResponse:
+        """Generate a single response."""
+        response = await self.client.chat.completions.create(**request_params)
+
+        message = response.choices[0].message
+
+        # Extract text content
+        response_text = message.content or ""
+
+        # Extract tool calls (if supported)
+        tool_calls = []
+        if message.tool_calls:
+            for tool_call in message.tool_calls:
+                if tool_call.type == "function":
+                    try:
+                        arguments = json.loads(tool_call.function.arguments)
+                    except json.JSONDecodeError:
+                        arguments = {"raw_arguments": tool_call.function.arguments}
+
+                    tool_calls.append(ToolCall(
+                        id=tool_call.id,
+                        name=tool_call.function.name,
+                        arguments=arguments
+                    ))
+
+        # Calculate token usage
+        usage = TokenUsage()
+        if response.usage:
+            usage = TokenUsage(
+                prompt_tokens=response.usage.prompt_tokens,
+                completion_tokens=response.usage.completion_tokens,
+                total_tokens=response.usage.total_tokens
+            )
+
+        return LanguageModelResponse(
+            message=response_text,
+            usage=usage,
+            tool_calls=tool_calls if tool_calls else None,
+            model=response.model,
+            finish_reason=response.choices[0].finish_reason,
+            metadata={
+                "response_id": response.id,
+                "provider": "together",
+                "supports_tools": self._supports_tools()
+            }
+        )
+
+    async def _generate_stream(self, **request_params) -> AsyncIterator[LanguageModelResponse]:
+        """Generate streaming response."""
+        stream = await self.client.chat.completions.create(**request_params)
+
+        async for chunk in stream:
+            if chunk.choices:
+                choice = chunk.choices[0]
+
+                # Handle content delta
+                if choice.delta.content:
+                    yield LanguageModelResponse(
+                        message=choice.delta.content,
+                        usage=TokenUsage(),
+                        model=chunk.model
+                    )
+
+                # Handle tool calls (if supported)
+                if choice.delta.tool_calls:
+                    for tool_call_delta in choice.delta.tool_calls:
+                        if tool_call_delta.function:
+                            # Note: In streaming, tool calls come in pieces
+                            # This is a simplified version - full implementation would
+                            # need to accumulate the complete tool call
+                            yield LanguageModelResponse(
+                                message="",
+                                usage=TokenUsage(),
+                                model=chunk.model,
+                                metadata={"tool_call_delta": tool_call_delta}
+                            )
+
+    def convert_messages_to_provider_format(self, messages: List[Message]) -> List[Dict[str, Any]]:
+        """Convert internal messages to OpenAI/Together AI format."""
+        openai_messages = []
+
+        for message in messages:
+            # Convert role
+            if message.role == Role.SYSTEM:
+                role = "system"
+            elif message.role == Role.USER:
+                role = "user"
+            elif message.role == Role.ASSISTANT:
+                role = "assistant"
+            elif message.role == Role.TOOL:
+                role = "tool"
+            else:
+                continue  # Skip unsupported roles
+
+            # Prepare content
+            if isinstance(message.content, str):
+                content = message.content
+            elif isinstance(message.content, list):
+                content = self._convert_content_blocks(message.content)
+            else:
+                content = str(message.content)
+
+            openai_message = {
+                "role": role,
+                "content": content
+            }
+
+            # Add name if present
+            if message.name:
+                openai_message["name"] = message.name
+
+            # Handle tool calls for assistant messages (if supported)
+            if message.tool_calls and message.role == Role.ASSISTANT and self._supports_tools():
+                openai_message["tool_calls"] = [
+                    {
+                        "id": tool_call.id,
+                        "type": "function",
+                        "function": {
+                            "name": tool_call.name,
+                            "arguments": json.dumps(tool_call.arguments)
+                        }
+                    }
+                    for tool_call in message.tool_calls
+                ]
+
+            # Handle tool call ID for tool messages
+            if message.tool_call_id:
+                openai_message["tool_call_id"] = message.tool_call_id
+
+            openai_messages.append(openai_message)
+
+        return openai_messages
+
+    def _convert_content_blocks(self, content_blocks: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
+        """Convert content blocks to OpenAI/Together AI format."""
+        # For simple text-only blocks, return as string
+        if len(content_blocks) == 1 and content_blocks[0].get("type") == "text":
+            return content_blocks[0].get("text", str(content_blocks[0]))
+
+        # For complex content, return as structured blocks
+        # Note: Together AI primarily supports text, limited multimodal support
+        converted_blocks = []
+
+        for block in content_blocks:
+            if isinstance(block, str):
+                converted_blocks.append({"type": "text", "text": block})
+            elif isinstance(block, dict):
+                block_type = block.get("type", "text")
+
+                if block_type == "text":
+                    converted_blocks.append({
+                        "type": "text",
+                        "text": block.get("text", str(block))
+                    })
+                elif block_type == "image_url" and self._supports_vision():
+                    # Some Together AI models support vision
+                    converted_blocks.append({
+                        "type": "image_url",
+                        "image_url": block.get("image_url", {})
+                    })
+                else:
+                    # Convert unknown/unsupported blocks to text
+                    converted_blocks.append({
+                        "type": "text",
+                        "text": f"[{block_type.upper()}]: {str(block)}"
+                    })
+            else:
+                converted_blocks.append({"type": "text", "text": str(block)})
+
+        return converted_blocks
+
+    def _supports_vision(self) -> bool:
+        """Check if the current model supports vision/image input."""
+        vision_models = [
+            "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+        ]
+        return self.model_name in vision_models
+
+    def convert_tools_to_provider_format(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Convert tools to OpenAI/Together AI format."""
+        if not self._supports_tools():
+            return []
+
+        openai_tools = []
+
+        for tool in tools:
+            if "function" in tool:
+                # Already in OpenAI format
+                openai_tools.append(tool)
+            else:
+                # Convert from simple format
+                openai_tool = {
+                    "type": "function",
+                    "function": {
+                        "name": tool.get("name", "unknown"),
+                        "description": tool.get("description", ""),
+                        "parameters": tool.get("parameters", tool.get("input_schema", {}))
+                    }
+                }
+                openai_tools.append(openai_tool)
+
+        return openai_tools
+
+    def extract_tool_calls_from_response(self, response: Any) -> List[ToolCall]:
+        """Extract tool calls from Together AI response."""
+        if not self._supports_tools():
+            return []
+
+        tool_calls = []
+
+        if hasattr(response, "choices") and response.choices:
+            message = response.choices[0].message
+            if hasattr(message, "tool_calls") and message.tool_calls:
+                for tool_call in message.tool_calls:
+                    if tool_call.type == "function":
+                        try:
+                            arguments = json.loads(tool_call.function.arguments)
+                        except json.JSONDecodeError:
+                            arguments = {"raw_arguments": tool_call.function.arguments}
+
+                        tool_calls.append(ToolCall(
+                            id=tool_call.id,
+                            name=tool_call.function.name,
+                            arguments=arguments
+                        ))
+
+        return tool_calls
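
For orientation, a minimal usage sketch (not shipped in the wheel above): it assumes TOGETHER_API_KEY is set, that Message and Role from agnt5.llm.base construct as the conversion code implies, and that a plain "namespace/model" string is accepted where the signature expects a LanguageModelType (the constructor's validation accepts any model name containing "/").

# Hypothetical usage sketch -- illustration only, under the assumptions above.
import asyncio

from agnt5.llm.base import Message, Role
from agnt5.llm.together import TogetherAILanguageModel


async def main() -> None:
    # Assumption: a raw model string satisfies the llm_model parameter.
    model = TogetherAILanguageModel(
        llm_model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
    )

    # Non-streaming: generate() returns a single LanguageModelResponse.
    response = await model.generate(
        messages=[Message(role=Role.USER, content="Name three open-source LLMs.")],
        max_tokens=256,
        temperature=0.2,
    )
    print(response.message)
    print(response.usage)


asyncio.run(main())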
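
The comments in _generate_stream note that streamed tool calls arrive in pieces and are surfaced raw under metadata["tool_call_delta"]. A sketch of one way a caller might reassemble them, assuming the deltas are the OpenAI client's ChoiceDeltaToolCall objects (which carry an index, an id on the first fragment, and partial function.name / function.arguments strings) and that LanguageModelResponse.metadata defaults to None when unset:

# Hypothetical reassembly of streamed tool-call deltas; buffers are keyed by
# the delta's index, since id and name appear once while arguments arrive as
# string fragments that must be concatenated before JSON parsing.
import json


async def collect_stream(model, messages):
    pending = {}  # index -> {"id": ..., "name": ..., "arguments": str}
    stream = await model.generate(messages=messages, stream=True)
    async for part in stream:
        # Text deltas: print as they arrive.
        if part.message:
            print(part.message, end="", flush=True)
        delta = (part.metadata or {}).get("tool_call_delta")
        if delta is None:
            continue
        buf = pending.setdefault(
            delta.index, {"id": None, "name": None, "arguments": ""}
        )
        if delta.id:
            buf["id"] = delta.id
        if delta.function and delta.function.name:
            buf["name"] = delta.function.name
        if delta.function and delta.function.arguments:
            buf["arguments"] += delta.function.arguments

    # Arguments parse only once the final fragment has arrived.
    return [
        {"id": b["id"], "name": b["name"], "arguments": json.loads(b["arguments"])}
        for b in pending.values()
    ]

Keying the buffer on index rather than id matters because OpenAI-compatible servers typically send the id only on the first fragment of each tool call.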