agnt5 0.1.0__cp39-abi3-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. agnt5/__init__.py +307 -0
  2. agnt5/__pycache__/__init__.cpython-311.pyc +0 -0
  3. agnt5/__pycache__/agent.cpython-311.pyc +0 -0
  4. agnt5/__pycache__/context.cpython-311.pyc +0 -0
  5. agnt5/__pycache__/durable.cpython-311.pyc +0 -0
  6. agnt5/__pycache__/extraction.cpython-311.pyc +0 -0
  7. agnt5/__pycache__/memory.cpython-311.pyc +0 -0
  8. agnt5/__pycache__/reflection.cpython-311.pyc +0 -0
  9. agnt5/__pycache__/runtime.cpython-311.pyc +0 -0
  10. agnt5/__pycache__/task.cpython-311.pyc +0 -0
  11. agnt5/__pycache__/tool.cpython-311.pyc +0 -0
  12. agnt5/__pycache__/tracing.cpython-311.pyc +0 -0
  13. agnt5/__pycache__/types.cpython-311.pyc +0 -0
  14. agnt5/__pycache__/workflow.cpython-311.pyc +0 -0
  15. agnt5/_core.abi3.so +0 -0
  16. agnt5/agent.py +1086 -0
  17. agnt5/context.py +406 -0
  18. agnt5/durable.py +1050 -0
  19. agnt5/extraction.py +410 -0
  20. agnt5/llm/__init__.py +179 -0
  21. agnt5/llm/__pycache__/__init__.cpython-311.pyc +0 -0
  22. agnt5/llm/__pycache__/anthropic.cpython-311.pyc +0 -0
  23. agnt5/llm/__pycache__/azure.cpython-311.pyc +0 -0
  24. agnt5/llm/__pycache__/base.cpython-311.pyc +0 -0
  25. agnt5/llm/__pycache__/google.cpython-311.pyc +0 -0
  26. agnt5/llm/__pycache__/mistral.cpython-311.pyc +0 -0
  27. agnt5/llm/__pycache__/openai.cpython-311.pyc +0 -0
  28. agnt5/llm/__pycache__/together.cpython-311.pyc +0 -0
  29. agnt5/llm/anthropic.py +319 -0
  30. agnt5/llm/azure.py +348 -0
  31. agnt5/llm/base.py +315 -0
  32. agnt5/llm/google.py +373 -0
  33. agnt5/llm/mistral.py +330 -0
  34. agnt5/llm/model_registry.py +467 -0
  35. agnt5/llm/models.json +227 -0
  36. agnt5/llm/openai.py +334 -0
  37. agnt5/llm/together.py +377 -0
  38. agnt5/memory.py +746 -0
  39. agnt5/reflection.py +514 -0
  40. agnt5/runtime.py +699 -0
  41. agnt5/task.py +476 -0
  42. agnt5/testing.py +451 -0
  43. agnt5/tool.py +516 -0
  44. agnt5/tracing.py +624 -0
  45. agnt5/types.py +210 -0
  46. agnt5/workflow.py +897 -0
  47. agnt5-0.1.0.dist-info/METADATA +93 -0
  48. agnt5-0.1.0.dist-info/RECORD +49 -0
  49. agnt5-0.1.0.dist-info/WHEEL +4 -0
@@ -0,0 +1,467 @@
1
+ """
2
+ Dynamic model registry for LLM providers.
3
+
4
+ This module provides a future-proof way to handle model discovery and management
5
+ without hardcoding model names. It supports:
6
+ 1. Dynamic model discovery via API calls
7
+ 2. Configuration-based model definitions
8
+ 3. Capability-based model selection
9
+ 4. Automatic fallback and aliasing
10
+ """
11
+
12
+ import json
13
+ import os
14
+ import asyncio
15
+ from datetime import datetime, timedelta
16
+ from typing import Dict, List, Optional, Set, Any, Union
17
+ from dataclasses import dataclass, field
18
+ from enum import Enum
19
+ import logging
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
class ModelCapability(Enum):
    """Feature tags a model may advertise.

    Used by the registry for capability-based lookup and selection.
    """

    TEXT_GENERATION = "text_generation"
    TOOL_CALLING = "tool_calling"
    VISION = "vision"
    CODE_GENERATION = "code_generation"
    REASONING = "reasoning"
    CREATIVE_WRITING = "creative_writing"
    ANALYSIS = "analysis"
    STREAMING = "streaming"
    LONG_CONTEXT = "long_context"
    MULTILINGUAL = "multilingual"
    FAST_INFERENCE = "fast_inference"
    COST_EFFICIENT = "cost_efficient"
38
+
39
+
40
class ModelTier(Enum):
    """Coarse performance/cost buckets used to rank models."""

    FLAGSHIP = "flagship"          # best performance, highest cost
    PERFORMANCE = "performance"    # high performance, moderate cost
    BALANCED = "balanced"          # good performance, reasonable cost
    EFFICIENT = "efficient"        # fast inference, low cost
    EXPERIMENTAL = "experimental"  # beta / preview models
47
+
48
+
49
@dataclass
class ModelInfo:
    """Static description of one LLM: identity, tier, capabilities and limits."""

    name: str                     # provider-specific model identifier
    provider: str                 # e.g. "anthropic", "openai", "google"
    tier: ModelTier
    capabilities: Set[ModelCapability]
    context_length: Optional[int] = None          # max prompt tokens, if known
    max_output_tokens: Optional[int] = None
    cost_per_input_token: Optional[float] = None
    cost_per_output_token: Optional[float] = None
    supports_streaming: bool = True
    supports_tools: bool = False
    supports_vision: bool = False
    deprecated: bool = False                      # excluded by selection helpers
    aliases: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)
    # Naive UTC timestamp of when this entry was created/refreshed.
    last_updated: datetime = field(default_factory=datetime.utcnow)
67
+
68
+
69
class ModelRegistry:
    """
    Dynamic model registry with automatic discovery and configuration.

    This registry supports multiple strategies for model management:
    1. Static configuration from files
    2. Dynamic discovery via provider APIs
    3. Capability-based selection
    4. Automatic fallback handling
    """

    def __init__(self, config_path: Optional[str] = None, cache_ttl: int = 3600):
        """
        Args:
            config_path: Path to a JSON model-config file. Defaults to the
                ``models.json`` shipped alongside this module.
            cache_ttl: Seconds before ``refresh_models`` will re-query providers.
        """
        self.config_path = config_path or os.path.join(
            os.path.dirname(__file__), "models.json"
        )
        self.cache_ttl = cache_ttl  # Cache TTL in seconds
        self._models: Dict[str, ModelInfo] = {}   # canonical name -> ModelInfo
        self._aliases: Dict[str, str] = {}        # alias -> canonical name
        # NOTE(review): never populated anywhere in this module; reserved for
        # future capability-lookup caching.
        self._capabilities_cache: Dict[ModelCapability, List[str]] = {}
        self._last_refresh = None  # naive UTC datetime of last load/refresh

        # Load initial configuration
        self._load_static_config()

    def _load_static_config(self):
        """Load static model configuration from file.

        Falls back to the built-in defaults when the file is missing or
        cannot be parsed — a bad config must not break startup.
        """
        try:
            if os.path.exists(self.config_path):
                with open(self.config_path, 'r') as f:
                    config = json.load(f)
                self._parse_config(config)
            else:
                # Create default configuration
                self._create_default_config()
        except Exception as e:
            logger.warning(f"Failed to load model config: {e}")
            self._create_default_config()

    def _create_default_config(self):
        """Create default model configuration with capability-based aliases."""
        default_models = {
            # Anthropic models
            "anthropic": {
                "claude-3-5-sonnet-latest": {
                    "tier": "flagship",
                    "capabilities": ["text_generation", "tool_calling", "vision", "reasoning", "analysis"],
                    "aliases": ["claude-latest", "best-reasoning"],
                    "context_length": 200000,
                    "supports_tools": True,
                    "supports_vision": True
                },
                "claude-3-5-haiku-latest": {
                    "tier": "efficient",
                    "capabilities": ["text_generation", "tool_calling", "fast_inference"],
                    "aliases": ["claude-fast", "fastest"],
                    "context_length": 200000,
                    "supports_tools": True
                }
            },
            # OpenAI models
            "openai": {
                "gpt-4o": {
                    "tier": "flagship",
                    "capabilities": ["text_generation", "tool_calling", "vision", "reasoning", "analysis"],
                    "aliases": ["gpt-latest", "best-multimodal"],
                    "context_length": 128000,
                    "supports_tools": True,
                    "supports_vision": True
                },
                "gpt-4o-mini": {
                    "tier": "efficient",
                    "capabilities": ["text_generation", "tool_calling", "fast_inference", "cost_efficient"],
                    "aliases": ["gpt-fast", "most-affordable"],
                    "context_length": 128000,
                    "supports_tools": True
                }
            },
            # Google models
            "google": {
                "gemini-1.5-pro": {
                    "tier": "flagship",
                    "capabilities": ["text_generation", "tool_calling", "vision", "long_context"],
                    "aliases": ["gemini-latest"],
                    "context_length": 2000000,
                    "supports_tools": True,
                    "supports_vision": True
                },
                "gemini-1.5-flash": {
                    "tier": "efficient",
                    "capabilities": ["text_generation", "tool_calling", "fast_inference"],
                    "aliases": ["gemini-fast"],
                    "context_length": 1000000,
                    "supports_tools": True
                }
            },
            # Mistral models
            "mistral": {
                "mistral-large-latest": {
                    "tier": "flagship",
                    "capabilities": ["text_generation", "tool_calling", "reasoning", "multilingual"],
                    "aliases": ["mistral-latest"],
                    "supports_tools": True
                },
                "mistral-small-latest": {
                    "tier": "efficient",
                    "capabilities": ["text_generation", "fast_inference", "cost_efficient"],
                    "aliases": ["mistral-fast"]
                }
            },
            # Together AI models (dynamic patterns)
            "together": {
                "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": {
                    "tier": "performance",
                    "capabilities": ["text_generation", "tool_calling", "reasoning"],
                    "aliases": ["llama-latest"],
                    "supports_tools": True
                },
                "mistralai/Mixtral-8x7B-Instruct-v0.1": {
                    "tier": "balanced",
                    "capabilities": ["text_generation", "tool_calling", "multilingual"],
                    "aliases": ["mixtral-latest"],
                    "supports_tools": True
                }
            }
        }

        self._parse_config(default_models)

    def _parse_config(self, config: Dict[str, Any]):
        """Parse configuration and populate the model registry.

        Unknown capability or tier strings are logged and skipped/defaulted
        rather than aborting the load.
        """
        self._models.clear()
        self._aliases.clear()

        for provider, models in config.items():
            for model_name, model_config in models.items():
                # Convert capabilities from strings to enum, skipping unknowns
                capabilities = set()
                for cap_str in model_config.get("capabilities", []):
                    try:
                        capabilities.add(ModelCapability(cap_str))
                    except ValueError:
                        logger.warning(f"Unknown capability: {cap_str}")

                # Fix: an unknown tier previously raised ValueError mid-parse,
                # aborting the load with the registry already cleared. Default
                # to BALANCED instead, mirroring the capability handling above.
                tier_str = model_config.get("tier", "balanced")
                try:
                    tier = ModelTier(tier_str)
                except ValueError:
                    logger.warning(f"Unknown tier: {tier_str}")
                    tier = ModelTier.BALANCED

                # Create ModelInfo
                model_info = ModelInfo(
                    name=model_name,
                    provider=provider,
                    tier=tier,
                    capabilities=capabilities,
                    context_length=model_config.get("context_length"),
                    max_output_tokens=model_config.get("max_output_tokens"),
                    supports_streaming=model_config.get("supports_streaming", True),
                    supports_tools=model_config.get("supports_tools", False),
                    supports_vision=model_config.get("supports_vision", False),
                    deprecated=model_config.get("deprecated", False),
                    aliases=model_config.get("aliases", []),
                    metadata=model_config.get("metadata", {})
                )

                self._models[model_name] = model_info

                # Register aliases pointing back at the canonical name.
                # Later entries silently win on alias collisions.
                for alias in model_info.aliases:
                    self._aliases[alias] = model_name

        # NOTE(review): utcnow() is deprecated in 3.12, but ModelInfo.last_updated
        # is also naive UTC; switching one without the other would mix naive and
        # aware datetimes.
        self._last_refresh = datetime.utcnow()
        logger.info(f"Loaded {len(self._models)} models with {len(self._aliases)} aliases")

    async def refresh_models(self, force: bool = False):
        """Refresh model information from providers.

        Args:
            force: When False (default), skip the refresh if the last one
                happened less than ``cache_ttl`` seconds ago.
        """
        if not force and self._last_refresh:
            elapsed = datetime.utcnow() - self._last_refresh
            if elapsed.total_seconds() < self.cache_ttl:
                return

        logger.info("Refreshing model registry from providers...")

        # Only query providers whose API key is configured.
        tasks = []
        if self._should_refresh_provider("anthropic"):
            tasks.append(self._refresh_anthropic_models())
        if self._should_refresh_provider("openai"):
            tasks.append(self._refresh_openai_models())
        if self._should_refresh_provider("google"):
            tasks.append(self._refresh_google_models())

        if tasks:
            try:
                # return_exceptions=True: one failing provider must not
                # prevent the others from refreshing.
                await asyncio.gather(*tasks, return_exceptions=True)
                self._last_refresh = datetime.utcnow()
            except Exception as e:
                logger.warning(f"Failed to refresh some providers: {e}")

    def _should_refresh_provider(self, provider: str) -> bool:
        """Check if provider should be refreshed (has API key, etc.)."""
        api_key_vars = {
            "anthropic": "ANTHROPIC_API_KEY",
            "openai": "OPENAI_API_KEY",
            "google": "GOOGLE_API_KEY"
        }
        # Unknown providers map to "" which os.getenv resolves to None.
        return os.getenv(api_key_vars.get(provider, "")) is not None

    async def _refresh_anthropic_models(self):
        """Refresh Anthropic models (placeholder - would call actual API)."""
        # In a real implementation, this would call Anthropic's API
        # to get the latest model list and capabilities
        logger.debug("Refreshing Anthropic models...")

    async def _refresh_openai_models(self):
        """Refresh OpenAI models (placeholder - would call actual API)."""
        # In a real implementation, this would call OpenAI's API
        logger.debug("Refreshing OpenAI models...")

    async def _refresh_google_models(self):
        """Refresh Google models (placeholder - would call actual API)."""
        # In a real implementation, this would call Google's API
        logger.debug("Refreshing Google models...")

    def resolve_model(self, model_identifier: str) -> Optional[ModelInfo]:
        """
        Resolve a model identifier to actual model information.

        Args:
            model_identifier: Can be:
                - Exact model name (e.g., "gpt-4o")
                - Alias (e.g., "gpt-latest", "fastest")
                - Capability (e.g., "best-reasoning")

        Returns:
            ModelInfo if found, None otherwise
        """
        # Exact match first
        if model_identifier in self._models:
            return self._models[model_identifier]

        # Check aliases
        if model_identifier in self._aliases:
            actual_name = self._aliases[model_identifier]
            return self._models.get(actual_name)

        # Fuzzy matching for flexibility
        return self._fuzzy_match(model_identifier)

    def _fuzzy_match(self, identifier: str) -> Optional[ModelInfo]:
        """Attempt fuzzy matching for unknown identifiers.

        Maps well-known model-family substrings to the best model of the
        corresponding provider; returns None when no family matches.
        """
        identifier_lower = identifier.lower()

        # Provider-based matching
        if "claude" in identifier_lower:
            return self._find_best_model_for_provider("anthropic")
        elif "gpt" in identifier_lower:
            return self._find_best_model_for_provider("openai")
        elif "gemini" in identifier_lower:
            return self._find_best_model_for_provider("google")
        elif "mistral" in identifier_lower:
            return self._find_best_model_for_provider("mistral")
        elif "llama" in identifier_lower or "meta-llama" in identifier_lower:
            return self._find_best_model_for_provider("together")

        return None

    def _find_best_model_for_provider(self, provider: str) -> Optional[ModelInfo]:
        """Find the best non-deprecated model for a given provider."""
        provider_models = [m for m in self._models.values() if m.provider == provider and not m.deprecated]

        if not provider_models:
            return None

        # Prefer flagship tier, then performance, then balanced
        tier_order = [ModelTier.FLAGSHIP, ModelTier.PERFORMANCE, ModelTier.BALANCED, ModelTier.EFFICIENT]

        for tier in tier_order:
            tier_models = [m for m in provider_models if m.tier == tier]
            if tier_models:
                return tier_models[0]  # Return first match

        return provider_models[0]  # Fallback to any model (e.g. EXPERIMENTAL tier)

    def find_models_by_capability(self, capability: ModelCapability, provider: Optional[str] = None) -> List[ModelInfo]:
        """Find non-deprecated models that support a specific capability.

        Results are sorted best-tier first (flagship, performance, balanced,
        efficient, then anything else).
        """
        models = []
        for model in self._models.values():
            if capability in model.capabilities and not model.deprecated:
                if provider is None or model.provider == provider:
                    models.append(model)

        # Sort by tier (flagship first)
        tier_order = {ModelTier.FLAGSHIP: 0, ModelTier.PERFORMANCE: 1, ModelTier.BALANCED: 2, ModelTier.EFFICIENT: 3}
        models.sort(key=lambda m: tier_order.get(m.tier, 999))

        return models

    def get_fastest_model(self, provider: Optional[str] = None) -> Optional[ModelInfo]:
        """Get the fastest model available (FAST_INFERENCE capability)."""
        fast_models = self.find_models_by_capability(ModelCapability.FAST_INFERENCE, provider)
        return fast_models[0] if fast_models else None

    def get_most_capable_model(self, provider: Optional[str] = None) -> Optional[ModelInfo]:
        """Get the most capable model available.

        "Capable" is scored as 10 points per capability plus a tier bonus.
        """
        all_models = [m for m in self._models.values() if not m.deprecated]
        if provider:
            all_models = [m for m in all_models if m.provider == provider]

        if not all_models:
            return None

        # Score models by number of capabilities and tier
        def score_model(model: ModelInfo) -> int:
            tier_score = {ModelTier.FLAGSHIP: 100, ModelTier.PERFORMANCE: 80, ModelTier.BALANCED: 60, ModelTier.EFFICIENT: 40}
            return len(model.capabilities) * 10 + tier_score.get(model.tier, 0)

        return max(all_models, key=score_model)

    def get_cheapest_model(self, provider: Optional[str] = None) -> Optional[ModelInfo]:
        """Get the most cost-efficient model (COST_EFFICIENT capability)."""
        cheap_models = self.find_models_by_capability(ModelCapability.COST_EFFICIENT, provider)
        return cheap_models[0] if cheap_models else None

    def list_providers(self) -> List[str]:
        """Get list of available providers (unordered, no duplicates)."""
        return list(set(model.provider for model in self._models.values()))

    def list_models(self, provider: Optional[str] = None, include_deprecated: bool = False) -> List[ModelInfo]:
        """List all available models, optionally filtered by provider."""
        models = list(self._models.values())

        if provider:
            models = [m for m in models if m.provider == provider]

        if not include_deprecated:
            models = [m for m in models if not m.deprecated]

        return models

    def get_model_by_name(self, name: str) -> Optional[ModelInfo]:
        """Get model by exact name (no alias or fuzzy resolution)."""
        return self._models.get(name)

    def register_dynamic_model(self, name: str, provider: str, **kwargs) -> ModelInfo:
        """Register a new model dynamically.

        Generalized (backward compatible): ``tier`` may be a ModelTier or its
        string value, and ``capabilities`` may mix ModelCapability members
        with their string values — matching the static-config format.
        """
        tier = kwargs.get("tier", ModelTier.EXPERIMENTAL)
        if isinstance(tier, str):
            tier = ModelTier(tier)

        raw_capabilities = kwargs.get("capabilities", [ModelCapability.TEXT_GENERATION])
        capabilities = {
            cap if isinstance(cap, ModelCapability) else ModelCapability(cap)
            for cap in raw_capabilities
        }

        model_info = ModelInfo(
            name=name,
            provider=provider,
            tier=tier,
            capabilities=capabilities,
            **{k: v for k, v in kwargs.items() if k not in ["tier", "capabilities"]}
        )

        self._models[name] = model_info

        # Register aliases if provided
        for alias in model_info.aliases:
            self._aliases[alias] = name

        logger.info(f"Registered dynamic model: {name}")
        return model_info
425
+
426
+
427
# Lazily-created, process-wide registry singleton.
_registry = None


def get_model_registry() -> ModelRegistry:
    """Return the shared ModelRegistry, building it on first access."""
    global _registry
    registry = _registry
    if registry is None:
        registry = ModelRegistry()
        _registry = registry
    return registry
436
+
437
+
438
def resolve_model_name(identifier: str) -> str:
    """
    Resolve any model identifier (exact name, alias, or capability tag) to
    an actual model name.

    This is the main entry point for model resolution. Identifiers the
    registry cannot resolve are passed through unchanged so brand-new
    provider models keep working.
    """
    model_info = get_model_registry().resolve_model(identifier)

    if model_info is None:
        # Forward compatibility: let the provider decide whether it knows
        # this name.
        logger.warning(f"Unknown model identifier '{identifier}', using as-is")
        return identifier
    return model_info.name
454
+
455
+
456
def get_provider_for_model(model_name: str) -> Optional[str]:
    """Return the provider serving *model_name*, or None if unresolvable."""
    info = get_model_registry().resolve_model(model_name)
    if info is None:
        return None
    return info.provider
461
+
462
+
463
def supports_capability(model_name: str, capability: ModelCapability) -> bool:
    """Check if a model supports a specific capability.

    Fix: the original returned ``model_info and capability in ...``, which
    yields None (not False) when the model cannot be resolved, violating the
    declared ``-> bool`` contract and surprising identity checks
    (``result is False``). Now always returns a bool.
    """
    registry = get_model_registry()
    model_info = registry.resolve_model(model_name)
    return model_info is not None and capability in model_info.capabilities