unrealon 1.0.9__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unrealon/__init__.py +23 -21
- unrealon-1.1.1.dist-info/METADATA +722 -0
- unrealon-1.1.1.dist-info/RECORD +82 -0
- {unrealon-1.0.9.dist-info → unrealon-1.1.1.dist-info}/WHEEL +1 -1
- unrealon-1.1.1.dist-info/entry_points.txt +9 -0
- {unrealon-1.0.9.dist-info → unrealon-1.1.1.dist-info/licenses}/LICENSE +1 -1
- unrealon_bridge/__init__.py +114 -0
- unrealon_bridge/cli.py +316 -0
- unrealon_bridge/client/__init__.py +93 -0
- unrealon_bridge/client/base.py +78 -0
- unrealon_bridge/client/commands.py +89 -0
- unrealon_bridge/client/connection.py +90 -0
- unrealon_bridge/client/events.py +65 -0
- unrealon_bridge/client/health.py +38 -0
- unrealon_bridge/client/html_parser.py +146 -0
- unrealon_bridge/client/logging.py +139 -0
- unrealon_bridge/client/proxy.py +70 -0
- unrealon_bridge/client/scheduler.py +450 -0
- unrealon_bridge/client/session.py +70 -0
- unrealon_bridge/configs/__init__.py +14 -0
- unrealon_bridge/configs/bridge_config.py +212 -0
- unrealon_bridge/configs/bridge_config.yaml +39 -0
- unrealon_bridge/models/__init__.py +138 -0
- unrealon_bridge/models/base.py +28 -0
- unrealon_bridge/models/command.py +41 -0
- unrealon_bridge/models/events.py +40 -0
- unrealon_bridge/models/html_parser.py +79 -0
- unrealon_bridge/models/logging.py +55 -0
- unrealon_bridge/models/parser.py +63 -0
- unrealon_bridge/models/proxy.py +41 -0
- unrealon_bridge/models/requests.py +95 -0
- unrealon_bridge/models/responses.py +88 -0
- unrealon_bridge/models/scheduler.py +592 -0
- unrealon_bridge/models/session.py +28 -0
- unrealon_bridge/server/__init__.py +91 -0
- unrealon_bridge/server/base.py +171 -0
- unrealon_bridge/server/handlers/__init__.py +23 -0
- unrealon_bridge/server/handlers/command.py +110 -0
- unrealon_bridge/server/handlers/html_parser.py +139 -0
- unrealon_bridge/server/handlers/logging.py +95 -0
- unrealon_bridge/server/handlers/parser.py +95 -0
- unrealon_bridge/server/handlers/proxy.py +75 -0
- unrealon_bridge/server/handlers/scheduler.py +545 -0
- unrealon_bridge/server/handlers/session.py +66 -0
- unrealon_browser/__init__.py +61 -18
- unrealon_browser/{src/cli → cli}/browser_cli.py +6 -13
- unrealon_browser/{src/cli → cli}/cookies_cli.py +5 -1
- unrealon_browser/{src/core → core}/browser_manager.py +2 -2
- unrealon_browser/{src/managers → managers}/captcha.py +1 -1
- unrealon_browser/{src/managers → managers}/cookies.py +1 -1
- unrealon_browser/managers/logger_bridge.py +231 -0
- unrealon_browser/{src/managers → managers}/profile.py +1 -1
- unrealon_driver/__init__.py +73 -19
- unrealon_driver/browser/__init__.py +8 -0
- unrealon_driver/browser/config.py +74 -0
- unrealon_driver/browser/manager.py +416 -0
- unrealon_driver/exceptions.py +28 -0
- unrealon_driver/parser/__init__.py +55 -0
- unrealon_driver/parser/cli_manager.py +141 -0
- unrealon_driver/parser/daemon_manager.py +227 -0
- unrealon_driver/parser/managers/__init__.py +46 -0
- unrealon_driver/parser/managers/browser.py +51 -0
- unrealon_driver/parser/managers/config.py +281 -0
- unrealon_driver/parser/managers/error.py +412 -0
- unrealon_driver/parser/managers/html.py +732 -0
- unrealon_driver/parser/managers/logging.py +609 -0
- unrealon_driver/parser/managers/result.py +321 -0
- unrealon_driver/parser/parser_manager.py +628 -0
- unrealon/sdk_config.py +0 -88
- unrealon-1.0.9.dist-info/METADATA +0 -810
- unrealon-1.0.9.dist-info/RECORD +0 -246
- unrealon_browser/pyproject.toml +0 -182
- unrealon_browser/src/__init__.py +0 -62
- unrealon_browser/src/managers/logger_bridge.py +0 -395
- unrealon_driver/README.md +0 -204
- unrealon_driver/pyproject.toml +0 -187
- unrealon_driver/src/__init__.py +0 -90
- unrealon_driver/src/cli/__init__.py +0 -10
- unrealon_driver/src/cli/main.py +0 -66
- unrealon_driver/src/cli/simple.py +0 -510
- unrealon_driver/src/config/__init__.py +0 -11
- unrealon_driver/src/config/auto_config.py +0 -478
- unrealon_driver/src/core/__init__.py +0 -18
- unrealon_driver/src/core/exceptions.py +0 -289
- unrealon_driver/src/core/parser.py +0 -638
- unrealon_driver/src/dto/__init__.py +0 -66
- unrealon_driver/src/dto/cli.py +0 -119
- unrealon_driver/src/dto/config.py +0 -18
- unrealon_driver/src/dto/events.py +0 -237
- unrealon_driver/src/dto/execution.py +0 -313
- unrealon_driver/src/dto/services.py +0 -311
- unrealon_driver/src/execution/__init__.py +0 -23
- unrealon_driver/src/execution/daemon_mode.py +0 -317
- unrealon_driver/src/execution/interactive_mode.py +0 -88
- unrealon_driver/src/execution/modes.py +0 -45
- unrealon_driver/src/execution/scheduled_mode.py +0 -209
- unrealon_driver/src/execution/test_mode.py +0 -250
- unrealon_driver/src/logging/__init__.py +0 -24
- unrealon_driver/src/logging/driver_logger.py +0 -512
- unrealon_driver/src/services/__init__.py +0 -24
- unrealon_driver/src/services/browser_service.py +0 -726
- unrealon_driver/src/services/llm/__init__.py +0 -15
- unrealon_driver/src/services/llm/browser_llm_service.py +0 -363
- unrealon_driver/src/services/llm/llm.py +0 -195
- unrealon_driver/src/services/logger_service.py +0 -232
- unrealon_driver/src/services/metrics_service.py +0 -185
- unrealon_driver/src/services/scheduler_service.py +0 -489
- unrealon_driver/src/services/websocket_service.py +0 -362
- unrealon_driver/src/utils/__init__.py +0 -16
- unrealon_driver/src/utils/service_factory.py +0 -317
- unrealon_driver/src/utils/time_formatter.py +0 -338
- unrealon_llm/README.md +0 -44
- unrealon_llm/__init__.py +0 -26
- unrealon_llm/pyproject.toml +0 -154
- unrealon_llm/src/__init__.py +0 -228
- unrealon_llm/src/cli/__init__.py +0 -0
- unrealon_llm/src/core/__init__.py +0 -11
- unrealon_llm/src/core/smart_client.py +0 -438
- unrealon_llm/src/dto/__init__.py +0 -155
- unrealon_llm/src/dto/models/__init__.py +0 -0
- unrealon_llm/src/dto/models/config.py +0 -343
- unrealon_llm/src/dto/models/core.py +0 -328
- unrealon_llm/src/dto/models/enums.py +0 -123
- unrealon_llm/src/dto/models/html_analysis.py +0 -345
- unrealon_llm/src/dto/models/statistics.py +0 -473
- unrealon_llm/src/dto/models/translation.py +0 -383
- unrealon_llm/src/dto/models/type_conversion.py +0 -462
- unrealon_llm/src/dto/schemas/__init__.py +0 -0
- unrealon_llm/src/exceptions.py +0 -392
- unrealon_llm/src/llm_config/__init__.py +0 -20
- unrealon_llm/src/llm_config/logging_config.py +0 -178
- unrealon_llm/src/llm_logging/__init__.py +0 -42
- unrealon_llm/src/llm_logging/llm_events.py +0 -107
- unrealon_llm/src/llm_logging/llm_logger.py +0 -466
- unrealon_llm/src/managers/__init__.py +0 -15
- unrealon_llm/src/managers/cache_manager.py +0 -67
- unrealon_llm/src/managers/cost_manager.py +0 -107
- unrealon_llm/src/managers/request_manager.py +0 -298
- unrealon_llm/src/modules/__init__.py +0 -0
- unrealon_llm/src/modules/html_processor/__init__.py +0 -25
- unrealon_llm/src/modules/html_processor/base_processor.py +0 -415
- unrealon_llm/src/modules/html_processor/details_processor.py +0 -85
- unrealon_llm/src/modules/html_processor/listing_processor.py +0 -91
- unrealon_llm/src/modules/html_processor/models/__init__.py +0 -20
- unrealon_llm/src/modules/html_processor/models/processing_models.py +0 -40
- unrealon_llm/src/modules/html_processor/models/universal_model.py +0 -56
- unrealon_llm/src/modules/html_processor/processor.py +0 -102
- unrealon_llm/src/modules/llm/__init__.py +0 -0
- unrealon_llm/src/modules/translator/__init__.py +0 -0
- unrealon_llm/src/provider.py +0 -116
- unrealon_llm/src/utils/__init__.py +0 -95
- unrealon_llm/src/utils/common.py +0 -64
- unrealon_llm/src/utils/data_extractor.py +0 -188
- unrealon_llm/src/utils/html_cleaner.py +0 -767
- unrealon_llm/src/utils/language_detector.py +0 -308
- unrealon_llm/src/utils/models_cache.py +0 -592
- unrealon_llm/src/utils/smart_counter.py +0 -229
- unrealon_llm/src/utils/token_counter.py +0 -189
- unrealon_sdk/README.md +0 -25
- unrealon_sdk/__init__.py +0 -30
- unrealon_sdk/pyproject.toml +0 -231
- unrealon_sdk/src/__init__.py +0 -150
- unrealon_sdk/src/cli/__init__.py +0 -12
- unrealon_sdk/src/cli/commands/__init__.py +0 -22
- unrealon_sdk/src/cli/commands/benchmark.py +0 -42
- unrealon_sdk/src/cli/commands/diagnostics.py +0 -573
- unrealon_sdk/src/cli/commands/health.py +0 -46
- unrealon_sdk/src/cli/commands/integration.py +0 -498
- unrealon_sdk/src/cli/commands/reports.py +0 -43
- unrealon_sdk/src/cli/commands/security.py +0 -36
- unrealon_sdk/src/cli/commands/server.py +0 -483
- unrealon_sdk/src/cli/commands/servers.py +0 -56
- unrealon_sdk/src/cli/commands/tests.py +0 -55
- unrealon_sdk/src/cli/main.py +0 -126
- unrealon_sdk/src/cli/utils/reporter.py +0 -519
- unrealon_sdk/src/clients/openapi.yaml +0 -3347
- unrealon_sdk/src/clients/python_http/__init__.py +0 -3
- unrealon_sdk/src/clients/python_http/api_config.py +0 -228
- unrealon_sdk/src/clients/python_http/models/BaseModel.py +0 -12
- unrealon_sdk/src/clients/python_http/models/BroadcastDeliveryStats.py +0 -33
- unrealon_sdk/src/clients/python_http/models/BroadcastMessage.py +0 -17
- unrealon_sdk/src/clients/python_http/models/BroadcastMessageRequest.py +0 -35
- unrealon_sdk/src/clients/python_http/models/BroadcastPriority.py +0 -10
- unrealon_sdk/src/clients/python_http/models/BroadcastResponse.py +0 -21
- unrealon_sdk/src/clients/python_http/models/BroadcastResultResponse.py +0 -33
- unrealon_sdk/src/clients/python_http/models/BroadcastTarget.py +0 -11
- unrealon_sdk/src/clients/python_http/models/ConnectionStats.py +0 -27
- unrealon_sdk/src/clients/python_http/models/ConnectionsResponse.py +0 -21
- unrealon_sdk/src/clients/python_http/models/DeveloperMessageResponse.py +0 -23
- unrealon_sdk/src/clients/python_http/models/ErrorResponse.py +0 -25
- unrealon_sdk/src/clients/python_http/models/HTTPValidationError.py +0 -16
- unrealon_sdk/src/clients/python_http/models/HealthResponse.py +0 -23
- unrealon_sdk/src/clients/python_http/models/HealthStatus.py +0 -33
- unrealon_sdk/src/clients/python_http/models/LogLevel.py +0 -10
- unrealon_sdk/src/clients/python_http/models/LoggingRequest.py +0 -27
- unrealon_sdk/src/clients/python_http/models/LoggingResponse.py +0 -23
- unrealon_sdk/src/clients/python_http/models/MaintenanceMode.py +0 -9
- unrealon_sdk/src/clients/python_http/models/MaintenanceModeRequest.py +0 -33
- unrealon_sdk/src/clients/python_http/models/MaintenanceStatusResponse.py +0 -39
- unrealon_sdk/src/clients/python_http/models/ParserCommandRequest.py +0 -25
- unrealon_sdk/src/clients/python_http/models/ParserMessageResponse.py +0 -21
- unrealon_sdk/src/clients/python_http/models/ParserRegistrationRequest.py +0 -28
- unrealon_sdk/src/clients/python_http/models/ParserRegistrationResponse.py +0 -25
- unrealon_sdk/src/clients/python_http/models/ParserType.py +0 -10
- unrealon_sdk/src/clients/python_http/models/ProxyBlockRequest.py +0 -19
- unrealon_sdk/src/clients/python_http/models/ProxyEndpointResponse.py +0 -20
- unrealon_sdk/src/clients/python_http/models/ProxyListResponse.py +0 -19
- unrealon_sdk/src/clients/python_http/models/ProxyProvider.py +0 -10
- unrealon_sdk/src/clients/python_http/models/ProxyPurchaseRequest.py +0 -25
- unrealon_sdk/src/clients/python_http/models/ProxyResponse.py +0 -47
- unrealon_sdk/src/clients/python_http/models/ProxyRotationRequest.py +0 -23
- unrealon_sdk/src/clients/python_http/models/ProxyStatus.py +0 -10
- unrealon_sdk/src/clients/python_http/models/ProxyUsageRequest.py +0 -19
- unrealon_sdk/src/clients/python_http/models/ProxyUsageStatsResponse.py +0 -26
- unrealon_sdk/src/clients/python_http/models/ServiceRegistrationDto.py +0 -23
- unrealon_sdk/src/clients/python_http/models/ServiceStatsResponse.py +0 -31
- unrealon_sdk/src/clients/python_http/models/SessionStartRequest.py +0 -23
- unrealon_sdk/src/clients/python_http/models/SuccessResponse.py +0 -25
- unrealon_sdk/src/clients/python_http/models/SystemNotificationResponse.py +0 -23
- unrealon_sdk/src/clients/python_http/models/ValidationError.py +0 -18
- unrealon_sdk/src/clients/python_http/models/ValidationErrorResponse.py +0 -21
- unrealon_sdk/src/clients/python_http/models/WebSocketMetrics.py +0 -21
- unrealon_sdk/src/clients/python_http/models/__init__.py +0 -44
- unrealon_sdk/src/clients/python_http/services/None_service.py +0 -35
- unrealon_sdk/src/clients/python_http/services/ParserManagement_service.py +0 -190
- unrealon_sdk/src/clients/python_http/services/ProxyManagement_service.py +0 -289
- unrealon_sdk/src/clients/python_http/services/SocketLogging_service.py +0 -187
- unrealon_sdk/src/clients/python_http/services/SystemHealth_service.py +0 -119
- unrealon_sdk/src/clients/python_http/services/WebSocketAPI_service.py +0 -198
- unrealon_sdk/src/clients/python_http/services/__init__.py +0 -0
- unrealon_sdk/src/clients/python_http/services/admin_service.py +0 -125
- unrealon_sdk/src/clients/python_http/services/async_None_service.py +0 -35
- unrealon_sdk/src/clients/python_http/services/async_ParserManagement_service.py +0 -190
- unrealon_sdk/src/clients/python_http/services/async_ProxyManagement_service.py +0 -289
- unrealon_sdk/src/clients/python_http/services/async_SocketLogging_service.py +0 -189
- unrealon_sdk/src/clients/python_http/services/async_SystemHealth_service.py +0 -123
- unrealon_sdk/src/clients/python_http/services/async_WebSocketAPI_service.py +0 -200
- unrealon_sdk/src/clients/python_http/services/async_admin_service.py +0 -125
- unrealon_sdk/src/clients/python_websocket/__init__.py +0 -28
- unrealon_sdk/src/clients/python_websocket/client.py +0 -490
- unrealon_sdk/src/clients/python_websocket/events.py +0 -732
- unrealon_sdk/src/clients/python_websocket/example.py +0 -136
- unrealon_sdk/src/clients/python_websocket/types.py +0 -871
- unrealon_sdk/src/core/__init__.py +0 -64
- unrealon_sdk/src/core/client.py +0 -556
- unrealon_sdk/src/core/config.py +0 -465
- unrealon_sdk/src/core/exceptions.py +0 -239
- unrealon_sdk/src/core/metadata.py +0 -191
- unrealon_sdk/src/core/models.py +0 -142
- unrealon_sdk/src/core/types.py +0 -68
- unrealon_sdk/src/dto/__init__.py +0 -268
- unrealon_sdk/src/dto/authentication.py +0 -108
- unrealon_sdk/src/dto/cache.py +0 -208
- unrealon_sdk/src/dto/common.py +0 -19
- unrealon_sdk/src/dto/concurrency.py +0 -393
- unrealon_sdk/src/dto/events.py +0 -108
- unrealon_sdk/src/dto/health.py +0 -339
- unrealon_sdk/src/dto/load_balancing.py +0 -336
- unrealon_sdk/src/dto/logging.py +0 -230
- unrealon_sdk/src/dto/performance.py +0 -165
- unrealon_sdk/src/dto/rate_limiting.py +0 -295
- unrealon_sdk/src/dto/resource_pooling.py +0 -128
- unrealon_sdk/src/dto/structured_logging.py +0 -112
- unrealon_sdk/src/dto/task_scheduling.py +0 -121
- unrealon_sdk/src/dto/websocket.py +0 -55
- unrealon_sdk/src/enterprise/__init__.py +0 -59
- unrealon_sdk/src/enterprise/authentication.py +0 -401
- unrealon_sdk/src/enterprise/cache_manager.py +0 -578
- unrealon_sdk/src/enterprise/error_recovery.py +0 -494
- unrealon_sdk/src/enterprise/event_system.py +0 -549
- unrealon_sdk/src/enterprise/health_monitor.py +0 -747
- unrealon_sdk/src/enterprise/load_balancer.py +0 -964
- unrealon_sdk/src/enterprise/logging/__init__.py +0 -68
- unrealon_sdk/src/enterprise/logging/cleanup.py +0 -156
- unrealon_sdk/src/enterprise/logging/development.py +0 -744
- unrealon_sdk/src/enterprise/logging/service.py +0 -410
- unrealon_sdk/src/enterprise/multithreading_manager.py +0 -853
- unrealon_sdk/src/enterprise/performance_monitor.py +0 -539
- unrealon_sdk/src/enterprise/proxy_manager.py +0 -696
- unrealon_sdk/src/enterprise/rate_limiter.py +0 -652
- unrealon_sdk/src/enterprise/resource_pool.py +0 -763
- unrealon_sdk/src/enterprise/task_scheduler.py +0 -709
- unrealon_sdk/src/internal/__init__.py +0 -10
- unrealon_sdk/src/internal/command_router.py +0 -497
- unrealon_sdk/src/internal/connection_manager.py +0 -397
- unrealon_sdk/src/internal/http_client.py +0 -446
- unrealon_sdk/src/internal/websocket_client.py +0 -420
- unrealon_sdk/src/provider.py +0 -471
- unrealon_sdk/src/utils.py +0 -234
- /unrealon_browser/{src/cli → cli}/__init__.py +0 -0
- /unrealon_browser/{src/cli → cli}/interactive_mode.py +0 -0
- /unrealon_browser/{src/cli → cli}/main.py +0 -0
- /unrealon_browser/{src/core → core}/__init__.py +0 -0
- /unrealon_browser/{src/dto → dto}/__init__.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/config.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/core.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/dataclasses.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/detection.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/enums.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/statistics.py +0 -0
- /unrealon_browser/{src/managers → managers}/__init__.py +0 -0
- /unrealon_browser/{src/managers → managers}/stealth.py +0 -0
|
@@ -1,592 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Models Cache Utilities
|
|
3
|
-
|
|
4
|
-
Automatically fetch and cache LLM model information with pricing from providers.
|
|
5
|
-
Eliminates hardcoding and provides up-to-date model data and pricing.
|
|
6
|
-
"""
|
|
7
|
-
|
|
8
|
-
import asyncio
|
|
9
|
-
import json
|
|
10
|
-
import logging
|
|
11
|
-
import os
|
|
12
|
-
from datetime import datetime, timedelta
|
|
13
|
-
from pathlib import Path
|
|
14
|
-
from typing import Any, Dict, List, Optional
|
|
15
|
-
|
|
16
|
-
import aiohttp
|
|
17
|
-
from cachetools import TTLCache
|
|
18
|
-
|
|
19
|
-
from unrealon_llm.src.dto import LLMProvider
|
|
20
|
-
from unrealon_llm.src.exceptions import APIError, CacheError, ConfigurationError
|
|
21
|
-
|
|
22
|
-
logger = logging.getLogger(__name__)
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
class ModelInfo:
|
|
26
|
-
"""Model information with pricing and capabilities"""
|
|
27
|
-
|
|
28
|
-
def __init__(
|
|
29
|
-
self,
|
|
30
|
-
id: str,
|
|
31
|
-
name: str,
|
|
32
|
-
provider: str,
|
|
33
|
-
context_length: int = 0,
|
|
34
|
-
prompt_price: float = 0.0,
|
|
35
|
-
completion_price: float = 0.0,
|
|
36
|
-
description: Optional[str] = None,
|
|
37
|
-
tags: Optional[List[str]] = None,
|
|
38
|
-
is_available: bool = True,
|
|
39
|
-
currency: str = "USD",
|
|
40
|
-
):
|
|
41
|
-
self.id = id
|
|
42
|
-
self.name = name
|
|
43
|
-
self.provider = provider
|
|
44
|
-
self.context_length = context_length
|
|
45
|
-
self.prompt_price = prompt_price # Price per 1M tokens
|
|
46
|
-
self.completion_price = completion_price # Price per 1M tokens
|
|
47
|
-
self.description = description or ""
|
|
48
|
-
self.tags = tags or []
|
|
49
|
-
self.is_available = is_available
|
|
50
|
-
self.currency = currency
|
|
51
|
-
|
|
52
|
-
def estimate_cost(self, input_tokens: int, output_tokens: int) -> float:
|
|
53
|
-
"""Estimate cost for given token usage"""
|
|
54
|
-
input_cost = (input_tokens / 1_000_000) * self.prompt_price
|
|
55
|
-
output_cost = (output_tokens / 1_000_000) * self.completion_price
|
|
56
|
-
return input_cost + output_cost
|
|
57
|
-
|
|
58
|
-
def to_dict(self) -> Dict[str, Any]:
|
|
59
|
-
"""Convert to dictionary representation"""
|
|
60
|
-
return {
|
|
61
|
-
"id": self.id,
|
|
62
|
-
"name": self.name,
|
|
63
|
-
"provider": self.provider,
|
|
64
|
-
"context_length": self.context_length,
|
|
65
|
-
"prompt_price": self.prompt_price,
|
|
66
|
-
"completion_price": self.completion_price,
|
|
67
|
-
"description": self.description,
|
|
68
|
-
"tags": self.tags,
|
|
69
|
-
"is_available": self.is_available,
|
|
70
|
-
"currency": self.currency,
|
|
71
|
-
}
|
|
72
|
-
|
|
73
|
-
@classmethod
|
|
74
|
-
def from_dict(cls, data: Dict[str, Any]) -> "ModelInfo":
|
|
75
|
-
"""Create ModelInfo from dictionary"""
|
|
76
|
-
return cls(
|
|
77
|
-
id=data.get("id", ""),
|
|
78
|
-
name=data.get("name", ""),
|
|
79
|
-
provider=data.get("provider", ""),
|
|
80
|
-
context_length=data.get("context_length", 0),
|
|
81
|
-
prompt_price=data.get("prompt_price", 0.0),
|
|
82
|
-
completion_price=data.get("completion_price", 0.0),
|
|
83
|
-
description=data.get("description"),
|
|
84
|
-
tags=data.get("tags", []),
|
|
85
|
-
is_available=data.get("is_available", True),
|
|
86
|
-
currency=data.get("currency", "USD"),
|
|
87
|
-
)
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
class ModelsCache:
|
|
91
|
-
"""
|
|
92
|
-
Automatic models cache with provider integration
|
|
93
|
-
Fetches current model data and pricing from APIs
|
|
94
|
-
"""
|
|
95
|
-
|
|
96
|
-
def __init__(
|
|
97
|
-
self,
|
|
98
|
-
openrouter_api_key: Optional[str] = None,
|
|
99
|
-
openai_api_key: Optional[str] = None,
|
|
100
|
-
anthropic_api_key: Optional[str] = None,
|
|
101
|
-
cache_ttl_hours: int = 24, # Cache for 24 hours
|
|
102
|
-
max_cache_size: int = 1000,
|
|
103
|
-
):
|
|
104
|
-
"""
|
|
105
|
-
Initialize models cache with provider API keys
|
|
106
|
-
|
|
107
|
-
Args:
|
|
108
|
-
openrouter_api_key: OpenRouter API key
|
|
109
|
-
openai_api_key: OpenAI API key
|
|
110
|
-
anthropic_api_key: Anthropic API key
|
|
111
|
-
cache_ttl_hours: Cache TTL in hours
|
|
112
|
-
max_cache_size: Maximum cache entries
|
|
113
|
-
"""
|
|
114
|
-
self.openrouter_api_key = openrouter_api_key
|
|
115
|
-
self.openai_api_key = openai_api_key
|
|
116
|
-
self.anthropic_api_key = anthropic_api_key
|
|
117
|
-
|
|
118
|
-
# Initialize cache
|
|
119
|
-
cache_ttl = cache_ttl_hours * 3600 # Convert to seconds
|
|
120
|
-
self.cache = TTLCache(maxsize=max_cache_size, ttl=cache_ttl)
|
|
121
|
-
|
|
122
|
-
# Models storage
|
|
123
|
-
self.models: Dict[str, ModelInfo] = {}
|
|
124
|
-
self.last_fetch_time: Optional[datetime] = None
|
|
125
|
-
|
|
126
|
-
# Cache keys
|
|
127
|
-
self.MODELS_CACHE_KEY = "all_models"
|
|
128
|
-
self.OPENROUTER_CACHE_KEY = "openrouter_models"
|
|
129
|
-
self.OPENAI_CACHE_KEY = "openai_models"
|
|
130
|
-
self.ANTHROPIC_CACHE_KEY = "anthropic_models"
|
|
131
|
-
|
|
132
|
-
# Cache settings
|
|
133
|
-
self.cache_ttl_hours = cache_ttl_hours
|
|
134
|
-
|
|
135
|
-
# Provider endpoints
|
|
136
|
-
self.provider_endpoints = {
|
|
137
|
-
"openrouter": "https://openrouter.ai/api/v1/models",
|
|
138
|
-
"openai": "https://api.openai.com/v1/models",
|
|
139
|
-
"anthropic": "https://api.anthropic.com/v1/models",
|
|
140
|
-
}
|
|
141
|
-
|
|
142
|
-
async def fetch_all_models(
|
|
143
|
-
self, force_refresh: bool = False
|
|
144
|
-
) -> Dict[str, ModelInfo]:
|
|
145
|
-
"""
|
|
146
|
-
Fetch models from all available providers
|
|
147
|
-
|
|
148
|
-
Args:
|
|
149
|
-
force_refresh: Force refresh even if cache is valid
|
|
150
|
-
|
|
151
|
-
Returns:
|
|
152
|
-
Dictionary of model_id -> ModelInfo
|
|
153
|
-
"""
|
|
154
|
-
# Quick check - if models already loaded, return them
|
|
155
|
-
if not force_refresh and self.models:
|
|
156
|
-
logger.info(f"🔥 Using already loaded {len(self.models)} models from memory cache")
|
|
157
|
-
return self.models
|
|
158
|
-
|
|
159
|
-
# Check cache first
|
|
160
|
-
if not force_refresh and self.MODELS_CACHE_KEY in self.cache:
|
|
161
|
-
logger.info("💾 Using models from TTL cache")
|
|
162
|
-
cached_data = self.cache[self.MODELS_CACHE_KEY]
|
|
163
|
-
self.models = {
|
|
164
|
-
k: ModelInfo.from_dict(v) for k, v in cached_data["models"].items()
|
|
165
|
-
}
|
|
166
|
-
self.last_fetch_time = datetime.fromisoformat(cached_data["fetch_time"])
|
|
167
|
-
return self.models
|
|
168
|
-
|
|
169
|
-
logger.info("🌐 Fetching fresh models from all providers (no cache)")
|
|
170
|
-
|
|
171
|
-
# Fetch from all available providers
|
|
172
|
-
all_models = {}
|
|
173
|
-
|
|
174
|
-
# Fetch OpenRouter models (priority provider)
|
|
175
|
-
if self.openrouter_api_key:
|
|
176
|
-
try:
|
|
177
|
-
openrouter_models = await self._fetch_openrouter_models()
|
|
178
|
-
all_models.update(openrouter_models)
|
|
179
|
-
logger.info(f"Fetched {len(openrouter_models)} models from OpenRouter")
|
|
180
|
-
except Exception as e:
|
|
181
|
-
logger.warning(f"Failed to fetch OpenRouter models: {e}")
|
|
182
|
-
|
|
183
|
-
# Fetch OpenAI models
|
|
184
|
-
if self.openai_api_key:
|
|
185
|
-
try:
|
|
186
|
-
openai_models = await self._fetch_openai_models()
|
|
187
|
-
all_models.update(openai_models)
|
|
188
|
-
logger.info(f"Fetched {len(openai_models)} models from OpenAI")
|
|
189
|
-
except Exception as e:
|
|
190
|
-
logger.warning(f"Failed to fetch OpenAI models: {e}")
|
|
191
|
-
|
|
192
|
-
# Fetch Anthropic models
|
|
193
|
-
if self.anthropic_api_key:
|
|
194
|
-
try:
|
|
195
|
-
anthropic_models = await self._fetch_anthropic_models()
|
|
196
|
-
all_models.update(anthropic_models)
|
|
197
|
-
logger.info(f"Fetched {len(anthropic_models)} models from Anthropic")
|
|
198
|
-
except Exception as e:
|
|
199
|
-
logger.warning(f"Failed to fetch Anthropic models: {e}")
|
|
200
|
-
|
|
201
|
-
# Add fallback models if no providers available
|
|
202
|
-
if not all_models:
|
|
203
|
-
logger.warning("No models fetched from providers, using fallback models")
|
|
204
|
-
all_models = self._get_fallback_models()
|
|
205
|
-
|
|
206
|
-
# Update cache
|
|
207
|
-
self.models = all_models
|
|
208
|
-
self.last_fetch_time = datetime.now()
|
|
209
|
-
|
|
210
|
-
cache_data = {
|
|
211
|
-
"models": {k: v.to_dict() for k, v in all_models.items()},
|
|
212
|
-
"fetch_time": self.last_fetch_time.isoformat(),
|
|
213
|
-
}
|
|
214
|
-
self.cache[self.MODELS_CACHE_KEY] = cache_data
|
|
215
|
-
|
|
216
|
-
logger.info(f"Total models cached: {len(all_models)}")
|
|
217
|
-
return self.models
|
|
218
|
-
|
|
219
|
-
async def _fetch_openrouter_models(self) -> Dict[str, ModelInfo]:
|
|
220
|
-
"""Fetch models from OpenRouter API"""
|
|
221
|
-
if not self.openrouter_api_key:
|
|
222
|
-
return {}
|
|
223
|
-
|
|
224
|
-
async with aiohttp.ClientSession() as session:
|
|
225
|
-
try:
|
|
226
|
-
async with session.get(
|
|
227
|
-
self.provider_endpoints["openrouter"],
|
|
228
|
-
headers={
|
|
229
|
-
"Authorization": f"Bearer {self.openrouter_api_key}",
|
|
230
|
-
"Content-Type": "application/json",
|
|
231
|
-
},
|
|
232
|
-
timeout=aiohttp.ClientTimeout(total=30),
|
|
233
|
-
) as response:
|
|
234
|
-
response.raise_for_status()
|
|
235
|
-
data = await response.json()
|
|
236
|
-
|
|
237
|
-
models = {}
|
|
238
|
-
for model_data in data.get("data", []):
|
|
239
|
-
model_info = self._parse_openrouter_model(model_data)
|
|
240
|
-
if model_info:
|
|
241
|
-
models[model_info.id] = model_info
|
|
242
|
-
|
|
243
|
-
return models
|
|
244
|
-
|
|
245
|
-
except aiohttp.ClientError as e:
|
|
246
|
-
raise APIError(f"OpenRouter API error: {e}")
|
|
247
|
-
except Exception as e:
|
|
248
|
-
raise APIError(f"Failed to fetch OpenRouter models: {e}")
|
|
249
|
-
|
|
250
|
-
async def _fetch_openai_models(self) -> Dict[str, ModelInfo]:
|
|
251
|
-
"""Fetch models from OpenAI API"""
|
|
252
|
-
if not self.openai_api_key:
|
|
253
|
-
return {}
|
|
254
|
-
|
|
255
|
-
async with aiohttp.ClientSession() as session:
|
|
256
|
-
try:
|
|
257
|
-
async with session.get(
|
|
258
|
-
self.provider_endpoints["openai"],
|
|
259
|
-
headers={
|
|
260
|
-
"Authorization": f"Bearer {self.openai_api_key}",
|
|
261
|
-
"Content-Type": "application/json",
|
|
262
|
-
},
|
|
263
|
-
timeout=aiohttp.ClientTimeout(total=30),
|
|
264
|
-
) as response:
|
|
265
|
-
response.raise_for_status()
|
|
266
|
-
data = await response.json()
|
|
267
|
-
|
|
268
|
-
models = {}
|
|
269
|
-
for model_data in data.get("data", []):
|
|
270
|
-
model_info = self._parse_openai_model(model_data)
|
|
271
|
-
if model_info:
|
|
272
|
-
models[model_info.id] = model_info
|
|
273
|
-
|
|
274
|
-
return models
|
|
275
|
-
|
|
276
|
-
except aiohttp.ClientError as e:
|
|
277
|
-
raise APIError(f"OpenAI API error: {e}")
|
|
278
|
-
except Exception as e:
|
|
279
|
-
raise APIError(f"Failed to fetch OpenAI models: {e}")
|
|
280
|
-
|
|
281
|
-
async def _fetch_anthropic_models(self) -> Dict[str, ModelInfo]:
|
|
282
|
-
"""Fetch models from Anthropic API"""
|
|
283
|
-
# Anthropic doesn't provide a public models endpoint yet
|
|
284
|
-
# Return known Anthropic models with approximate pricing
|
|
285
|
-
return {
|
|
286
|
-
"claude-3-opus-20240229": ModelInfo(
|
|
287
|
-
id="claude-3-opus-20240229",
|
|
288
|
-
name="Claude 3 Opus",
|
|
289
|
-
provider="anthropic",
|
|
290
|
-
context_length=200000,
|
|
291
|
-
prompt_price=15.0,
|
|
292
|
-
completion_price=75.0,
|
|
293
|
-
description="Most powerful Claude 3 model",
|
|
294
|
-
tags=["reasoning", "analysis", "coding"],
|
|
295
|
-
),
|
|
296
|
-
"claude-3-sonnet-20240229": ModelInfo(
|
|
297
|
-
id="claude-3-sonnet-20240229",
|
|
298
|
-
name="Claude 3 Sonnet",
|
|
299
|
-
provider="anthropic",
|
|
300
|
-
context_length=200000,
|
|
301
|
-
prompt_price=3.0,
|
|
302
|
-
completion_price=15.0,
|
|
303
|
-
description="Balanced Claude 3 model",
|
|
304
|
-
tags=["general", "reasoning", "coding"],
|
|
305
|
-
),
|
|
306
|
-
"claude-sonnet-4-20240307": ModelInfo(
|
|
307
|
-
id="claude-sonnet-4-20240307",
|
|
308
|
-
name="Claude 3 Haiku",
|
|
309
|
-
provider="anthropic",
|
|
310
|
-
context_length=200000,
|
|
311
|
-
prompt_price=0.25,
|
|
312
|
-
completion_price=1.25,
|
|
313
|
-
description="Fastest Claude 3 model",
|
|
314
|
-
tags=["speed", "general", "cost-effective"],
|
|
315
|
-
),
|
|
316
|
-
}
|
|
317
|
-
|
|
318
|
-
def _parse_openrouter_model(
|
|
319
|
-
self, model_data: Dict[str, Any]
|
|
320
|
-
) -> Optional[ModelInfo]:
|
|
321
|
-
"""Parse OpenRouter model data"""
|
|
322
|
-
try:
|
|
323
|
-
model_id = model_data.get("id")
|
|
324
|
-
if not model_id:
|
|
325
|
-
return None
|
|
326
|
-
|
|
327
|
-
pricing = model_data.get("pricing", {})
|
|
328
|
-
|
|
329
|
-
return ModelInfo(
|
|
330
|
-
id=model_id,
|
|
331
|
-
name=model_data.get("name", model_id),
|
|
332
|
-
provider="openrouter",
|
|
333
|
-
context_length=model_data.get("context_length", 0),
|
|
334
|
-
prompt_price=float(pricing.get("prompt", 0))
|
|
335
|
-
* 1_000_000, # Convert to per 1M
|
|
336
|
-
completion_price=float(pricing.get("completion", 0))
|
|
337
|
-
* 1_000_000, # Convert to per 1M
|
|
338
|
-
description=model_data.get("description"),
|
|
339
|
-
tags=model_data.get("tags", []),
|
|
340
|
-
is_available=model_data.get("available", True),
|
|
341
|
-
)
|
|
342
|
-
except (ValueError, TypeError):
|
|
343
|
-
return None
|
|
344
|
-
|
|
345
|
-
def _parse_openai_model(self, model_data: Dict[str, Any]) -> Optional[ModelInfo]:
|
|
346
|
-
"""Parse OpenAI model data"""
|
|
347
|
-
try:
|
|
348
|
-
model_id = model_data.get("id")
|
|
349
|
-
if not model_id:
|
|
350
|
-
return None
|
|
351
|
-
|
|
352
|
-
# OpenAI pricing (approximate, should be updated regularly)
|
|
353
|
-
pricing_map = {
|
|
354
|
-
"gpt-4": {"prompt": 30.0, "completion": 60.0},
|
|
355
|
-
"gpt-4-32k": {"prompt": 60.0, "completion": 120.0},
|
|
356
|
-
"gpt-3.5-turbo": {"prompt": 0.5, "completion": 1.5},
|
|
357
|
-
"gpt-3.5-turbo-16k": {"prompt": 3.0, "completion": 4.0},
|
|
358
|
-
}
|
|
359
|
-
|
|
360
|
-
pricing = pricing_map.get(model_id, {"prompt": 2.0, "completion": 4.0})
|
|
361
|
-
|
|
362
|
-
return ModelInfo(
|
|
363
|
-
id=model_id,
|
|
364
|
-
name=model_data.get("id", model_id),
|
|
365
|
-
provider="openai",
|
|
366
|
-
context_length=self._get_openai_context_length(model_id),
|
|
367
|
-
prompt_price=pricing["prompt"],
|
|
368
|
-
completion_price=pricing["completion"],
|
|
369
|
-
description=f"OpenAI {model_id} model",
|
|
370
|
-
tags=["openai", "chat"] if "gpt" in model_id else ["openai"],
|
|
371
|
-
)
|
|
372
|
-
except (ValueError, TypeError):
|
|
373
|
-
return None
|
|
374
|
-
|
|
375
|
-
def _get_openai_context_length(self, model_id: str) -> int:
|
|
376
|
-
"""Get context length for OpenAI models"""
|
|
377
|
-
context_map = {
|
|
378
|
-
"gpt-4": 8192,
|
|
379
|
-
"gpt-4-32k": 32768,
|
|
380
|
-
"gpt-3.5-turbo": 4096,
|
|
381
|
-
"gpt-3.5-turbo-16k": 16384,
|
|
382
|
-
}
|
|
383
|
-
return context_map.get(model_id, 4096)
|
|
384
|
-
|
|
385
|
-
def _get_fallback_models(self) -> Dict[str, ModelInfo]:
    """Return a small static catalogue used when live API fetching fails."""
    # NOTE(review): the "claude-sonnet-4" entry carries Claude 3 Haiku
    # name, description, and pricing — looks like a stale copy; confirm
    # which model is actually intended before changing either side.
    fallbacks = [
        ModelInfo(
            id="gpt-3.5-turbo",
            name="GPT-3.5 Turbo",
            provider="openai",
            context_length=4096,
            prompt_price=0.5,
            completion_price=1.5,
            description="OpenAI GPT-3.5 Turbo",
            tags=["openai", "chat", "cost-effective"],
        ),
        ModelInfo(
            id="claude-sonnet-4",
            name="Claude 3 Haiku",
            provider="anthropic",
            context_length=200000,
            prompt_price=0.25,
            completion_price=1.25,
            description="Anthropic Claude 3 Haiku",
            tags=["anthropic", "fast", "cost-effective"],
        ),
        ModelInfo(
            id="llama-2-70b",
            name="Llama 2 70B",
            provider="meta",
            context_length=4096,
            prompt_price=0.7,
            completion_price=0.8,
            description="Meta Llama 2 70B",
            tags=["meta", "open-source", "reasoning"],
        ),
    ]
    return {model.id: model for model in fallbacks}
|
|
419
|
-
|
|
420
|
-
# Query methods
|
|
421
|
-
def get_model(self, model_id: str) -> Optional["ModelInfo"]:
    """Look up a cached model by its identifier; None when unknown."""
    return self.models.get(model_id)
|
|
424
|
-
|
|
425
|
-
def get_models_by_provider(self, provider: str) -> List["ModelInfo"]:
    """Return every cached model belonging to *provider* (no availability filter)."""
    return [m for m in self.models.values() if m.provider == provider]
|
|
428
|
-
|
|
429
|
-
def get_free_models(self) -> List["ModelInfo"]:
    """List available models that charge nothing for prompt and completion tokens."""

    def _is_free(m: "ModelInfo") -> bool:
        # Both token prices must be exactly zero and the model usable.
        return m.is_available and m.prompt_price == 0.0 and m.completion_price == 0.0

    return [m for m in self.models.values() if _is_free(m)]
|
|
438
|
-
|
|
439
|
-
def get_budget_models(self, max_price_per_1m: float = 1.0) -> List["ModelInfo"]:
    """List available models whose prompt price (USD / 1M tokens) is at most *max_price_per_1m*."""
    return [
        m
        for m in self.models.values()
        if m.is_available and m.prompt_price <= max_price_per_1m
    ]
|
|
446
|
-
|
|
447
|
-
def get_premium_models(self, min_price_per_1m: float = 10.0) -> List["ModelInfo"]:
    """List available models whose prompt price (USD / 1M tokens) is at least *min_price_per_1m*."""
    return [
        m
        for m in self.models.values()
        if m.is_available and m.prompt_price >= min_price_per_1m
    ]
|
|
454
|
-
|
|
455
|
-
def search_models(self, query: str) -> List["ModelInfo"]:
    """Case-insensitively search available models by name, description, or tags.

    A model matches when *query* is a substring of its name, its
    description, or any of its tags. Unavailable models are never
    returned; each matching model appears at most once.
    """
    needle = query.lower()
    results = []
    for model in self.models.values():
        if not model.is_available:
            continue
        # BUG FIX: description comes from model_data.get("description")
        # upstream and can be None; the original called .lower() on it
        # directly and crashed with AttributeError. Guard with `or ""`.
        if needle in model.name.lower():
            results.append(model)
            continue
        if needle in (model.description or "").lower():
            results.append(model)
            continue
        if any(needle in tag.lower() for tag in model.tags):
            results.append(model)
            continue
    return results
|
|
480
|
-
|
|
481
|
-
def get_cheapest_model(self, provider: Optional[str] = None) -> Optional["ModelInfo"]:
    """Return the available model with the lowest prompt price.

    When *provider* is given, only that provider's models are considered.
    Returns None when nothing matches.
    """
    candidates = (
        m
        for m in self.models.values()
        if m.is_available and (not provider or m.provider == provider)
    )
    # min() with default avoids materializing the filtered list.
    return min(candidates, key=lambda m: m.prompt_price, default=None)
|
|
492
|
-
|
|
493
|
-
def estimate_cost(
    self, model_id: str, input_tokens: int, output_tokens: int
) -> Optional[float]:
    """Estimate the USD cost of one call against *model_id*.

    Delegates to the model's own estimate_cost; returns None when the
    model is not in the cache.
    """
    model = self.get_model(model_id)
    return model.estimate_cost(input_tokens, output_tokens) if model else None
|
|
502
|
-
|
|
503
|
-
def get_models_summary(self) -> Dict[str, Any]:
    """Summarize the cached catalogue: per-provider counts, pricing tiers, and prompt-price statistics."""
    if not self.models:
        return {"error": "No models loaded"}

    usable = [m for m in self.models.values() if m.is_available]

    # Count available models per provider.
    by_provider: Dict[str, int] = {}
    for m in usable:
        by_provider[m.provider] = by_provider.get(m.provider, 0) + 1

    # Only strictly positive prompt prices participate in the statistics.
    paid = [m.prompt_price for m in usable if m.prompt_price > 0]
    if paid:
        price_range = {
            "min": min(paid),
            "max": max(paid),
            "avg": sum(paid) / len(paid),
        }
    else:
        price_range = {"min": 0, "max": 0, "avg": 0}

    fetched_at = self.last_fetch_time
    return {
        "total_models": len(self.models),
        "available_models": len(usable),
        "providers": by_provider,
        "free_models": len(self.get_free_models()),
        "budget_models": len(self.get_budget_models()),
        "premium_models": len(self.get_premium_models()),
        "price_range": price_range,
        "last_updated": fetched_at.isoformat() if fetched_at else None,
    }
|
|
534
|
-
|
|
535
|
-
def clear_cache(self) -> None:
    """Drop all cached raw entries and parsed models, and reset the fetch timestamp."""
    for container in (self.cache, self.models):
        container.clear()
    self.last_fetch_time = None
    logger.info("Models cache cleared")
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
# Global cache instance
# Module-level singleton, created lazily by get_models_cache() on first call.
_models_cache: Optional[ModelsCache] = None
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
def get_models_cache(
    openrouter_api_key: Optional[str] = None,
    openai_api_key: Optional[str] = None,
    anthropic_api_key: Optional[str] = None,
) -> ModelsCache:
    """Return the process-wide ModelsCache singleton, creating it on first call.

    The API keys are only consulted when the singleton is first built;
    subsequent calls ignore them and return the existing instance.
    """
    global _models_cache
    if _models_cache is None:
        _models_cache = ModelsCache(
            openrouter_api_key=openrouter_api_key,
            openai_api_key=openai_api_key,
            anthropic_api_key=anthropic_api_key,
        )
    return _models_cache
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
# Convenience functions
|
|
566
|
-
async def get_available_models(force_refresh: bool = False) -> Dict[str, ModelInfo]:
    """Fetch (or reuse) the full model catalogue via the global cache."""
    return await get_models_cache().fetch_all_models(force_refresh=force_refresh)
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
async def get_model_info(model_id: str) -> Optional[ModelInfo]:
    """Refresh the catalogue if needed, then look up one model by id (None if unknown)."""
    models_cache = get_models_cache()
    await models_cache.fetch_all_models()
    return models_cache.get_model(model_id)
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
async def estimate_model_cost(
    model_id: str, input_tokens: int, output_tokens: int
) -> Optional[float]:
    """Refresh the catalogue if needed and estimate the USD cost of one call; None for unknown models."""
    models_cache = get_models_cache()
    await models_cache.fetch_all_models()
    return models_cache.estimate_cost(model_id, input_tokens, output_tokens)
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
async def find_cheapest_model(provider: Optional[str] = None) -> Optional[ModelInfo]:
    """Refresh the catalogue if needed and return the cheapest available model, optionally limited to one provider."""
    models_cache = get_models_cache()
    await models_cache.fetch_all_models()
    return models_cache.get_cheapest_model(provider=provider)
|