unrealon-1.0.9-py3-none-any.whl → unrealon-1.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unrealon/__init__.py +23 -21
- unrealon-1.1.1.dist-info/METADATA +722 -0
- unrealon-1.1.1.dist-info/RECORD +82 -0
- {unrealon-1.0.9.dist-info → unrealon-1.1.1.dist-info}/WHEEL +1 -1
- unrealon-1.1.1.dist-info/entry_points.txt +9 -0
- {unrealon-1.0.9.dist-info → unrealon-1.1.1.dist-info/licenses}/LICENSE +1 -1
- unrealon_bridge/__init__.py +114 -0
- unrealon_bridge/cli.py +316 -0
- unrealon_bridge/client/__init__.py +93 -0
- unrealon_bridge/client/base.py +78 -0
- unrealon_bridge/client/commands.py +89 -0
- unrealon_bridge/client/connection.py +90 -0
- unrealon_bridge/client/events.py +65 -0
- unrealon_bridge/client/health.py +38 -0
- unrealon_bridge/client/html_parser.py +146 -0
- unrealon_bridge/client/logging.py +139 -0
- unrealon_bridge/client/proxy.py +70 -0
- unrealon_bridge/client/scheduler.py +450 -0
- unrealon_bridge/client/session.py +70 -0
- unrealon_bridge/configs/__init__.py +14 -0
- unrealon_bridge/configs/bridge_config.py +212 -0
- unrealon_bridge/configs/bridge_config.yaml +39 -0
- unrealon_bridge/models/__init__.py +138 -0
- unrealon_bridge/models/base.py +28 -0
- unrealon_bridge/models/command.py +41 -0
- unrealon_bridge/models/events.py +40 -0
- unrealon_bridge/models/html_parser.py +79 -0
- unrealon_bridge/models/logging.py +55 -0
- unrealon_bridge/models/parser.py +63 -0
- unrealon_bridge/models/proxy.py +41 -0
- unrealon_bridge/models/requests.py +95 -0
- unrealon_bridge/models/responses.py +88 -0
- unrealon_bridge/models/scheduler.py +592 -0
- unrealon_bridge/models/session.py +28 -0
- unrealon_bridge/server/__init__.py +91 -0
- unrealon_bridge/server/base.py +171 -0
- unrealon_bridge/server/handlers/__init__.py +23 -0
- unrealon_bridge/server/handlers/command.py +110 -0
- unrealon_bridge/server/handlers/html_parser.py +139 -0
- unrealon_bridge/server/handlers/logging.py +95 -0
- unrealon_bridge/server/handlers/parser.py +95 -0
- unrealon_bridge/server/handlers/proxy.py +75 -0
- unrealon_bridge/server/handlers/scheduler.py +545 -0
- unrealon_bridge/server/handlers/session.py +66 -0
- unrealon_browser/__init__.py +61 -18
- unrealon_browser/{src/cli → cli}/browser_cli.py +6 -13
- unrealon_browser/{src/cli → cli}/cookies_cli.py +5 -1
- unrealon_browser/{src/core → core}/browser_manager.py +2 -2
- unrealon_browser/{src/managers → managers}/captcha.py +1 -1
- unrealon_browser/{src/managers → managers}/cookies.py +1 -1
- unrealon_browser/managers/logger_bridge.py +231 -0
- unrealon_browser/{src/managers → managers}/profile.py +1 -1
- unrealon_driver/__init__.py +73 -19
- unrealon_driver/browser/__init__.py +8 -0
- unrealon_driver/browser/config.py +74 -0
- unrealon_driver/browser/manager.py +416 -0
- unrealon_driver/exceptions.py +28 -0
- unrealon_driver/parser/__init__.py +55 -0
- unrealon_driver/parser/cli_manager.py +141 -0
- unrealon_driver/parser/daemon_manager.py +227 -0
- unrealon_driver/parser/managers/__init__.py +46 -0
- unrealon_driver/parser/managers/browser.py +51 -0
- unrealon_driver/parser/managers/config.py +281 -0
- unrealon_driver/parser/managers/error.py +412 -0
- unrealon_driver/parser/managers/html.py +732 -0
- unrealon_driver/parser/managers/logging.py +609 -0
- unrealon_driver/parser/managers/result.py +321 -0
- unrealon_driver/parser/parser_manager.py +628 -0
- unrealon/sdk_config.py +0 -88
- unrealon-1.0.9.dist-info/METADATA +0 -810
- unrealon-1.0.9.dist-info/RECORD +0 -246
- unrealon_browser/pyproject.toml +0 -182
- unrealon_browser/src/__init__.py +0 -62
- unrealon_browser/src/managers/logger_bridge.py +0 -395
- unrealon_driver/README.md +0 -204
- unrealon_driver/pyproject.toml +0 -187
- unrealon_driver/src/__init__.py +0 -90
- unrealon_driver/src/cli/__init__.py +0 -10
- unrealon_driver/src/cli/main.py +0 -66
- unrealon_driver/src/cli/simple.py +0 -510
- unrealon_driver/src/config/__init__.py +0 -11
- unrealon_driver/src/config/auto_config.py +0 -478
- unrealon_driver/src/core/__init__.py +0 -18
- unrealon_driver/src/core/exceptions.py +0 -289
- unrealon_driver/src/core/parser.py +0 -638
- unrealon_driver/src/dto/__init__.py +0 -66
- unrealon_driver/src/dto/cli.py +0 -119
- unrealon_driver/src/dto/config.py +0 -18
- unrealon_driver/src/dto/events.py +0 -237
- unrealon_driver/src/dto/execution.py +0 -313
- unrealon_driver/src/dto/services.py +0 -311
- unrealon_driver/src/execution/__init__.py +0 -23
- unrealon_driver/src/execution/daemon_mode.py +0 -317
- unrealon_driver/src/execution/interactive_mode.py +0 -88
- unrealon_driver/src/execution/modes.py +0 -45
- unrealon_driver/src/execution/scheduled_mode.py +0 -209
- unrealon_driver/src/execution/test_mode.py +0 -250
- unrealon_driver/src/logging/__init__.py +0 -24
- unrealon_driver/src/logging/driver_logger.py +0 -512
- unrealon_driver/src/services/__init__.py +0 -24
- unrealon_driver/src/services/browser_service.py +0 -726
- unrealon_driver/src/services/llm/__init__.py +0 -15
- unrealon_driver/src/services/llm/browser_llm_service.py +0 -363
- unrealon_driver/src/services/llm/llm.py +0 -195
- unrealon_driver/src/services/logger_service.py +0 -232
- unrealon_driver/src/services/metrics_service.py +0 -185
- unrealon_driver/src/services/scheduler_service.py +0 -489
- unrealon_driver/src/services/websocket_service.py +0 -362
- unrealon_driver/src/utils/__init__.py +0 -16
- unrealon_driver/src/utils/service_factory.py +0 -317
- unrealon_driver/src/utils/time_formatter.py +0 -338
- unrealon_llm/README.md +0 -44
- unrealon_llm/__init__.py +0 -26
- unrealon_llm/pyproject.toml +0 -154
- unrealon_llm/src/__init__.py +0 -228
- unrealon_llm/src/cli/__init__.py +0 -0
- unrealon_llm/src/core/__init__.py +0 -11
- unrealon_llm/src/core/smart_client.py +0 -438
- unrealon_llm/src/dto/__init__.py +0 -155
- unrealon_llm/src/dto/models/__init__.py +0 -0
- unrealon_llm/src/dto/models/config.py +0 -343
- unrealon_llm/src/dto/models/core.py +0 -328
- unrealon_llm/src/dto/models/enums.py +0 -123
- unrealon_llm/src/dto/models/html_analysis.py +0 -345
- unrealon_llm/src/dto/models/statistics.py +0 -473
- unrealon_llm/src/dto/models/translation.py +0 -383
- unrealon_llm/src/dto/models/type_conversion.py +0 -462
- unrealon_llm/src/dto/schemas/__init__.py +0 -0
- unrealon_llm/src/exceptions.py +0 -392
- unrealon_llm/src/llm_config/__init__.py +0 -20
- unrealon_llm/src/llm_config/logging_config.py +0 -178
- unrealon_llm/src/llm_logging/__init__.py +0 -42
- unrealon_llm/src/llm_logging/llm_events.py +0 -107
- unrealon_llm/src/llm_logging/llm_logger.py +0 -466
- unrealon_llm/src/managers/__init__.py +0 -15
- unrealon_llm/src/managers/cache_manager.py +0 -67
- unrealon_llm/src/managers/cost_manager.py +0 -107
- unrealon_llm/src/managers/request_manager.py +0 -298
- unrealon_llm/src/modules/__init__.py +0 -0
- unrealon_llm/src/modules/html_processor/__init__.py +0 -25
- unrealon_llm/src/modules/html_processor/base_processor.py +0 -415
- unrealon_llm/src/modules/html_processor/details_processor.py +0 -85
- unrealon_llm/src/modules/html_processor/listing_processor.py +0 -91
- unrealon_llm/src/modules/html_processor/models/__init__.py +0 -20
- unrealon_llm/src/modules/html_processor/models/processing_models.py +0 -40
- unrealon_llm/src/modules/html_processor/models/universal_model.py +0 -56
- unrealon_llm/src/modules/html_processor/processor.py +0 -102
- unrealon_llm/src/modules/llm/__init__.py +0 -0
- unrealon_llm/src/modules/translator/__init__.py +0 -0
- unrealon_llm/src/provider.py +0 -116
- unrealon_llm/src/utils/__init__.py +0 -95
- unrealon_llm/src/utils/common.py +0 -64
- unrealon_llm/src/utils/data_extractor.py +0 -188
- unrealon_llm/src/utils/html_cleaner.py +0 -767
- unrealon_llm/src/utils/language_detector.py +0 -308
- unrealon_llm/src/utils/models_cache.py +0 -592
- unrealon_llm/src/utils/smart_counter.py +0 -229
- unrealon_llm/src/utils/token_counter.py +0 -189
- unrealon_sdk/README.md +0 -25
- unrealon_sdk/__init__.py +0 -30
- unrealon_sdk/pyproject.toml +0 -231
- unrealon_sdk/src/__init__.py +0 -150
- unrealon_sdk/src/cli/__init__.py +0 -12
- unrealon_sdk/src/cli/commands/__init__.py +0 -22
- unrealon_sdk/src/cli/commands/benchmark.py +0 -42
- unrealon_sdk/src/cli/commands/diagnostics.py +0 -573
- unrealon_sdk/src/cli/commands/health.py +0 -46
- unrealon_sdk/src/cli/commands/integration.py +0 -498
- unrealon_sdk/src/cli/commands/reports.py +0 -43
- unrealon_sdk/src/cli/commands/security.py +0 -36
- unrealon_sdk/src/cli/commands/server.py +0 -483
- unrealon_sdk/src/cli/commands/servers.py +0 -56
- unrealon_sdk/src/cli/commands/tests.py +0 -55
- unrealon_sdk/src/cli/main.py +0 -126
- unrealon_sdk/src/cli/utils/reporter.py +0 -519
- unrealon_sdk/src/clients/openapi.yaml +0 -3347
- unrealon_sdk/src/clients/python_http/__init__.py +0 -3
- unrealon_sdk/src/clients/python_http/api_config.py +0 -228
- unrealon_sdk/src/clients/python_http/models/BaseModel.py +0 -12
- unrealon_sdk/src/clients/python_http/models/BroadcastDeliveryStats.py +0 -33
- unrealon_sdk/src/clients/python_http/models/BroadcastMessage.py +0 -17
- unrealon_sdk/src/clients/python_http/models/BroadcastMessageRequest.py +0 -35
- unrealon_sdk/src/clients/python_http/models/BroadcastPriority.py +0 -10
- unrealon_sdk/src/clients/python_http/models/BroadcastResponse.py +0 -21
- unrealon_sdk/src/clients/python_http/models/BroadcastResultResponse.py +0 -33
- unrealon_sdk/src/clients/python_http/models/BroadcastTarget.py +0 -11
- unrealon_sdk/src/clients/python_http/models/ConnectionStats.py +0 -27
- unrealon_sdk/src/clients/python_http/models/ConnectionsResponse.py +0 -21
- unrealon_sdk/src/clients/python_http/models/DeveloperMessageResponse.py +0 -23
- unrealon_sdk/src/clients/python_http/models/ErrorResponse.py +0 -25
- unrealon_sdk/src/clients/python_http/models/HTTPValidationError.py +0 -16
- unrealon_sdk/src/clients/python_http/models/HealthResponse.py +0 -23
- unrealon_sdk/src/clients/python_http/models/HealthStatus.py +0 -33
- unrealon_sdk/src/clients/python_http/models/LogLevel.py +0 -10
- unrealon_sdk/src/clients/python_http/models/LoggingRequest.py +0 -27
- unrealon_sdk/src/clients/python_http/models/LoggingResponse.py +0 -23
- unrealon_sdk/src/clients/python_http/models/MaintenanceMode.py +0 -9
- unrealon_sdk/src/clients/python_http/models/MaintenanceModeRequest.py +0 -33
- unrealon_sdk/src/clients/python_http/models/MaintenanceStatusResponse.py +0 -39
- unrealon_sdk/src/clients/python_http/models/ParserCommandRequest.py +0 -25
- unrealon_sdk/src/clients/python_http/models/ParserMessageResponse.py +0 -21
- unrealon_sdk/src/clients/python_http/models/ParserRegistrationRequest.py +0 -28
- unrealon_sdk/src/clients/python_http/models/ParserRegistrationResponse.py +0 -25
- unrealon_sdk/src/clients/python_http/models/ParserType.py +0 -10
- unrealon_sdk/src/clients/python_http/models/ProxyBlockRequest.py +0 -19
- unrealon_sdk/src/clients/python_http/models/ProxyEndpointResponse.py +0 -20
- unrealon_sdk/src/clients/python_http/models/ProxyListResponse.py +0 -19
- unrealon_sdk/src/clients/python_http/models/ProxyProvider.py +0 -10
- unrealon_sdk/src/clients/python_http/models/ProxyPurchaseRequest.py +0 -25
- unrealon_sdk/src/clients/python_http/models/ProxyResponse.py +0 -47
- unrealon_sdk/src/clients/python_http/models/ProxyRotationRequest.py +0 -23
- unrealon_sdk/src/clients/python_http/models/ProxyStatus.py +0 -10
- unrealon_sdk/src/clients/python_http/models/ProxyUsageRequest.py +0 -19
- unrealon_sdk/src/clients/python_http/models/ProxyUsageStatsResponse.py +0 -26
- unrealon_sdk/src/clients/python_http/models/ServiceRegistrationDto.py +0 -23
- unrealon_sdk/src/clients/python_http/models/ServiceStatsResponse.py +0 -31
- unrealon_sdk/src/clients/python_http/models/SessionStartRequest.py +0 -23
- unrealon_sdk/src/clients/python_http/models/SuccessResponse.py +0 -25
- unrealon_sdk/src/clients/python_http/models/SystemNotificationResponse.py +0 -23
- unrealon_sdk/src/clients/python_http/models/ValidationError.py +0 -18
- unrealon_sdk/src/clients/python_http/models/ValidationErrorResponse.py +0 -21
- unrealon_sdk/src/clients/python_http/models/WebSocketMetrics.py +0 -21
- unrealon_sdk/src/clients/python_http/models/__init__.py +0 -44
- unrealon_sdk/src/clients/python_http/services/None_service.py +0 -35
- unrealon_sdk/src/clients/python_http/services/ParserManagement_service.py +0 -190
- unrealon_sdk/src/clients/python_http/services/ProxyManagement_service.py +0 -289
- unrealon_sdk/src/clients/python_http/services/SocketLogging_service.py +0 -187
- unrealon_sdk/src/clients/python_http/services/SystemHealth_service.py +0 -119
- unrealon_sdk/src/clients/python_http/services/WebSocketAPI_service.py +0 -198
- unrealon_sdk/src/clients/python_http/services/__init__.py +0 -0
- unrealon_sdk/src/clients/python_http/services/admin_service.py +0 -125
- unrealon_sdk/src/clients/python_http/services/async_None_service.py +0 -35
- unrealon_sdk/src/clients/python_http/services/async_ParserManagement_service.py +0 -190
- unrealon_sdk/src/clients/python_http/services/async_ProxyManagement_service.py +0 -289
- unrealon_sdk/src/clients/python_http/services/async_SocketLogging_service.py +0 -189
- unrealon_sdk/src/clients/python_http/services/async_SystemHealth_service.py +0 -123
- unrealon_sdk/src/clients/python_http/services/async_WebSocketAPI_service.py +0 -200
- unrealon_sdk/src/clients/python_http/services/async_admin_service.py +0 -125
- unrealon_sdk/src/clients/python_websocket/__init__.py +0 -28
- unrealon_sdk/src/clients/python_websocket/client.py +0 -490
- unrealon_sdk/src/clients/python_websocket/events.py +0 -732
- unrealon_sdk/src/clients/python_websocket/example.py +0 -136
- unrealon_sdk/src/clients/python_websocket/types.py +0 -871
- unrealon_sdk/src/core/__init__.py +0 -64
- unrealon_sdk/src/core/client.py +0 -556
- unrealon_sdk/src/core/config.py +0 -465
- unrealon_sdk/src/core/exceptions.py +0 -239
- unrealon_sdk/src/core/metadata.py +0 -191
- unrealon_sdk/src/core/models.py +0 -142
- unrealon_sdk/src/core/types.py +0 -68
- unrealon_sdk/src/dto/__init__.py +0 -268
- unrealon_sdk/src/dto/authentication.py +0 -108
- unrealon_sdk/src/dto/cache.py +0 -208
- unrealon_sdk/src/dto/common.py +0 -19
- unrealon_sdk/src/dto/concurrency.py +0 -393
- unrealon_sdk/src/dto/events.py +0 -108
- unrealon_sdk/src/dto/health.py +0 -339
- unrealon_sdk/src/dto/load_balancing.py +0 -336
- unrealon_sdk/src/dto/logging.py +0 -230
- unrealon_sdk/src/dto/performance.py +0 -165
- unrealon_sdk/src/dto/rate_limiting.py +0 -295
- unrealon_sdk/src/dto/resource_pooling.py +0 -128
- unrealon_sdk/src/dto/structured_logging.py +0 -112
- unrealon_sdk/src/dto/task_scheduling.py +0 -121
- unrealon_sdk/src/dto/websocket.py +0 -55
- unrealon_sdk/src/enterprise/__init__.py +0 -59
- unrealon_sdk/src/enterprise/authentication.py +0 -401
- unrealon_sdk/src/enterprise/cache_manager.py +0 -578
- unrealon_sdk/src/enterprise/error_recovery.py +0 -494
- unrealon_sdk/src/enterprise/event_system.py +0 -549
- unrealon_sdk/src/enterprise/health_monitor.py +0 -747
- unrealon_sdk/src/enterprise/load_balancer.py +0 -964
- unrealon_sdk/src/enterprise/logging/__init__.py +0 -68
- unrealon_sdk/src/enterprise/logging/cleanup.py +0 -156
- unrealon_sdk/src/enterprise/logging/development.py +0 -744
- unrealon_sdk/src/enterprise/logging/service.py +0 -410
- unrealon_sdk/src/enterprise/multithreading_manager.py +0 -853
- unrealon_sdk/src/enterprise/performance_monitor.py +0 -539
- unrealon_sdk/src/enterprise/proxy_manager.py +0 -696
- unrealon_sdk/src/enterprise/rate_limiter.py +0 -652
- unrealon_sdk/src/enterprise/resource_pool.py +0 -763
- unrealon_sdk/src/enterprise/task_scheduler.py +0 -709
- unrealon_sdk/src/internal/__init__.py +0 -10
- unrealon_sdk/src/internal/command_router.py +0 -497
- unrealon_sdk/src/internal/connection_manager.py +0 -397
- unrealon_sdk/src/internal/http_client.py +0 -446
- unrealon_sdk/src/internal/websocket_client.py +0 -420
- unrealon_sdk/src/provider.py +0 -471
- unrealon_sdk/src/utils.py +0 -234
- /unrealon_browser/{src/cli → cli}/__init__.py +0 -0
- /unrealon_browser/{src/cli → cli}/interactive_mode.py +0 -0
- /unrealon_browser/{src/cli → cli}/main.py +0 -0
- /unrealon_browser/{src/core → core}/__init__.py +0 -0
- /unrealon_browser/{src/dto → dto}/__init__.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/config.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/core.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/dataclasses.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/detection.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/enums.py +0 -0
- /unrealon_browser/{src/dto → dto}/models/statistics.py +0 -0
- /unrealon_browser/{src/managers → managers}/__init__.py +0 -0
- /unrealon_browser/{src/managers → managers}/stealth.py +0 -0
unrealon_llm/src/managers/cost_manager.py (removed)
@@ -1,107 +0,0 @@
"""
Cost Manager

Manages cost tracking and budget limits for LLM operations.
"""

import logging
from datetime import datetime
from decimal import Decimal
from typing import Any, Dict, Optional

from unrealon_llm.src.exceptions import CostLimitExceededError

logger = logging.getLogger(__name__)


class CostManager:
    """Real-time cost tracking with limits"""

    def __init__(self, daily_limit_usd: float = 10.0):
        self.daily_limit = Decimal(str(daily_limit_usd))
        self.total_cost = Decimal('0.00')
        self.requests_today = 0
        self.last_reset = datetime.now().date()

        # Cost breakdown
        self.cost_by_model: Dict[str, Decimal] = {}
        self.cost_by_operation: Dict[str, Decimal] = {}

    def track_request(self, cost_usd: float, model: str, operation: str = "completion", llm_logger=None):
        """Track a request cost"""
        # Reset daily counters if new day
        today = datetime.now().date()
        if today != self.last_reset:
            old_total = float(self.total_cost)
            self.total_cost = Decimal('0.00')
            self.requests_today = 0
            self.last_reset = today

            # Log daily reset
            if llm_logger and old_total > 0:
                llm_logger.log_cost_tracking(
                    daily_total_usd=old_total,
                    request_cost_usd=0,
                    details={
                        "daily_budget_reset": True,
                        "previous_total": old_total
                    }
                )

        cost_decimal = Decimal(str(cost_usd))

        # Check limit before adding
        if self.total_cost + cost_decimal > self.daily_limit:
            if llm_logger:
                llm_logger.log_cost_tracking(
                    operation_cost_usd=cost_usd,
                    daily_total_usd=float(self.total_cost + cost_decimal),
                    daily_limit_usd=float(self.daily_limit),
                    model=model,
                )
            raise CostLimitExceededError(
                float(self.total_cost + cost_decimal),
                float(self.daily_limit)
            )

        # Track costs
        self.total_cost += cost_decimal
        self.requests_today += 1

        # Track by model
        if model not in self.cost_by_model:
            self.cost_by_model[model] = Decimal('0.00')
        self.cost_by_model[model] += cost_decimal

        # Track by operation
        if operation not in self.cost_by_operation:
            self.cost_by_operation[operation] = Decimal('0.00')
        self.cost_by_operation[operation] += cost_decimal

        # Log cost tracking
        if llm_logger:
            llm_logger.log_cost_tracking(
                operation_cost_usd=cost_usd,
                daily_total_usd=float(self.total_cost),
                daily_limit_usd=float(self.daily_limit),
                model=model,
            )

    def can_afford(self, estimated_cost_usd: float) -> bool:
        """Check if we can afford a request"""
        return self.total_cost + Decimal(str(estimated_cost_usd)) <= self.daily_limit

    def get_remaining_budget(self) -> float:
        """Get remaining budget"""
        return float(self.daily_limit - self.total_cost)

    def get_stats(self) -> Dict[str, Any]:
        """Get cost statistics"""
        return {
            "total_cost_usd": float(self.total_cost),
            "remaining_budget_usd": float(self.daily_limit - self.total_cost),
            "requests_today": self.requests_today,
            "daily_limit_usd": float(self.daily_limit),
            "cost_by_model": {k: float(v) for k, v in self.cost_by_model.items()},
            "cost_by_operation": {k: float(v) for k, v in self.cost_by_operation.items()},
        }
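For orientation, a minimal usage sketch of the removed CostManager, based only on the code above. The import paths follow the 1.0.9 package layout, and the daily limit, model name, and per-request cost are illustrative values, not anything shipped with the package.

from unrealon_llm.src.exceptions import CostLimitExceededError
from unrealon_llm.src.managers.cost_manager import CostManager

# Illustrative daily budget; the class defaults to 10 USD.
manager = CostManager(daily_limit_usd=5.0)

estimated_cost = 0.01  # hypothetical per-request cost in USD
if manager.can_afford(estimated_cost):
    try:
        manager.track_request(cost_usd=estimated_cost, model="gpt-4o-mini", operation="completion")
    except CostLimitExceededError:
        # Raised when the running daily total would exceed the limit
        pass

print(manager.get_remaining_budget())        # remaining budget in USD
print(manager.get_stats()["requests_today"])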
unrealon_llm/src/managers/request_manager.py (removed)
@@ -1,298 +0,0 @@
"""
Request Manager

Manages HTTP requests to LLM providers with retry logic and error handling.
"""

import asyncio
import logging
from datetime import datetime
from typing import Any, Dict, List, Optional, Type
from pydantic import BaseModel

import aiohttp

from unrealon_llm.src.dto import (
    ChatMessage,
    LLMConfig,
    LLMProvider,
    LLMResponse,
    MessageRole,
    TokenUsage,
)
from unrealon_llm.src.exceptions import (
    APIError,
    AuthenticationError,
    MissingAPIKeyError,
    ModelUnavailableError,
    NetworkError,
    RateLimitError,
    wrap_api_error,
)
from unrealon_llm.src.utils.data_extractor import safe_extract_json

logger = logging.getLogger(__name__)


class RequestManager:
    """HTTP request manager for LLM providers"""

    def __init__(self, config: LLMConfig):
        self.config = config
        self._session: Optional[aiohttp.ClientSession] = None

        # Provider URLs
        self.provider_urls = {
            LLMProvider.OPENROUTER: "https://openrouter.ai/api/v1/chat/completions",
            LLMProvider.OPENAI: "https://api.openai.com/v1/chat/completions",
            LLMProvider.ANTHROPIC: "https://api.anthropic.com/v1/messages",
        }

    async def __aenter__(self):
        """Async context manager entry"""
        await self._ensure_session()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit"""
        await self.close()

    async def _ensure_session(self):
        """Ensure HTTP session is available"""
        if self._session is None or self._session.closed:
            timeout = aiohttp.ClientTimeout(total=self.config.request_timeout_seconds)
            self._session = aiohttp.ClientSession(timeout=timeout)

    async def close(self):
        """Close HTTP session"""
        if self._session and not self._session.closed:
            await self._session.close()

    async def execute_with_retry(
        self,
        provider: LLMProvider,
        messages: List[ChatMessage],
        model: str,
        max_tokens: Optional[int],
        temperature: float,
        response_format: Optional[str],
        **kwargs
    ) -> LLMResponse:
        """Execute request with retry logic"""
        last_error = None

        for attempt in range(self.config.max_retries + 1):
            try:
                return await self._execute_request(
                    provider, messages, model, max_tokens, temperature, response_format, **kwargs
                )
            except RateLimitError as e:
                last_error = e
                if attempt < self.config.max_retries:
                    # Wait before retry (exponential backoff)
                    wait_time = (2 ** attempt)
                    logger.warning(f"Rate limited, waiting {wait_time}s before retry")
                    await asyncio.sleep(wait_time)
                    continue
                raise e
            except (NetworkError, APIError) as e:
                last_error = e
                if attempt < self.config.max_retries:
                    wait_time = (2 ** attempt)
                    logger.warning(f"Request failed, retrying in {wait_time}s")
                    await asyncio.sleep(wait_time)
                    continue
                raise e

        # If we get here, all retries failed
        raise last_error

    async def _execute_request(
        self,
        provider: LLMProvider,
        messages: List[ChatMessage],
        model: str,
        max_tokens: Optional[int],
        temperature: float,
        response_format: Optional[str],
        **kwargs
    ) -> LLMResponse:
        """Execute actual HTTP request to provider"""
        await self._ensure_session()

        # Get API key for provider
        api_key = self._get_api_key_for_provider(provider)
        if not api_key:
            raise MissingAPIKeyError(provider.value)

        # Extract response_model before building payload (don't send to API)
        response_model = kwargs.pop('response_model', None)
        if not response_model:
            raise ValueError("response_model is required for LLM requests")

        # Build request payload
        payload = self._build_request_payload(
            provider, messages, model, max_tokens, temperature, response_format, **kwargs
        )

        # Build headers
        headers = self._build_headers(provider, api_key)

        # Get provider URL
        url = self.provider_urls[provider]

        start_time = datetime.now()

        try:
            async with self._session.post(url, json=payload, headers=headers) as response:
                response_data = await response.json()

                # Handle errors
                if response.status == 401:
                    raise AuthenticationError("Invalid API key")
                elif response.status == 429:
                    raise RateLimitError("Rate limit exceeded")
                elif response.status == 404:
                    raise ModelUnavailableError(model)
                elif response.status >= 400:
                    error_msg = response_data.get("error", {}).get("message", "Unknown error")
                    raise APIError(f"Provider error: {error_msg}")

                response.raise_for_status()

                # Parse response
                return self._parse_response(provider, response_data, model, start_time, response_model)

        except aiohttp.ClientError as e:
            raise NetworkError(f"Network error: {e}")

    def _build_request_payload(
        self,
        provider: LLMProvider,
        messages: List[ChatMessage],
        model: str,
        max_tokens: Optional[int],
        temperature: float,
        response_format: Optional[str],
        **kwargs
    ) -> Dict[str, Any]:
        """Build request payload for provider"""
        payload = {
            "model": model,
            "temperature": temperature,
        }

        if max_tokens:
            payload["max_tokens"] = max_tokens

        # Convert messages to provider format
        if provider == LLMProvider.ANTHROPIC:
            # Anthropic uses different message format
            payload["messages"] = [
                {"role": msg.role.value, "content": msg.content}
                for msg in messages if msg.role != MessageRole.SYSTEM
            ]

            # System message goes in separate field
            system_messages = [msg.content for msg in messages if msg.role == MessageRole.SYSTEM]
            if system_messages:
                payload["system"] = system_messages[0]
        else:
            # OpenAI/OpenRouter format
            payload["messages"] = [
                {"role": msg.role.value, "content": msg.content}
                for msg in messages
            ]

        # Add response format if specified
        if response_format == "json":
            payload["response_format"] = {"type": "json_object"}

        # Add any additional parameters
        payload.update(kwargs)

        return payload

    def _build_headers(self, provider: LLMProvider, api_key: str) -> Dict[str, str]:
        """Build headers for provider"""
        headers = {
            "Content-Type": "application/json",
        }

        if provider == LLMProvider.ANTHROPIC:
            headers["x-api-key"] = api_key
            headers["anthropic-version"] = "2023-06-01"
        else:
            headers["Authorization"] = f"Bearer {api_key}"

        return headers

    def _parse_response(
        self,
        provider: LLMProvider,
        response_data: Dict[str, Any],
        model: str,
        start_time: datetime,
        response_model: Type[BaseModel]
    ) -> LLMResponse:
        """Parse provider response to LLMResponse"""
        processing_time = (datetime.now() - start_time).total_seconds()

        if provider == LLMProvider.ANTHROPIC:
            # Anthropic response format
            content = response_data.get("content", [{}])[0].get("text", "")
            usage = response_data.get("usage", {})

            token_usage = TokenUsage(
                prompt_tokens=usage.get("input_tokens", 0),
                completion_tokens=usage.get("output_tokens", 0),
                total_tokens=usage.get("input_tokens", 0) + usage.get("output_tokens", 0)
            )
        else:
            # OpenAI/OpenRouter format
            choice = response_data.get("choices", [{}])[0]
            content = choice.get("message", {}).get("content", "")
            usage = response_data.get("usage", {})

            token_usage = TokenUsage(
                prompt_tokens=usage.get("prompt_tokens", 0),
                completion_tokens=usage.get("completion_tokens", 0),
                total_tokens=usage.get("total_tokens", 0)
            )

        # Safe model extraction with response model validation - COMPLIANT with LLM_REQUIREMENTS.md
        try:
            # Always validate with provided response model and return model instance
            extracted_model = safe_extract_json(content, expected_schema=response_model)
        except Exception as e:
            # If validation fails, create empty model instance
            extracted_model = response_model()

        return LLMResponse(
            id=response_data.get("id", f"llm_{int(datetime.now().timestamp())}"),
            model=model,
            content=content,
            finish_reason=response_data.get("choices", [{}])[0].get("finish_reason"),
            processing_time_seconds=processing_time,
            token_usage=token_usage,
            extracted_model=extracted_model
        )

    def get_provider_for_model(self, model: str) -> LLMProvider:
        """Determine provider from model name"""
        if model.startswith("claude"):
            return LLMProvider.ANTHROPIC
        elif model.startswith("gpt"):
            return LLMProvider.OPENAI
        else:
            return LLMProvider.OPENROUTER  # Default to OpenRouter

    def _get_api_key_for_provider(self, provider: LLMProvider) -> Optional[str]:
        """Get API key for provider"""
        if provider == LLMProvider.OPENROUTER:
            return self.config.openrouter_api_key
        elif provider == LLMProvider.OPENAI:
            return self.config.openai_api_key
        elif provider == LLMProvider.ANTHROPIC:
            return self.config.anthropic_api_key
        return None
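Likewise, a hypothetical sketch of how the removed RequestManager could be driven. The LLMConfig constructor arguments, the ChatMessage fields (role, content), MessageRole.USER, and the model id are inferred from the code above and its dto imports; treat them as assumptions rather than the exact 1.0.9 API.

import asyncio

from pydantic import BaseModel

from unrealon_llm.src.dto import ChatMessage, LLMConfig, LLMProvider, MessageRole
from unrealon_llm.src.managers.request_manager import RequestManager


class Answer(BaseModel):
    text: str = ""


async def main() -> None:
    # Assumed: LLMConfig accepts the API key by keyword and provides defaults for
    # request_timeout_seconds / max_retries, which RequestManager reads.
    config = LLMConfig(openrouter_api_key="sk-or-...")

    async with RequestManager(config) as requests:
        response = await requests.execute_with_retry(
            provider=LLMProvider.OPENROUTER,
            messages=[ChatMessage(role=MessageRole.USER, content='Reply with JSON: {"text": "hi"}')],
            model="openai/gpt-4o-mini",  # illustrative model id
            max_tokens=64,
            temperature=0.0,
            response_format="json",
            response_model=Answer,       # required; validated via safe_extract_json
        )
        print(response.content)
        print(response.extracted_model)


asyncio.run(main())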
File without changes
unrealon_llm/src/modules/html_processor/__init__.py (removed)
@@ -1,25 +0,0 @@
"""
HTML Processor Module

Universal HTML pattern extraction using smart LLM analysis.
"""

from .base_processor import BaseHTMLProcessor
from .listing_processor import ListingProcessor
from .details_processor import DetailsProcessor
from .processor import UnrealOnLLM
from .models import (
    UniversalExtractionSchema,
    ProcessingInfo,
    ExtractionResult,
)

__all__ = [
    "BaseHTMLProcessor",
    "ListingProcessor",
    "DetailsProcessor",
    "UnrealOnLLM",
    "UniversalExtractionSchema",
    "ProcessingInfo",
    "ExtractionResult",
]
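The removed __init__ above defined the html_processor module's public surface; under the 1.0.9 layout these names could be imported directly from the subpackage, e.g.:

from unrealon_llm.src.modules.html_processor import (
    DetailsProcessor,
    ExtractionResult,
    ListingProcessor,
    UnrealOnLLM,
)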