unrealon 1.0.9-py3-none-any.whl → 1.1.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (302)
  1. unrealon/__init__.py +23 -21
  2. unrealon-1.1.0.dist-info/METADATA +164 -0
  3. unrealon-1.1.0.dist-info/RECORD +82 -0
  4. {unrealon-1.0.9.dist-info → unrealon-1.1.0.dist-info}/WHEEL +1 -1
  5. unrealon-1.1.0.dist-info/entry_points.txt +9 -0
  6. {unrealon-1.0.9.dist-info → unrealon-1.1.0.dist-info/licenses}/LICENSE +1 -1
  7. unrealon_bridge/__init__.py +114 -0
  8. unrealon_bridge/cli.py +316 -0
  9. unrealon_bridge/client/__init__.py +93 -0
  10. unrealon_bridge/client/base.py +78 -0
  11. unrealon_bridge/client/commands.py +89 -0
  12. unrealon_bridge/client/connection.py +90 -0
  13. unrealon_bridge/client/events.py +65 -0
  14. unrealon_bridge/client/health.py +38 -0
  15. unrealon_bridge/client/html_parser.py +146 -0
  16. unrealon_bridge/client/logging.py +139 -0
  17. unrealon_bridge/client/proxy.py +70 -0
  18. unrealon_bridge/client/scheduler.py +450 -0
  19. unrealon_bridge/client/session.py +70 -0
  20. unrealon_bridge/configs/__init__.py +14 -0
  21. unrealon_bridge/configs/bridge_config.py +212 -0
  22. unrealon_bridge/configs/bridge_config.yaml +39 -0
  23. unrealon_bridge/models/__init__.py +138 -0
  24. unrealon_bridge/models/base.py +28 -0
  25. unrealon_bridge/models/command.py +41 -0
  26. unrealon_bridge/models/events.py +40 -0
  27. unrealon_bridge/models/html_parser.py +79 -0
  28. unrealon_bridge/models/logging.py +55 -0
  29. unrealon_bridge/models/parser.py +63 -0
  30. unrealon_bridge/models/proxy.py +41 -0
  31. unrealon_bridge/models/requests.py +95 -0
  32. unrealon_bridge/models/responses.py +88 -0
  33. unrealon_bridge/models/scheduler.py +592 -0
  34. unrealon_bridge/models/session.py +28 -0
  35. unrealon_bridge/server/__init__.py +91 -0
  36. unrealon_bridge/server/base.py +171 -0
  37. unrealon_bridge/server/handlers/__init__.py +23 -0
  38. unrealon_bridge/server/handlers/command.py +110 -0
  39. unrealon_bridge/server/handlers/html_parser.py +139 -0
  40. unrealon_bridge/server/handlers/logging.py +95 -0
  41. unrealon_bridge/server/handlers/parser.py +95 -0
  42. unrealon_bridge/server/handlers/proxy.py +75 -0
  43. unrealon_bridge/server/handlers/scheduler.py +545 -0
  44. unrealon_bridge/server/handlers/session.py +66 -0
  45. unrealon_browser/__init__.py +61 -18
  46. unrealon_browser/{src/cli → cli}/browser_cli.py +6 -13
  47. unrealon_browser/{src/cli → cli}/cookies_cli.py +5 -1
  48. unrealon_browser/{src/core → core}/browser_manager.py +2 -2
  49. unrealon_browser/{src/managers → managers}/captcha.py +1 -1
  50. unrealon_browser/{src/managers → managers}/cookies.py +1 -1
  51. unrealon_browser/managers/logger_bridge.py +231 -0
  52. unrealon_browser/{src/managers → managers}/profile.py +1 -1
  53. unrealon_driver/__init__.py +73 -19
  54. unrealon_driver/browser/__init__.py +8 -0
  55. unrealon_driver/browser/config.py +74 -0
  56. unrealon_driver/browser/manager.py +416 -0
  57. unrealon_driver/exceptions.py +28 -0
  58. unrealon_driver/parser/__init__.py +55 -0
  59. unrealon_driver/parser/cli_manager.py +141 -0
  60. unrealon_driver/parser/daemon_manager.py +227 -0
  61. unrealon_driver/parser/managers/__init__.py +46 -0
  62. unrealon_driver/parser/managers/browser.py +51 -0
  63. unrealon_driver/parser/managers/config.py +281 -0
  64. unrealon_driver/parser/managers/error.py +412 -0
  65. unrealon_driver/parser/managers/html.py +732 -0
  66. unrealon_driver/parser/managers/logging.py +609 -0
  67. unrealon_driver/parser/managers/result.py +321 -0
  68. unrealon_driver/parser/parser_manager.py +628 -0
  69. unrealon/sdk_config.py +0 -88
  70. unrealon-1.0.9.dist-info/METADATA +0 -810
  71. unrealon-1.0.9.dist-info/RECORD +0 -246
  72. unrealon_browser/pyproject.toml +0 -182
  73. unrealon_browser/src/__init__.py +0 -62
  74. unrealon_browser/src/managers/logger_bridge.py +0 -395
  75. unrealon_driver/README.md +0 -204
  76. unrealon_driver/pyproject.toml +0 -187
  77. unrealon_driver/src/__init__.py +0 -90
  78. unrealon_driver/src/cli/__init__.py +0 -10
  79. unrealon_driver/src/cli/main.py +0 -66
  80. unrealon_driver/src/cli/simple.py +0 -510
  81. unrealon_driver/src/config/__init__.py +0 -11
  82. unrealon_driver/src/config/auto_config.py +0 -478
  83. unrealon_driver/src/core/__init__.py +0 -18
  84. unrealon_driver/src/core/exceptions.py +0 -289
  85. unrealon_driver/src/core/parser.py +0 -638
  86. unrealon_driver/src/dto/__init__.py +0 -66
  87. unrealon_driver/src/dto/cli.py +0 -119
  88. unrealon_driver/src/dto/config.py +0 -18
  89. unrealon_driver/src/dto/events.py +0 -237
  90. unrealon_driver/src/dto/execution.py +0 -313
  91. unrealon_driver/src/dto/services.py +0 -311
  92. unrealon_driver/src/execution/__init__.py +0 -23
  93. unrealon_driver/src/execution/daemon_mode.py +0 -317
  94. unrealon_driver/src/execution/interactive_mode.py +0 -88
  95. unrealon_driver/src/execution/modes.py +0 -45
  96. unrealon_driver/src/execution/scheduled_mode.py +0 -209
  97. unrealon_driver/src/execution/test_mode.py +0 -250
  98. unrealon_driver/src/logging/__init__.py +0 -24
  99. unrealon_driver/src/logging/driver_logger.py +0 -512
  100. unrealon_driver/src/services/__init__.py +0 -24
  101. unrealon_driver/src/services/browser_service.py +0 -726
  102. unrealon_driver/src/services/llm/__init__.py +0 -15
  103. unrealon_driver/src/services/llm/browser_llm_service.py +0 -363
  104. unrealon_driver/src/services/llm/llm.py +0 -195
  105. unrealon_driver/src/services/logger_service.py +0 -232
  106. unrealon_driver/src/services/metrics_service.py +0 -185
  107. unrealon_driver/src/services/scheduler_service.py +0 -489
  108. unrealon_driver/src/services/websocket_service.py +0 -362
  109. unrealon_driver/src/utils/__init__.py +0 -16
  110. unrealon_driver/src/utils/service_factory.py +0 -317
  111. unrealon_driver/src/utils/time_formatter.py +0 -338
  112. unrealon_llm/README.md +0 -44
  113. unrealon_llm/__init__.py +0 -26
  114. unrealon_llm/pyproject.toml +0 -154
  115. unrealon_llm/src/__init__.py +0 -228
  116. unrealon_llm/src/cli/__init__.py +0 -0
  117. unrealon_llm/src/core/__init__.py +0 -11
  118. unrealon_llm/src/core/smart_client.py +0 -438
  119. unrealon_llm/src/dto/__init__.py +0 -155
  120. unrealon_llm/src/dto/models/__init__.py +0 -0
  121. unrealon_llm/src/dto/models/config.py +0 -343
  122. unrealon_llm/src/dto/models/core.py +0 -328
  123. unrealon_llm/src/dto/models/enums.py +0 -123
  124. unrealon_llm/src/dto/models/html_analysis.py +0 -345
  125. unrealon_llm/src/dto/models/statistics.py +0 -473
  126. unrealon_llm/src/dto/models/translation.py +0 -383
  127. unrealon_llm/src/dto/models/type_conversion.py +0 -462
  128. unrealon_llm/src/dto/schemas/__init__.py +0 -0
  129. unrealon_llm/src/exceptions.py +0 -392
  130. unrealon_llm/src/llm_config/__init__.py +0 -20
  131. unrealon_llm/src/llm_config/logging_config.py +0 -178
  132. unrealon_llm/src/llm_logging/__init__.py +0 -42
  133. unrealon_llm/src/llm_logging/llm_events.py +0 -107
  134. unrealon_llm/src/llm_logging/llm_logger.py +0 -466
  135. unrealon_llm/src/managers/__init__.py +0 -15
  136. unrealon_llm/src/managers/cache_manager.py +0 -67
  137. unrealon_llm/src/managers/cost_manager.py +0 -107
  138. unrealon_llm/src/managers/request_manager.py +0 -298
  139. unrealon_llm/src/modules/__init__.py +0 -0
  140. unrealon_llm/src/modules/html_processor/__init__.py +0 -25
  141. unrealon_llm/src/modules/html_processor/base_processor.py +0 -415
  142. unrealon_llm/src/modules/html_processor/details_processor.py +0 -85
  143. unrealon_llm/src/modules/html_processor/listing_processor.py +0 -91
  144. unrealon_llm/src/modules/html_processor/models/__init__.py +0 -20
  145. unrealon_llm/src/modules/html_processor/models/processing_models.py +0 -40
  146. unrealon_llm/src/modules/html_processor/models/universal_model.py +0 -56
  147. unrealon_llm/src/modules/html_processor/processor.py +0 -102
  148. unrealon_llm/src/modules/llm/__init__.py +0 -0
  149. unrealon_llm/src/modules/translator/__init__.py +0 -0
  150. unrealon_llm/src/provider.py +0 -116
  151. unrealon_llm/src/utils/__init__.py +0 -95
  152. unrealon_llm/src/utils/common.py +0 -64
  153. unrealon_llm/src/utils/data_extractor.py +0 -188
  154. unrealon_llm/src/utils/html_cleaner.py +0 -767
  155. unrealon_llm/src/utils/language_detector.py +0 -308
  156. unrealon_llm/src/utils/models_cache.py +0 -592
  157. unrealon_llm/src/utils/smart_counter.py +0 -229
  158. unrealon_llm/src/utils/token_counter.py +0 -189
  159. unrealon_sdk/README.md +0 -25
  160. unrealon_sdk/__init__.py +0 -30
  161. unrealon_sdk/pyproject.toml +0 -231
  162. unrealon_sdk/src/__init__.py +0 -150
  163. unrealon_sdk/src/cli/__init__.py +0 -12
  164. unrealon_sdk/src/cli/commands/__init__.py +0 -22
  165. unrealon_sdk/src/cli/commands/benchmark.py +0 -42
  166. unrealon_sdk/src/cli/commands/diagnostics.py +0 -573
  167. unrealon_sdk/src/cli/commands/health.py +0 -46
  168. unrealon_sdk/src/cli/commands/integration.py +0 -498
  169. unrealon_sdk/src/cli/commands/reports.py +0 -43
  170. unrealon_sdk/src/cli/commands/security.py +0 -36
  171. unrealon_sdk/src/cli/commands/server.py +0 -483
  172. unrealon_sdk/src/cli/commands/servers.py +0 -56
  173. unrealon_sdk/src/cli/commands/tests.py +0 -55
  174. unrealon_sdk/src/cli/main.py +0 -126
  175. unrealon_sdk/src/cli/utils/reporter.py +0 -519
  176. unrealon_sdk/src/clients/openapi.yaml +0 -3347
  177. unrealon_sdk/src/clients/python_http/__init__.py +0 -3
  178. unrealon_sdk/src/clients/python_http/api_config.py +0 -228
  179. unrealon_sdk/src/clients/python_http/models/BaseModel.py +0 -12
  180. unrealon_sdk/src/clients/python_http/models/BroadcastDeliveryStats.py +0 -33
  181. unrealon_sdk/src/clients/python_http/models/BroadcastMessage.py +0 -17
  182. unrealon_sdk/src/clients/python_http/models/BroadcastMessageRequest.py +0 -35
  183. unrealon_sdk/src/clients/python_http/models/BroadcastPriority.py +0 -10
  184. unrealon_sdk/src/clients/python_http/models/BroadcastResponse.py +0 -21
  185. unrealon_sdk/src/clients/python_http/models/BroadcastResultResponse.py +0 -33
  186. unrealon_sdk/src/clients/python_http/models/BroadcastTarget.py +0 -11
  187. unrealon_sdk/src/clients/python_http/models/ConnectionStats.py +0 -27
  188. unrealon_sdk/src/clients/python_http/models/ConnectionsResponse.py +0 -21
  189. unrealon_sdk/src/clients/python_http/models/DeveloperMessageResponse.py +0 -23
  190. unrealon_sdk/src/clients/python_http/models/ErrorResponse.py +0 -25
  191. unrealon_sdk/src/clients/python_http/models/HTTPValidationError.py +0 -16
  192. unrealon_sdk/src/clients/python_http/models/HealthResponse.py +0 -23
  193. unrealon_sdk/src/clients/python_http/models/HealthStatus.py +0 -33
  194. unrealon_sdk/src/clients/python_http/models/LogLevel.py +0 -10
  195. unrealon_sdk/src/clients/python_http/models/LoggingRequest.py +0 -27
  196. unrealon_sdk/src/clients/python_http/models/LoggingResponse.py +0 -23
  197. unrealon_sdk/src/clients/python_http/models/MaintenanceMode.py +0 -9
  198. unrealon_sdk/src/clients/python_http/models/MaintenanceModeRequest.py +0 -33
  199. unrealon_sdk/src/clients/python_http/models/MaintenanceStatusResponse.py +0 -39
  200. unrealon_sdk/src/clients/python_http/models/ParserCommandRequest.py +0 -25
  201. unrealon_sdk/src/clients/python_http/models/ParserMessageResponse.py +0 -21
  202. unrealon_sdk/src/clients/python_http/models/ParserRegistrationRequest.py +0 -28
  203. unrealon_sdk/src/clients/python_http/models/ParserRegistrationResponse.py +0 -25
  204. unrealon_sdk/src/clients/python_http/models/ParserType.py +0 -10
  205. unrealon_sdk/src/clients/python_http/models/ProxyBlockRequest.py +0 -19
  206. unrealon_sdk/src/clients/python_http/models/ProxyEndpointResponse.py +0 -20
  207. unrealon_sdk/src/clients/python_http/models/ProxyListResponse.py +0 -19
  208. unrealon_sdk/src/clients/python_http/models/ProxyProvider.py +0 -10
  209. unrealon_sdk/src/clients/python_http/models/ProxyPurchaseRequest.py +0 -25
  210. unrealon_sdk/src/clients/python_http/models/ProxyResponse.py +0 -47
  211. unrealon_sdk/src/clients/python_http/models/ProxyRotationRequest.py +0 -23
  212. unrealon_sdk/src/clients/python_http/models/ProxyStatus.py +0 -10
  213. unrealon_sdk/src/clients/python_http/models/ProxyUsageRequest.py +0 -19
  214. unrealon_sdk/src/clients/python_http/models/ProxyUsageStatsResponse.py +0 -26
  215. unrealon_sdk/src/clients/python_http/models/ServiceRegistrationDto.py +0 -23
  216. unrealon_sdk/src/clients/python_http/models/ServiceStatsResponse.py +0 -31
  217. unrealon_sdk/src/clients/python_http/models/SessionStartRequest.py +0 -23
  218. unrealon_sdk/src/clients/python_http/models/SuccessResponse.py +0 -25
  219. unrealon_sdk/src/clients/python_http/models/SystemNotificationResponse.py +0 -23
  220. unrealon_sdk/src/clients/python_http/models/ValidationError.py +0 -18
  221. unrealon_sdk/src/clients/python_http/models/ValidationErrorResponse.py +0 -21
  222. unrealon_sdk/src/clients/python_http/models/WebSocketMetrics.py +0 -21
  223. unrealon_sdk/src/clients/python_http/models/__init__.py +0 -44
  224. unrealon_sdk/src/clients/python_http/services/None_service.py +0 -35
  225. unrealon_sdk/src/clients/python_http/services/ParserManagement_service.py +0 -190
  226. unrealon_sdk/src/clients/python_http/services/ProxyManagement_service.py +0 -289
  227. unrealon_sdk/src/clients/python_http/services/SocketLogging_service.py +0 -187
  228. unrealon_sdk/src/clients/python_http/services/SystemHealth_service.py +0 -119
  229. unrealon_sdk/src/clients/python_http/services/WebSocketAPI_service.py +0 -198
  230. unrealon_sdk/src/clients/python_http/services/__init__.py +0 -0
  231. unrealon_sdk/src/clients/python_http/services/admin_service.py +0 -125
  232. unrealon_sdk/src/clients/python_http/services/async_None_service.py +0 -35
  233. unrealon_sdk/src/clients/python_http/services/async_ParserManagement_service.py +0 -190
  234. unrealon_sdk/src/clients/python_http/services/async_ProxyManagement_service.py +0 -289
  235. unrealon_sdk/src/clients/python_http/services/async_SocketLogging_service.py +0 -189
  236. unrealon_sdk/src/clients/python_http/services/async_SystemHealth_service.py +0 -123
  237. unrealon_sdk/src/clients/python_http/services/async_WebSocketAPI_service.py +0 -200
  238. unrealon_sdk/src/clients/python_http/services/async_admin_service.py +0 -125
  239. unrealon_sdk/src/clients/python_websocket/__init__.py +0 -28
  240. unrealon_sdk/src/clients/python_websocket/client.py +0 -490
  241. unrealon_sdk/src/clients/python_websocket/events.py +0 -732
  242. unrealon_sdk/src/clients/python_websocket/example.py +0 -136
  243. unrealon_sdk/src/clients/python_websocket/types.py +0 -871
  244. unrealon_sdk/src/core/__init__.py +0 -64
  245. unrealon_sdk/src/core/client.py +0 -556
  246. unrealon_sdk/src/core/config.py +0 -465
  247. unrealon_sdk/src/core/exceptions.py +0 -239
  248. unrealon_sdk/src/core/metadata.py +0 -191
  249. unrealon_sdk/src/core/models.py +0 -142
  250. unrealon_sdk/src/core/types.py +0 -68
  251. unrealon_sdk/src/dto/__init__.py +0 -268
  252. unrealon_sdk/src/dto/authentication.py +0 -108
  253. unrealon_sdk/src/dto/cache.py +0 -208
  254. unrealon_sdk/src/dto/common.py +0 -19
  255. unrealon_sdk/src/dto/concurrency.py +0 -393
  256. unrealon_sdk/src/dto/events.py +0 -108
  257. unrealon_sdk/src/dto/health.py +0 -339
  258. unrealon_sdk/src/dto/load_balancing.py +0 -336
  259. unrealon_sdk/src/dto/logging.py +0 -230
  260. unrealon_sdk/src/dto/performance.py +0 -165
  261. unrealon_sdk/src/dto/rate_limiting.py +0 -295
  262. unrealon_sdk/src/dto/resource_pooling.py +0 -128
  263. unrealon_sdk/src/dto/structured_logging.py +0 -112
  264. unrealon_sdk/src/dto/task_scheduling.py +0 -121
  265. unrealon_sdk/src/dto/websocket.py +0 -55
  266. unrealon_sdk/src/enterprise/__init__.py +0 -59
  267. unrealon_sdk/src/enterprise/authentication.py +0 -401
  268. unrealon_sdk/src/enterprise/cache_manager.py +0 -578
  269. unrealon_sdk/src/enterprise/error_recovery.py +0 -494
  270. unrealon_sdk/src/enterprise/event_system.py +0 -549
  271. unrealon_sdk/src/enterprise/health_monitor.py +0 -747
  272. unrealon_sdk/src/enterprise/load_balancer.py +0 -964
  273. unrealon_sdk/src/enterprise/logging/__init__.py +0 -68
  274. unrealon_sdk/src/enterprise/logging/cleanup.py +0 -156
  275. unrealon_sdk/src/enterprise/logging/development.py +0 -744
  276. unrealon_sdk/src/enterprise/logging/service.py +0 -410
  277. unrealon_sdk/src/enterprise/multithreading_manager.py +0 -853
  278. unrealon_sdk/src/enterprise/performance_monitor.py +0 -539
  279. unrealon_sdk/src/enterprise/proxy_manager.py +0 -696
  280. unrealon_sdk/src/enterprise/rate_limiter.py +0 -652
  281. unrealon_sdk/src/enterprise/resource_pool.py +0 -763
  282. unrealon_sdk/src/enterprise/task_scheduler.py +0 -709
  283. unrealon_sdk/src/internal/__init__.py +0 -10
  284. unrealon_sdk/src/internal/command_router.py +0 -497
  285. unrealon_sdk/src/internal/connection_manager.py +0 -397
  286. unrealon_sdk/src/internal/http_client.py +0 -446
  287. unrealon_sdk/src/internal/websocket_client.py +0 -420
  288. unrealon_sdk/src/provider.py +0 -471
  289. unrealon_sdk/src/utils.py +0 -234
  290. /unrealon_browser/{src/cli → cli}/__init__.py +0 -0
  291. /unrealon_browser/{src/cli → cli}/interactive_mode.py +0 -0
  292. /unrealon_browser/{src/cli → cli}/main.py +0 -0
  293. /unrealon_browser/{src/core → core}/__init__.py +0 -0
  294. /unrealon_browser/{src/dto → dto}/__init__.py +0 -0
  295. /unrealon_browser/{src/dto → dto}/models/config.py +0 -0
  296. /unrealon_browser/{src/dto → dto}/models/core.py +0 -0
  297. /unrealon_browser/{src/dto → dto}/models/dataclasses.py +0 -0
  298. /unrealon_browser/{src/dto → dto}/models/detection.py +0 -0
  299. /unrealon_browser/{src/dto → dto}/models/enums.py +0 -0
  300. /unrealon_browser/{src/dto → dto}/models/statistics.py +0 -0
  301. /unrealon_browser/{src/managers → managers}/__init__.py +0 -0
  302. /unrealon_browser/{src/managers → managers}/stealth.py +0 -0
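The renames above flatten the old src/ layout: modules such as unrealon_browser/src/cli/browser_cli.py now live at unrealon_browser/cli/browser_cli.py, the unrealon_driver/src tree is replaced by unrealon_driver/parser and unrealon_driver/browser, and a new unrealon_bridge package is added. A rough sketch of what that means for import paths is shown below; the module names come from the file list, but whether these are the intended public entry points is an assumption.

# Import-path sketch for the 1.0.9 → 1.1.0 restructuring (assumed from the
# file list above; the packages' intended public API is not shown in this diff).

# 1.0.9 - modules sat under a nested src/ package:
# from unrealon_browser.src.cli import browser_cli
# from unrealon_driver.src.core import parser            # removed in 1.1.0

# 1.1.0 - the src/ level is gone and new subpackages exist:
from unrealon_browser.cli import browser_cli             # moved from src/cli
from unrealon_driver.parser import parser_manager        # new in 1.1.0
from unrealon_bridge.client import scheduler             # new unrealon_bridge package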
unrealon_llm/src/__init__.py
@@ -1,228 +0,0 @@
- """
- 🤖 UnrealOn LLM v1.0 - Large Language Model Integration
-
- Advanced LLM integration tools for AI-powered parsing and data processing.
- Service-based architecture following KISS principles.
-
- Key Features:
- - 🧠 Smart LLM client with multiple providers support
- - 🕷️ HTML processing and pattern extraction
- - 🌐 Translation and language detection
- - 📊 Cost management and token counting
- - 🎯 Type-safe operations with Pydantic v2
- - 📝 Enterprise logging integration
- - ⚡ Caching and optimization strategies
- """
-
- # Core client
- from .core import SmartLLMClient
-
- # All DTOs and models (comprehensive export from dto module)
- from .dto import *
-
- # Managers
- from .managers import (
-     CacheManager,
-     CostManager,
-     RequestManager,
- )
-
- # HTML Processing modules
- from .modules.html_processor import (
-     BaseHTMLProcessor,
-     ListingProcessor,
-     DetailsProcessor,
-     UnrealOnLLM,
-     UniversalExtractionSchema,
-     ProcessingInfo,
-     ExtractionResult,
- )
-
- # Utilities
- from .utils import (
-     # Language Detection
-     LanguageDetector,
-     detect_language,
-     detect_multiple_languages,
-     is_language,
-     # Token Counting
-     SmartTokenCounter,
-     smart_count_tokens,
-     smart_count_messages,
-     # HTML Cleaning
-     SmartHTMLCleaner,
-     clean_html_for_llm,
-     extract_js_data_only,
-     # Data Extraction
-     SmartDataExtractor,
-     safe_extract_json,
-     extract_llm_response_data,
-     create_data_extractor,
-     # Common Utilities
-     generate_correlation_id,
-     generate_request_id,
- )
-
- # Configuration
- from .llm_config import (
-     LoggingConfig,
-     setup_llm_logging,
-     get_logging_config_from_env,
-     configure_llm_logging,
- )
-
- # Logging
- from .llm_logging import (
-     LLMEventType,
-     LLMContext,
-     LLMLogger,
-     initialize_llm_logger,
-     get_llm_logger,
- )
-
- # Exceptions
- from .exceptions import (
-     # Base exceptions
-     LLMError,
-     APIError,
-     # API-specific errors
-     OpenRouterAPIError,
-     OpenAIAPIError,
-     AnthropicAPIError,
-     RateLimitError,
-     APIQuotaExceededError,
-     ModelUnavailableError,
-     NetworkError,
-     AuthenticationError,
-     # Cost and token errors
-     CostLimitExceededError,
-     TokenLimitExceededError,
-     # HTML processing errors
-     HTMLParsingError,
-     HTMLTooLargeError,
-     PatternDetectionError,
-     SelectorGenerationError,
-     SelectorValidationError,
-     # Translation errors
-     TranslationError,
-     LanguageDetectionError,
-     TranslationQualityError,
-     # Schema errors
-     SchemaGenerationError,
-     TypeInferenceError,
-     CodeGenerationError,
-     # Cache errors
-     CacheError,
-     CacheCorruptionError,
-     # Configuration errors
-     ConfigurationError,
-     MissingAPIKeyError,
-     InvalidConfigurationError,
-     # Processing errors
-     ProcessingPipelineError,
-     ResponseParsingError,
-     RetryExhaustedError,
-     # Helper functions
-     raise_if_cost_exceeded,
-     raise_if_tokens_exceeded,
-     raise_if_html_too_large,
-     wrap_api_error,
-     ErrorCodes,
- )
-
- # Description
- __description__ = "Large Language Model integration tools for UnrealOn SDK"
-
- # Main exports
- __all__ = [
-     # Core client
-     "SmartLLMClient",
-
-     # Managers
-     "CacheManager",
-     "CostManager",
-     "RequestManager",
-
-     # HTML Processing
-     "BaseHTMLProcessor",
-     "ListingProcessor",
-     "DetailsProcessor",
-     "UnrealOnLLM",
-     "UniversalExtractionSchema",
-     "ProcessingInfo",
-     "ExtractionResult",
-
-     # Utilities
-     "LanguageDetector",
-     "detect_language",
-     "detect_multiple_languages",
-     "is_language",
-     "SmartTokenCounter",
-     "smart_count_tokens",
-     "smart_count_messages",
-     "SmartHTMLCleaner",
-     "clean_html_for_llm",
-     "extract_js_data_only",
-     "SmartDataExtractor",
-     "safe_extract_json",
-     "extract_llm_response_data",
-     "create_data_extractor",
-     "generate_correlation_id",
-     "generate_request_id",
-
-     # Configuration
-     "LoggingConfig",
-     "setup_llm_logging",
-     "get_logging_config_from_env",
-     "configure_llm_logging",
-
-     # Logging
-     "LLMEventType",
-     "LLMContext",
-     "LLMLogger",
-     "initialize_llm_logger",
-     "get_llm_logger",
-
-     # Exceptions
-     "LLMError",
-     "APIError",
-     "OpenRouterAPIError",
-     "OpenAIAPIError",
-     "AnthropicAPIError",
-     "RateLimitError",
-     "APIQuotaExceededError",
-     "ModelUnavailableError",
-     "NetworkError",
-     "AuthenticationError",
-     "CostLimitExceededError",
-     "TokenLimitExceededError",
-     "HTMLParsingError",
-     "HTMLTooLargeError",
-     "PatternDetectionError",
-     "SelectorGenerationError",
-     "SelectorValidationError",
-     "TranslationError",
-     "LanguageDetectionError",
-     "TranslationQualityError",
-     "SchemaGenerationError",
-     "TypeInferenceError",
-     "CodeGenerationError",
-     "CacheError",
-     "CacheCorruptionError",
-     "ConfigurationError",
-     "MissingAPIKeyError",
-     "InvalidConfigurationError",
-     "ProcessingPipelineError",
-     "ResponseParsingError",
-     "RetryExhaustedError",
-     "raise_if_cost_exceeded",
-     "raise_if_tokens_exceeded",
-     "raise_if_html_too_large",
-     "wrap_api_error",
-     "ErrorCodes",
- ]
-
- # Note: All DTO models are also exported via 'from .dto import *'
- # This includes all enums, configuration models, core models,
- # HTML analysis models, translation models, type conversion models,
- # and statistics models as defined in dto/__init__.py
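The hunk above is the removed unrealon_llm/src/__init__.py, which re-exported the package's entire public surface. As an illustration based only on the exports listed above, code written against 1.0.9 could import these names directly from unrealon_llm.src; any such import fails on 1.1.0, which drops the unrealon_llm package entirely.

# Illustration only: how 1.0.9-era code consumed the exports shown above.
# These imports break after upgrading to 1.1.0, which removes unrealon_llm.
from unrealon_llm.src import (
    SmartLLMClient,            # core client
    CacheManager,              # managers
    CostManager,
    RequestManager,
    LLMError,                  # base of the exception hierarchy
    generate_correlation_id,   # common utility
)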
unrealon_llm/src/core/__init__.py
@@ -1,11 +0,0 @@
- """
- Core LLM Client Components
-
- Simplified and modular LLM client architecture.
- """
-
- from .smart_client import SmartLLMClient
-
- __all__ = [
-     "SmartLLMClient",
- ]
unrealon_llm/src/core/smart_client.py
@@ -1,438 +0,0 @@
- """
- Smart LLM Client
-
- Simplified and modular LLM client using manager components.
- """
-
- import json
- import logging
- from datetime import datetime
- from decimal import Decimal
- from typing import Any, Dict, List, Optional, Type
-
- from pydantic import BaseModel
-
- from unrealon_llm.src.dto import (
-     ChatMessage,
-     LLMConfig,
-     LLMResponse,
-     MessageRole,
- )
- from unrealon_llm.src.exceptions import (
-     APIError,
-     CostLimitExceededError,
-     TokenLimitExceededError,
-     ValidationError,
- )
- from unrealon_llm.src.llm_logging import get_llm_logger, LLMEventType
- from unrealon_llm.src.managers import CacheManager, CostManager, RequestManager
- from unrealon_llm.src.utils import generate_correlation_id, extract_llm_response_data
- from unrealon_llm.src.utils.models_cache import get_models_cache
- from unrealon_llm.src.utils.smart_counter import SmartTokenCounter
- from unrealon_sdk.src.dto.logging import SDKContext, SDKEventType
-
- logger = logging.getLogger(__name__)
-
-
- class SmartLLMClient:
-     """
-     Simplified LLM client with modular manager components
-     """
-
-     def __init__(self, config: LLMConfig):
-         """
-         Initialize smart LLM client
-
-         Args:
-             config: LLM configuration
-         """
-         self.config = config
-
-         # Get LLM logger
-         self.llm_logger = get_llm_logger()
-
-         # Initialize models cache
-         self.models_cache = get_models_cache(
-             openrouter_api_key=config.openrouter_api_key,
-             openai_api_key=config.openai_api_key,
-             anthropic_api_key=config.anthropic_api_key
-         )
-
-         # Initialize managers
-         self.cost_manager = CostManager(config.daily_cost_limit_usd)
-
-         cache_ttl = config.cache_ttl_hours * 3600
-         cache_size = config.max_cache_size_mb * 10 # Approximate conversion
-         self.cache_manager = CacheManager(max_size=cache_size, ttl_seconds=cache_ttl)
-         self.cache_manager.set_enabled(config.enable_global_cache)
-
-         self.request_manager = RequestManager(config)
-
-         # Token counter
-         self.smart_counter = SmartTokenCounter(self.models_cache)
-
-         # Log initialization
-         if self.llm_logger:
-             self.llm_logger._dev_logger.log_info(
-                 LLMEventType.LLM_CLIENT_INITIALIZED,
-                 f"Smart LLM Client initialized with {config.default_model}",
-                 context=SDKContext(
-                     component_name="SmartLLMClient",
-                     layer_name="UnrealOn_LLM"
-                 ),
-                 details={"model": config.default_model}
-             )
-
-     async def __aenter__(self):
-         """Async context manager entry"""
-         await self.request_manager._ensure_session()
-         return self
-
-     async def __aexit__(self, exc_type, exc_val, exc_tb):
-         """Async context manager exit"""
-         await self.request_manager.close()
-
-     async def close(self):
-         """Close client resources"""
-         await self.request_manager.close()
-
-     async def chat_completion(
-         self,
-         messages: List[ChatMessage],
-         model: Optional[str] = None,
-         max_tokens: Optional[int] = None,
-         temperature: float = 0.1,
-         response_format: Optional[str] = None,
-         **kwargs
-     ) -> LLMResponse:
-         """
-         Execute chat completion with managers
-
-         Args:
-             messages: Chat messages
-             model: Model to use (defaults to config default)
-             max_tokens: Maximum tokens to generate
-             temperature: Sampling temperature
-             response_format: Response format ("json" for JSON)
-             **kwargs: Additional provider-specific parameters
-
-         Returns:
-             LLM response with metadata
-
-         Raises:
-             Various LLM exceptions based on failure type
-         """
-         # Use default model if not specified
-         if model is None:
-             model = self.config.default_model
-
-         # Validate inputs
-         if not messages:
-             raise ValidationError("Messages list cannot be empty")
-
-         # Smart count and cost estimation
-         estimated_output_tokens = max_tokens or 500
-         input_tokens, estimated_cost = await self.smart_counter.count_messages_and_estimate(
-             messages, model, estimated_output_tokens
-         )
-
-         # Check token limits
-         context_limit = await self.smart_counter.get_model_context_limit(model)
-         if context_limit and input_tokens > context_limit:
-             raise TokenLimitExceededError(input_tokens, context_limit)
-
-         # Check cost limits
-         if not self.cost_manager.can_afford(estimated_cost):
-             raise CostLimitExceededError(
-                 self.cost_manager.total_cost + Decimal(str(estimated_cost)),
-                 self.cost_manager.daily_limit
-             )
-
-         # Check cache
-         cache_key = self.cache_manager.generate_cache_key(messages, model, temperature, response_format)
-         cached_response = self.cache_manager.get(cache_key)
-
-         if cached_response:
-             # Log cache hit
-             if self.llm_logger:
-                 provider = self.request_manager.get_provider_for_model(model)
-                 self.llm_logger.log_llm_request_completed(
-                     provider=provider.value,
-                     model=model,
-                     prompt_tokens=input_tokens,
-                     completion_tokens=cached_response.token_usage.completion_tokens if cached_response.token_usage else 0,
-                     cost_usd=0.0, # Cached responses are free
-                     duration_ms=0.0, # Cached responses are instant
-                     cached=True,
-                     details={
-                         "cache_key": cache_key[:50],
-                         "message_count": len(messages),
-                     }
-                 )
-                 self.llm_logger.log_cache_operation("hit", cache_key)
-
-             logger.debug("Returning cached response")
-             return cached_response
-
-         # Generate request ID for tracking
-         request_id = generate_correlation_id()
-
-         # Log request start
-         if self.llm_logger:
-             provider = self.request_manager.get_provider_for_model(model)
-             self.llm_logger.log_llm_request_start(
-                 provider=provider.value,
-                 model=model,
-                 prompt_tokens=input_tokens,
-                 request_id=request_id,
-                 details={
-                     "message_count": len(messages),
-                     "temperature": temperature,
-                     "max_tokens": max_tokens,
-                     "estimated_cost": estimated_cost,
-                 }
-             )
-
-         # Execute request
-         start_time = datetime.now()
-
-         try:
-             # Determine provider from model
-             provider = self.request_manager.get_provider_for_model(model)
-
-             # Execute request
-             response = await self.request_manager.execute_with_retry(
-                 provider, messages, model, max_tokens, temperature, response_format, **kwargs
-             )
-
-             # Calculate actual cost and duration
-             actual_cost = 0.0
-             # Only fetch models if not already cached
-             if not self.models_cache.models:
-                 await self.models_cache.fetch_all_models()
-             model_info = self.models_cache.get_model(model)
-             if model_info and response.token_usage:
-                 actual_cost = model_info.estimate_cost(
-                     response.token_usage.prompt_tokens,
-                     response.token_usage.completion_tokens
-                 )
-                 response.cost_usd = actual_cost
-
-             duration_ms = (datetime.now() - start_time).total_seconds() * 1000
-
-             # Track cost
-             self.cost_manager.track_request(actual_cost, model, "chat_completion", self.llm_logger)
-
-             # Log successful completion
-             if self.llm_logger:
-                 self.llm_logger.log_llm_request_completed(
-                     provider=provider.value,
-                     model=model,
-                     prompt_tokens=response.token_usage.prompt_tokens if response.token_usage else input_tokens,
-                     completion_tokens=response.token_usage.completion_tokens if response.token_usage else 0,
-                     cost_usd=actual_cost,
-                     duration_ms=duration_ms,
-                     request_id=request_id,
-                     cached=False,
-                     details={
-                         "temperature": temperature,
-                         "max_tokens": max_tokens,
-                         "response_length": len(response.content) if response.content else 0,
-                     }
-                 )
-
-             # Cache response
-             self.cache_manager.store(cache_key, response)
-             if self.llm_logger:
-                 self.llm_logger.log_cache_operation("store", cache_key[:50])
-
-             return response
-
-         except Exception as e:
-             # Log failure
-             if self.llm_logger:
-                 provider = self.request_manager.get_provider_for_model(model)
-                 self.llm_logger.log_llm_request_failed(
-                     provider=provider.value,
-                     model=model,
-                     error_message=str(e),
-                     request_id=request_id,
-                     exception=e,
-                     details={
-                         "temperature": temperature,
-                         "max_tokens": max_tokens,
-                         "duration_ms": (datetime.now() - start_time).total_seconds() * 1000,
-                     }
-                 )
-
-             # Re-raise the error
-             raise e
-
-     async def simple_chat(
-         self,
-         message: str,
-         system_message: Optional[str] = None,
-         model: Optional[str] = None,
-         **kwargs
-     ) -> str:
-         """
-         Simple chat interface that returns just the response text
-
-         Args:
-             message: User message
-             system_message: Optional system message
-             model: Model to use
-             **kwargs: Additional parameters
-
-         Returns:
-             Response text
-         """
-         messages = []
-
-         if system_message:
-             messages.append(ChatMessage(
-                 role=MessageRole.SYSTEM,
-                 content=system_message
-             ))
-
-         messages.append(ChatMessage(
-             role=MessageRole.USER,
-             content=message
-         ))
-
-         response = await self.chat_completion(messages, model=model, **kwargs)
-         return response.content
-
-     async def json_chat(
-         self,
-         message: str,
-         system_message: Optional[str] = None,
-         model: Optional[str] = None,
-         expected_schema: Optional[type] = None,
-         required_fields: Optional[List[str]] = None,
-         **kwargs
-     ) -> Dict[str, Any]:
-         """
-         Chat interface that returns safely parsed and validated JSON response
-
-         Args:
-             message: User message
-             system_message: Optional system message
-             model: Model to use
-             expected_schema: Pydantic model class for validation
-             required_fields: List of required fields in response
-             **kwargs: Additional parameters
-
-         Returns:
-             Safely parsed and validated JSON response
-
-         Raises:
-             ResponseParsingError: If response is not valid JSON
-             ValidationError: If schema validation fails
-         """
-         if system_message:
-             system_message += "\n\nIMPORTANT: Respond only with valid JSON. Follow the exact structure requested."
-         else:
-             system_message = "Respond only with valid JSON. Follow the exact structure requested."
-
-         response_text = await self.simple_chat(
-             message,
-             system_message=system_message,
-             model=model,
-             response_format="json",
-             **kwargs
-         )
-
-         # Use safe JSON parsing
-         validated_response = extract_llm_response_data(
-             response_text,
-             expected_schema=expected_schema,
-             required_fields=required_fields
-         )
-
-         # Log successful JSON validation
-         if self.llm_logger:
-             self.llm_logger._dev_logger.log_info(
-                 SDKEventType.COMMAND_COMPLETED,
-                 f"JSON response validated successfully",
-                 context=SDKContext(
-                     metadata={
-                         "schema_type": expected_schema.__name__ if expected_schema else "untyped",
-                         "fields_count": len(validated_response) if isinstance(validated_response, dict) else 0,
-                     }
-                 ),
-             )
-
-         return validated_response
-
-     async def validated_json_chat(
-         self,
-         message: str,
-         schema_class: Type[BaseModel],
-         system_message: Optional[str] = None,
-         model: Optional[str] = None,
-         **kwargs
-     ) -> BaseModel:
-         """
-         Type-safe JSON chat with full Pydantic validation
-
-         Args:
-             message: User message
-             schema_class: Pydantic model class for strict validation
-             system_message: Optional system message
-             model: Model to use
-             **kwargs: Additional parameters
-
-         Returns:
-             Validated Pydantic model instance
-
-         Raises:
-             ValidationError: If response doesn't match schema
-             ResponseParsingError: If response is not valid JSON
-         """
-         if system_message:
-             system_message += f"\n\nIMPORTANT: Respond only with valid JSON matching this exact schema: {schema_class.__name__}"
-         else:
-             system_message = f"Respond only with valid JSON matching this exact schema: {schema_class.__name__}"
-
-         # Add schema documentation to prompt
-         if hasattr(schema_class, 'model_json_schema'):
-             schema_doc = schema_class.model_json_schema()
-             system_message += f"\n\nSchema:\n{json.dumps(schema_doc, indent=2)}"
-
-         response_text = await self.simple_chat(
-             message,
-             system_message=system_message,
-             model=model,
-             response_format="json",
-             **kwargs
-         )
-
-         # Use safe parsing with strict Pydantic validation
-         validated_instance = extract_llm_response_data(
-             response_text,
-             expected_schema=schema_class
-         )
-
-         # Log successful validation
-         if self.llm_logger:
-             self.llm_logger._dev_logger.log_info(
-                 SDKEventType.COMMAND_COMPLETED,
-                 f"Pydantic validation successful: {schema_class.__name__}",
-                 context=SDKContext(
-                     metadata={
-                         "schema_type": schema_class.__name__,
-                         "fields_count": len(validated_instance.model_fields) if hasattr(validated_instance, 'model_fields') else 0,
-                     }
-                 ),
-             )
-
-         return validated_instance
-
-     def get_cost_stats(self) -> Dict[str, Any]:
-         """Get cost tracking statistics"""
-         return self.cost_manager.get_stats()
-
-     def get_cache_stats(self) -> Dict[str, Any]:
-         """Get cache statistics"""
-         return self.cache_manager.get_stats()
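For context, here is a minimal usage sketch of the removed SmartLLMClient, assembled only from the methods visible in the hunk above (the async context manager, simple_chat, json_chat, and get_cost_stats). The LLMConfig values are placeholders rather than documented defaults, and other required configuration fields may exist that are omitted here.

# Usage sketch for the removed SmartLLMClient (1.0.9); config values are placeholders.
import asyncio

from unrealon_llm.src.core import SmartLLMClient
from unrealon_llm.src.dto import LLMConfig


async def main() -> None:
    config = LLMConfig(default_model="openai/gpt-4o-mini")  # placeholder model id
    # __aenter__/__aexit__ open and close the underlying RequestManager session
    async with SmartLLMClient(config) as client:
        text = await client.simple_chat(
            "Summarize this page in one sentence.",
            system_message="You are a concise assistant.",
        )
        data = await client.json_chat(
            'Return a JSON object with a "title" field for the page.',
            required_fields=["title"],
        )
        print(text, data, client.get_cost_stats())


asyncio.run(main())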