webscout 8.3.7__py3-none-any.whl → 2025.10.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (273)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -60
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +16 -1
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -316
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -314
  64. webscout/Provider/Apriel.py +306 -0
  65. webscout/Provider/ChatGPTClone.py +236 -236
  66. webscout/Provider/ChatSandbox.py +343 -343
  67. webscout/Provider/Cloudflare.py +324 -324
  68. webscout/Provider/Cohere.py +208 -208
  69. webscout/Provider/Deepinfra.py +370 -366
  70. webscout/Provider/ExaAI.py +260 -260
  71. webscout/Provider/ExaChat.py +308 -308
  72. webscout/Provider/Flowith.py +221 -221
  73. webscout/Provider/GMI.py +293 -0
  74. webscout/Provider/Gemini.py +164 -164
  75. webscout/Provider/GeminiProxy.py +167 -167
  76. webscout/Provider/GithubChat.py +371 -372
  77. webscout/Provider/Groq.py +800 -800
  78. webscout/Provider/HeckAI.py +383 -383
  79. webscout/Provider/Jadve.py +282 -282
  80. webscout/Provider/K2Think.py +307 -307
  81. webscout/Provider/Koboldai.py +205 -205
  82. webscout/Provider/LambdaChat.py +423 -423
  83. webscout/Provider/Nemotron.py +244 -244
  84. webscout/Provider/Netwrck.py +248 -248
  85. webscout/Provider/OLLAMA.py +395 -395
  86. webscout/Provider/OPENAI/Cloudflare.py +393 -393
  87. webscout/Provider/OPENAI/FalconH1.py +451 -451
  88. webscout/Provider/OPENAI/FreeGemini.py +296 -296
  89. webscout/Provider/OPENAI/K2Think.py +431 -431
  90. webscout/Provider/OPENAI/NEMOTRON.py +240 -240
  91. webscout/Provider/OPENAI/PI.py +427 -427
  92. webscout/Provider/OPENAI/README.md +959 -959
  93. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  94. webscout/Provider/OPENAI/TwoAI.py +465 -465
  95. webscout/Provider/OPENAI/__init__.py +33 -18
  96. webscout/Provider/OPENAI/base.py +248 -248
  97. webscout/Provider/OPENAI/chatglm.py +528 -0
  98. webscout/Provider/OPENAI/chatgpt.py +592 -592
  99. webscout/Provider/OPENAI/chatgptclone.py +521 -521
  100. webscout/Provider/OPENAI/chatsandbox.py +202 -202
  101. webscout/Provider/OPENAI/deepinfra.py +318 -314
  102. webscout/Provider/OPENAI/e2b.py +1665 -1665
  103. webscout/Provider/OPENAI/exaai.py +420 -420
  104. webscout/Provider/OPENAI/exachat.py +452 -452
  105. webscout/Provider/OPENAI/friendli.py +232 -232
  106. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  107. webscout/Provider/OPENAI/groq.py +364 -364
  108. webscout/Provider/OPENAI/heckai.py +314 -314
  109. webscout/Provider/OPENAI/llmchatco.py +337 -337
  110. webscout/Provider/OPENAI/netwrck.py +355 -355
  111. webscout/Provider/OPENAI/oivscode.py +290 -290
  112. webscout/Provider/OPENAI/opkfc.py +518 -518
  113. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  114. webscout/Provider/OPENAI/scirachat.py +535 -535
  115. webscout/Provider/OPENAI/sonus.py +308 -308
  116. webscout/Provider/OPENAI/standardinput.py +442 -442
  117. webscout/Provider/OPENAI/textpollinations.py +340 -340
  118. webscout/Provider/OPENAI/toolbaz.py +419 -416
  119. webscout/Provider/OPENAI/typefully.py +362 -362
  120. webscout/Provider/OPENAI/utils.py +295 -295
  121. webscout/Provider/OPENAI/venice.py +436 -436
  122. webscout/Provider/OPENAI/wisecat.py +387 -387
  123. webscout/Provider/OPENAI/writecream.py +166 -166
  124. webscout/Provider/OPENAI/x0gpt.py +378 -378
  125. webscout/Provider/OPENAI/yep.py +389 -389
  126. webscout/Provider/OpenGPT.py +230 -230
  127. webscout/Provider/Openai.py +243 -243
  128. webscout/Provider/PI.py +405 -405
  129. webscout/Provider/Perplexitylabs.py +430 -430
  130. webscout/Provider/QwenLM.py +272 -272
  131. webscout/Provider/STT/__init__.py +16 -1
  132. webscout/Provider/Sambanova.py +257 -257
  133. webscout/Provider/StandardInput.py +309 -309
  134. webscout/Provider/TTI/README.md +82 -82
  135. webscout/Provider/TTI/__init__.py +33 -18
  136. webscout/Provider/TTI/aiarta.py +413 -413
  137. webscout/Provider/TTI/base.py +136 -136
  138. webscout/Provider/TTI/bing.py +243 -243
  139. webscout/Provider/TTI/gpt1image.py +149 -149
  140. webscout/Provider/TTI/imagen.py +196 -196
  141. webscout/Provider/TTI/infip.py +211 -211
  142. webscout/Provider/TTI/magicstudio.py +232 -232
  143. webscout/Provider/TTI/monochat.py +219 -219
  144. webscout/Provider/TTI/piclumen.py +214 -214
  145. webscout/Provider/TTI/pixelmuse.py +232 -232
  146. webscout/Provider/TTI/pollinations.py +232 -232
  147. webscout/Provider/TTI/together.py +288 -288
  148. webscout/Provider/TTI/utils.py +12 -12
  149. webscout/Provider/TTI/venice.py +367 -367
  150. webscout/Provider/TTS/README.md +192 -192
  151. webscout/Provider/TTS/__init__.py +33 -18
  152. webscout/Provider/TTS/parler.py +110 -110
  153. webscout/Provider/TTS/streamElements.py +333 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TeachAnything.py +237 -237
  156. webscout/Provider/TextPollinationsAI.py +310 -310
  157. webscout/Provider/TogetherAI.py +356 -356
  158. webscout/Provider/TwoAI.py +312 -312
  159. webscout/Provider/TypliAI.py +311 -311
  160. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  161. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  162. webscout/Provider/UNFINISHED/GizAI.py +294 -294
  163. webscout/Provider/UNFINISHED/Marcus.py +198 -198
  164. webscout/Provider/UNFINISHED/Qodo.py +477 -477
  165. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  166. webscout/Provider/UNFINISHED/XenAI.py +324 -324
  167. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  168. webscout/Provider/UNFINISHED/liner.py +334 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  170. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  171. webscout/Provider/UNFINISHED/samurai.py +223 -223
  172. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  173. webscout/Provider/Venice.py +250 -250
  174. webscout/Provider/VercelAI.py +256 -256
  175. webscout/Provider/WiseCat.py +231 -231
  176. webscout/Provider/WrDoChat.py +366 -366
  177. webscout/Provider/__init__.py +33 -18
  178. webscout/Provider/ai4chat.py +174 -174
  179. webscout/Provider/akashgpt.py +331 -331
  180. webscout/Provider/cerebras.py +446 -446
  181. webscout/Provider/chatglm.py +394 -301
  182. webscout/Provider/cleeai.py +211 -211
  183. webscout/Provider/elmo.py +282 -282
  184. webscout/Provider/geminiapi.py +208 -208
  185. webscout/Provider/granite.py +261 -261
  186. webscout/Provider/hermes.py +263 -263
  187. webscout/Provider/julius.py +223 -223
  188. webscout/Provider/learnfastai.py +309 -309
  189. webscout/Provider/llama3mitril.py +214 -214
  190. webscout/Provider/llmchat.py +243 -243
  191. webscout/Provider/llmchatco.py +290 -290
  192. webscout/Provider/meta.py +801 -801
  193. webscout/Provider/oivscode.py +309 -309
  194. webscout/Provider/scira_chat.py +383 -383
  195. webscout/Provider/searchchat.py +292 -292
  196. webscout/Provider/sonus.py +258 -258
  197. webscout/Provider/toolbaz.py +370 -367
  198. webscout/Provider/turboseek.py +273 -273
  199. webscout/Provider/typefully.py +207 -207
  200. webscout/Provider/yep.py +372 -372
  201. webscout/__init__.py +30 -31
  202. webscout/__main__.py +5 -5
  203. webscout/auth/api_key_manager.py +189 -189
  204. webscout/auth/config.py +175 -175
  205. webscout/auth/models.py +185 -185
  206. webscout/auth/routes.py +664 -664
  207. webscout/auth/simple_logger.py +236 -236
  208. webscout/cli.py +523 -523
  209. webscout/conversation.py +438 -438
  210. webscout/exceptions.py +361 -361
  211. webscout/litagent/Readme.md +298 -298
  212. webscout/litagent/__init__.py +28 -28
  213. webscout/litagent/agent.py +581 -581
  214. webscout/litagent/constants.py +59 -59
  215. webscout/litprinter/__init__.py +58 -58
  216. webscout/models.py +181 -181
  217. webscout/optimizers.py +419 -419
  218. webscout/prompt_manager.py +288 -288
  219. webscout/sanitize.py +1078 -1078
  220. webscout/scout/README.md +401 -401
  221. webscout/scout/__init__.py +8 -8
  222. webscout/scout/core/__init__.py +6 -6
  223. webscout/scout/core/crawler.py +297 -297
  224. webscout/scout/core/scout.py +706 -706
  225. webscout/scout/core/search_result.py +95 -95
  226. webscout/scout/core/text_analyzer.py +62 -62
  227. webscout/scout/core/text_utils.py +277 -277
  228. webscout/scout/core/web_analyzer.py +51 -51
  229. webscout/scout/element.py +599 -599
  230. webscout/scout/parsers/__init__.py +69 -69
  231. webscout/scout/parsers/html5lib_parser.py +172 -172
  232. webscout/scout/parsers/html_parser.py +236 -236
  233. webscout/scout/parsers/lxml_parser.py +178 -178
  234. webscout/scout/utils.py +37 -37
  235. webscout/swiftcli/Readme.md +323 -323
  236. webscout/swiftcli/__init__.py +95 -95
  237. webscout/swiftcli/core/__init__.py +7 -7
  238. webscout/swiftcli/core/cli.py +308 -308
  239. webscout/swiftcli/core/context.py +104 -104
  240. webscout/swiftcli/core/group.py +241 -241
  241. webscout/swiftcli/decorators/__init__.py +28 -28
  242. webscout/swiftcli/decorators/command.py +221 -221
  243. webscout/swiftcli/decorators/options.py +220 -220
  244. webscout/swiftcli/decorators/output.py +302 -302
  245. webscout/swiftcli/exceptions.py +21 -21
  246. webscout/swiftcli/plugins/__init__.py +9 -9
  247. webscout/swiftcli/plugins/base.py +135 -135
  248. webscout/swiftcli/plugins/manager.py +269 -269
  249. webscout/swiftcli/utils/__init__.py +59 -59
  250. webscout/swiftcli/utils/formatting.py +252 -252
  251. webscout/swiftcli/utils/parsing.py +267 -267
  252. webscout/update_checker.py +117 -117
  253. webscout/version.py +1 -1
  254. webscout/webscout_search.py +1183 -1183
  255. webscout/webscout_search_async.py +649 -649
  256. webscout/yep_search.py +346 -346
  257. webscout/zeroart/README.md +89 -89
  258. webscout/zeroart/__init__.py +134 -134
  259. webscout/zeroart/base.py +66 -66
  260. webscout/zeroart/effects.py +100 -100
  261. webscout/zeroart/fonts.py +1238 -1238
  262. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
  263. webscout-2025.10.11.dist-info/RECORD +300 -0
  264. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  265. webscout/Provider/OPENAI/Qwen3.py +0 -303
  266. webscout/Provider/OPENAI/qodo.py +0 -630
  267. webscout/Provider/OPENAI/xenai.py +0 -514
  268. webscout/tempid.py +0 -134
  269. webscout-8.3.7.dist-info/RECORD +0 -301
  270. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  271. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  272. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  273. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/llmchatco.py
@@ -1,337 +1,337 @@
(The hunk marks all 337 lines as removed and re-added, but the removed and added text is identical in this rendering; the file content appears once below.)
import time
import uuid
import requests
import json
from typing import List, Dict, Optional, Union, Generator, Any

# Import base classes and utility structures
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from .utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage, get_last_user_message, get_system_prompt, format_prompt  # Import format_prompt
)

# Attempt to import LitAgent, fallback if not available
try:
    from webscout.litagent import LitAgent
except ImportError:
    # Define a dummy LitAgent if webscout is not installed or accessible
    class LitAgent:
        def random(self) -> str:
            return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"

# --- LLMChatCo Client ---

class Completions(BaseCompletions):
    def __init__(self, client: 'LLMChatCo'):
        self._client = client

    def create(
        self,
        *,
        model: str,  # Model is now mandatory per request
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2048,  # Note: LLMChatCo doesn't seem to use max_tokens directly in payload
        stream: bool = False,
        temperature: Optional[float] = None,  # Note: LLMChatCo doesn't seem to use temperature directly in payload
        top_p: Optional[float] = None,  # Note: LLMChatCo doesn't seem to use top_p directly in payload
        web_search: bool = False,  # LLMChatCo-specific parameter
        system_prompt: Optional[str] = "You are a helpful assistant.",  # Default system prompt if not provided
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create.
        """
        if model not in self._client.AVAILABLE_MODELS:
            # Raise an error, as the model is mandatory and must be valid for this provider
            raise ValueError(f"Model '{model}' not supported by LLMChatCo. Available: {self._client.AVAILABLE_MODELS}")
        actual_model = model

        # Determine the effective system prompt
        effective_system_prompt = system_prompt  # Use the provided system_prompt or its default
        message_list_system_prompt = get_system_prompt(messages)
        # If a system prompt is also present in messages, the explicit one takes precedence;
        # we use the effective_system_prompt determined above.

        # Prepare the final message list, ensuring only one system message at the start
        final_messages = []
        if effective_system_prompt:
            final_messages.append({"role": "system", "content": effective_system_prompt})
        final_messages.extend([msg for msg in messages if msg.get("role") != "system"])

        # Extract the last user prompt using the utility function for the separate 'prompt' field
        last_user_prompt = get_last_user_message(final_messages)

        # Note: format_prompt is not directly used here, as the API requires the structured
        # 'messages' list and a separate 'prompt' field rather than a single formatted string.

        # Generate a unique ID for this message
        thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]

        payload = {
            "mode": actual_model,
            "prompt": last_user_prompt,  # LLMChatCo seems to require the last prompt separately
            "threadId": self._client.thread_id,
            "messages": final_messages,  # Use the reconstructed final_messages list
            "mcpConfig": {},  # Keep structure as observed
            "threadItemId": thread_item_id,
            "parentThreadItemId": "",  # Assuming no parent for simplicity
            "webSearch": web_search,
            "showSuggestions": True  # Keep structure as observed
        }
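
        # For illustration (values assumed, not taken from API docs): with
        # messages=[{"role": "user", "content": "Hi"}] and the default
        # system_prompt, this payload serializes roughly as:
        #   {"mode": "gemini-flash-2.0", "prompt": "Hi",
        #    "threadId": "<uuid4 of this client instance>",
        #    "messages": [{"role": "system", "content": "You are a helpful assistant."},
        #                 {"role": "user", "content": "Hi"}],
        #    "mcpConfig": {}, "threadItemId": "<first 20 hex chars of a uuid4>",
        #    "parentThreadItemId": "", "webSearch": false, "showSuggestions": true}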

        # Add any extra kwargs to the payload if needed, though LLMChatCo seems limited
        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, actual_model, payload, timeout, proxies)
        else:
            return self._create_non_stream(request_id, created_time, actual_model, payload, timeout, proxies)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )

            if not response.ok:
                raise IOError(
                    f"LLMChatCo API Error: {response.status_code} {response.reason} - {response.text}"
                )

            full_response_text = ""
            current_event = None
            buffer = ""

            for chunk_bytes in response.iter_content(chunk_size=None, decode_unicode=False):
                if not chunk_bytes:
                    continue

                buffer += chunk_bytes.decode('utf-8', errors='replace')

                while '\n' in buffer:
                    line, buffer = buffer.split('\n', 1)
                    line = line.strip()

                    if not line:  # End of an event block
                        current_event = None
                        continue

                    if line.startswith('event:'):
                        current_event = line[len('event:'):].strip()
                    elif line.startswith('data:'):
                        data_content = line[len('data:'):].strip()
                        if data_content and current_event == 'answer':
                            try:
                                json_data = json.loads(data_content)
                                answer_data = json_data.get("answer", {})
                                text_chunk = answer_data.get("text", "")
                                full_text = answer_data.get("fullText")
                                status = answer_data.get("status")

                                # Prefer fullText if available and status is COMPLETED
                                if full_text is not None and status == "COMPLETED":
                                    delta_content = full_text[len(full_response_text):]
                                    full_response_text = full_text  # Update full response tracker
                                elif text_chunk is not None:
                                    # Calculate the delta based on the potentially partial 'text' field
                                    delta_content = text_chunk[len(full_response_text):]
                                    full_response_text = text_chunk  # Update full response tracker
                                else:
                                    delta_content = None

                                if delta_content:
                                    delta = ChoiceDelta(content=delta_content, role="assistant")
                                    choice = Choice(index=0, delta=delta, finish_reason=None)
                                    chunk = ChatCompletionChunk(
                                        id=request_id,
                                        choices=[choice],
                                        created=created_time,
                                        model=model,
                                    )
                                    yield chunk

                            except json.JSONDecodeError:
                                print(f"Warning: Could not decode JSON data line: {data_content}")
                                continue
                        elif data_content and current_event == 'done':
                            # The 'done' event signals the end of the stream
                            delta = ChoiceDelta()  # Empty delta
                            choice = Choice(index=0, delta=delta, finish_reason="stop")
                            chunk = ChatCompletionChunk(
                                id=request_id,
                                choices=[choice],
                                created=created_time,
                                model=model,
                            )
                            yield chunk
                            return  # End the generator

        except requests.exceptions.RequestException as e:
            print(f"Error during LLMChatCo stream request: {e}")
            raise IOError(f"LLMChatCo request failed: {e}") from e
        except Exception as e:
            print(f"Unexpected error during LLMChatCo stream: {e}")
            raise IOError(f"LLMChatCo stream processing failed: {e}") from e

        # Fallback final chunk if the 'done' event wasn't received properly
        delta = ChoiceDelta()
        choice = Choice(index=0, delta=delta, finish_reason="stop")
        chunk = ChatCompletionChunk(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model,
        )
        yield chunk


    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        # Non-streaming requires accumulating stream chunks
        full_response_content = ""
        finish_reason = "stop"  # Assume stop unless an error occurs

        try:
            stream_generator = self._create_stream(request_id, created_time, model, payload, timeout, proxies)
            for chunk in stream_generator:
                if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                    full_response_content += chunk.choices[0].delta.content
                if chunk.choices and chunk.choices[0].finish_reason:
                    finish_reason = chunk.choices[0].finish_reason

        except IOError as e:
            print(f"Error obtaining non-stream response from LLMChatCo: {e}")
            # Return a partial or error response if needed, or re-raise;
            # for simplicity, return what we have, which may be empty
            finish_reason = "error"  # Indicate an issue

        # Construct the final ChatCompletion object
        message = ChatCompletionMessage(
            role="assistant",
            content=full_response_content
        )
        choice = Choice(
            index=0,
            message=message,
            finish_reason=finish_reason
        )
        # Usage data is not provided by this API, so set it to 0
        usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)

        completion = ChatCompletion(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model,
            usage=usage,
        )
        return completion

class Chat(BaseChat):
    def __init__(self, client: 'LLMChatCo'):
        self.completions = Completions(client)

class LLMChatCo(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for the LLMChat.co API.

    Usage:
        client = LLMChatCo()
        response = client.chat.completions.create(
            model="gemini-flash-2.0",  # Model must be specified here
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """
    AVAILABLE_MODELS = [
        "gemini-flash-2.0",  # Default model
        "llama-4-scout",
        "gpt-4o-mini",
        # "gpt-4.1",
        # "gpt-4.1-mini",
        "gpt-4.1-nano",
    ]

    def __init__(
        self,
        timeout: int = 60,
        browser: str = "chrome"  # For User-Agent generation
    ):
        """
        Initialize the LLMChatCo client.

        Args:
            timeout: Request timeout in seconds.
            browser: Browser name for LitAgent to generate a User-Agent.
        """
        # Removed model, system_prompt, proxies parameters

        self.timeout = timeout
        # Removed self.system_prompt assignment
        self.api_endpoint = "https://llmchat.co/api/completion"
        self.session = requests.Session()
        self.thread_id = str(uuid.uuid4())  # Unique thread ID per client instance

        # Removed proxy handling block

        # Initialize LitAgent for user-agent generation and fingerprinting
        try:
            agent = LitAgent()
            fingerprint = agent.generate_fingerprint(browser=browser)
        except Exception as e:
            print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
            # Fallback fingerprint data
            fingerprint = {
                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
                "accept_language": "en-US,en;q=0.9",
                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
                "platform": "Windows",
                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
            }

        # Initialize headers using the fingerprint
        self.headers = {
            "Accept": fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",  # Standard encodings
            "Accept-Language": fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Origin": "https://llmchat.co",  # Specific origin for LLMChatCo
            "Pragma": "no-cache",
            "Referer": f"https://llmchat.co/chat/{self.thread_id}",  # Specific referer for LLMChatCo
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',  # Fallback if empty
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
            "User-Agent": fingerprint["user_agent"],
            "DNT": "1",  # Added back from the previous version
        }
        self.session.headers.update(self.headers)

        # Initialize the chat interface
        self.chat = Chat(self)

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
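
For a quick sanity check of this module, here is a minimal usage sketch. It assumes the module is importable as webscout.Provider.OPENAI.llmchatco (mirroring the file path above; the import path itself is not shown in this diff) and that the ChatCompletion/ChatCompletionChunk structures behave as defined in webscout/Provider/OPENAI/utils.py:

from webscout.Provider.OPENAI.llmchatco import LLMChatCo

client = LLMChatCo(timeout=60)

# Non-streaming: _create_non_stream accumulates the SSE chunks into one ChatCompletion
response = client.chat.completions.create(
    model="gemini-flash-2.0",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)

# Streaming: stream=True returns a generator of ChatCompletionChunk objects
for chunk in client.chat.completions.create(
    model="gemini-flash-2.0",
    messages=[{"role": "user", "content": "Write a haiku about the sea."}],
    stream=True,
):
    if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)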