webscout 8.3.7__py3-none-any.whl → 2025.10.11__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.

Potentially problematic release.

Files changed (273)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -60
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +16 -1
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -316
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -314
  64. webscout/Provider/Apriel.py +306 -0
  65. webscout/Provider/ChatGPTClone.py +236 -236
  66. webscout/Provider/ChatSandbox.py +343 -343
  67. webscout/Provider/Cloudflare.py +324 -324
  68. webscout/Provider/Cohere.py +208 -208
  69. webscout/Provider/Deepinfra.py +370 -366
  70. webscout/Provider/ExaAI.py +260 -260
  71. webscout/Provider/ExaChat.py +308 -308
  72. webscout/Provider/Flowith.py +221 -221
  73. webscout/Provider/GMI.py +293 -0
  74. webscout/Provider/Gemini.py +164 -164
  75. webscout/Provider/GeminiProxy.py +167 -167
  76. webscout/Provider/GithubChat.py +371 -372
  77. webscout/Provider/Groq.py +800 -800
  78. webscout/Provider/HeckAI.py +383 -383
  79. webscout/Provider/Jadve.py +282 -282
  80. webscout/Provider/K2Think.py +307 -307
  81. webscout/Provider/Koboldai.py +205 -205
  82. webscout/Provider/LambdaChat.py +423 -423
  83. webscout/Provider/Nemotron.py +244 -244
  84. webscout/Provider/Netwrck.py +248 -248
  85. webscout/Provider/OLLAMA.py +395 -395
  86. webscout/Provider/OPENAI/Cloudflare.py +393 -393
  87. webscout/Provider/OPENAI/FalconH1.py +451 -451
  88. webscout/Provider/OPENAI/FreeGemini.py +296 -296
  89. webscout/Provider/OPENAI/K2Think.py +431 -431
  90. webscout/Provider/OPENAI/NEMOTRON.py +240 -240
  91. webscout/Provider/OPENAI/PI.py +427 -427
  92. webscout/Provider/OPENAI/README.md +959 -959
  93. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  94. webscout/Provider/OPENAI/TwoAI.py +465 -465
  95. webscout/Provider/OPENAI/__init__.py +33 -18
  96. webscout/Provider/OPENAI/base.py +248 -248
  97. webscout/Provider/OPENAI/chatglm.py +528 -0
  98. webscout/Provider/OPENAI/chatgpt.py +592 -592
  99. webscout/Provider/OPENAI/chatgptclone.py +521 -521
  100. webscout/Provider/OPENAI/chatsandbox.py +202 -202
  101. webscout/Provider/OPENAI/deepinfra.py +318 -314
  102. webscout/Provider/OPENAI/e2b.py +1665 -1665
  103. webscout/Provider/OPENAI/exaai.py +420 -420
  104. webscout/Provider/OPENAI/exachat.py +452 -452
  105. webscout/Provider/OPENAI/friendli.py +232 -232
  106. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  107. webscout/Provider/OPENAI/groq.py +364 -364
  108. webscout/Provider/OPENAI/heckai.py +314 -314
  109. webscout/Provider/OPENAI/llmchatco.py +337 -337
  110. webscout/Provider/OPENAI/netwrck.py +355 -355
  111. webscout/Provider/OPENAI/oivscode.py +290 -290
  112. webscout/Provider/OPENAI/opkfc.py +518 -518
  113. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  114. webscout/Provider/OPENAI/scirachat.py +535 -535
  115. webscout/Provider/OPENAI/sonus.py +308 -308
  116. webscout/Provider/OPENAI/standardinput.py +442 -442
  117. webscout/Provider/OPENAI/textpollinations.py +340 -340
  118. webscout/Provider/OPENAI/toolbaz.py +419 -416
  119. webscout/Provider/OPENAI/typefully.py +362 -362
  120. webscout/Provider/OPENAI/utils.py +295 -295
  121. webscout/Provider/OPENAI/venice.py +436 -436
  122. webscout/Provider/OPENAI/wisecat.py +387 -387
  123. webscout/Provider/OPENAI/writecream.py +166 -166
  124. webscout/Provider/OPENAI/x0gpt.py +378 -378
  125. webscout/Provider/OPENAI/yep.py +389 -389
  126. webscout/Provider/OpenGPT.py +230 -230
  127. webscout/Provider/Openai.py +243 -243
  128. webscout/Provider/PI.py +405 -405
  129. webscout/Provider/Perplexitylabs.py +430 -430
  130. webscout/Provider/QwenLM.py +272 -272
  131. webscout/Provider/STT/__init__.py +16 -1
  132. webscout/Provider/Sambanova.py +257 -257
  133. webscout/Provider/StandardInput.py +309 -309
  134. webscout/Provider/TTI/README.md +82 -82
  135. webscout/Provider/TTI/__init__.py +33 -18
  136. webscout/Provider/TTI/aiarta.py +413 -413
  137. webscout/Provider/TTI/base.py +136 -136
  138. webscout/Provider/TTI/bing.py +243 -243
  139. webscout/Provider/TTI/gpt1image.py +149 -149
  140. webscout/Provider/TTI/imagen.py +196 -196
  141. webscout/Provider/TTI/infip.py +211 -211
  142. webscout/Provider/TTI/magicstudio.py +232 -232
  143. webscout/Provider/TTI/monochat.py +219 -219
  144. webscout/Provider/TTI/piclumen.py +214 -214
  145. webscout/Provider/TTI/pixelmuse.py +232 -232
  146. webscout/Provider/TTI/pollinations.py +232 -232
  147. webscout/Provider/TTI/together.py +288 -288
  148. webscout/Provider/TTI/utils.py +12 -12
  149. webscout/Provider/TTI/venice.py +367 -367
  150. webscout/Provider/TTS/README.md +192 -192
  151. webscout/Provider/TTS/__init__.py +33 -18
  152. webscout/Provider/TTS/parler.py +110 -110
  153. webscout/Provider/TTS/streamElements.py +333 -333
  154. webscout/Provider/TTS/utils.py +280 -280
  155. webscout/Provider/TeachAnything.py +237 -237
  156. webscout/Provider/TextPollinationsAI.py +310 -310
  157. webscout/Provider/TogetherAI.py +356 -356
  158. webscout/Provider/TwoAI.py +312 -312
  159. webscout/Provider/TypliAI.py +311 -311
  160. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  161. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  162. webscout/Provider/UNFINISHED/GizAI.py +294 -294
  163. webscout/Provider/UNFINISHED/Marcus.py +198 -198
  164. webscout/Provider/UNFINISHED/Qodo.py +477 -477
  165. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  166. webscout/Provider/UNFINISHED/XenAI.py +324 -324
  167. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  168. webscout/Provider/UNFINISHED/liner.py +334 -0
  169. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  170. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  171. webscout/Provider/UNFINISHED/samurai.py +223 -223
  172. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  173. webscout/Provider/Venice.py +250 -250
  174. webscout/Provider/VercelAI.py +256 -256
  175. webscout/Provider/WiseCat.py +231 -231
  176. webscout/Provider/WrDoChat.py +366 -366
  177. webscout/Provider/__init__.py +33 -18
  178. webscout/Provider/ai4chat.py +174 -174
  179. webscout/Provider/akashgpt.py +331 -331
  180. webscout/Provider/cerebras.py +446 -446
  181. webscout/Provider/chatglm.py +394 -301
  182. webscout/Provider/cleeai.py +211 -211
  183. webscout/Provider/elmo.py +282 -282
  184. webscout/Provider/geminiapi.py +208 -208
  185. webscout/Provider/granite.py +261 -261
  186. webscout/Provider/hermes.py +263 -263
  187. webscout/Provider/julius.py +223 -223
  188. webscout/Provider/learnfastai.py +309 -309
  189. webscout/Provider/llama3mitril.py +214 -214
  190. webscout/Provider/llmchat.py +243 -243
  191. webscout/Provider/llmchatco.py +290 -290
  192. webscout/Provider/meta.py +801 -801
  193. webscout/Provider/oivscode.py +309 -309
  194. webscout/Provider/scira_chat.py +383 -383
  195. webscout/Provider/searchchat.py +292 -292
  196. webscout/Provider/sonus.py +258 -258
  197. webscout/Provider/toolbaz.py +370 -367
  198. webscout/Provider/turboseek.py +273 -273
  199. webscout/Provider/typefully.py +207 -207
  200. webscout/Provider/yep.py +372 -372
  201. webscout/__init__.py +30 -31
  202. webscout/__main__.py +5 -5
  203. webscout/auth/api_key_manager.py +189 -189
  204. webscout/auth/config.py +175 -175
  205. webscout/auth/models.py +185 -185
  206. webscout/auth/routes.py +664 -664
  207. webscout/auth/simple_logger.py +236 -236
  208. webscout/cli.py +523 -523
  209. webscout/conversation.py +438 -438
  210. webscout/exceptions.py +361 -361
  211. webscout/litagent/Readme.md +298 -298
  212. webscout/litagent/__init__.py +28 -28
  213. webscout/litagent/agent.py +581 -581
  214. webscout/litagent/constants.py +59 -59
  215. webscout/litprinter/__init__.py +58 -58
  216. webscout/models.py +181 -181
  217. webscout/optimizers.py +419 -419
  218. webscout/prompt_manager.py +288 -288
  219. webscout/sanitize.py +1078 -1078
  220. webscout/scout/README.md +401 -401
  221. webscout/scout/__init__.py +8 -8
  222. webscout/scout/core/__init__.py +6 -6
  223. webscout/scout/core/crawler.py +297 -297
  224. webscout/scout/core/scout.py +706 -706
  225. webscout/scout/core/search_result.py +95 -95
  226. webscout/scout/core/text_analyzer.py +62 -62
  227. webscout/scout/core/text_utils.py +277 -277
  228. webscout/scout/core/web_analyzer.py +51 -51
  229. webscout/scout/element.py +599 -599
  230. webscout/scout/parsers/__init__.py +69 -69
  231. webscout/scout/parsers/html5lib_parser.py +172 -172
  232. webscout/scout/parsers/html_parser.py +236 -236
  233. webscout/scout/parsers/lxml_parser.py +178 -178
  234. webscout/scout/utils.py +37 -37
  235. webscout/swiftcli/Readme.md +323 -323
  236. webscout/swiftcli/__init__.py +95 -95
  237. webscout/swiftcli/core/__init__.py +7 -7
  238. webscout/swiftcli/core/cli.py +308 -308
  239. webscout/swiftcli/core/context.py +104 -104
  240. webscout/swiftcli/core/group.py +241 -241
  241. webscout/swiftcli/decorators/__init__.py +28 -28
  242. webscout/swiftcli/decorators/command.py +221 -221
  243. webscout/swiftcli/decorators/options.py +220 -220
  244. webscout/swiftcli/decorators/output.py +302 -302
  245. webscout/swiftcli/exceptions.py +21 -21
  246. webscout/swiftcli/plugins/__init__.py +9 -9
  247. webscout/swiftcli/plugins/base.py +135 -135
  248. webscout/swiftcli/plugins/manager.py +269 -269
  249. webscout/swiftcli/utils/__init__.py +59 -59
  250. webscout/swiftcli/utils/formatting.py +252 -252
  251. webscout/swiftcli/utils/parsing.py +267 -267
  252. webscout/update_checker.py +117 -117
  253. webscout/version.py +1 -1
  254. webscout/webscout_search.py +1183 -1183
  255. webscout/webscout_search_async.py +649 -649
  256. webscout/yep_search.py +346 -346
  257. webscout/zeroart/README.md +89 -89
  258. webscout/zeroart/__init__.py +134 -134
  259. webscout/zeroart/base.py +66 -66
  260. webscout/zeroart/effects.py +100 -100
  261. webscout/zeroart/fonts.py +1238 -1238
  262. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
  263. webscout-2025.10.11.dist-info/RECORD +300 -0
  264. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  265. webscout/Provider/OPENAI/Qwen3.py +0 -303
  266. webscout/Provider/OPENAI/qodo.py +0 -630
  267. webscout/Provider/OPENAI/xenai.py +0 -514
  268. webscout/tempid.py +0 -134
  269. webscout-8.3.7.dist-info/RECORD +0 -301
  270. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  271. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  272. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  273. {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
@@ -1,291 +1,291 @@
- from curl_cffi.requests import Session
- from curl_cffi import CurlError
- import json
- import uuid
- import re
- from typing import Union, Any, Dict, Optional, Generator, List
-
- from webscout.AIutel import Optimizers, sanitize_stream # Import sanitize_stream
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent as Lit
-
- class LLMChatCo(Provider):
-     """
-     A class to interact with the LLMChat.co API
-     """
-     required_auth = False
-     AVAILABLE_MODELS = [
-         "gemini-flash-2.0", # Default model
-         "llama-4-scout",
-         "gpt-4o-mini",
-         "gpt-4.1-nano",
-
-
-         # "gpt-4.1",
-         # "gpt-4.1-mini",
-         # "o3-mini",
-         # "claude-3-5-sonnet",
-         # "deepseek-r1",
-         # "claude-3-7-sonnet",
-         # "deep", # deep research mode
-         # "pro" # pro research mode
-
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2048, # Note: max_tokens is not used by this API
-         timeout: int = 60,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "gemini-flash-2.0",
-         system_prompt: str = "You are a helpful assistant."
-     ):
-         """
-         Initializes the LLMChat.co API with given parameters.
-         """
-
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         # Initialize curl_cffi Session
-         self.session = Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://llmchat.co/api/completion"
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.system_prompt = system_prompt
-         self.thread_id = str(uuid.uuid4()) # Generate a unique thread ID for conversations
-
-         # Create LitAgent instance (keep if needed for other headers)
-         lit_agent = Lit()
-
-         # Headers based on the provided request
-         self.headers = {
-             "Content-Type": "application/json",
-             "Accept": "text/event-stream",
-             "User-Agent": lit_agent.random(),
-             "Accept-Language": "en-US,en;q=0.9",
-             "Origin": "https://llmchat.co",
-             "Referer": f"https://llmchat.co/chat/{self.thread_id}",
-             "DNT": "1",
-             "Sec-Fetch-Dest": "empty",
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "same-origin",
-             # Add sec-ch-ua headers if needed for impersonation consistency
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies # Assign proxies directly
-         # Store message history for conversation context
-         self.last_assistant_response = ""
-
-     @staticmethod
-     def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-         """Extracts text content from LLMChat.co stream JSON objects."""
-         if isinstance(chunk, dict) and "answer" in chunk:
-             answer = chunk["answer"]
-             # Prefer fullText if available and status is COMPLETED
-             if answer.get("fullText") and answer.get("status") == "COMPLETED":
-                 return answer["fullText"]
-             elif "text" in answer:
-                 return answer["text"]
-         return None
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = True, # Default to stream as the API uses SSE
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         web_search: bool = False,
-     ) -> Union[Dict[str, Any], Generator[Any, None, None], str]:
-         """Chat with LLMChat.co with streaming capabilities and raw output support using sanitize_stream."""
-
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         # Generate a unique ID for this message
-         thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
-         messages = [
-             {"role": "system", "content": self.system_prompt},
-             {"role": "user", "content": prompt},
-         ]
-         # Prepare payload for the API request based on observed request format
-         payload = {
-             "mode": self.model,
-             "prompt": prompt,
-             "threadId": self.thread_id,
-             "messages": messages,
-             "mcpConfig": {},
-             "threadItemId": thread_item_id,
-             "parentThreadItemId": "",
-             "webSearch": web_search,
-             "showSuggestions": True
-         }
-
-         def for_stream():
-             full_response = ""
-             try:
-                 response = self.session.post(
-                     self.api_endpoint,
-                     json=payload,
-                     stream=True,
-                     timeout=self.timeout,
-                     impersonate="chrome110"
-                 )
-                 response.raise_for_status()
-
-                 processed_stream = sanitize_stream(
-                     data=response.iter_content(chunk_size=None),
-                     intro_value="data:",
-                     to_json=True,
-                     content_extractor=self._llmchatco_extractor,
-                     yield_raw_on_error=False,
-                     raw=raw
-                 )
-
-                 last_yielded_text = ""
-                 for current_full_text in processed_stream:
-                     if current_full_text and isinstance(current_full_text, str):
-                         new_text = current_full_text[len(last_yielded_text):]
-                         if new_text:
-                             full_response = current_full_text
-                             last_yielded_text = current_full_text
-                             if raw:
-                                 yield new_text
-                             else:
-                                 yield dict(text=new_text)
-                 self.last_response = dict(text=full_response)
-                 self.last_assistant_response = full_response
-                 self.conversation.update_chat_history(
-                     prompt, full_response
-                 )
-             except CurlError as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-             except Exception as e:
-                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
-         def for_non_stream():
-             full_response_text = ""
-             try:
-                 for chunk_data in for_stream():
-                     if raw and isinstance(chunk_data, str):
-                         full_response_text += chunk_data
-                     elif isinstance(chunk_data, dict) and "text" in chunk_data:
-                         full_response_text += chunk_data["text"]
-             except Exception as e:
-                 if not full_response_text:
-                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-             return full_response_text if raw else self.last_response
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         web_search: bool = False,
-         raw: bool = False
-     ) -> Union[str, Generator[str, None, None]]:
-         """Generate response with streaming capabilities and raw output support"""
-         def for_stream_chat():
-             gen = self.ask(
-                 prompt, stream=True, raw=raw,
-                 optimizer=optimizer, conversationally=conversationally,
-                 web_search=web_search
-             )
-             for response in gen:
-                 if raw:
-                     yield response
-                 else:
-                     yield self.get_message(response)
-         def for_non_stream_chat():
-             response_data = self.ask(
-                 prompt,
-                 stream=False,
-                 raw=raw,
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-                 web_search=web_search
-             )
-             if raw:
-                 return response_data if isinstance(response_data, str) else self.get_message(response_data)
-             else:
-                 return self.get_message(response_data)
-         return for_stream_chat() if stream else for_non_stream_chat()
-
-     def get_message(self, response: Dict[str, Any]) -> str:
-         """Retrieves message from response with validation"""
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == "__main__":
-     # # Ensure curl_cffi is installed
-     # print("-" * 80)
-     # print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     # print("-" * 80)
-
-     # # Test all available models
-     # working = 0
-     # total = len(LLMChatCo.AVAILABLE_MODELS)
-
-     # for model in LLMChatCo.AVAILABLE_MODELS:
-     #     try:
-     #         test_ai = LLMChatCo(model=model, timeout=60)
-     #         response = test_ai.chat("Say 'Hello' in one word")
-     #         response_text = response
-
-     #         if response_text and len(response_text.strip()) > 0:
-     #             status = "✓"
-     #             # Truncate response if too long
-     #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-     #         else:
-     #             status = "✗"
-     #             display_text = "Empty or invalid response"
-     #         print(f"{model:<50} {status:<10} {display_text}")
-     #     except Exception as e:
-     #         print(f"{model:<50} {'✗':<10} {str(e)}")
-     ai = LLMChatCo()
-     response = ai.chat("yooo", stream=True, raw=False)
-     for chunk in response:
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
+ import json
+ import uuid
+ import re
+ from typing import Union, Any, Dict, Optional, Generator, List
+
+ from webscout.AIutel import Optimizers, sanitize_stream # Import sanitize_stream
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent as Lit
+
+ class LLMChatCo(Provider):
+     """
+     A class to interact with the LLMChat.co API
+     """
+     required_auth = False
+     AVAILABLE_MODELS = [
+         "gemini-flash-2.0", # Default model
+         "llama-4-scout",
+         "gpt-4o-mini",
+         "gpt-4.1-nano",
+
+
+         # "gpt-4.1",
+         # "gpt-4.1-mini",
+         # "o3-mini",
+         # "claude-3-5-sonnet",
+         # "deepseek-r1",
+         # "claude-3-7-sonnet",
+         # "deep", # deep research mode
+         # "pro" # pro research mode
+
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048, # Note: max_tokens is not used by this API
+         timeout: int = 60,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gemini-flash-2.0",
+         system_prompt: str = "You are a helpful assistant."
+     ):
+         """
+         Initializes the LLMChat.co API with given parameters.
+         """
+
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         # Initialize curl_cffi Session
+         self.session = Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://llmchat.co/api/completion"
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.thread_id = str(uuid.uuid4()) # Generate a unique thread ID for conversations
+
+         # Create LitAgent instance (keep if needed for other headers)
+         lit_agent = Lit()
+
+         # Headers based on the provided request
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "text/event-stream",
+             "User-Agent": lit_agent.random(),
+             "Accept-Language": "en-US,en;q=0.9",
+             "Origin": "https://llmchat.co",
+             "Referer": f"https://llmchat.co/chat/{self.thread_id}",
+             "DNT": "1",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             # Add sec-ch-ua headers if needed for impersonation consistency
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         # Update curl_cffi session headers and proxies
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies # Assign proxies directly
+         # Store message history for conversation context
+         self.last_assistant_response = ""
+
+     @staticmethod
+     def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+         """Extracts text content from LLMChat.co stream JSON objects."""
+         if isinstance(chunk, dict) and "answer" in chunk:
+             answer = chunk["answer"]
+             # Prefer fullText if available and status is COMPLETED
+             if answer.get("fullText") and answer.get("status") == "COMPLETED":
+                 return answer["fullText"]
+             elif "text" in answer:
+                 return answer["text"]
+         return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True, # Default to stream as the API uses SSE
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False,
+     ) -> Union[Dict[str, Any], Generator[Any, None, None], str]:
+         """Chat with LLMChat.co with streaming capabilities and raw output support using sanitize_stream."""
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         # Generate a unique ID for this message
+         thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": prompt},
+         ]
+         # Prepare payload for the API request based on observed request format
+         payload = {
+             "mode": self.model,
+             "prompt": prompt,
+             "threadId": self.thread_id,
+             "messages": messages,
+             "mcpConfig": {},
+             "threadItemId": thread_item_id,
+             "parentThreadItemId": "",
+             "webSearch": web_search,
+             "showSuggestions": True
+         }
+
+         def for_stream():
+             full_response = ""
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout,
+                     impersonate="chrome110"
+                 )
+                 response.raise_for_status()
+
+                 processed_stream = sanitize_stream(
+                     data=response.iter_content(chunk_size=None),
+                     intro_value="data:",
+                     to_json=True,
+                     content_extractor=self._llmchatco_extractor,
+                     yield_raw_on_error=False,
+                     raw=raw
+                 )
+
+                 last_yielded_text = ""
+                 for current_full_text in processed_stream:
+                     if current_full_text and isinstance(current_full_text, str):
+                         new_text = current_full_text[len(last_yielded_text):]
+                         if new_text:
+                             full_response = current_full_text
+                             last_yielded_text = current_full_text
+                             if raw:
+                                 yield new_text
+                             else:
+                                 yield dict(text=new_text)
+                 self.last_response = dict(text=full_response)
+                 self.last_assistant_response = full_response
+                 self.conversation.update_chat_history(
+                     prompt, full_response
+                 )
+             except CurlError as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+             except Exception as e:
+                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
+         def for_non_stream():
+             full_response_text = ""
+             try:
+                 for chunk_data in for_stream():
+                     if raw and isinstance(chunk_data, str):
+                         full_response_text += chunk_data
+                     elif isinstance(chunk_data, dict) and "text" in chunk_data:
+                         full_response_text += chunk_data["text"]
+             except Exception as e:
+                 if not full_response_text:
+                     raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+             return full_response_text if raw else self.last_response
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False,
+         raw: bool = False
+     ) -> Union[str, Generator[str, None, None]]:
+         """Generate response with streaming capabilities and raw output support"""
+         def for_stream_chat():
+             gen = self.ask(
+                 prompt, stream=True, raw=raw,
+                 optimizer=optimizer, conversationally=conversationally,
+                 web_search=web_search
+             )
+             for response in gen:
+                 if raw:
+                     yield response
+                 else:
+                     yield self.get_message(response)
+         def for_non_stream_chat():
+             response_data = self.ask(
+                 prompt,
+                 stream=False,
+                 raw=raw,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 web_search=web_search
+             )
+             if raw:
+                 return response_data if isinstance(response_data, str) else self.get_message(response_data)
+             else:
+                 return self.get_message(response_data)
+         return for_stream_chat() if stream else for_non_stream_chat()
+
+     def get_message(self, response: Dict[str, Any]) -> str:
+         """Retrieves message from response with validation"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     # # Ensure curl_cffi is installed
+     # print("-" * 80)
+     # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     # print("-" * 80)
+
+     # # Test all available models
+     # working = 0
+     # total = len(LLMChatCo.AVAILABLE_MODELS)
+
+     # for model in LLMChatCo.AVAILABLE_MODELS:
+     #     try:
+     #         test_ai = LLMChatCo(model=model, timeout=60)
+     #         response = test_ai.chat("Say 'Hello' in one word")
+     #         response_text = response
+
+     #         if response_text and len(response_text.strip()) > 0:
+     #             status = "✓"
+     #             # Truncate response if too long
+     #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+     #         else:
+     #             status = "✗"
+     #             display_text = "Empty or invalid response"
+     #         print(f"{model:<50} {status:<10} {display_text}")
+     #     except Exception as e:
+     #         print(f"{model:<50} {'✗':<10} {str(e)}")
+     ai = LLMChatCo()
+     response = ai.chat("yooo", stream=True, raw=False)
+     for chunk in response:
          print(chunk, end="", flush=True)
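
For reference, a minimal usage sketch of the LLMChatCo provider shown in the diff above. The import path and model name are assumptions based on the file list (webscout/Provider/llmchatco.py) and the AVAILABLE_MODELS constant in the diffed module; it also assumes this webscout release is installed and the LLMChat.co endpoint is reachable.

    # Usage sketch; import path and model name are assumed from the diff above,
    # not taken from official documentation.
    from webscout.Provider.llmchatco import LLMChatCo

    ai = LLMChatCo(model="gemini-flash-2.0", timeout=60)

    # Non-streaming call: chat() consumes the SSE stream internally and
    # returns the full reply as a string.
    print(ai.chat("Say hello in one word"))

    # Streaming call: chat(stream=True) yields incremental text deltas
    # as they arrive over the event stream.
    for chunk in ai.chat("Explain SSE briefly", stream=True):
        print(chunk, end="", flush=True)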