webscout: 8.3.6 (py3-none-any.whl) → 2025.10.11 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -58
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +33 -11
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -314
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -315
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -342
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -207
- webscout/Provider/Deepinfra.py +370 -369
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -387
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -162
- webscout/Provider/GeminiProxy.py +167 -166
- webscout/Provider/GithubChat.py +371 -370
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -379
- webscout/Provider/Jadve.py +282 -297
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +206 -384
- webscout/Provider/LambdaChat.py +423 -425
- webscout/Provider/Nemotron.py +244 -245
- webscout/Provider/Netwrck.py +248 -247
- webscout/Provider/OLLAMA.py +395 -394
- webscout/Provider/OPENAI/Cloudflare.py +394 -395
- webscout/Provider/OPENAI/FalconH1.py +452 -457
- webscout/Provider/OPENAI/FreeGemini.py +297 -299
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
- webscout/Provider/OPENAI/NEMOTRON.py +241 -244
- webscout/Provider/OPENAI/PI.py +428 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +466 -467
- webscout/Provider/OPENAI/__init__.py +33 -59
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +249 -269
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +593 -588
- webscout/Provider/OPENAI/chatgptclone.py +521 -524
- webscout/Provider/OPENAI/chatsandbox.py +202 -177
- webscout/Provider/OPENAI/deepinfra.py +319 -315
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -311
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -354
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -529
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -348
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +244 -496
- webscout/Provider/PI.py +405 -404
- webscout/Provider/Perplexitylabs.py +430 -431
- webscout/Provider/QwenLM.py +272 -254
- webscout/Provider/STT/__init__.py +32 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -12
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -10
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -236
- webscout/Provider/TextPollinationsAI.py +311 -318
- webscout/Provider/TogetherAI.py +356 -357
- webscout/Provider/TwoAI.py +313 -569
- webscout/Provider/TypliAI.py +312 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
- webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +251 -250
- webscout/Provider/VercelAI.py +256 -255
- webscout/Provider/WiseCat.py +232 -231
- webscout/Provider/WrDoChat.py +367 -366
- webscout/Provider/__init__.py +33 -86
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -334
- webscout/Provider/cerebras.py +446 -340
- webscout/Provider/chatglm.py +394 -214
- webscout/Provider/cleeai.py +211 -212
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -265
- webscout/Provider/julius.py +223 -222
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +384 -457
- webscout/Provider/searchchat.py +292 -291
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -364
- webscout/Provider/turboseek.py +274 -265
- webscout/Provider/typefully.py +208 -207
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +372 -371
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/Qwen3.py +0 -304
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- webscout/tempid.py +0 -128
- webscout-8.3.6.dist-info/RECORD +0 -327
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/llmchatco.py
CHANGED
The hunk header is `@@ -1,291 +1,291 @@`: the diff tool flags every line of the file as rewritten (likely a whitespace or line-ending change), but the old and new contents are otherwise identical. The only substantive change is at line 19, where a blank line is replaced by a `required_auth = False` class attribute:

```diff
@@ -16,7 +16,7 @@ class LLMChatCo(Provider):
     """
     A class to interact with the LLMChat.co API
     """
-
+    required_auth = False
     AVAILABLE_MODELS = [
         "gemini-flash-2.0", # Default model
         "llama-4-scout",
```

Full source of the new version (2025.10.11):

```python
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
import uuid
import re
from typing import Union, Any, Dict, Optional, Generator, List

from webscout.AIutel import Optimizers, sanitize_stream # Import sanitize_stream
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent as Lit

class LLMChatCo(Provider):
    """
    A class to interact with the LLMChat.co API
    """
    required_auth = False
    AVAILABLE_MODELS = [
        "gemini-flash-2.0", # Default model
        "llama-4-scout",
        "gpt-4o-mini",
        "gpt-4.1-nano",


        # "gpt-4.1",
        # "gpt-4.1-mini",
        # "o3-mini",
        # "claude-3-5-sonnet",
        # "deepseek-r1",
        # "claude-3-7-sonnet",
        # "deep", # deep research mode
        # "pro" # pro research mode

    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048, # Note: max_tokens is not used by this API
        timeout: int = 60,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "gemini-flash-2.0",
        system_prompt: str = "You are a helpful assistant."
    ):
        """
        Initializes the LLMChat.co API with given parameters.
        """

        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # Initialize curl_cffi Session
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://llmchat.co/api/completion"
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.thread_id = str(uuid.uuid4()) # Generate a unique thread ID for conversations

        # Create LitAgent instance (keep if needed for other headers)
        lit_agent = Lit()

        # Headers based on the provided request
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "text/event-stream",
            "User-Agent": lit_agent.random(),
            "Accept-Language": "en-US,en;q=0.9",
            "Origin": "https://llmchat.co",
            "Referer": f"https://llmchat.co/chat/{self.thread_id}",
            "DNT": "1",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            # Add sec-ch-ua headers if needed for impersonation consistency
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies # Assign proxies directly
        # Store message history for conversation context
        self.last_assistant_response = ""

    @staticmethod
    def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts text content from LLMChat.co stream JSON objects."""
        if isinstance(chunk, dict) and "answer" in chunk:
            answer = chunk["answer"]
            # Prefer fullText if available and status is COMPLETED
            if answer.get("fullText") and answer.get("status") == "COMPLETED":
                return answer["fullText"]
            elif "text" in answer:
                return answer["text"]
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = True, # Default to stream as the API uses SSE
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        web_search: bool = False,
    ) -> Union[Dict[str, Any], Generator[Any, None, None], str]:
        """Chat with LLMChat.co with streaming capabilities and raw output support using sanitize_stream."""

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        # Generate a unique ID for this message
        thread_item_id = ''.join(str(uuid.uuid4()).split('-'))[:20]
        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": prompt},
        ]
        # Prepare payload for the API request based on observed request format
        payload = {
            "mode": self.model,
            "prompt": prompt,
            "threadId": self.thread_id,
            "messages": messages,
            "mcpConfig": {},
            "threadItemId": thread_item_id,
            "parentThreadItemId": "",
            "webSearch": web_search,
            "showSuggestions": True
        }

        def for_stream():
            full_response = ""
            try:
                response = self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    content_extractor=self._llmchatco_extractor,
                    yield_raw_on_error=False,
                    raw=raw
                )

                last_yielded_text = ""
                for current_full_text in processed_stream:
                    if current_full_text and isinstance(current_full_text, str):
                        new_text = current_full_text[len(last_yielded_text):]
                        if new_text:
                            full_response = current_full_text
                            last_yielded_text = current_full_text
                            if raw:
                                yield new_text
                            else:
                                yield dict(text=new_text)
                self.last_response = dict(text=full_response)
                self.last_assistant_response = full_response
                self.conversation.update_chat_history(
                    prompt, full_response
                )
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except Exception as e:
                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
        def for_non_stream():
            full_response_text = ""
            try:
                for chunk_data in for_stream():
                    if raw and isinstance(chunk_data, str):
                        full_response_text += chunk_data
                    elif isinstance(chunk_data, dict) and "text" in chunk_data:
                        full_response_text += chunk_data["text"]
            except Exception as e:
                if not full_response_text:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
            return full_response_text if raw else self.last_response
        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        web_search: bool = False,
        raw: bool = False
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response with streaming capabilities and raw output support"""
        def for_stream_chat():
            gen = self.ask(
                prompt, stream=True, raw=raw,
                optimizer=optimizer, conversationally=conversationally,
                web_search=web_search
            )
            for response in gen:
                if raw:
                    yield response
                else:
                    yield self.get_message(response)
        def for_non_stream_chat():
            response_data = self.ask(
                prompt,
                stream=False,
                raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
                web_search=web_search
            )
            if raw:
                return response_data if isinstance(response_data, str) else self.get_message(response_data)
            else:
                return self.get_message(response_data)
        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Retrieves message from response with validation"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == "__main__":
    # # Ensure curl_cffi is installed
    # print("-" * 80)
    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
    # print("-" * 80)

    # # Test all available models
    # working = 0
    # total = len(LLMChatCo.AVAILABLE_MODELS)

    # for model in LLMChatCo.AVAILABLE_MODELS:
    #     try:
    #         test_ai = LLMChatCo(model=model, timeout=60)
    #         response = test_ai.chat("Say 'Hello' in one word")
    #         response_text = response

    #         if response_text and len(response_text.strip()) > 0:
    #             status = "✓"
    #             # Truncate response if too long
    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
    #         else:
    #             status = "✗"
    #             display_text = "Empty or invalid response"
    #         print(f"{model:<50} {status:<10} {display_text}")
    #     except Exception as e:
    #         print(f"{model:<50} {'✗':<10} {str(e)}")
    ai = LLMChatCo()
    response = ai.chat("yooo", stream=True, raw=False)
    for chunk in response:
        print(chunk, end="", flush=True)
```
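To illustrate the stream-parsing path, here is a minimal sketch (not part of the package) exercising the two SSE payload shapes that `_llmchatco_extractor` handles. The `answer` dicts below are hypothetical, constructed only from the keys the extractor reads (`text`, `fullText`, `status`); real API events may carry additional fields.

```python
# Hypothetical sample events, shaped to match the keys _llmchatco_extractor reads.
from webscout.Provider.llmchatco import LLMChatCo

partial = {"answer": {"text": "Hello, wo", "status": "PENDING"}}        # mid-stream event
final = {"answer": {"text": "Hello, world!",
                    "fullText": "Hello, world!",
                    "status": "COMPLETED"}}                             # terminal event

assert LLMChatCo._llmchatco_extractor(partial) == "Hello, wo"     # falls through to "text"
assert LLMChatCo._llmchatco_extractor(final) == "Hello, world!"   # "fullText" preferred once COMPLETED
assert LLMChatCo._llmchatco_extractor({"done": True}) is None     # chunks without "answer" are dropped
```

Because the endpoint appears to stream the accumulated answer text rather than per-token deltas, `for_stream()` slices each extracted string against `last_yielded_text`, so callers receive only the newly generated characters per chunk.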