webscout 8.3.6__py3-none-any.whl → 2025.10.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic — see the advisory on the registry page for details.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -58
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +33 -11
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -314
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -315
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -342
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -207
- webscout/Provider/Deepinfra.py +370 -369
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -387
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -162
- webscout/Provider/GeminiProxy.py +167 -166
- webscout/Provider/GithubChat.py +371 -370
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -379
- webscout/Provider/Jadve.py +282 -297
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +206 -384
- webscout/Provider/LambdaChat.py +423 -425
- webscout/Provider/Nemotron.py +244 -245
- webscout/Provider/Netwrck.py +248 -247
- webscout/Provider/OLLAMA.py +395 -394
- webscout/Provider/OPENAI/Cloudflare.py +394 -395
- webscout/Provider/OPENAI/FalconH1.py +452 -457
- webscout/Provider/OPENAI/FreeGemini.py +297 -299
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
- webscout/Provider/OPENAI/NEMOTRON.py +241 -244
- webscout/Provider/OPENAI/PI.py +428 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +466 -467
- webscout/Provider/OPENAI/__init__.py +33 -59
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +249 -269
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +593 -588
- webscout/Provider/OPENAI/chatgptclone.py +521 -524
- webscout/Provider/OPENAI/chatsandbox.py +202 -177
- webscout/Provider/OPENAI/deepinfra.py +319 -315
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -311
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -354
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -529
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -348
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +244 -496
- webscout/Provider/PI.py +405 -404
- webscout/Provider/Perplexitylabs.py +430 -431
- webscout/Provider/QwenLM.py +272 -254
- webscout/Provider/STT/__init__.py +32 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -12
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -10
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -236
- webscout/Provider/TextPollinationsAI.py +311 -318
- webscout/Provider/TogetherAI.py +356 -357
- webscout/Provider/TwoAI.py +313 -569
- webscout/Provider/TypliAI.py +312 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
- webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +251 -250
- webscout/Provider/VercelAI.py +256 -255
- webscout/Provider/WiseCat.py +232 -231
- webscout/Provider/WrDoChat.py +367 -366
- webscout/Provider/__init__.py +33 -86
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -334
- webscout/Provider/cerebras.py +446 -340
- webscout/Provider/chatglm.py +394 -214
- webscout/Provider/cleeai.py +211 -212
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -265
- webscout/Provider/julius.py +223 -222
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +384 -457
- webscout/Provider/searchchat.py +292 -291
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -364
- webscout/Provider/turboseek.py +274 -265
- webscout/Provider/typefully.py +208 -207
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +372 -371
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/Qwen3.py +0 -304
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- webscout/tempid.py +0 -128
- webscout-8.3.6.dist-info/RECORD +0 -327
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
|
@@ -1,378 +0,0 @@
|
|
|
1
|
-
import time
|
|
2
|
-
import uuid
|
|
3
|
-
import json
|
|
4
|
-
from datetime import datetime
|
|
5
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
6
|
-
|
|
7
|
-
# Import base classes and utility structures
|
|
8
|
-
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
9
|
-
from .utils import (
|
|
10
|
-
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
11
|
-
ChatCompletionMessage, CompletionUsage,
|
|
12
|
-
format_prompt, count_tokens
|
|
13
|
-
)
|
|
14
|
-
|
|
15
|
-
# Import curl_cffi for Cloudflare bypass
|
|
16
|
-
from curl_cffi.requests import Session
|
|
17
|
-
from curl_cffi import CurlError
|
|
18
|
-
|
|
19
|
-
# Import LitAgent for user agent generation
|
|
20
|
-
from webscout.litagent import LitAgent
|
|
21
|
-
|
|
22
|
-
# ANSI escape codes for terminal formatting (used by the CLI smoke test
# and error reporting below).
BOLD = "\033[1m"   # bold text
RED = "\033[91m"   # bright red (error messages)
RESET = "\033[0m"  # reset all attributes
|
|
26
|
-
|
|
27
|
-
# Model configurations.
# Maps each backend provider to its multichatai.com chat endpoint and the
# models it serves; each model's "contextLength" is forwarded verbatim in
# the request's chatSettings payload (see MultiChatAI._get_chat_settings).
MODEL_CONFIGS = {
    "llama": {
        "endpoint": "https://www.multichatai.com/api/chat/meta",
        "models": {
            "llama-3.3-70b-versatile": {"contextLength": 131072},
            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
            "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
        },
    },
    "cohere": {
        "endpoint": "https://www.multichatai.com/api/chat/cohere",
        "models": {
            "command-r": {"contextLength": 128000},
            "command": {"contextLength": 4096},
        },
    },
    "google": {
        "endpoint": "https://www.multichatai.com/api/chat/google",
        "models": {
            "gemini-1.5-flash-002": {"contextLength": 1048576},
            "gemma2-9b-it": {"contextLength": 8192},
            "gemini-2.0-flash": {"contextLength": 128000},
        },
        # Google expects Gemini-style "parts" messages instead of "content"
        # (consumed by MultiChatAI._build_messages).
        "message_format": "parts",
    },
    "deepinfra": {
        "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
        "models": {
            "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
            "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
            "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
            "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
            "gemma-2-27b-it": {"contextLength": 8192},
        },
    },
    "mistral": {
        "endpoint": "https://www.multichatai.com/api/chat/mistral",
        "models": {
            "mistral-small-latest": {"contextLength": 32000},
            "codestral-latest": {"contextLength": 32000},
            "open-mistral-7b": {"contextLength": 8000},
            "open-mixtral-8x7b": {"contextLength": 8000},
        },
    },
    "alibaba": {
        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
        "models": {
            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
        },
    },
}
|
|
83
|
-
|
|
84
|
-
class Completions(BaseCompletions):
    """OpenAI-style ``chat.completions`` interface for MultiChatAI."""

    def __init__(self, client: 'MultiChatAI'):
        # Keep a reference to the owning client; all state lives there.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion using the MultiChatAI API.

        Args:
            model: The model to use.
            messages: A list of messages in the conversation.
            max_tokens: Maximum number of tokens to generate.
            stream: Whether to stream the response (streaming is simulated:
                the full response is yielded as a single chunk).
            temperature: Temperature for response generation.
            top_p: Top-p sampling parameter (accepted but not forwarded).
            timeout: Optional per-request timeout override, in seconds.
            proxies: Optional per-request proxy configuration.

        Returns:
            Either a ChatCompletion object or a generator of
            ChatCompletionChunk objects.

        Raises:
            IOError: If the underlying API request fails.
        """
        try:
            # Set client parameters based on function arguments.
            self._client.model = model
            if temperature is not None:
                self._client.temperature = temperature
            if max_tokens is not None:
                self._client.max_tokens_to_sample = max_tokens

            # BUGFIX: _make_api_request() does not accept timeout/proxies
            # keyword arguments, so passing them through raised TypeError on
            # every call. Apply the per-request overrides to the client
            # instead, then call with the prompt only.
            if timeout is not None:
                self._client.timeout = timeout
            if proxies is not None:
                self._client.session.proxies = proxies

            # Extract the first system message and use it as the system prompt.
            for message in messages:
                if message.get("role") == "system":
                    self._client.system_prompt = message.get("content", "")
                    break

            # Format all messages into a single prompt string.
            user_message = format_prompt(messages)

            # Generate a unique request ID and creation timestamp.
            request_id = f"multichat-{str(uuid.uuid4())}"
            created_time = int(time.time())

            # Make the API request (blocking; returns the full response text).
            response_text = self._client._make_api_request(user_message)

            # If streaming is requested, simulate streaming by yielding a
            # single chunk containing the full response.
            if stream:
                def generate_chunks():
                    delta = ChoiceDelta(content=response_text)
                    choice = Choice(index=0, delta=delta, finish_reason="stop")
                    chunk = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model,
                    )
                    yield chunk

                return generate_chunks()

            # For non-streaming, build a complete response object.
            message = ChatCompletionMessage(role="assistant", content=response_text)
            choice = Choice(index=0, message=message, finish_reason="stop")

            # Estimate token usage using count_tokens (approximate only).
            prompt_tokens = count_tokens(user_message)
            completion_tokens = count_tokens(response_text)
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
            )

            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

        except Exception as e:
            print(f"{RED}Error during MultiChatAI request: {e}{RESET}")
            raise IOError(f"MultiChatAI request failed: {e}") from e
|
|
184
|
-
|
|
185
|
-
class Chat(BaseChat):
    """Namespace object exposing ``client.chat.completions`` (OpenAI-style)."""

    def __init__(self, client: 'MultiChatAI'):
        # Bind the completions endpoint to the owning client.
        self.completions = Completions(client)
|
|
188
|
-
|
|
189
|
-
class MultiChatAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for MultiChatAI API.

    Usage:
        client = MultiChatAI()
        response = client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # Models accepted by __init__.
    # NOTE(review): this list omits the cohere/mistral entries present in
    # MODEL_CONFIGS — confirm whether that is intentional before extending.
    AVAILABLE_MODELS = [
        # Llama Models
        "llama-3.3-70b-versatile",
        "llama-3.2-11b-vision-preview",
        "deepseek-r1-distill-llama-70b",

        # Google Models
        "gemma2-9b-it",
        "gemini-2.0-flash",

        # DeepInfra Models
        "Sao10K/L3.1-70B-Euryale-v2.2",
        "Gryphe/MythoMax-L2-13b",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
        "deepseek-ai/DeepSeek-V3",
        "meta-llama/Meta-Llama-3.1-405B-Instruct",
        "NousResearch/Hermes-3-Llama-3.1-405B",

        # Alibaba Models
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/QwQ-32B-Preview"
    ]

    def __init__(
        self,
        timeout: int = 30,
        proxies: Optional[Dict[str, str]] = None,
        model: str = "llama-3.3-70b-versatile",
        system_prompt: str = "You are a friendly, helpful AI assistant.",
        temperature: float = 0.5,
        max_tokens: int = 4000
    ):
        """
        Initialize the MultiChatAI client.

        Args:
            timeout: Request timeout in seconds.
            proxies: Optional proxy configuration (default: no proxies).
            model: Default model to use.
            system_prompt: System prompt to use.
            temperature: Temperature for response generation.
            max_tokens: Maximum number of tokens to generate.

        Raises:
            ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # BUGFIX: the original signature used a mutable default argument
        # (``proxies: dict = {}``), shared across all calls; normalize from
        # None instead.
        if proxies is None:
            proxies = {}

        # curl_cffi Session: used for Cloudflare bypass via impersonation.
        self.session = Session()
        self.timeout = timeout
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.max_tokens_to_sample = max_tokens

        # LitAgent generates a realistic browser user-agent string.
        self.agent = LitAgent()

        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "text/plain;charset=UTF-8",
            "origin": "https://www.multichatai.com",
            "referer": "https://www.multichatai.com/",
            "user-agent": self.agent.random(),
        }

        # Configure session headers, proxies, and a fresh session cookie.
        self.session.headers.update(self.headers)
        self.session.proxies = proxies
        self.session.cookies.set("session", uuid.uuid4().hex)

        # Resolve the backend provider key from the model name (raises
        # ValueError for models absent from MODEL_CONFIGS).
        self.provider = self._get_provider_from_model(self.model)
        self.model_name = self.model

        # OpenAI-style chat interface (client.chat.completions.create).
        self.chat = Chat(self)

    @property
    def models(self):
        """Return an object exposing ``list()`` (OpenAI-style model listing)."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()

    def _get_endpoint(self) -> str:
        """Get the API endpoint URL for the current provider."""
        return MODEL_CONFIGS[self.provider]["endpoint"]

    def _get_chat_settings(self) -> Dict[str, Any]:
        """Build the chatSettings payload section for the current model."""
        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
        return {
            "model": self.model,
            "prompt": self.system_prompt,
            "temperature": self.temperature,
            "contextLength": base_settings["contextLength"],
            "includeProfileContext": True,
            "includeWorkspaceInstructions": True,
            "embeddingsProvider": "openai"
        }

    def _get_system_message(self) -> str:
        """Generate the system message, prefixed with the current date."""
        current_date = datetime.now().strftime("%d/%m/%Y")
        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"

    def _build_messages(self, conversation_prompt: str) -> list:
        """Build the messages array based on provider type.

        Google's endpoint expects Gemini-style ``parts`` messages; every
        other provider uses OpenAI-style ``content`` messages.
        """
        if self.provider == "google":
            return [
                {"role": "user", "parts": self._get_system_message()},
                {"role": "model", "parts": "I will follow your instructions."},
                {"role": "user", "parts": conversation_prompt}
            ]
        else:
            return [
                {"role": "system", "content": self._get_system_message()},
                {"role": "user", "content": conversation_prompt}
            ]

    def _get_provider_from_model(self, model: str) -> str:
        """Determine the provider key for ``model``.

        Raises:
            ValueError: If the model appears in no MODEL_CONFIGS entry.
        """
        for provider, config in MODEL_CONFIGS.items():
            if model in config["models"]:
                return provider

        # Build a helpful "provider/model" listing for the error message.
        available_models = []
        for provider, config in MODEL_CONFIGS.items():
            for model_name in config["models"].keys():
                available_models.append(f"{provider}/{model_name}")

        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
        raise ValueError(error_msg)

    def _make_api_request(
        self,
        prompt: str,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
    ) -> str:
        """Make the API request with proper error handling.

        BUGFIX: ``Completions.create`` passes ``timeout=``/``proxies=``
        keyword arguments, but the original signature accepted only
        ``prompt`` — every completion call raised TypeError. The new
        parameters default to None, so existing single-argument callers
        are unaffected.

        Args:
            prompt: Fully formatted conversation prompt.
            timeout: Optional per-call timeout override in seconds
                (falls back to ``self.timeout``).
            proxies: Optional per-call proxy mapping; when given it is
                applied to the underlying session.

        Returns:
            The stripped response body text.

        Raises:
            IOError: On any transport (CurlError) or HTTP error.
        """
        try:
            payload = {
                "chatSettings": self._get_chat_settings(),
                "messages": self._build_messages(prompt),
                "customModelId": "",
            }

            if proxies is not None:
                self.session.proxies = proxies

            # curl_cffi post with Chrome impersonation (Cloudflare bypass).
            response = self.session.post(
                self._get_endpoint(),
                json=payload,
                timeout=self.timeout if timeout is None else timeout,
                impersonate="chrome110"
            )
            response.raise_for_status()

            return response.text.strip()

        except CurlError as e:
            raise IOError(f"API request failed (CurlError): {e}") from e
        except Exception as e:
            # Include the response body in the error when available.
            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
            raise IOError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
|
|
365
|
-
|
|
366
|
-
if __name__ == "__main__":
    # Smoke test: ask the default provider for a one-word greeting.
    print(f"{BOLD}Testing MultiChatAI OpenAI-compatible provider{RESET}")

    ai = MultiChatAI()
    completion = ai.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Say 'Hello' in one word"},
        ],
    )
    print(f"Response: {completion.choices[0].message.content}")
|