webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in their public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (323)
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -249
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/__init__.py +10 -0
  5. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  6. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  7. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  8. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  9. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  10. webscout/Extra/YTToolkit/README.md +375 -0
  11. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  12. webscout/Extra/YTToolkit/__init__.py +3 -0
  13. webscout/Extra/YTToolkit/transcriber.py +476 -0
  14. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  15. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  16. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  17. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  18. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  19. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  20. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  21. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  22. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  23. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  24. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  25. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  26. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  27. webscout/Extra/__init__.py +7 -0
  28. webscout/Extra/autocoder/__init__.py +9 -0
  29. webscout/Extra/autocoder/autocoder.py +1105 -0
  30. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  31. webscout/Extra/gguf.md +430 -0
  32. webscout/Extra/gguf.py +684 -0
  33. webscout/Extra/tempmail/README.md +488 -0
  34. webscout/Extra/tempmail/__init__.py +28 -0
  35. webscout/Extra/tempmail/async_utils.py +141 -0
  36. webscout/Extra/tempmail/base.py +161 -0
  37. webscout/Extra/tempmail/cli.py +187 -0
  38. webscout/Extra/tempmail/emailnator.py +84 -0
  39. webscout/Extra/tempmail/mail_tm.py +361 -0
  40. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  41. webscout/Extra/weather.md +281 -0
  42. webscout/Extra/weather.py +194 -0
  43. webscout/Extra/weather_ascii.py +76 -0
  44. webscout/Litlogger/Readme.md +175 -0
  45. webscout/Litlogger/__init__.py +67 -0
  46. webscout/Litlogger/core/__init__.py +6 -0
  47. webscout/Litlogger/core/level.py +23 -0
  48. webscout/Litlogger/core/logger.py +165 -0
  49. webscout/Litlogger/handlers/__init__.py +12 -0
  50. webscout/Litlogger/handlers/console.py +33 -0
  51. webscout/Litlogger/handlers/file.py +143 -0
  52. webscout/Litlogger/handlers/network.py +173 -0
  53. webscout/Litlogger/styles/__init__.py +7 -0
  54. webscout/Litlogger/styles/colors.py +249 -0
  55. webscout/Litlogger/styles/formats.py +458 -0
  56. webscout/Litlogger/styles/text.py +87 -0
  57. webscout/Litlogger/utils/__init__.py +6 -0
  58. webscout/Litlogger/utils/detectors.py +153 -0
  59. webscout/Litlogger/utils/formatters.py +200 -0
  60. webscout/Provider/AI21.py +177 -0
  61. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  62. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  63. webscout/Provider/AISEARCH/README.md +279 -0
  64. webscout/Provider/AISEARCH/__init__.py +9 -0
  65. webscout/Provider/AISEARCH/felo_search.py +228 -0
  66. webscout/Provider/AISEARCH/genspark_search.py +350 -0
  67. webscout/Provider/AISEARCH/hika_search.py +198 -0
  68. webscout/Provider/AISEARCH/iask_search.py +436 -0
  69. webscout/Provider/AISEARCH/monica_search.py +246 -0
  70. webscout/Provider/AISEARCH/scira_search.py +324 -0
  71. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  72. webscout/Provider/Aitopia.py +316 -0
  73. webscout/Provider/AllenAI.py +440 -0
  74. webscout/Provider/Andi.py +228 -0
  75. webscout/Provider/Blackboxai.py +673 -0
  76. webscout/Provider/ChatGPTClone.py +237 -0
  77. webscout/Provider/ChatGPTGratis.py +194 -0
  78. webscout/Provider/ChatSandbox.py +342 -0
  79. webscout/Provider/Cloudflare.py +324 -0
  80. webscout/Provider/Cohere.py +208 -0
  81. webscout/Provider/Deepinfra.py +340 -0
  82. webscout/Provider/ExaAI.py +261 -0
  83. webscout/Provider/ExaChat.py +358 -0
  84. webscout/Provider/Flowith.py +217 -0
  85. webscout/Provider/FreeGemini.py +250 -0
  86. webscout/Provider/Gemini.py +169 -0
  87. webscout/Provider/GithubChat.py +370 -0
  88. webscout/Provider/GizAI.py +295 -0
  89. webscout/Provider/Glider.py +225 -0
  90. webscout/Provider/Groq.py +801 -0
  91. webscout/Provider/HF_space/__init__.py +0 -0
  92. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  93. webscout/Provider/HeckAI.py +285 -0
  94. webscout/Provider/HuggingFaceChat.py +469 -0
  95. webscout/Provider/Hunyuan.py +283 -0
  96. webscout/Provider/Jadve.py +291 -0
  97. webscout/Provider/Koboldai.py +384 -0
  98. webscout/Provider/LambdaChat.py +411 -0
  99. webscout/Provider/Llama3.py +259 -0
  100. webscout/Provider/MCPCore.py +315 -0
  101. webscout/Provider/Marcus.py +198 -0
  102. webscout/Provider/Nemotron.py +218 -0
  103. webscout/Provider/Netwrck.py +270 -0
  104. webscout/Provider/OLLAMA.py +396 -0
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  106. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  107. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  108. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  109. webscout/Provider/OPENAI/README.md +1253 -0
  110. webscout/Provider/OPENAI/__init__.py +36 -0
  111. webscout/Provider/OPENAI/ai4chat.py +293 -0
  112. webscout/Provider/OPENAI/api.py +810 -0
  113. webscout/Provider/OPENAI/base.py +249 -0
  114. webscout/Provider/OPENAI/c4ai.py +373 -0
  115. webscout/Provider/OPENAI/chatgpt.py +556 -0
  116. webscout/Provider/OPENAI/chatgptclone.py +488 -0
  117. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  118. webscout/Provider/OPENAI/deepinfra.py +319 -0
  119. webscout/Provider/OPENAI/e2b.py +1356 -0
  120. webscout/Provider/OPENAI/exaai.py +411 -0
  121. webscout/Provider/OPENAI/exachat.py +443 -0
  122. webscout/Provider/OPENAI/flowith.py +162 -0
  123. webscout/Provider/OPENAI/freeaichat.py +359 -0
  124. webscout/Provider/OPENAI/glider.py +323 -0
  125. webscout/Provider/OPENAI/groq.py +361 -0
  126. webscout/Provider/OPENAI/heckai.py +307 -0
  127. webscout/Provider/OPENAI/llmchatco.py +335 -0
  128. webscout/Provider/OPENAI/mcpcore.py +383 -0
  129. webscout/Provider/OPENAI/multichat.py +376 -0
  130. webscout/Provider/OPENAI/netwrck.py +356 -0
  131. webscout/Provider/OPENAI/opkfc.py +496 -0
  132. webscout/Provider/OPENAI/scirachat.py +471 -0
  133. webscout/Provider/OPENAI/sonus.py +303 -0
  134. webscout/Provider/OPENAI/standardinput.py +433 -0
  135. webscout/Provider/OPENAI/textpollinations.py +339 -0
  136. webscout/Provider/OPENAI/toolbaz.py +413 -0
  137. webscout/Provider/OPENAI/typefully.py +355 -0
  138. webscout/Provider/OPENAI/typegpt.py +358 -0
  139. webscout/Provider/OPENAI/uncovrAI.py +462 -0
  140. webscout/Provider/OPENAI/utils.py +307 -0
  141. webscout/Provider/OPENAI/venice.py +425 -0
  142. webscout/Provider/OPENAI/wisecat.py +381 -0
  143. webscout/Provider/OPENAI/writecream.py +163 -0
  144. webscout/Provider/OPENAI/x0gpt.py +378 -0
  145. webscout/Provider/OPENAI/yep.py +356 -0
  146. webscout/Provider/OpenGPT.py +209 -0
  147. webscout/Provider/Openai.py +496 -0
  148. webscout/Provider/PI.py +429 -0
  149. webscout/Provider/Perplexitylabs.py +415 -0
  150. webscout/Provider/QwenLM.py +254 -0
  151. webscout/Provider/Reka.py +214 -0
  152. webscout/Provider/StandardInput.py +290 -0
  153. webscout/Provider/TTI/AiForce/README.md +159 -0
  154. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  155. webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
  156. webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
  157. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  158. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  159. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
  160. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
  161. webscout/Provider/TTI/ImgSys/README.md +174 -0
  162. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  163. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  164. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  165. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  166. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  167. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  168. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  169. webscout/Provider/TTI/Nexra/README.md +155 -0
  170. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  171. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  172. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  173. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  174. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  175. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
  176. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
  177. webscout/Provider/TTI/README.md +128 -0
  178. webscout/Provider/TTI/__init__.py +12 -0
  179. webscout/Provider/TTI/aiarta/README.md +134 -0
  180. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  181. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  182. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  183. webscout/Provider/TTI/artbit/README.md +100 -0
  184. webscout/Provider/TTI/artbit/__init__.py +22 -0
  185. webscout/Provider/TTI/artbit/async_artbit.py +155 -0
  186. webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
  187. webscout/Provider/TTI/fastflux/README.md +129 -0
  188. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  189. webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
  190. webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
  191. webscout/Provider/TTI/huggingface/README.md +114 -0
  192. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  193. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  194. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  195. webscout/Provider/TTI/piclumen/README.md +161 -0
  196. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  197. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  198. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  199. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  200. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  201. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  202. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  203. webscout/Provider/TTI/talkai/README.md +139 -0
  204. webscout/Provider/TTI/talkai/__init__.py +4 -0
  205. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  206. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  207. webscout/Provider/TTS/README.md +192 -0
  208. webscout/Provider/TTS/__init__.py +9 -0
  209. webscout/Provider/TTS/base.py +159 -0
  210. webscout/Provider/TTS/deepgram.py +156 -0
  211. webscout/Provider/TTS/elevenlabs.py +111 -0
  212. webscout/Provider/TTS/gesserit.py +128 -0
  213. webscout/Provider/TTS/murfai.py +113 -0
  214. webscout/Provider/TTS/parler.py +111 -0
  215. webscout/Provider/TTS/speechma.py +580 -0
  216. webscout/Provider/TTS/sthir.py +94 -0
  217. webscout/Provider/TTS/streamElements.py +333 -0
  218. webscout/Provider/TTS/utils.py +280 -0
  219. webscout/Provider/TeachAnything.py +229 -0
  220. webscout/Provider/TextPollinationsAI.py +308 -0
  221. webscout/Provider/TwoAI.py +280 -0
  222. webscout/Provider/TypliAI.py +305 -0
  223. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  224. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  225. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  226. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  227. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  228. webscout/Provider/Venice.py +258 -0
  229. webscout/Provider/VercelAI.py +253 -0
  230. webscout/Provider/WiseCat.py +233 -0
  231. webscout/Provider/WrDoChat.py +370 -0
  232. webscout/Provider/Writecream.py +246 -0
  233. webscout/Provider/WritingMate.py +269 -0
  234. webscout/Provider/__init__.py +172 -0
  235. webscout/Provider/ai4chat.py +149 -0
  236. webscout/Provider/akashgpt.py +335 -0
  237. webscout/Provider/asksteve.py +220 -0
  238. webscout/Provider/cerebras.py +290 -0
  239. webscout/Provider/chatglm.py +215 -0
  240. webscout/Provider/cleeai.py +213 -0
  241. webscout/Provider/copilot.py +425 -0
  242. webscout/Provider/elmo.py +283 -0
  243. webscout/Provider/freeaichat.py +285 -0
  244. webscout/Provider/geminiapi.py +208 -0
  245. webscout/Provider/granite.py +235 -0
  246. webscout/Provider/hermes.py +266 -0
  247. webscout/Provider/julius.py +223 -0
  248. webscout/Provider/koala.py +170 -0
  249. webscout/Provider/learnfastai.py +325 -0
  250. webscout/Provider/llama3mitril.py +215 -0
  251. webscout/Provider/llmchat.py +258 -0
  252. webscout/Provider/llmchatco.py +306 -0
  253. webscout/Provider/lmarena.py +198 -0
  254. webscout/Provider/meta.py +801 -0
  255. webscout/Provider/multichat.py +364 -0
  256. webscout/Provider/samurai.py +223 -0
  257. webscout/Provider/scira_chat.py +299 -0
  258. webscout/Provider/scnet.py +243 -0
  259. webscout/Provider/searchchat.py +292 -0
  260. webscout/Provider/sonus.py +258 -0
  261. webscout/Provider/talkai.py +194 -0
  262. webscout/Provider/toolbaz.py +353 -0
  263. webscout/Provider/turboseek.py +266 -0
  264. webscout/Provider/typefully.py +202 -0
  265. webscout/Provider/typegpt.py +289 -0
  266. webscout/Provider/uncovr.py +368 -0
  267. webscout/Provider/x0gpt.py +299 -0
  268. webscout/Provider/yep.py +389 -0
  269. webscout/__init__.py +4 -2
  270. webscout/cli.py +3 -28
  271. webscout/conversation.py +35 -35
  272. webscout/litagent/Readme.md +276 -0
  273. webscout/litagent/__init__.py +29 -0
  274. webscout/litagent/agent.py +455 -0
  275. webscout/litagent/constants.py +60 -0
  276. webscout/litprinter/__init__.py +59 -0
  277. webscout/scout/README.md +402 -0
  278. webscout/scout/__init__.py +8 -0
  279. webscout/scout/core/__init__.py +7 -0
  280. webscout/scout/core/crawler.py +140 -0
  281. webscout/scout/core/scout.py +568 -0
  282. webscout/scout/core/search_result.py +96 -0
  283. webscout/scout/core/text_analyzer.py +63 -0
  284. webscout/scout/core/text_utils.py +277 -0
  285. webscout/scout/core/web_analyzer.py +52 -0
  286. webscout/scout/element.py +460 -0
  287. webscout/scout/parsers/__init__.py +69 -0
  288. webscout/scout/parsers/html5lib_parser.py +172 -0
  289. webscout/scout/parsers/html_parser.py +236 -0
  290. webscout/scout/parsers/lxml_parser.py +178 -0
  291. webscout/scout/utils.py +37 -0
  292. webscout/swiftcli/Readme.md +323 -0
  293. webscout/swiftcli/__init__.py +95 -0
  294. webscout/swiftcli/core/__init__.py +7 -0
  295. webscout/swiftcli/core/cli.py +297 -0
  296. webscout/swiftcli/core/context.py +104 -0
  297. webscout/swiftcli/core/group.py +241 -0
  298. webscout/swiftcli/decorators/__init__.py +28 -0
  299. webscout/swiftcli/decorators/command.py +221 -0
  300. webscout/swiftcli/decorators/options.py +220 -0
  301. webscout/swiftcli/decorators/output.py +252 -0
  302. webscout/swiftcli/exceptions.py +21 -0
  303. webscout/swiftcli/plugins/__init__.py +9 -0
  304. webscout/swiftcli/plugins/base.py +135 -0
  305. webscout/swiftcli/plugins/manager.py +262 -0
  306. webscout/swiftcli/utils/__init__.py +59 -0
  307. webscout/swiftcli/utils/formatting.py +252 -0
  308. webscout/swiftcli/utils/parsing.py +267 -0
  309. webscout/version.py +1 -1
  310. webscout/webscout_search.py +2 -182
  311. webscout/webscout_search_async.py +1 -179
  312. webscout/zeroart/README.md +89 -0
  313. webscout/zeroart/__init__.py +135 -0
  314. webscout/zeroart/base.py +66 -0
  315. webscout/zeroart/effects.py +101 -0
  316. webscout/zeroart/fonts.py +1239 -0
  317. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
  318. webscout-8.2.8.dist-info/RECORD +334 -0
  319. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  320. webscout-8.2.7.dist-info/RECORD +0 -26
  321. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
  322. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
  323. {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/multichat.py
@@ -0,0 +1,376 @@
import time
import uuid
import json
from datetime import datetime
from typing import List, Dict, Optional, Union, Generator, Any

# Import base classes and utility structures
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from .utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage,
    format_prompt
)

# Import curl_cffi for Cloudflare bypass
from curl_cffi.requests import Session
from curl_cffi import CurlError

# Import LitAgent for user agent generation
from webscout.litagent import LitAgent

# ANSI escape codes for formatting
BOLD = "\033[1m"
RED = "\033[91m"
RESET = "\033[0m"

# Model configurations
MODEL_CONFIGS = {
    "llama": {
        "endpoint": "https://www.multichatai.com/api/chat/meta",
        "models": {
            "llama-3.3-70b-versatile": {"contextLength": 131072},
            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
            "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
        },
    },
    "cohere": {
        "endpoint": "https://www.multichatai.com/api/chat/cohere",
        "models": {
            "command-r": {"contextLength": 128000},
            "command": {"contextLength": 4096},
        },
    },
    "google": {
        "endpoint": "https://www.multichatai.com/api/chat/google",
        "models": {
            "gemini-1.5-flash-002": {"contextLength": 1048576},
            "gemma2-9b-it": {"contextLength": 8192},
            "gemini-2.0-flash": {"contextLength": 128000},
        },
        "message_format": "parts",
    },
    "deepinfra": {
        "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
        "models": {
            "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
            "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
            "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
            "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
            "gemma-2-27b-it": {"contextLength": 8192},
        },
    },
    "mistral": {
        "endpoint": "https://www.multichatai.com/api/chat/mistral",
        "models": {
            "mistral-small-latest": {"contextLength": 32000},
            "codestral-latest": {"contextLength": 32000},
            "open-mistral-7b": {"contextLength": 8000},
            "open-mixtral-8x7b": {"contextLength": 8000},
        },
    },
    "alibaba": {
        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
        "models": {
            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
        },
    },
}

class Completions(BaseCompletions):
    def __init__(self, client: 'MultiChatAI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion using the MultiChatAI API.

        Args:
            model: The model to use
            messages: A list of messages in the conversation
            max_tokens: Maximum number of tokens to generate
            stream: Whether to stream the response
            temperature: Temperature for response generation
            top_p: Top-p sampling parameter

        Returns:
            Either a ChatCompletion object or a generator of ChatCompletionChunk objects
        """
        try:
            # Set client parameters based on function arguments
            self._client.model = model
            if temperature is not None:
                self._client.temperature = temperature
            if max_tokens is not None:
                self._client.max_tokens_to_sample = max_tokens

            # Extract system messages and set as system prompt
            for message in messages:
                if message.get("role") == "system":
                    self._client.system_prompt = message.get("content", "")
                    break

            # Format all messages into a single prompt
            user_message = format_prompt(messages)

            # Generate a unique request ID
            request_id = f"multichat-{str(uuid.uuid4())}"
            created_time = int(time.time())

            # Make the API request
            response_text = self._client._make_api_request(user_message)

            # If streaming is requested, simulate streaming with the full response
            if stream:
                def generate_chunks():
                    # Create a single chunk with the full response
                    delta = ChoiceDelta(content=response_text)
                    choice = Choice(index=0, delta=delta, finish_reason="stop")
                    chunk = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model,
                    )
                    yield chunk

                return generate_chunks()

            # For non-streaming, create a complete response
            message = ChatCompletionMessage(role="assistant", content=response_text)
            choice = Choice(index=0, message=message, finish_reason="stop")

            # Estimate token usage (this is approximate)
            prompt_tokens = len(user_message) // 4  # Rough estimate
            completion_tokens = len(response_text) // 4  # Rough estimate
            total_tokens = prompt_tokens + completion_tokens

            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )

            # Create the completion object
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

            return completion

        except Exception as e:
            print(f"{RED}Error during MultiChatAI request: {e}{RESET}")
            raise IOError(f"MultiChatAI request failed: {e}") from e

class Chat(BaseChat):
    def __init__(self, client: 'MultiChatAI'):
        self.completions = Completions(client)

class MultiChatAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for MultiChatAI API.

    Usage:
        client = MultiChatAI()
        response = client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = [
        # Llama Models
        "llama-3.3-70b-versatile",
        "llama-3.2-11b-vision-preview",
        "deepseek-r1-distill-llama-70b",

        # Google Models
        "gemma2-9b-it",
        "gemini-2.0-flash",

        # DeepInfra Models
        "Sao10K/L3.1-70B-Euryale-v2.2",
        "Gryphe/MythoMax-L2-13b",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
        "deepseek-ai/DeepSeek-V3",
        "meta-llama/Meta-Llama-3.1-405B-Instruct",
        "NousResearch/Hermes-3-Llama-3.1-405B",

        # Alibaba Models
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/QwQ-32B-Preview"
    ]

    def __init__(
        self,
        timeout: int = 30,
        proxies: dict = {},
        model: str = "llama-3.3-70b-versatile",
        system_prompt: str = "You are a friendly, helpful AI assistant.",
        temperature: float = 0.5,
        max_tokens: int = 4000
    ):
        """
        Initialize the MultiChatAI client.

        Args:
            timeout: Request timeout in seconds
            proxies: Optional proxy configuration
            model: Default model to use
            system_prompt: System prompt to use
            temperature: Temperature for response generation
            max_tokens: Maximum number of tokens to generate
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # Initialize curl_cffi Session
        self.session = Session()
        self.timeout = timeout
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.max_tokens_to_sample = max_tokens

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()

        self.headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "text/plain;charset=UTF-8",
            "origin": "https://www.multichatai.com",
            "referer": "https://www.multichatai.com/",
            "user-agent": self.agent.random(),
        }

        # Update curl_cffi session headers, proxies, and cookies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies
        self.session.cookies.set("session", uuid.uuid4().hex)

        # Initialize the provider based on the model
        self.provider = self._get_provider_from_model(self.model)
        self.model_name = self.model

        # Initialize the chat interface
        self.chat = Chat(self)

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()


    def _get_endpoint(self) -> str:
        """Get the API endpoint for the current provider."""
        return MODEL_CONFIGS[self.provider]["endpoint"]

    def _get_chat_settings(self) -> Dict[str, Any]:
        """Get chat settings for the current model."""
        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
        return {
            "model": self.model,
            "prompt": self.system_prompt,
            "temperature": self.temperature,
            "contextLength": base_settings["contextLength"],
            "includeProfileContext": True,
            "includeWorkspaceInstructions": True,
            "embeddingsProvider": "openai"
        }

    def _get_system_message(self) -> str:
        """Generate system message with current date."""
        current_date = datetime.now().strftime("%d/%m/%Y")
        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"

    def _build_messages(self, conversation_prompt: str) -> list:
        """Build messages array based on provider type."""
        if self.provider == "google":
            return [
                {"role": "user", "parts": self._get_system_message()},
                {"role": "model", "parts": "I will follow your instructions."},
                {"role": "user", "parts": conversation_prompt}
            ]
        else:
            return [
                {"role": "system", "content": self._get_system_message()},
                {"role": "user", "content": conversation_prompt}
            ]

    def _get_provider_from_model(self, model: str) -> str:
        """Determine the provider based on the model name."""
        for provider, config in MODEL_CONFIGS.items():
            if model in config["models"]:
                return provider

        available_models = []
        for provider, config in MODEL_CONFIGS.items():
            for model_name in config["models"].keys():
                available_models.append(f"{provider}/{model_name}")

        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
        raise ValueError(error_msg)

    def _make_api_request(self, prompt: str) -> str:
        """Make the API request with proper error handling."""
        try:
            payload = {
                "chatSettings": self._get_chat_settings(),
                "messages": self._build_messages(prompt),
                "customModelId": "",
            }

            # Use curl_cffi session post with impersonate
            response = self.session.post(
                self._get_endpoint(),
                json=payload,
                timeout=self.timeout,
                impersonate="chrome110"
            )
            response.raise_for_status()

            # Return the response text
            return response.text.strip()

        except CurlError as e:
            raise IOError(f"API request failed (CurlError): {e}") from e
        except Exception as e:
            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
            raise IOError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e

if __name__ == "__main__":
    print(f"{BOLD}Testing MultiChatAI OpenAI-compatible provider{RESET}")

    client = MultiChatAI()
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Say 'Hello' in one word"}
        ]
    )

    print(f"Response: {response.choices[0].message.content}")