webscout-8.2.7-py3-none-any.whl → webscout-8.2.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (281)
  1. webscout/AIauto.py +33 -15
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +703 -250
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/Act.md +309 -0
  6. webscout/Extra/GitToolkit/__init__.py +10 -0
  7. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  8. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  10. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  11. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  12. webscout/Extra/YTToolkit/README.md +375 -0
  13. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  14. webscout/Extra/YTToolkit/__init__.py +3 -0
  15. webscout/Extra/YTToolkit/transcriber.py +476 -0
  16. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  17. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  18. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  19. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  20. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  21. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  22. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  23. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  24. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  25. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  26. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  27. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  28. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  29. webscout/Extra/__init__.py +7 -0
  30. webscout/Extra/autocoder/__init__.py +9 -0
  31. webscout/Extra/autocoder/autocoder.py +1105 -0
  32. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  33. webscout/Extra/gguf.md +430 -0
  34. webscout/Extra/gguf.py +684 -0
  35. webscout/Extra/tempmail/README.md +488 -0
  36. webscout/Extra/tempmail/__init__.py +28 -0
  37. webscout/Extra/tempmail/async_utils.py +141 -0
  38. webscout/Extra/tempmail/base.py +161 -0
  39. webscout/Extra/tempmail/cli.py +187 -0
  40. webscout/Extra/tempmail/emailnator.py +84 -0
  41. webscout/Extra/tempmail/mail_tm.py +361 -0
  42. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  43. webscout/Extra/weather.md +281 -0
  44. webscout/Extra/weather.py +194 -0
  45. webscout/Extra/weather_ascii.py +76 -0
  46. webscout/Litlogger/README.md +10 -0
  47. webscout/Litlogger/__init__.py +15 -0
  48. webscout/Litlogger/formats.py +4 -0
  49. webscout/Litlogger/handlers.py +103 -0
  50. webscout/Litlogger/levels.py +13 -0
  51. webscout/Litlogger/logger.py +92 -0
  52. webscout/Provider/AI21.py +177 -0
  53. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  54. webscout/Provider/AISEARCH/Perplexity.py +333 -0
  55. webscout/Provider/AISEARCH/README.md +279 -0
  56. webscout/Provider/AISEARCH/__init__.py +9 -0
  57. webscout/Provider/AISEARCH/felo_search.py +202 -0
  58. webscout/Provider/AISEARCH/genspark_search.py +324 -0
  59. webscout/Provider/AISEARCH/hika_search.py +186 -0
  60. webscout/Provider/AISEARCH/iask_search.py +410 -0
  61. webscout/Provider/AISEARCH/monica_search.py +220 -0
  62. webscout/Provider/AISEARCH/scira_search.py +298 -0
  63. webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
  64. webscout/Provider/Aitopia.py +316 -0
  65. webscout/Provider/AllenAI.py +440 -0
  66. webscout/Provider/Andi.py +228 -0
  67. webscout/Provider/Blackboxai.py +791 -0
  68. webscout/Provider/ChatGPTClone.py +237 -0
  69. webscout/Provider/ChatGPTGratis.py +194 -0
  70. webscout/Provider/ChatSandbox.py +342 -0
  71. webscout/Provider/Cloudflare.py +324 -0
  72. webscout/Provider/Cohere.py +208 -0
  73. webscout/Provider/Deepinfra.py +340 -0
  74. webscout/Provider/ExaAI.py +261 -0
  75. webscout/Provider/ExaChat.py +358 -0
  76. webscout/Provider/Flowith.py +217 -0
  77. webscout/Provider/FreeGemini.py +250 -0
  78. webscout/Provider/Gemini.py +169 -0
  79. webscout/Provider/GithubChat.py +369 -0
  80. webscout/Provider/GizAI.py +295 -0
  81. webscout/Provider/Glider.py +225 -0
  82. webscout/Provider/Groq.py +801 -0
  83. webscout/Provider/HF_space/__init__.py +0 -0
  84. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  85. webscout/Provider/HeckAI.py +375 -0
  86. webscout/Provider/HuggingFaceChat.py +469 -0
  87. webscout/Provider/Hunyuan.py +283 -0
  88. webscout/Provider/Jadve.py +291 -0
  89. webscout/Provider/Koboldai.py +384 -0
  90. webscout/Provider/LambdaChat.py +411 -0
  91. webscout/Provider/Llama3.py +259 -0
  92. webscout/Provider/MCPCore.py +315 -0
  93. webscout/Provider/Marcus.py +198 -0
  94. webscout/Provider/Nemotron.py +218 -0
  95. webscout/Provider/Netwrck.py +270 -0
  96. webscout/Provider/OLLAMA.py +396 -0
  97. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
  98. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  99. webscout/Provider/OPENAI/FreeGemini.py +283 -0
  100. webscout/Provider/OPENAI/NEMOTRON.py +232 -0
  101. webscout/Provider/OPENAI/Qwen3.py +283 -0
  102. webscout/Provider/OPENAI/README.md +952 -0
  103. webscout/Provider/OPENAI/TwoAI.py +357 -0
  104. webscout/Provider/OPENAI/__init__.py +40 -0
  105. webscout/Provider/OPENAI/ai4chat.py +293 -0
  106. webscout/Provider/OPENAI/api.py +969 -0
  107. webscout/Provider/OPENAI/base.py +249 -0
  108. webscout/Provider/OPENAI/c4ai.py +373 -0
  109. webscout/Provider/OPENAI/chatgpt.py +556 -0
  110. webscout/Provider/OPENAI/chatgptclone.py +494 -0
  111. webscout/Provider/OPENAI/chatsandbox.py +173 -0
  112. webscout/Provider/OPENAI/copilot.py +242 -0
  113. webscout/Provider/OPENAI/deepinfra.py +322 -0
  114. webscout/Provider/OPENAI/e2b.py +1414 -0
  115. webscout/Provider/OPENAI/exaai.py +417 -0
  116. webscout/Provider/OPENAI/exachat.py +444 -0
  117. webscout/Provider/OPENAI/flowith.py +162 -0
  118. webscout/Provider/OPENAI/freeaichat.py +359 -0
  119. webscout/Provider/OPENAI/glider.py +326 -0
  120. webscout/Provider/OPENAI/groq.py +364 -0
  121. webscout/Provider/OPENAI/heckai.py +308 -0
  122. webscout/Provider/OPENAI/llmchatco.py +335 -0
  123. webscout/Provider/OPENAI/mcpcore.py +389 -0
  124. webscout/Provider/OPENAI/multichat.py +376 -0
  125. webscout/Provider/OPENAI/netwrck.py +357 -0
  126. webscout/Provider/OPENAI/oivscode.py +287 -0
  127. webscout/Provider/OPENAI/opkfc.py +496 -0
  128. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  129. webscout/Provider/OPENAI/scirachat.py +477 -0
  130. webscout/Provider/OPENAI/sonus.py +304 -0
  131. webscout/Provider/OPENAI/standardinput.py +433 -0
  132. webscout/Provider/OPENAI/textpollinations.py +339 -0
  133. webscout/Provider/OPENAI/toolbaz.py +413 -0
  134. webscout/Provider/OPENAI/typefully.py +355 -0
  135. webscout/Provider/OPENAI/typegpt.py +364 -0
  136. webscout/Provider/OPENAI/uncovrAI.py +463 -0
  137. webscout/Provider/OPENAI/utils.py +318 -0
  138. webscout/Provider/OPENAI/venice.py +431 -0
  139. webscout/Provider/OPENAI/wisecat.py +387 -0
  140. webscout/Provider/OPENAI/writecream.py +163 -0
  141. webscout/Provider/OPENAI/x0gpt.py +365 -0
  142. webscout/Provider/OPENAI/yep.py +382 -0
  143. webscout/Provider/OpenGPT.py +209 -0
  144. webscout/Provider/Openai.py +496 -0
  145. webscout/Provider/PI.py +429 -0
  146. webscout/Provider/Perplexitylabs.py +415 -0
  147. webscout/Provider/QwenLM.py +254 -0
  148. webscout/Provider/Reka.py +214 -0
  149. webscout/Provider/StandardInput.py +290 -0
  150. webscout/Provider/TTI/README.md +82 -0
  151. webscout/Provider/TTI/__init__.py +7 -0
  152. webscout/Provider/TTI/aiarta.py +365 -0
  153. webscout/Provider/TTI/artbit.py +0 -0
  154. webscout/Provider/TTI/base.py +64 -0
  155. webscout/Provider/TTI/fastflux.py +200 -0
  156. webscout/Provider/TTI/magicstudio.py +201 -0
  157. webscout/Provider/TTI/piclumen.py +203 -0
  158. webscout/Provider/TTI/pixelmuse.py +225 -0
  159. webscout/Provider/TTI/pollinations.py +221 -0
  160. webscout/Provider/TTI/utils.py +11 -0
  161. webscout/Provider/TTS/README.md +192 -0
  162. webscout/Provider/TTS/__init__.py +10 -0
  163. webscout/Provider/TTS/base.py +159 -0
  164. webscout/Provider/TTS/deepgram.py +156 -0
  165. webscout/Provider/TTS/elevenlabs.py +111 -0
  166. webscout/Provider/TTS/gesserit.py +128 -0
  167. webscout/Provider/TTS/murfai.py +113 -0
  168. webscout/Provider/TTS/openai_fm.py +129 -0
  169. webscout/Provider/TTS/parler.py +111 -0
  170. webscout/Provider/TTS/speechma.py +580 -0
  171. webscout/Provider/TTS/sthir.py +94 -0
  172. webscout/Provider/TTS/streamElements.py +333 -0
  173. webscout/Provider/TTS/utils.py +280 -0
  174. webscout/Provider/TeachAnything.py +229 -0
  175. webscout/Provider/TextPollinationsAI.py +308 -0
  176. webscout/Provider/TwoAI.py +475 -0
  177. webscout/Provider/TypliAI.py +305 -0
  178. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  179. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  180. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  181. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  182. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  183. webscout/Provider/Venice.py +258 -0
  184. webscout/Provider/VercelAI.py +253 -0
  185. webscout/Provider/WiseCat.py +233 -0
  186. webscout/Provider/WrDoChat.py +370 -0
  187. webscout/Provider/Writecream.py +246 -0
  188. webscout/Provider/WritingMate.py +269 -0
  189. webscout/Provider/__init__.py +174 -0
  190. webscout/Provider/ai4chat.py +174 -0
  191. webscout/Provider/akashgpt.py +335 -0
  192. webscout/Provider/asksteve.py +220 -0
  193. webscout/Provider/cerebras.py +290 -0
  194. webscout/Provider/chatglm.py +215 -0
  195. webscout/Provider/cleeai.py +213 -0
  196. webscout/Provider/copilot.py +425 -0
  197. webscout/Provider/elmo.py +283 -0
  198. webscout/Provider/freeaichat.py +285 -0
  199. webscout/Provider/geminiapi.py +208 -0
  200. webscout/Provider/granite.py +235 -0
  201. webscout/Provider/hermes.py +266 -0
  202. webscout/Provider/julius.py +223 -0
  203. webscout/Provider/koala.py +170 -0
  204. webscout/Provider/learnfastai.py +325 -0
  205. webscout/Provider/llama3mitril.py +215 -0
  206. webscout/Provider/llmchat.py +258 -0
  207. webscout/Provider/llmchatco.py +306 -0
  208. webscout/Provider/lmarena.py +198 -0
  209. webscout/Provider/meta.py +801 -0
  210. webscout/Provider/multichat.py +364 -0
  211. webscout/Provider/oivscode.py +309 -0
  212. webscout/Provider/samurai.py +224 -0
  213. webscout/Provider/scira_chat.py +299 -0
  214. webscout/Provider/scnet.py +243 -0
  215. webscout/Provider/searchchat.py +292 -0
  216. webscout/Provider/sonus.py +258 -0
  217. webscout/Provider/talkai.py +194 -0
  218. webscout/Provider/toolbaz.py +353 -0
  219. webscout/Provider/turboseek.py +266 -0
  220. webscout/Provider/typefully.py +202 -0
  221. webscout/Provider/typegpt.py +289 -0
  222. webscout/Provider/uncovr.py +368 -0
  223. webscout/Provider/x0gpt.py +299 -0
  224. webscout/Provider/yep.py +389 -0
  225. webscout/__init__.py +4 -2
  226. webscout/cli.py +3 -28
  227. webscout/client.py +70 -0
  228. webscout/conversation.py +35 -35
  229. webscout/litagent/Readme.md +276 -0
  230. webscout/litagent/__init__.py +29 -0
  231. webscout/litagent/agent.py +455 -0
  232. webscout/litagent/constants.py +60 -0
  233. webscout/litprinter/__init__.py +59 -0
  234. webscout/optimizers.py +419 -419
  235. webscout/scout/README.md +404 -0
  236. webscout/scout/__init__.py +8 -0
  237. webscout/scout/core/__init__.py +7 -0
  238. webscout/scout/core/crawler.py +210 -0
  239. webscout/scout/core/scout.py +607 -0
  240. webscout/scout/core/search_result.py +96 -0
  241. webscout/scout/core/text_analyzer.py +63 -0
  242. webscout/scout/core/text_utils.py +277 -0
  243. webscout/scout/core/web_analyzer.py +52 -0
  244. webscout/scout/element.py +478 -0
  245. webscout/scout/parsers/__init__.py +69 -0
  246. webscout/scout/parsers/html5lib_parser.py +172 -0
  247. webscout/scout/parsers/html_parser.py +236 -0
  248. webscout/scout/parsers/lxml_parser.py +178 -0
  249. webscout/scout/utils.py +37 -0
  250. webscout/swiftcli/Readme.md +323 -0
  251. webscout/swiftcli/__init__.py +95 -0
  252. webscout/swiftcli/core/__init__.py +7 -0
  253. webscout/swiftcli/core/cli.py +297 -0
  254. webscout/swiftcli/core/context.py +104 -0
  255. webscout/swiftcli/core/group.py +241 -0
  256. webscout/swiftcli/decorators/__init__.py +28 -0
  257. webscout/swiftcli/decorators/command.py +221 -0
  258. webscout/swiftcli/decorators/options.py +220 -0
  259. webscout/swiftcli/decorators/output.py +252 -0
  260. webscout/swiftcli/exceptions.py +21 -0
  261. webscout/swiftcli/plugins/__init__.py +9 -0
  262. webscout/swiftcli/plugins/base.py +135 -0
  263. webscout/swiftcli/plugins/manager.py +269 -0
  264. webscout/swiftcli/utils/__init__.py +59 -0
  265. webscout/swiftcli/utils/formatting.py +252 -0
  266. webscout/swiftcli/utils/parsing.py +267 -0
  267. webscout/version.py +1 -1
  268. webscout/webscout_search.py +2 -182
  269. webscout/webscout_search_async.py +1 -179
  270. webscout/zeroart/README.md +89 -0
  271. webscout/zeroart/__init__.py +135 -0
  272. webscout/zeroart/base.py +66 -0
  273. webscout/zeroart/effects.py +101 -0
  274. webscout/zeroart/fonts.py +1239 -0
  275. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
  276. webscout-8.2.9.dist-info/RECORD +289 -0
  277. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  278. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  279. webscout-8.2.7.dist-info/RECORD +0 -26
  280. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  281. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,232 @@
1
+ import time
2
+ import uuid
3
+ import requests
4
+ import json
5
+ import random
6
+ import datetime
7
+ import re
8
+ from typing import List, Dict, Optional, Union, Generator, Any
9
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
10
+ from webscout.Provider.OPENAI.utils import (
11
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
12
+ ChatCompletionMessage, CompletionUsage, format_prompt, count_tokens
13
+ )
14
+ try:
15
+ from webscout.litagent import LitAgent
16
+ except ImportError:
17
+ class LitAgent:
18
+ def random(self) -> str:
19
+ return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
20
+ from webscout.AIutel import sanitize_stream
21
+ from webscout import exceptions
22
+
23
+
24
+ class Completions(BaseCompletions):
25
+ def __init__(self, client: 'NEMOTRON'):
26
+ self._client = client
27
+
28
+ def create(
29
+ self,
30
+ *,
31
+ model: str,
32
+ messages: List[Dict[str, str]],
33
+ max_tokens: Optional[int] = None,
34
+ stream: bool = False,
35
+ temperature: Optional[float] = None,
36
+ top_p: Optional[float] = None,
37
+ **kwargs: Any
38
+ ) -> ChatCompletion:
39
+ nemotron_model_name = self._client.convert_model_name(model)
40
+ prompt_content = format_prompt(messages, add_special_tokens=True, include_system=True, do_continue=True)
41
+ payload = {
42
+ "content": prompt_content,
43
+ "imageSrc": "",
44
+ "model": nemotron_model_name,
45
+ "user": self._client._get_user_data(),
46
+ "conversationId": kwargs.get("conversation_id", "")
47
+ }
48
+ request_id = f"chatcmpl-{uuid.uuid4()}"
49
+ created_time = int(time.time())
50
+ # Always use non-stream mode, ignore 'stream' argument
51
+ return self._create_non_stream(request_id, created_time, model, payload)
52
+
53
+ def _create_stream(
54
+ self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any]
55
+ ) -> Generator[ChatCompletionChunk, None, None]:
56
+ try:
57
+ response_generator = self._client._internal_make_request(payload, stream=True)
58
+ for text_chunk in response_generator:
59
+ if text_chunk:
60
+ delta = ChoiceDelta(content=text_chunk, role="assistant")
61
+ choice = Choice(index=0, delta=delta, finish_reason=None)
62
+ chunk = ChatCompletionChunk(
63
+ id=request_id,
64
+ choices=[choice],
65
+ created=created_time,
66
+ model=model_name,
67
+ )
68
+ yield chunk
69
+ final_delta = ChoiceDelta()
70
+ final_choice = Choice(index=0, delta=final_delta, finish_reason="stop")
71
+ final_chunk = ChatCompletionChunk(
72
+ id=request_id,
73
+ choices=[final_choice],
74
+ created=created_time,
75
+ model=model_name,
76
+ )
77
+ yield final_chunk
78
+ except Exception as e:
79
+ raise IOError(f"NEMOTRON request failed: {e}") from e
80
+
81
+ def _create_non_stream(
82
+ self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any]
83
+ ) -> ChatCompletion:
84
+ full_response_content = ""
85
+ try:
86
+ response_generator = self._client._internal_make_request(payload, stream=False)
87
+ full_response_content = next(response_generator, "")
88
+ except Exception as e:
89
+ pass
90
+ message = ChatCompletionMessage(role="assistant", content=full_response_content)
91
+ choice = Choice(index=0, message=message, finish_reason="stop")
92
+ prompt_tokens = count_tokens(payload.get("content", ""))
93
+ completion_tokens = count_tokens(full_response_content)
94
+ usage = CompletionUsage(
95
+ prompt_tokens=prompt_tokens,
96
+ completion_tokens=completion_tokens,
97
+ total_tokens=prompt_tokens + completion_tokens
98
+ )
99
+ completion = ChatCompletion(
100
+ id=request_id,
101
+ choices=[choice],
102
+ created=created_time,
103
+ model=model_name,
104
+ usage=usage,
105
+ )
106
+ return completion
107
+
108
class Chat(BaseChat):
    """Chat namespace exposing the OpenAI-style ``completions`` attribute."""

    def __init__(self, client: 'NEMOTRON'):
        # Bind the completions endpoint to the owning provider.
        self.completions = Completions(client)
111
+
112
class NEMOTRON(OpenAICompatibleProvider):
    """OpenAI-compatible provider backed by the nemotron.one chat API."""

    # Model names accepted verbatim by the upstream API.
    AVAILABLE_MODELS = [
        "gpt4o",
        "nemotron70b",
    ]

    API_BASE_URL = "https://nemotron.one/api/chat"

    def __init__(
        self,
        timeout: int = 30,
        proxies: Optional[dict] = None
    ):
        """Initialize the HTTP session and browser-like headers.

        Args:
            timeout: Per-request timeout in seconds.
            proxies: Optional requests-style proxy mapping. (Bug fix: the
                previous mutable default ``{}`` was a shared mutable default
                argument.)
        """
        self.session = requests.Session()
        self.timeout = timeout
        agent = LitAgent()
        user_agent = agent.random()
        self.base_headers = {
            "authority": "nemotron.one",
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://nemotron.one",
            "sec-ch-ua": '"Chromium";v="136", "Not.A/Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "user-agent": user_agent
        }
        self.session.headers.update(self.base_headers)
        if proxies:
            self.session.proxies.update(proxies)
        self.chat = Chat(self)

    def _generate_random_email(self) -> str:
        """Return a random 10-character ``@gmail.com`` address (throwaway identity)."""
        random_letter = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        random_string = ''.join(random.choice(random_letter) for _ in range(10))
        return f"{random_string}@gmail.com"

    def _generate_random_id(self) -> str:
        """Return a pseudo user id: ``cm`` + 8 random chars + first 10 timestamp digits."""
        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
        random_letter = "abcdefghijklmnopqrstuvwxyz0123456789"
        random_string = ''.join(random.choice(random_letter) for _ in range(8))
        return f"cm{random_string}{timestamp[:10]}"

    def _get_user_data(self) -> Dict[str, Any]:
        """Build the anonymous ``user`` object embedded in every chat payload."""
        current_time = datetime.datetime.now().isoformat()
        return {
            "name": "user",
            "email": self._generate_random_email(),
            "image": "https://lh3.googleusercontent.com/a/default-user=s96-c",
            "id": self._generate_random_id(),
            "password": None,
            "emailVerified": None,
            "credits": 100000000000,
            "isPro": False,
            "createdAt": current_time,
            "updatedAt": current_time
        }

    def convert_model_name(self, model_alias: str) -> str:
        """
        Convert model names to ones supported by NEMOTRON API.

        Args:
            model_alias: Model name to convert

        Returns:
            NEMOTRON model name for API payload
        """
        # Accept only direct model names
        if model_alias in self.AVAILABLE_MODELS:
            return model_alias

        # Case-insensitive matching
        for m in self.AVAILABLE_MODELS:
            if m.lower() == model_alias.lower():
                return m

        # Default to gpt4o if no match
        print(f"Warning: Unknown model '{model_alias}'. Using 'gpt4o' instead.")
        return "gpt4o"

    def _internal_make_request(
        self,
        payload: Dict[str, Any],
        stream: bool = False
    ) -> Generator[str, None, None]:
        """Yield response text from the chat endpoint.

        In stream mode, yields sanitized text chunks; otherwise yields the
        full response body exactly once.

        Raises:
            exceptions.ProviderConnectionError: On any HTTP/transport error.
            RuntimeError: On any other unexpected error.
        """
        request_headers = self.base_headers.copy()
        # Upstream expects a per-model referer path.
        request_headers["referer"] = f"https://nemotron.one/chat/{payload['model']}"
        try:
            if stream:
                with self.session.post(
                    self.API_BASE_URL,
                    headers=request_headers,
                    json=payload,
                    stream=True,
                    timeout=self.timeout
                ) as response:
                    response.raise_for_status()
                    yield from sanitize_stream(
                        response.iter_content(chunk_size=1024),
                        to_json=False,
                    )
            else:
                response = self.session.post(
                    self.API_BASE_URL,
                    headers=request_headers,
                    json=payload,
                    timeout=self.timeout
                )
                response.raise_for_status()
                yield response.text
        except requests.exceptions.RequestException as e:
            raise exceptions.ProviderConnectionError(f"NEMOTRON API Connection error: {str(e)}")
        except Exception as e:
            raise RuntimeError(f"NEMOTRON API request unexpected error: {str(e)}")

    @property
    def models(self):
        """Minimal OpenAI-style model-listing shim (``.models.list()``)."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
@@ -0,0 +1,283 @@
1
+ import requests
2
+ import json
3
+ import time
4
+ import uuid
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
8
+ from webscout.Provider.OPENAI.utils import (
9
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
10
+ ChatCompletionMessage, CompletionUsage,
11
+ get_last_user_message, get_system_prompt,
12
+ count_tokens
13
+ )
14
+
15
class Completions(BaseCompletions):
    """OpenAI-style ``chat.completions`` endpoint for the Qwen3 Gradio space.

    Talks to the Hugging Face Gradio queue API: first joins the queue, then
    (for streaming) consumes the SSE data feed and re-emits the model's
    "thinking" (tool) and answer (text) content as OpenAI-style chunks,
    wrapping the thinking portion in ``<think>…</think>`` tags.
    """

    def __init__(self, client: 'Qwen3'):
        # Owning provider; supplies the HTTP session, headers, and endpoints.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2048,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """Create a chat completion against the Gradio queue API.

        Args:
            model: Model name or alias; resolved via ``Qwen3.get_model``.
            messages: OpenAI-format message dicts. Only the last user message
                and the system prompt are forwarded upstream.
            max_tokens: Accepted for interface compatibility; not forwarded.
            stream: When True, return a generator of ``ChatCompletionChunk``.
            temperature: Accepted for interface compatibility; not forwarded.
            top_p: Accepted for interface compatibility; not forwarded.
            **kwargs: ``thinking_budget`` (default 38) is forwarded upstream.

        Returns:
            ``ChatCompletion`` or a generator of ``ChatCompletionChunk``.
        """
        payload = {
            "data": [
                get_last_user_message(messages),
                {
                    "thinking_budget": kwargs.get("thinking_budget", 38),
                    "model": self._client.get_model(model),
                    "sys_prompt": get_system_prompt(messages)
                },
                None, None
            ],
            "event_data": None,
            # NOTE(review): fn_index/trigger_id are tied to the deployed
            # Gradio app's function graph — re-verify after space updates.
            "fn_index": 13,
            "trigger_id": 31,
            "session_hash": str(uuid.uuid4()).replace('-', '')
        }

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        else:
            return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Join the Gradio queue and stream chunks from its SSE data feed."""
        session = self._client.session
        headers = self._client.headers
        # Step 1: Join the queue
        join_resp = session.post(self._client.api_endpoint, headers=headers, json=payload, timeout=self._client.timeout)
        join_resp.raise_for_status()
        session_hash = payload["session_hash"]

        # Step 2: Stream data
        params = {'session_hash': session_hash}
        stream_resp = session.get(self._client.url + "/gradio_api/queue/data", headers=self._client.stream_headers, params=params, stream=True, timeout=self._client.timeout)
        stream_resp.raise_for_status()

        # Yield all content, tool reasoning, and status, similar to the
        # Reasoning class elsewhere in the package.
        is_thinking_tag_open = False  # True if <think> has been yielded and not yet </think>

        for line in stream_resp.iter_lines():
            if line:
                decoded_line = line.decode('utf-8')
                if decoded_line.startswith('data: '):
                    try:
                        json_data = json.loads(decoded_line[6:])
                        if json_data.get('msg') == 'process_generating':
                            if 'output' in json_data and 'data' in json_data['output'] and len(json_data['output']['data']) > 5:
                                updates_list = json_data['output']['data'][5]  # This is a list of operations
                                for op_details in updates_list:
                                    action = op_details[0]
                                    path = op_details[1]
                                    value = op_details[2]

                                    content_to_yield = None
                                    is_current_op_tool = False
                                    is_current_op_text = False

                                    # Case 1: Adding a new content block (tool or text object)
                                    if action == "add" and isinstance(value, dict) and "type" in value:
                                        if len(path) == 4 and path[0] == "value" and path[2] == "content":
                                            block_type = value.get("type")
                                            content_to_yield = value.get("content")
                                            if block_type == "tool":
                                                is_current_op_tool = True
                                            elif block_type == "text":
                                                is_current_op_text = True

                                    # Case 2: Appending content string to an existing block
                                    elif action == "append" and isinstance(value, str):
                                        if len(path) == 5 and path[0] == "value" and path[2] == "content" and path[4] == "content":
                                            block_index = path[3]  # 0 for tool's content, 1 for text's content
                                            content_to_yield = value
                                            if block_index == 0:  # Appending to tool's content
                                                is_current_op_tool = True
                                            elif block_index == 1:  # Appending to text's content
                                                is_current_op_text = True

                                    # Case 3: Tool status update (e.g., "End of Thought")
                                    elif action == "replace" and len(path) == 6 and \
                                            path[0] == "value" and path[2] == "content" and \
                                            path[3] == 0 and path[4] == "options" and path[5] == "status":  # path[3]==0 ensures it's the tool block
                                        if value == "done":  # Tool block processing is complete
                                            if is_thinking_tag_open:
                                                delta = ChoiceDelta(content="</think>\n\n", role="assistant")
                                                yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
                                                is_thinking_tag_open = False
                                        continue  # This operation itself doesn't yield visible content

                                    # Yielding logic
                                    if is_current_op_tool and content_to_yield:
                                        if not is_thinking_tag_open:
                                            delta = ChoiceDelta(content="<think>", role="assistant")
                                            yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
                                            is_thinking_tag_open = True

                                        delta = ChoiceDelta(content=content_to_yield, role="assistant")
                                        yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)

                                    elif is_current_op_text and content_to_yield:
                                        if is_thinking_tag_open:  # If text starts, close any open thinking tag
                                            delta = ChoiceDelta(content="</think>", role="assistant")
                                            yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
                                            is_thinking_tag_open = False

                                        delta = ChoiceDelta(content=content_to_yield, role="assistant")
                                        yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)

                        if json_data.get('msg') == 'process_completed':
                            if is_thinking_tag_open:  # Ensure </think> is yielded if process completes mid-thought
                                delta = ChoiceDelta(content="</think>", role="assistant")
                                yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
                                is_thinking_tag_open = False
                            break
                    except json.JSONDecodeError:
                        continue
                    except Exception:
                        # Best-effort: malformed operations are skipped rather
                        # than aborting the whole stream.
                        continue

        # After the loop, ensure the tag is closed if the stream broke for reasons other than 'process_completed'
        if is_thinking_tag_open:
            delta = ChoiceDelta(content="</think>", role="assistant")
            yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Call the join endpoint once and package the result.

        NOTE(review): this assumes the join response already contains the
        output data; with the queue-based Gradio API it may only contain an
        event id — verify against a live call.
        """
        # For non-streaming, just call the join endpoint and parse the result
        session = self._client.session
        headers = self._client.headers
        resp = session.post(self._client.api_endpoint, headers=headers, json=payload, timeout=self._client.timeout)
        resp.raise_for_status()
        data = resp.json()
        # Return the full content as a single message, including all tool and text reasoning if present
        output = ""
        if 'output' in data and 'data' in data['output'] and len(data['output']['data']) > 5:
            updates = data['output']['data'][5]
            parts = []
            for update in updates:
                if isinstance(update, list) and len(update) > 2 and isinstance(update[2], str):
                    parts.append(update[2])
                elif isinstance(update, list) and isinstance(update[1], list) and len(update[1]) > 4:
                    if update[1][4] == "content":
                        parts.append(update[2])
                    elif update[1][4] == "options" and update[2] != "done":
                        parts.append(str(update[2]))
                elif isinstance(update, dict):
                    if update.get('type') == 'tool':
                        parts.append(update.get('content', ''))
                    elif update.get('type') == 'text':
                        parts.append(update.get('content', ''))
            output = "\n".join([str(p) for p in parts if p])
        else:
            output = data.get('output', {}).get('data', ["", "", "", "", "", [["", "", ""]]])[5][0][2]
        message = ChatCompletionMessage(role="assistant", content=output)
        choice = Choice(index=0, message=message, finish_reason="stop")
        # Use count_tokens to compute usage. payload['data'] mixes the raw
        # user string, an options dict, and None placeholders.
        # (Bug fix: the previous one-line filter let plain strings through
        # to ``m.get(...)``, raising AttributeError on the user message.)
        prompt_parts = []
        for item in payload['data']:
            if isinstance(item, str):
                prompt_parts.append(item)
            elif isinstance(item, dict):
                # The options dict carries the system prompt under 'sys_prompt'.
                prompt_parts.append(str(item.get('sys_prompt', '')))
        prompt_tokens = count_tokens(prompt_parts)
        completion_tokens = count_tokens(output)
        usage = CompletionUsage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens
        )
        completion = ChatCompletion(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model,
            usage=usage,
        )
        return completion
206
+
207
class Chat(BaseChat):
    """Chat namespace exposing the OpenAI-style ``completions`` attribute."""

    def __init__(self, client: 'Qwen3'):
        # Bind the completions endpoint to the owning provider.
        self.completions = Completions(client)
210
+
211
class Qwen3(OpenAICompatibleProvider):
    """OpenAI-compatible provider for the Qwen3 demo Gradio space on Hugging Face."""

    url = "https://qwen-qwen3-demo.hf.space"
    api_endpoint = "https://qwen-qwen3-demo.hf.space/gradio_api/queue/join?__theme=system"
    # Canonical model names accepted by the space.
    AVAILABLE_MODELS = [
        "qwen3-235b-a22b",
        "qwen3-32b",
        "qwen3-30b-a3b",
        "qwen3-14b",
        "qwen3-8b",
        "qwen3-4b",
        "qwen3-1.7b",
        "qwen3-0.6b",
    ]
    # Friendly aliases mapped to canonical names.
    MODEL_ALIASES = {
        "qwen-3-235b": "qwen3-235b-a22b",
        "qwen-3-30b": "qwen3-30b-a3b",
        "qwen-3-32b": "qwen3-32b",
        "qwen-3-14b": "qwen3-14b",
        "qwen-3-8b": "qwen3-8b",  # consistency fix: this alias was missing
        "qwen-3-4b": "qwen3-4b",
        "qwen-3-1.7b": "qwen3-1.7b",
        "qwen-3-0.6b": "qwen3-0.6b"
    }

    def __init__(self, timeout: Optional[int] = None):
        """Initialize the HTTP session and the browser-like header sets.

        Args:
            timeout: Per-request timeout in seconds; ``None`` means no timeout.
        """
        self.timeout = timeout
        self.session = requests.Session()
        # Headers for JSON API calls (queue join).
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0',
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Referer': f'{self.url}/?__theme=system',
            'content-type': 'application/json',
            'Origin': self.url,
            'Connection': 'keep-alive',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
        }
        # Headers for the SSE data feed (queue/data).
        self.stream_headers = {
            'Accept': 'text/event-stream',
            'Accept-Language': 'en-US,en;q=0.5',
            'Referer': f'{self.url}/?__theme=system',
            'User-Agent': self.headers['User-Agent'],
        }
        self.session.headers.update(self.headers)
        self.chat = Chat(self)

    def get_model(self, model):
        """Resolve an alias to its canonical model name; unknown names pass through."""
        return self.MODEL_ALIASES.get(model, model)

    @property
    def models(self):
        """Minimal OpenAI-style model-listing shim (``.models.list()``)."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
270
+
271
if __name__ == "__main__":
    # Quick manual smoke test: stream a short chat and print each raw chunk.
    from rich import print

    demo_client = Qwen3()
    stream = demo_client.chat.completions.create(
        model="qwen3-14b",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello "},
        ],
        stream=True,
    )
    for part in stream:
        print(part, end="", flush=True)