webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (281)
  1. webscout/AIauto.py +33 -15
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +703 -250
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/Act.md +309 -0
  6. webscout/Extra/GitToolkit/__init__.py +10 -0
  7. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  8. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  9. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  10. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  11. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  12. webscout/Extra/YTToolkit/README.md +375 -0
  13. webscout/Extra/YTToolkit/YTdownloader.py +957 -0
  14. webscout/Extra/YTToolkit/__init__.py +3 -0
  15. webscout/Extra/YTToolkit/transcriber.py +476 -0
  16. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  17. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  18. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  19. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  20. webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
  21. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  22. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  23. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  24. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  25. webscout/Extra/YTToolkit/ytapi/query.py +40 -0
  26. webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
  27. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  28. webscout/Extra/YTToolkit/ytapi/video.py +232 -0
  29. webscout/Extra/__init__.py +7 -0
  30. webscout/Extra/autocoder/__init__.py +9 -0
  31. webscout/Extra/autocoder/autocoder.py +1105 -0
  32. webscout/Extra/autocoder/autocoder_utiles.py +332 -0
  33. webscout/Extra/gguf.md +430 -0
  34. webscout/Extra/gguf.py +684 -0
  35. webscout/Extra/tempmail/README.md +488 -0
  36. webscout/Extra/tempmail/__init__.py +28 -0
  37. webscout/Extra/tempmail/async_utils.py +141 -0
  38. webscout/Extra/tempmail/base.py +161 -0
  39. webscout/Extra/tempmail/cli.py +187 -0
  40. webscout/Extra/tempmail/emailnator.py +84 -0
  41. webscout/Extra/tempmail/mail_tm.py +361 -0
  42. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  43. webscout/Extra/weather.md +281 -0
  44. webscout/Extra/weather.py +194 -0
  45. webscout/Extra/weather_ascii.py +76 -0
  46. webscout/Litlogger/README.md +10 -0
  47. webscout/Litlogger/__init__.py +15 -0
  48. webscout/Litlogger/formats.py +4 -0
  49. webscout/Litlogger/handlers.py +103 -0
  50. webscout/Litlogger/levels.py +13 -0
  51. webscout/Litlogger/logger.py +92 -0
  52. webscout/Provider/AI21.py +177 -0
  53. webscout/Provider/AISEARCH/DeepFind.py +254 -0
  54. webscout/Provider/AISEARCH/Perplexity.py +333 -0
  55. webscout/Provider/AISEARCH/README.md +279 -0
  56. webscout/Provider/AISEARCH/__init__.py +9 -0
  57. webscout/Provider/AISEARCH/felo_search.py +202 -0
  58. webscout/Provider/AISEARCH/genspark_search.py +324 -0
  59. webscout/Provider/AISEARCH/hika_search.py +186 -0
  60. webscout/Provider/AISEARCH/iask_search.py +410 -0
  61. webscout/Provider/AISEARCH/monica_search.py +220 -0
  62. webscout/Provider/AISEARCH/scira_search.py +298 -0
  63. webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
  64. webscout/Provider/Aitopia.py +316 -0
  65. webscout/Provider/AllenAI.py +440 -0
  66. webscout/Provider/Andi.py +228 -0
  67. webscout/Provider/Blackboxai.py +791 -0
  68. webscout/Provider/ChatGPTClone.py +237 -0
  69. webscout/Provider/ChatGPTGratis.py +194 -0
  70. webscout/Provider/ChatSandbox.py +342 -0
  71. webscout/Provider/Cloudflare.py +324 -0
  72. webscout/Provider/Cohere.py +208 -0
  73. webscout/Provider/Deepinfra.py +340 -0
  74. webscout/Provider/ExaAI.py +261 -0
  75. webscout/Provider/ExaChat.py +358 -0
  76. webscout/Provider/Flowith.py +217 -0
  77. webscout/Provider/FreeGemini.py +250 -0
  78. webscout/Provider/Gemini.py +169 -0
  79. webscout/Provider/GithubChat.py +369 -0
  80. webscout/Provider/GizAI.py +295 -0
  81. webscout/Provider/Glider.py +225 -0
  82. webscout/Provider/Groq.py +801 -0
  83. webscout/Provider/HF_space/__init__.py +0 -0
  84. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  85. webscout/Provider/HeckAI.py +375 -0
  86. webscout/Provider/HuggingFaceChat.py +469 -0
  87. webscout/Provider/Hunyuan.py +283 -0
  88. webscout/Provider/Jadve.py +291 -0
  89. webscout/Provider/Koboldai.py +384 -0
  90. webscout/Provider/LambdaChat.py +411 -0
  91. webscout/Provider/Llama3.py +259 -0
  92. webscout/Provider/MCPCore.py +315 -0
  93. webscout/Provider/Marcus.py +198 -0
  94. webscout/Provider/Nemotron.py +218 -0
  95. webscout/Provider/Netwrck.py +270 -0
  96. webscout/Provider/OLLAMA.py +396 -0
  97. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
  98. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  99. webscout/Provider/OPENAI/FreeGemini.py +283 -0
  100. webscout/Provider/OPENAI/NEMOTRON.py +232 -0
  101. webscout/Provider/OPENAI/Qwen3.py +283 -0
  102. webscout/Provider/OPENAI/README.md +952 -0
  103. webscout/Provider/OPENAI/TwoAI.py +357 -0
  104. webscout/Provider/OPENAI/__init__.py +40 -0
  105. webscout/Provider/OPENAI/ai4chat.py +293 -0
  106. webscout/Provider/OPENAI/api.py +969 -0
  107. webscout/Provider/OPENAI/base.py +249 -0
  108. webscout/Provider/OPENAI/c4ai.py +373 -0
  109. webscout/Provider/OPENAI/chatgpt.py +556 -0
  110. webscout/Provider/OPENAI/chatgptclone.py +494 -0
  111. webscout/Provider/OPENAI/chatsandbox.py +173 -0
  112. webscout/Provider/OPENAI/copilot.py +242 -0
  113. webscout/Provider/OPENAI/deepinfra.py +322 -0
  114. webscout/Provider/OPENAI/e2b.py +1414 -0
  115. webscout/Provider/OPENAI/exaai.py +417 -0
  116. webscout/Provider/OPENAI/exachat.py +444 -0
  117. webscout/Provider/OPENAI/flowith.py +162 -0
  118. webscout/Provider/OPENAI/freeaichat.py +359 -0
  119. webscout/Provider/OPENAI/glider.py +326 -0
  120. webscout/Provider/OPENAI/groq.py +364 -0
  121. webscout/Provider/OPENAI/heckai.py +308 -0
  122. webscout/Provider/OPENAI/llmchatco.py +335 -0
  123. webscout/Provider/OPENAI/mcpcore.py +389 -0
  124. webscout/Provider/OPENAI/multichat.py +376 -0
  125. webscout/Provider/OPENAI/netwrck.py +357 -0
  126. webscout/Provider/OPENAI/oivscode.py +287 -0
  127. webscout/Provider/OPENAI/opkfc.py +496 -0
  128. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  129. webscout/Provider/OPENAI/scirachat.py +477 -0
  130. webscout/Provider/OPENAI/sonus.py +304 -0
  131. webscout/Provider/OPENAI/standardinput.py +433 -0
  132. webscout/Provider/OPENAI/textpollinations.py +339 -0
  133. webscout/Provider/OPENAI/toolbaz.py +413 -0
  134. webscout/Provider/OPENAI/typefully.py +355 -0
  135. webscout/Provider/OPENAI/typegpt.py +364 -0
  136. webscout/Provider/OPENAI/uncovrAI.py +463 -0
  137. webscout/Provider/OPENAI/utils.py +318 -0
  138. webscout/Provider/OPENAI/venice.py +431 -0
  139. webscout/Provider/OPENAI/wisecat.py +387 -0
  140. webscout/Provider/OPENAI/writecream.py +163 -0
  141. webscout/Provider/OPENAI/x0gpt.py +365 -0
  142. webscout/Provider/OPENAI/yep.py +382 -0
  143. webscout/Provider/OpenGPT.py +209 -0
  144. webscout/Provider/Openai.py +496 -0
  145. webscout/Provider/PI.py +429 -0
  146. webscout/Provider/Perplexitylabs.py +415 -0
  147. webscout/Provider/QwenLM.py +254 -0
  148. webscout/Provider/Reka.py +214 -0
  149. webscout/Provider/StandardInput.py +290 -0
  150. webscout/Provider/TTI/README.md +82 -0
  151. webscout/Provider/TTI/__init__.py +7 -0
  152. webscout/Provider/TTI/aiarta.py +365 -0
  153. webscout/Provider/TTI/artbit.py +0 -0
  154. webscout/Provider/TTI/base.py +64 -0
  155. webscout/Provider/TTI/fastflux.py +200 -0
  156. webscout/Provider/TTI/magicstudio.py +201 -0
  157. webscout/Provider/TTI/piclumen.py +203 -0
  158. webscout/Provider/TTI/pixelmuse.py +225 -0
  159. webscout/Provider/TTI/pollinations.py +221 -0
  160. webscout/Provider/TTI/utils.py +11 -0
  161. webscout/Provider/TTS/README.md +192 -0
  162. webscout/Provider/TTS/__init__.py +10 -0
  163. webscout/Provider/TTS/base.py +159 -0
  164. webscout/Provider/TTS/deepgram.py +156 -0
  165. webscout/Provider/TTS/elevenlabs.py +111 -0
  166. webscout/Provider/TTS/gesserit.py +128 -0
  167. webscout/Provider/TTS/murfai.py +113 -0
  168. webscout/Provider/TTS/openai_fm.py +129 -0
  169. webscout/Provider/TTS/parler.py +111 -0
  170. webscout/Provider/TTS/speechma.py +580 -0
  171. webscout/Provider/TTS/sthir.py +94 -0
  172. webscout/Provider/TTS/streamElements.py +333 -0
  173. webscout/Provider/TTS/utils.py +280 -0
  174. webscout/Provider/TeachAnything.py +229 -0
  175. webscout/Provider/TextPollinationsAI.py +308 -0
  176. webscout/Provider/TwoAI.py +475 -0
  177. webscout/Provider/TypliAI.py +305 -0
  178. webscout/Provider/UNFINISHED/ChatHub.py +209 -0
  179. webscout/Provider/UNFINISHED/Youchat.py +330 -0
  180. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  181. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  182. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  183. webscout/Provider/Venice.py +258 -0
  184. webscout/Provider/VercelAI.py +253 -0
  185. webscout/Provider/WiseCat.py +233 -0
  186. webscout/Provider/WrDoChat.py +370 -0
  187. webscout/Provider/Writecream.py +246 -0
  188. webscout/Provider/WritingMate.py +269 -0
  189. webscout/Provider/__init__.py +174 -0
  190. webscout/Provider/ai4chat.py +174 -0
  191. webscout/Provider/akashgpt.py +335 -0
  192. webscout/Provider/asksteve.py +220 -0
  193. webscout/Provider/cerebras.py +290 -0
  194. webscout/Provider/chatglm.py +215 -0
  195. webscout/Provider/cleeai.py +213 -0
  196. webscout/Provider/copilot.py +425 -0
  197. webscout/Provider/elmo.py +283 -0
  198. webscout/Provider/freeaichat.py +285 -0
  199. webscout/Provider/geminiapi.py +208 -0
  200. webscout/Provider/granite.py +235 -0
  201. webscout/Provider/hermes.py +266 -0
  202. webscout/Provider/julius.py +223 -0
  203. webscout/Provider/koala.py +170 -0
  204. webscout/Provider/learnfastai.py +325 -0
  205. webscout/Provider/llama3mitril.py +215 -0
  206. webscout/Provider/llmchat.py +258 -0
  207. webscout/Provider/llmchatco.py +306 -0
  208. webscout/Provider/lmarena.py +198 -0
  209. webscout/Provider/meta.py +801 -0
  210. webscout/Provider/multichat.py +364 -0
  211. webscout/Provider/oivscode.py +309 -0
  212. webscout/Provider/samurai.py +224 -0
  213. webscout/Provider/scira_chat.py +299 -0
  214. webscout/Provider/scnet.py +243 -0
  215. webscout/Provider/searchchat.py +292 -0
  216. webscout/Provider/sonus.py +258 -0
  217. webscout/Provider/talkai.py +194 -0
  218. webscout/Provider/toolbaz.py +353 -0
  219. webscout/Provider/turboseek.py +266 -0
  220. webscout/Provider/typefully.py +202 -0
  221. webscout/Provider/typegpt.py +289 -0
  222. webscout/Provider/uncovr.py +368 -0
  223. webscout/Provider/x0gpt.py +299 -0
  224. webscout/Provider/yep.py +389 -0
  225. webscout/__init__.py +4 -2
  226. webscout/cli.py +3 -28
  227. webscout/client.py +70 -0
  228. webscout/conversation.py +35 -35
  229. webscout/litagent/Readme.md +276 -0
  230. webscout/litagent/__init__.py +29 -0
  231. webscout/litagent/agent.py +455 -0
  232. webscout/litagent/constants.py +60 -0
  233. webscout/litprinter/__init__.py +59 -0
  234. webscout/optimizers.py +419 -419
  235. webscout/scout/README.md +404 -0
  236. webscout/scout/__init__.py +8 -0
  237. webscout/scout/core/__init__.py +7 -0
  238. webscout/scout/core/crawler.py +210 -0
  239. webscout/scout/core/scout.py +607 -0
  240. webscout/scout/core/search_result.py +96 -0
  241. webscout/scout/core/text_analyzer.py +63 -0
  242. webscout/scout/core/text_utils.py +277 -0
  243. webscout/scout/core/web_analyzer.py +52 -0
  244. webscout/scout/element.py +478 -0
  245. webscout/scout/parsers/__init__.py +69 -0
  246. webscout/scout/parsers/html5lib_parser.py +172 -0
  247. webscout/scout/parsers/html_parser.py +236 -0
  248. webscout/scout/parsers/lxml_parser.py +178 -0
  249. webscout/scout/utils.py +37 -0
  250. webscout/swiftcli/Readme.md +323 -0
  251. webscout/swiftcli/__init__.py +95 -0
  252. webscout/swiftcli/core/__init__.py +7 -0
  253. webscout/swiftcli/core/cli.py +297 -0
  254. webscout/swiftcli/core/context.py +104 -0
  255. webscout/swiftcli/core/group.py +241 -0
  256. webscout/swiftcli/decorators/__init__.py +28 -0
  257. webscout/swiftcli/decorators/command.py +221 -0
  258. webscout/swiftcli/decorators/options.py +220 -0
  259. webscout/swiftcli/decorators/output.py +252 -0
  260. webscout/swiftcli/exceptions.py +21 -0
  261. webscout/swiftcli/plugins/__init__.py +9 -0
  262. webscout/swiftcli/plugins/base.py +135 -0
  263. webscout/swiftcli/plugins/manager.py +269 -0
  264. webscout/swiftcli/utils/__init__.py +59 -0
  265. webscout/swiftcli/utils/formatting.py +252 -0
  266. webscout/swiftcli/utils/parsing.py +267 -0
  267. webscout/version.py +1 -1
  268. webscout/webscout_search.py +2 -182
  269. webscout/webscout_search_async.py +1 -179
  270. webscout/zeroart/README.md +89 -0
  271. webscout/zeroart/__init__.py +135 -0
  272. webscout/zeroart/base.py +66 -0
  273. webscout/zeroart/effects.py +101 -0
  274. webscout/zeroart/fonts.py +1239 -0
  275. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
  276. webscout-8.2.9.dist-info/RECORD +289 -0
  277. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  278. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  279. webscout-8.2.7.dist-info/RECORD +0 -26
  280. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  281. {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,242 @@
1
+ import json
2
+ import uuid
3
+ import time
4
+ from typing import List, Dict, Optional, Union, Generator, Any
5
+ from urllib.parse import quote
6
+ from curl_cffi.requests import Session, CurlWsFlag
7
+
8
+ # Import base classes and utility structures
9
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
10
+ from .utils import (
11
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
12
+ ChatCompletionMessage, CompletionUsage, format_prompt, count_tokens
13
+ )
14
+
15
+ # Attempt to import LitAgent, fallback if not available
16
+ try:
17
+ from webscout.litagent import LitAgent
18
+ except ImportError:
19
+ pass
20
+
21
+ # --- Microsoft Copilot Client ---
22
+
23
class Completions(BaseCompletions):
    """OpenAI-style ``chat.completions`` implementation backed by the
    Microsoft Copilot web endpoint (conversation REST API + websocket)."""

    def __init__(self, client: 'Copilot'):
        # Back-reference to the provider for its session and endpoint URLs.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create

        NOTE(review): max_tokens, temperature and top_p are accepted for
        interface compatibility only — they are never forwarded to Copilot.
        An optional image (raw bytes, uploaded as image/jpeg) may be passed
        via ``kwargs["image"]``.
        """
        # Flatten the whole message list into one prompt string; the
        # Copilot websocket API takes free text, not a message array.
        formatted_prompt = format_prompt(messages, add_special_tokens=True, include_system=True, do_continue=True)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        # Handle image if provided (raw bytes; uploaded in _create_stream).
        image = kwargs.get("image")

        if stream:
            return self._create_stream(request_id, created_time, model, formatted_prompt, image)
        else:
            return self._create_non_stream(request_id, created_time, model, formatted_prompt, image)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Open a conversation, send the prompt over the websocket and
        yield one ChatCompletionChunk per ``appendText`` event.

        Raises RuntimeError (wrapping any underlying failure) on HTTP
        errors, Copilot ``error`` events, or when no text was received.
        """
        try:
            s = self._client.session
            # Create a new conversation for this request.
            # NOTE(review): no auth/cookies are sent here — presumably the
            # impersonated browser session is enough; confirm against the
            # live endpoint.
            r = s.post(self._client.conversation_url)
            if r.status_code != 200:
                raise RuntimeError(f"Failed to create conversation: {r.text}")
            conv_id = r.json().get("id")

            # Handle image upload if provided (always sent as image/jpeg).
            images = []
            if image:
                r = s.post(
                    f"{self._client.url}/c/api/attachments",
                    headers={"content-type": "image/jpeg"},
                    data=image
                )
                if r.status_code != 200:
                    raise RuntimeError(f"Image upload failed: {r.text}")
                images.append({"type": "image", "url": r.json().get("url")})

            # Connect to the chat websocket.
            ws = s.ws_connect(self._client.websocket_url)

            # Use model to set mode ("reasoning" selects Think Deeper).
            mode = "reasoning" if "Think" in model else "chat"

            # Send the message (attachments first, then the text part).
            ws.send(json.dumps({
                "event": "send",
                "conversationId": conv_id,
                "content": images + [{"type": "text", "text": prompt_text}],
                "mode": mode
            }).encode(), CurlWsFlag.TEXT)

            # Track token usage using count_tokens.
            # NOTE(review): these counters are maintained but never attached
            # to the yielded chunks — usage is only estimated downstream in
            # _create_non_stream.
            prompt_tokens = count_tokens(prompt_text)
            completion_tokens = 0
            total_tokens = prompt_tokens

            started = False
            while True:
                try:
                    # ws.recv() returns (payload, flags); payload is JSON.
                    msg = json.loads(ws.recv()[0])
                except Exception:
                    # Any receive/parse failure ends the stream loop.
                    break

                if msg.get("event") == "appendText":
                    started = True
                    content = msg.get("text", "")

                    # Update token counts using count_tokens.
                    content_tokens = count_tokens(content)
                    completion_tokens += content_tokens
                    total_tokens = prompt_tokens + completion_tokens

                    # Create the delta object for this text fragment.
                    delta = ChoiceDelta(
                        content=content,
                        role="assistant"
                    )

                    # Create the choice object (single-choice stream).
                    choice = Choice(
                        index=0,
                        delta=delta,
                        finish_reason=None
                    )

                    # Create the chunk object.
                    chunk = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model
                    )

                    yield chunk
                elif msg.get("event") == "done":
                    # Final chunk with finish_reason="stop", empty delta.
                    delta = ChoiceDelta(
                        content=None,
                        role=None
                    )

                    choice = Choice(
                        index=0,
                        delta=delta,
                        finish_reason="stop"
                    )

                    chunk = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model
                    )

                    yield chunk
                    break
                elif msg.get("event") == "error":
                    raise RuntimeError(f"Copilot error: {msg}")

            ws.close()

            if not started:
                raise RuntimeError("No response received from Copilot")

        except Exception as e:
            # All failures (including the RuntimeErrors above) are
            # re-wrapped so callers see a single exception type.
            raise RuntimeError(f"Stream error: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None
    ) -> ChatCompletion:
        """Drain the streaming generator and assemble one ChatCompletion
        with token usage estimated via count_tokens."""
        result = ""
        for chunk in self._create_stream(request_id, created_time, model, prompt_text, image):
            if hasattr(chunk, 'choices') and chunk.choices and hasattr(chunk.choices[0], 'delta') and chunk.choices[0].delta.content:
                result += chunk.choices[0].delta.content

        # Create the message object.
        message = ChatCompletionMessage(
            role="assistant",
            content=result
        )

        # Create the choice object.
        choice = Choice(
            index=0,
            message=message,
            finish_reason="stop"
        )

        # Estimate token usage using count_tokens (no server-side counts
        # are available from the websocket).
        prompt_tokens = count_tokens(prompt_text)
        completion_tokens = count_tokens(result)
        total_tokens = prompt_tokens + completion_tokens

        # Create usage object.
        usage = CompletionUsage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=total_tokens
        )

        # Create the completion object.
        completion = ChatCompletion(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model,
            usage=usage
        )

        return completion
212
+
213
class Chat(BaseChat):
    """Container exposing ``.completions`` in the OpenAI client layout."""

    def __init__(self, client: 'Copilot'):
        # The provider instance supplies the session and endpoint URLs.
        self.completions = Completions(client)
216
+
217
class Copilot(OpenAICompatibleProvider):
    """OpenAI-compatible provider for the Microsoft Copilot web service.

    Exposes ``.chat.completions.create(...)`` and a minimal ``.models``
    listing mirroring the official OpenAI client surface.
    """

    url = "https://copilot.microsoft.com"
    conversation_url = f"{url}/c/api/conversations"
    websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"

    AVAILABLE_MODELS = ["Copilot", "Think Deeper"]

    def __init__(self, timeout: int = 900, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
        """Create a browser-impersonating session and wire up the chat API.

        timeout: request timeout in seconds for the underlying session.
        browser: impersonation profile passed to curl_cffi's Session.
        tools:   optional tool definitions registered via register_tools.
        """
        self.timeout = timeout
        self.session = Session(timeout=timeout, impersonate=browser)

        # Tool registry starts empty; populate it only when tools are given.
        self.available_tools = {}
        if tools:
            self.register_tools(tools)

        # OpenAI-style entry point: provider.chat.completions.create(...)
        self.chat = Chat(self)

    @property
    def models(self):
        """Return an object whose ``.list()`` yields the model names."""
        provider = self

        class _ModelList:
            def list(self):
                # Resolved through the captured provider instance.
                return provider.AVAILABLE_MODELS

        return _ModelList()
@@ -0,0 +1,322 @@
1
+ import requests
2
+ import json
3
+ import time
4
+ import uuid
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ # Import base classes and utility structures
8
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
+ from .utils import (
10
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
+ ChatCompletionMessage, CompletionUsage
12
+ )
13
+
14
+ # Attempt to import LitAgent, fallback if not available
15
+ try:
16
+ from webscout.litagent import LitAgent
17
+ except ImportError:
18
+ pass
19
+
20
+ # --- DeepInfra Client ---
21
+
22
class Completions(BaseCompletions):
    """OpenAI-compatible ``chat.completions`` implementation for the
    DeepInfra chat endpoint (plain OpenAI-shaped JSON over HTTPS)."""

    def __init__(self, client: 'DeepInfra'):
        # Back-reference to the provider for its session/headers/base_url.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2049,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create

        Extra keyword arguments are forwarded verbatim into the request
        payload (so they can override/extend any of the keys above).
        """
        payload = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "stream": stream,
        }
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p

        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        else:
            return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Stream SSE lines from DeepInfra and yield one
        ChatCompletionChunk per ``data:`` payload until ``[DONE]``.

        Raises IOError on transport failures; malformed JSON lines are
        warned about and skipped.

        Fix vs. previous revision: the per-chunk ``model_dump``/dict
        conversion and the hand-rolled usage estimation were dead code
        (built and immediately discarded, never attached to the yielded
        chunk) — removed; the yielded chunks are unchanged.
        """
        try:
            response = self._client.session.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            )
            response.raise_for_status()

            for line in response.iter_lines():
                if not line:
                    continue
                decoded_line = line.decode('utf-8').strip()

                # Server-sent-events framing: only "data: ..." lines matter.
                if not decoded_line.startswith("data: "):
                    continue
                json_str = decoded_line[6:]
                if json_str == "[DONE]":
                    # End-of-stream marker in OpenAI SSE format.
                    break

                try:
                    data = json.loads(json_str)
                except json.JSONDecodeError:
                    print(f"Warning: Could not decode JSON line: {json_str}")
                    continue

                choice_data = data.get('choices', [{}])[0]
                delta_data = choice_data.get('delta', {})

                delta = ChoiceDelta(
                    content=delta_data.get('content'),
                    role=delta_data.get('role'),
                    tool_calls=delta_data.get('tool_calls')
                )
                choice = Choice(
                    index=choice_data.get('index', 0),
                    delta=delta,
                    finish_reason=choice_data.get('finish_reason'),
                    logprobs=choice_data.get('logprobs')
                )
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=data.get('system_fingerprint')
                )
        except requests.exceptions.RequestException as e:
            print(f"Error during DeepInfra stream request: {e}")
            raise IOError(f"DeepInfra request failed: {e}") from e
        except Exception as e:
            print(f"Error processing DeepInfra stream: {e}")
            raise

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """POST the payload once and map the JSON body onto a
        ChatCompletion (choices + usage). Raises IOError on transport
        failures."""
        try:
            response = self._client.session.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                timeout=self._client.timeout
            )
            response.raise_for_status()
            data = response.json()

            choices_data = data.get('choices', [])
            usage_data = data.get('usage', {})

            choices = []
            for choice_d in choices_data:
                message_d = choice_d.get('message', {})
                message = ChatCompletionMessage(
                    role=message_d.get('role', 'assistant'),
                    content=message_d.get('content', '')
                )
                choice = Choice(
                    index=choice_d.get('index', 0),
                    message=message,
                    finish_reason=choice_d.get('finish_reason', 'stop')
                )
                choices.append(choice)

            usage = CompletionUsage(
                prompt_tokens=usage_data.get('prompt_tokens', 0),
                completion_tokens=usage_data.get('completion_tokens', 0),
                total_tokens=usage_data.get('total_tokens', 0)
            )

            completion = ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                # Prefer the model name echoed by the server when present.
                model=data.get('model', model),
                usage=usage,
            )
            return completion

        except requests.exceptions.RequestException as e:
            print(f"Error during DeepInfra non-stream request: {e}")
            raise IOError(f"DeepInfra request failed: {e}") from e
        except Exception as e:
            print(f"Error processing DeepInfra response: {e}")
            raise
221
+
222
class Chat(BaseChat):
    """Container exposing ``.completions`` in the OpenAI client layout."""

    def __init__(self, client: 'DeepInfra'):
        # The provider instance supplies the session, headers and base URL.
        self.completions = Completions(client)
225
+
226
class DeepInfra(OpenAICompatibleProvider):
    """OpenAI-compatible provider for DeepInfra's unauthenticated
    web-embed chat endpoint.

    Requests are sent with browser-fingerprinted headers generated by
    LitAgent; entries commented out in AVAILABLE_MODELS were found not
    to work through this endpoint.
    """

    AVAILABLE_MODELS = [
        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING

        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1-Turbo",
        "deepseek-ai/DeepSeek-V3",
        "deepseek-ai/DeepSeek-Prover-V2-671B",
        "google/gemma-2-27b-it",
        "google/gemma-2-9b-it",
        "google/gemma-3-12b-it",
        "google/gemma-3-27b-it",
        "google/gemma-3-4b-it",
        "meta-llama/Llama-3.3-70B-Instruct",
        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
        "meta-llama/Llama-Guard-4-12B",
        "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        "microsoft/Phi-4-multimodal-instruct",
        "microsoft/WizardLM-2-8x22B",
        "microsoft/phi-4",
        "microsoft/phi-4-reasoning-plus",
        "mistralai/Mistral-Small-24B-Instruct-2501",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
        "Qwen/QwQ-32B",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen3-14B",
        "Qwen/Qwen3-30B-A3B",
        "Qwen/Qwen3-32B",
        "Qwen/Qwen3-235B-A22B",
        # "google/gemini-1.5-flash", # >>>> NOT WORKING
        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING

        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING

        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
    ]

    def __init__(self, timeout: Optional[int] = None, browser: str = "chrome"):
        """Build a requests session with browser-like headers.

        timeout: per-request timeout in seconds (None = no timeout).
        browser: fingerprint profile name passed to LitAgent.
        """
        self.timeout = timeout
        self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
        self.session = requests.Session()

        # NOTE(review): LitAgent is imported at module top inside a
        # try/except that silently passes; if that import failed, this
        # line raises NameError — confirm the dependency is guaranteed.
        agent = LitAgent()
        fingerprint = agent.generate_fingerprint(browser)

        # Headers mimic an embedded web client; X-Deepinfra-Source marks
        # the request as coming from the web embed (no API key used).
        self.headers = {
            "Accept": fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Origin": "https://deepinfra.com",
            "Pragma": "no-cache",
            "Referer": "https://deepinfra.com/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-site",
            "X-Deepinfra-Source": "web-embed",
            # Fall back to a static Client-Hints value when the
            # fingerprint does not provide one.
            "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
            "User-Agent": fingerprint["user_agent"],
        }
        self.session.headers.update(self.headers)
        # OpenAI-style entry point: provider.chat.completions.create(...)
        self.chat = Chat(self)

    @property
    def models(self):
        """Return an object whose ``.list()`` yields AVAILABLE_MODELS,
        mirroring the OpenAI client's ``client.models.list()``."""
        class _ModelList:
            def list(inner_self):
                # ``self`` here is the enclosing DeepInfra instance
                # captured by closure, not ``inner_self``.
                return type(self).AVAILABLE_MODELS
        return _ModelList()