webscout 8.2.8-py3-none-any.whl → 8.3-py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/deepinfra.py
@@ -1,319 +1,328 @@
- import requests
- import json
- import time
- import uuid
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     pass
-
- # --- DeepInfra Client ---
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'DeepInfra'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = 2049,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         payload = {
-             "model": model,
-             "messages": messages,
-             "max_tokens": max_tokens,
-             "stream": stream,
-         }
-         if temperature is not None:
-             payload["temperature"] = temperature
-         if top_p is not None:
-             payload["top_p"] = top_p
-
-         payload.update(kwargs)
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, model, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = self._client.session.post(
-                 self._client.base_url,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             # Track token usage across chunks
-             prompt_tokens = 0
-             completion_tokens = 0
-             total_tokens = 0
-
-             for line in response.iter_lines():
-                 if line:
-                     decoded_line = line.decode('utf-8').strip()
-
-                     if decoded_line.startswith("data: "):
-                         json_str = decoded_line[6:]
-                         if json_str == "[DONE]":
-                             # Format the final [DONE] marker in OpenAI format
-                             # print("data: [DONE]")
-                             break
-
-                         try:
-                             data = json.loads(json_str)
-                             choice_data = data.get('choices', [{}])[0]
-                             delta_data = choice_data.get('delta', {})
-                             finish_reason = choice_data.get('finish_reason')
-
-                             # Update token counts if available
-                             usage_data = data.get('usage', {})
-                             if usage_data:
-                                 prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
-                                 completion_tokens = usage_data.get('completion_tokens', completion_tokens)
-                                 total_tokens = usage_data.get('total_tokens', total_tokens)
-
-                             # Create the delta object
-                             delta = ChoiceDelta(
-                                 content=delta_data.get('content'),
-                                 role=delta_data.get('role'),
-                                 tool_calls=delta_data.get('tool_calls')
-                             )
-
-                             # Create the choice object
-                             choice = Choice(
-                                 index=choice_data.get('index', 0),
-                                 delta=delta,
-                                 finish_reason=finish_reason,
-                                 logprobs=choice_data.get('logprobs')
-                             )
-
-                             # Create the chunk object
-                             chunk = ChatCompletionChunk(
-                                 id=request_id,
-                                 choices=[choice],
-                                 created=created_time,
-                                 model=model,
-                                 system_fingerprint=data.get('system_fingerprint')
-                             )
-
-                             # Convert to dict for proper formatting
-                             chunk_dict = chunk.to_dict()
-
-                             # Add usage information to match OpenAI format
-                             # Even if we don't have real token counts, include estimated usage
-                             # This matches the format in the examples
-                             usage_dict = {
-                                 "prompt_tokens": prompt_tokens or 10,
-                                 "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
-                                 "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
-                                 "estimated_cost": None
-                             }
-
-                             # Update completion_tokens and total_tokens as we receive more content
-                             if delta_data.get('content'):
-                                 completion_tokens += 1
-                                 total_tokens = prompt_tokens + completion_tokens
-                                 usage_dict["completion_tokens"] = completion_tokens
-                                 usage_dict["total_tokens"] = total_tokens
-
-                             chunk_dict["usage"] = usage_dict
-
-                             # Format the response in OpenAI format exactly as requested
-                             # We need to print the raw string and also yield the chunk object
-                             # This ensures both the console output and the returned object are correct
-                             # print(f"data: {json.dumps(chunk_dict)}")
-
-                             # Return the chunk object for internal processing
-                             yield chunk
-                         except json.JSONDecodeError:
-                             print(f"Warning: Could not decode JSON line: {json_str}")
-                             continue
-         except requests.exceptions.RequestException as e:
-             print(f"Error during DeepInfra stream request: {e}")
-             raise IOError(f"DeepInfra request failed: {e}") from e
-         except Exception as e:
-             print(f"Error processing DeepInfra stream: {e}")
-             raise
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         try:
-             response = self._client.session.post(
-                 self._client.base_url,
-                 headers=self._client.headers,
-                 json=payload,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-             data = response.json()
-
-             choices_data = data.get('choices', [])
-             usage_data = data.get('usage', {})
-
-             choices = []
-             for choice_d in choices_data:
-                 message_d = choice_d.get('message', {})
-                 message = ChatCompletionMessage(
-                     role=message_d.get('role', 'assistant'),
-                     content=message_d.get('content', '')
-                 )
-                 choice = Choice(
-                     index=choice_d.get('index', 0),
-                     message=message,
-                     finish_reason=choice_d.get('finish_reason', 'stop')
-                 )
-                 choices.append(choice)
-
-             usage = CompletionUsage(
-                 prompt_tokens=usage_data.get('prompt_tokens', 0),
-                 completion_tokens=usage_data.get('completion_tokens', 0),
-                 total_tokens=usage_data.get('total_tokens', 0)
-             )
-
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=choices,
-                 created=created_time,
-                 model=data.get('model', model),
-                 usage=usage,
-             )
-             return completion
-
-         except requests.exceptions.RequestException as e:
-             print(f"Error during DeepInfra non-stream request: {e}")
-             raise IOError(f"DeepInfra request failed: {e}") from e
-         except Exception as e:
-             print(f"Error processing DeepInfra response: {e}")
-             raise
-
- class Chat(BaseChat):
-     def __init__(self, client: 'DeepInfra'):
-         self.completions = Completions(client)
-
- class DeepInfra(OpenAICompatibleProvider):
-
-     AVAILABLE_MODELS = [
-         # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
-
-         "deepseek-ai/DeepSeek-R1",
-         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-         "deepseek-ai/DeepSeek-R1-Turbo",
-         "deepseek-ai/DeepSeek-V3",
-         "deepseek-ai/DeepSeek-Prover-V2-671B",
-         "google/gemma-2-27b-it",
-         "google/gemma-2-9b-it",
-         "google/gemma-3-12b-it",
-         "google/gemma-3-27b-it",
-         "google/gemma-3-4b-it",
-         "meta-llama/Llama-3.3-70B-Instruct",
-         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-         "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-         "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-         "meta-llama/Llama-Guard-4-12B",
-         "meta-llama/Meta-Llama-3.1-8B-Instruct",
-         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-         "microsoft/Phi-4-multimodal-instruct",
-         "microsoft/WizardLM-2-8x22B",
-         "microsoft/phi-4",
-         "microsoft/phi-4-reasoning-plus",
-         "mistralai/Mistral-Small-24B-Instruct-2501",
-         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-         "Qwen/QwQ-32B",
-         "Qwen/Qwen2.5-72B-Instruct",
-         "Qwen/Qwen2.5-Coder-32B-Instruct",
-         "Qwen/Qwen3-14B",
-         "Qwen/Qwen3-30B-A3B",
-         "Qwen/Qwen3-32B",
-         "Qwen/Qwen3-235B-A22B",
-         # "google/gemini-1.5-flash", # >>>> NOT WORKING
-         # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
-         # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
-
-         # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
-
-         # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
-         # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
-         # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-         # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
-         # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
-         # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-         # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
-         # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-         # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-         # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
-         # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
-     ]
-
-     def __init__(self, timeout: Optional[int] = None, browser: str = "chrome"):
-         self.timeout = timeout
-         self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
-         self.session = requests.Session()
-
-         agent = LitAgent()
-         fingerprint = agent.generate_fingerprint(browser)
-
-         self.headers = {
-             "Accept": fingerprint["accept"],
-             "Accept-Encoding": "gzip, deflate, br, zstd",
-             "Accept-Language": fingerprint["accept_language"],
-             "Content-Type": "application/json",
-             "Cache-Control": "no-cache",
-             "Connection": "keep-alive",
-             "Origin": "https://deepinfra.com",
-             "Pragma": "no-cache",
-             "Referer": "https://deepinfra.com/",
-             "Sec-Fetch-Dest": "empty",
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "same-site",
-             "X-Deepinfra-Source": "web-embed",
-             "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             "Sec-CH-UA-Mobile": "?0",
-             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
-             "User-Agent": fingerprint["user_agent"],
-         }
-         self.session.headers.update(self.headers)
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
+ import requests
+ import json
+ import time
+ import uuid
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     pass
+
+ # --- DeepInfra Client ---
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'DeepInfra'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 2049,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         payload = {
+             "model": model,
+             "messages": messages,
+             "max_tokens": max_tokens,
+             "stream": stream,
+         }
+         if temperature is not None:
+             payload["temperature"] = temperature
+         if top_p is not None:
+             payload["top_p"] = top_p
+
+         payload.update(kwargs)
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+         timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = self._client.session.post(
+                 self._client.base_url,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies
+             )
+             response.raise_for_status()
+
+             # Track token usage across chunks
+             prompt_tokens = 0
+             completion_tokens = 0
+             total_tokens = 0
+
+             for line in response.iter_lines():
+                 if line:
+                     decoded_line = line.decode('utf-8').strip()
+
+                     if decoded_line.startswith("data: "):
+                         json_str = decoded_line[6:]
+                         if json_str == "[DONE]":
+                             # Format the final [DONE] marker in OpenAI format
+                             # print("data: [DONE]")
+                             break
+
+                         try:
+                             data = json.loads(json_str)
+                             choice_data = data.get('choices', [{}])[0]
+                             delta_data = choice_data.get('delta', {})
+                             finish_reason = choice_data.get('finish_reason')
+
+                             # Update token counts if available
+                             usage_data = data.get('usage', {})
+                             if usage_data:
+                                 prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
+                                 completion_tokens = usage_data.get('completion_tokens', completion_tokens)
+                                 total_tokens = usage_data.get('total_tokens', total_tokens)
+
+                             # Create the delta object
+                             delta = ChoiceDelta(
+                                 content=delta_data.get('content'),
+                                 role=delta_data.get('role'),
+                                 tool_calls=delta_data.get('tool_calls')
+                             )
+
+                             # Create the choice object
+                             choice = Choice(
+                                 index=choice_data.get('index', 0),
+                                 delta=delta,
+                                 finish_reason=finish_reason,
+                                 logprobs=choice_data.get('logprobs')
+                             )
+
+                             # Create the chunk object
+                             chunk = ChatCompletionChunk(
+                                 id=request_id,
+                                 choices=[choice],
+                                 created=created_time,
+                                 model=model,
+                                 system_fingerprint=data.get('system_fingerprint')
+                             )
+
+                             # Convert chunk to dict using Pydantic's API
+                             if hasattr(chunk, "model_dump"):
+                                 chunk_dict = chunk.model_dump(exclude_none=True)
+                             else:
+                                 chunk_dict = chunk.dict(exclude_none=True)
+
+                             # Add usage information to match OpenAI format
+                             # Even if we don't have real token counts, include estimated usage
+                             # This matches the format in the examples
+                             usage_dict = {
+                                 "prompt_tokens": prompt_tokens or 10,
+                                 "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
+                                 "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
+                                 "estimated_cost": None
+                             }
+
+                             # Update completion_tokens and total_tokens as we receive more content
+                             if delta_data.get('content'):
+                                 completion_tokens += 1
+                                 total_tokens = prompt_tokens + completion_tokens
+                                 usage_dict["completion_tokens"] = completion_tokens
+                                 usage_dict["total_tokens"] = total_tokens
+
+                             chunk_dict["usage"] = usage_dict
+
+                             # Format the response in OpenAI format exactly as requested
+                             # We need to print the raw string and also yield the chunk object
+                             # This ensures both the console output and the returned object are correct
+                             # print(f"data: {json.dumps(chunk_dict)}")
+
+                             # Return the chunk object for internal processing
+                             yield chunk
+                         except json.JSONDecodeError:
+                             print(f"Warning: Could not decode JSON line: {json_str}")
+                             continue
+         except requests.exceptions.RequestException as e:
+             print(f"Error during DeepInfra stream request: {e}")
+             raise IOError(f"DeepInfra request failed: {e}") from e
+         except Exception as e:
+             print(f"Error processing DeepInfra stream: {e}")
+             raise
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+         timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> ChatCompletion:
+         try:
+             response = self._client.session.post(
+                 self._client.base_url,
+                 headers=self._client.headers,
+                 json=payload,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies
+             )
+             response.raise_for_status()
+             data = response.json()
+
+             choices_data = data.get('choices', [])
+             usage_data = data.get('usage', {})
+
+             choices = []
+             for choice_d in choices_data:
+                 message_d = choice_d.get('message', {})
+                 message = ChatCompletionMessage(
+                     role=message_d.get('role', 'assistant'),
+                     content=message_d.get('content', '')
+                 )
+                 choice = Choice(
+                     index=choice_d.get('index', 0),
+                     message=message,
+                     finish_reason=choice_d.get('finish_reason', 'stop')
+                 )
+                 choices.append(choice)
+
+             usage = CompletionUsage(
+                 prompt_tokens=usage_data.get('prompt_tokens', 0),
+                 completion_tokens=usage_data.get('completion_tokens', 0),
+                 total_tokens=usage_data.get('total_tokens', 0)
+             )
+
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=choices,
+                 created=created_time,
+                 model=data.get('model', model),
+                 usage=usage,
+             )
+             return completion
+
+         except requests.exceptions.RequestException as e:
+             print(f"Error during DeepInfra non-stream request: {e}")
+             raise IOError(f"DeepInfra request failed: {e}") from e
+         except Exception as e:
+             print(f"Error processing DeepInfra response: {e}")
+             raise
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'DeepInfra'):
+         self.completions = Completions(client)
+
+ class DeepInfra(OpenAICompatibleProvider):
+
+     AVAILABLE_MODELS = [
+         # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
+         "deepseek-ai/DeepSeek-R1-0528",
+         "deepseek-ai/DeepSeek-R1",
+         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+         "deepseek-ai/DeepSeek-R1-Turbo",
+         "deepseek-ai/DeepSeek-V3",
+         "deepseek-ai/DeepSeek-Prover-V2-671B",
+         "google/gemma-2-27b-it",
+         "google/gemma-2-9b-it",
+         "google/gemma-3-12b-it",
+         "google/gemma-3-27b-it",
+         "google/gemma-3-4b-it",
+         "meta-llama/Llama-3.3-70B-Instruct",
+         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+         "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+         "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+         "meta-llama/Llama-Guard-4-12B",
+         "meta-llama/Meta-Llama-3.1-8B-Instruct",
+         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+         "microsoft/Phi-4-multimodal-instruct",
+         "microsoft/WizardLM-2-8x22B",
+         "microsoft/phi-4",
+         "microsoft/phi-4-reasoning-plus",
+         "mistralai/Mistral-Small-24B-Instruct-2501",
+         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+         "Qwen/QwQ-32B",
+         "Qwen/Qwen2.5-72B-Instruct",
+         "Qwen/Qwen2.5-Coder-32B-Instruct",
+         "Qwen/Qwen3-14B",
+         "Qwen/Qwen3-30B-A3B",
+         "Qwen/Qwen3-32B",
+         "Qwen/Qwen3-235B-A22B",
+         # "google/gemini-1.5-flash", # >>>> NOT WORKING
+         # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
+         # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
+
+         # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
+
+         # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
+         # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
+         # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
+         # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
+         # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
+         # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
+         # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
+         # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
+         # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
+         # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
+         # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
+     ]
+
+     def __init__(self, browser: str = "chrome"):
+         self.timeout = None  # Default timeout
+         self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
+         self.session = requests.Session()
+
+         agent = LitAgent()
+         fingerprint = agent.generate_fingerprint(browser)
+
+         self.headers = {
+             "Accept": fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "Cache-Control": "no-cache",
+             "Connection": "keep-alive",
+             "Origin": "https://deepinfra.com",
+             "Pragma": "no-cache",
+             "Referer": "https://deepinfra.com/",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-site",
+             "X-Deepinfra-Source": "web-embed",
+             "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+             "User-Agent": fingerprint["user_agent"],
+         }
+         self.session.headers.update(self.headers)
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
          return _ModelList()
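
For orientation, the user-visible API change in this file is that timeout left the DeepInfra constructor, and create() gained per-request timeout and proxies arguments that are forwarded to requests.Session.post. Below is a minimal usage sketch against the 8.3 code as it appears in the diff; the import path mirrors the file's location in the wheel, the proxy URL is a placeholder, and attribute access on the returned objects assumes the Choice/ChatCompletionMessage containers from webscout's utils behave like the Pydantic models they appear to be.

# Sketch only: drives the 8.3 DeepInfra provider defined in the diff above.
from webscout.Provider.OPENAI.deepinfra import DeepInfra

client = DeepInfra(browser="chrome")  # 8.3: timeout is no longer a constructor argument

# Non-streaming call; timeout/proxies are now per-request options on create().
completion = client.chat.completions.create(
    model="deepseek-ai/DeepSeek-R1-0528",  # newly added to AVAILABLE_MODELS in 8.3
    messages=[{"role": "user", "content": "Say hello"}],
    timeout=30,  # forwarded to requests.Session.post
    # proxies={"https": "http://127.0.0.1:8080"},  # placeholder; forwarded verbatim
)
print(completion.choices[0].message.content)

# Streaming call: create() returns a generator of ChatCompletionChunk objects.
for chunk in client.chat.completions.create(
    model="deepseek-ai/DeepSeek-R1-0528",
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
    timeout=30,
):
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")

The other behavioral change worth noting is chunk serialization: 8.2.8 called chunk.to_dict(), while 8.3 probes for model_dump() and falls back to dict(), i.e. the Pydantic v2 and v1 APIs respectively, consistent with the pydantic_imports.py compatibility module added in this release.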