webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
--- a/webscout/Provider/OPENAI/venice.py
+++ b/webscout/Provider/OPENAI/venice.py
@@ -1,425 +1,435 @@
- import time
- import uuid
- import requests
- import json
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     print("Warning: LitAgent not found. Some functionality may be limited.")
-
- # --- Venice Client ---
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'Venice'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = 2049,
-         stream: bool = False,
-         temperature: Optional[float] = 0.8,
-         top_p: Optional[float] = 0.9,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Extract system message if present for systemPrompt parameter
-         system_prompt = self._client.system_prompt
-         for msg in messages:
-             if msg["role"] == "system":
-                 system_prompt = msg["content"]
-                 break
-
-         # Prepare the payload for Venice API
-         payload = {
-             "requestId": str(uuid.uuid4())[:7],
-             "modelId": self._client.convert_model_name(model),
-             "prompt": messages,
-             "systemPrompt": system_prompt,
-             "conversationType": "text",
-             "temperature": temperature if temperature is not None else self._client.temperature,
-             "webEnabled": True,
-             "topP": top_p if top_p is not None else self._client.top_p,
-             "includeVeniceSystemPrompt": False,
-             "isCharacter": False,
-             "clientProcessingTime": 2000
-         }
-
-         # Add optional parameters if provided
-         if max_tokens is not None and max_tokens > 0:
-             payload["max_tokens"] = max_tokens
-
-         # Add any additional parameters
-         for key, value in kwargs.items():
-             if key not in payload:
-                 payload[key] = value
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, model, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             # Handle non-200 responses
-             if response.status_code != 200:
-                 raise IOError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             # Track token usage across chunks
-             prompt_tokens = 0
-             completion_tokens = 0
-             total_tokens = 0
-
-             # Estimate prompt tokens based on message length
-             prompt_tokens = 0
-             for msg in payload.get("prompt", []):
-                 prompt_tokens += len(msg.get("content", "").split())
-             prompt_tokens += len(payload.get("systemPrompt", "").split())
-
-             for line in response.iter_lines():
-                 if not line:
-                     continue
-
-                 try:
-                     # Decode bytes to string
-                     line_data = line.decode('utf-8').strip()
-                     if '"kind":"content"' in line_data:
-                         data = json.loads(line_data)
-                         if 'content' in data:
-                             content = data['content']
-
-                             # Format the content (replace escaped newlines)
-                             content = self._client.format_text(content)
-
-                             # Update token counts
-                             completion_tokens += 1
-                             total_tokens = prompt_tokens + completion_tokens
-
-                             # Create the delta object
-                             delta = ChoiceDelta(
-                                 content=content,
-                                 role="assistant",
-                                 tool_calls=None
-                             )
-
-                             # Create the choice object
-                             choice = Choice(
-                                 index=0,
-                                 delta=delta,
-                                 finish_reason=None,
-                                 logprobs=None
-                             )
-
-                             # Create the chunk object
-                             chunk = ChatCompletionChunk(
-                                 id=request_id,
-                                 choices=[choice],
-                                 created=created_time,
-                                 model=model,
-                                 system_fingerprint=None
-                             )
-
-                             # Convert to dict for proper formatting
-                             chunk_dict = chunk.to_dict()
-
-                             # Add usage information to match OpenAI format
-                             usage_dict = {
-                                 "prompt_tokens": prompt_tokens,
-                                 "completion_tokens": completion_tokens,
-                                 "total_tokens": total_tokens,
-                                 "estimated_cost": None
-                             }
-
-                             chunk_dict["usage"] = usage_dict
-
-                             # Return the chunk object for internal processing
-                             yield chunk
-                 except json.JSONDecodeError:
-                     continue
-                 except UnicodeDecodeError:
-                     continue
-
-             # Final chunk with finish_reason="stop"
-             delta = ChoiceDelta(
-                 content=None,
-                 role=None,
-                 tool_calls=None
-             )
-
-             choice = Choice(
-                 index=0,
-                 delta=delta,
-                 finish_reason="stop",
-                 logprobs=None
-             )
-
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-
-             chunk_dict = chunk.to_dict()
-             chunk_dict["usage"] = {
-                 "prompt_tokens": prompt_tokens,
-                 "completion_tokens": completion_tokens,
-                 "total_tokens": total_tokens,
-                 "estimated_cost": None
-             }
-
-             yield chunk
-
-         except Exception as e:
-             print(f"Error during Venice stream request: {e}")
-             raise IOError(f"Venice request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         try:
-             # For non-streaming, we still use streaming internally to collect the full response
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             # Handle non-200 responses
-             if response.status_code != 200:
-                 raise IOError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             # Collect the full response
-             full_text = ""
-             for line in response.iter_lines():
-                 if not line:
-                     continue
-
-                 try:
-                     # Decode bytes to string
-                     line_data = line.decode('utf-8').strip()
-                     if '"kind":"content"' in line_data:
-                         data = json.loads(line_data)
-                         if 'content' in data:
-                             content = data['content']
-                             full_text += content
-                 except json.JSONDecodeError:
-                     continue
-                 except UnicodeDecodeError:
-                     continue
-
-             # Format the text (replace escaped newlines)
-             full_text = self._client.format_text(full_text)
-
-             # Estimate token counts
-             prompt_tokens = 0
-             for msg in payload.get("prompt", []):
-                 prompt_tokens += len(msg.get("content", "").split())
-             prompt_tokens += len(payload.get("systemPrompt", "").split())
-             completion_tokens = len(full_text.split())
-             total_tokens = prompt_tokens + completion_tokens
-
-             # Create the message object
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_text
-             )
-
-             # Create the choice object
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Create the usage object
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens
-             )
-
-             # Create the completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-
-             return completion
-
-         except Exception as e:
-             print(f"Error during Venice non-stream request: {e}")
-             raise IOError(f"Venice request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'Venice'):
-         self.completions = Completions(client)
-
- class Venice(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for Venice AI API.
-
-     Usage:
-         client = Venice()
-         response = client.chat.completions.create(
-             model="mistral-31-24b",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-     """
-
-     AVAILABLE_MODELS = [
-         "mistral-31-24b",
-         "llama-3.2-3b-akash",
-         "qwen2dot5-coder-32b",
-         "deepseek-coder-v2-lite",
-     ]
-
-     # No model mapping needed as we use the model names directly
-
-     def __init__(
-         self,
-         timeout: Optional[int] = None,
-         browser: str = "chrome"
-     ):
-         """
-         Initialize the Venice client.
-
-         Args:
-             timeout: Request timeout in seconds (None for no timeout)
-             browser: Browser to emulate in user agent
-         """
-         self.timeout = timeout
-         self.temperature = 0.8 # Default temperature
-         self.top_p = 0.9 # Default top_p
-         self.system_prompt = "You are a helpful AI assistant." # Default system prompt
-         self.api_endpoint = "https://venice.ai/api/inference/chat"
-         self.session = requests.Session()
-
-         # Initialize LitAgent for user agent generation
-         agent = LitAgent()
-         self.fingerprint = agent.generate_fingerprint(browser)
-
-         # Headers for the request
-         self.headers = {
-             "User-Agent": self.fingerprint["user_agent"],
-             "accept": self.fingerprint["accept"],
-             "accept-language": self.fingerprint["accept_language"],
-             "content-type": "application/json",
-             "origin": "https://venice.ai",
-             "referer": "https://venice.ai/chat/",
-             "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "same-origin"
-         }
-
-         self.session.headers.update(self.headers)
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     def format_text(self, text: str) -> str:
-         """
-         Format text by replacing escaped newlines with actual newlines.
-
-         Args:
-             text: Text to format
-
-         Returns:
-             Formatted text
-         """
-         # Use a more comprehensive approach to handle all escape sequences
-         try:
-             # First handle double backslashes to avoid issues
-             text = text.replace('\\\\', '\\')
-
-             # Handle common escape sequences
-             text = text.replace('\\n', '\n')
-             text = text.replace('\\r', '\r')
-             text = text.replace('\\t', '\t')
-             text = text.replace('\\"', '"')
-             text = text.replace("\\'", "'")
-
-             # Handle any remaining escape sequences using JSON decoding
-             # This is a fallback in case there are other escape sequences
-             try:
-                 # Add quotes to make it a valid JSON string
-                 json_str = f'"{text}"'
-                 # Use json module to decode all escape sequences
-                 decoded = json.loads(json_str)
-                 return decoded
-             except json.JSONDecodeError:
-                 # If JSON decoding fails, return the text with the replacements we've already done
-                 return text
-         except Exception as e:
-             # If any error occurs, return the original text
-             print(f"Warning: Error formatting text: {e}")
-             return text
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Convert model names to ones supported by Venice.
-
-         Args:
-             model: Model name to convert
-
-         Returns:
-             Venice model name
-         """
-         # If the model is already a valid Venice model, return it
-         if model in self.AVAILABLE_MODELS:
-             return model
-
-         # Default to the most capable model
-         print(f"Warning: Unknown model '{model}'. Using 'mistral-31-24b' instead.")
-         return "mistral-31-24b"
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
-
-     @classmethod
-     def models(cls):
-         """Return the list of available models for Venice."""
-         return cls.AVAILABLE_MODELS
+ import time
+ import uuid
+ import requests
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, count_tokens
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     print("Warning: LitAgent not found. Some functionality may be limited.")
+
+ # --- Venice Client ---
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'Venice'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 2049,
+         stream: bool = False,
+         temperature: Optional[float] = 0.8,
+         top_p: Optional[float] = 0.9,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Extract system message if present for systemPrompt parameter
+         system_prompt = self._client.system_prompt
+         for msg in messages:
+             if msg["role"] == "system":
+                 system_prompt = msg["content"]
+                 break
+
+         # Prepare the payload for Venice API
+         payload = {
+             "requestId": str(uuid.uuid4())[:7],
+             "modelId": self._client.convert_model_name(model),
+             "prompt": messages,
+             "systemPrompt": system_prompt,
+             "conversationType": "text",
+             "temperature": temperature if temperature is not None else self._client.temperature,
+             "webEnabled": True,
+             "topP": top_p if top_p is not None else self._client.top_p,
+             "includeVeniceSystemPrompt": False,
+             "isCharacter": False,
+             "clientProcessingTime": 2000
+         }
+
+         # Add optional parameters if provided
+         if max_tokens is not None and max_tokens > 0:
+             payload["max_tokens"] = max_tokens
+
+         # Add any additional parameters
+         for key, value in kwargs.items():
+             if key not in payload:
+                 payload[key] = value
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+
+             # Handle non-200 responses
+             if response.status_code != 200:
+                 raise IOError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             # Track token usage across chunks
+             prompt_tokens = 0
+             completion_tokens = 0
+             total_tokens = 0
+
+             # Estimate prompt tokens based on message length
+             prompt_tokens = 0
+             for msg in payload.get("prompt", []):
+                 prompt_tokens += count_tokens(msg.get("content", ""))
+             prompt_tokens += count_tokens(payload.get("systemPrompt", ""))
+
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 try:
+                     # Decode bytes to string
+                     line_data = line.decode('utf-8').strip()
+                     if '"kind":"content"' in line_data:
+                         data = json.loads(line_data)
+                         if 'content' in data:
+                             content = data['content']
+
+                             # Format the content (replace escaped newlines)
+                             content = self._client.format_text(content)
+
+                             # Update token counts
+                             completion_tokens += 1
+                             total_tokens = prompt_tokens + completion_tokens
+
+                             # Create the delta object
+                             delta = ChoiceDelta(
+                                 content=content,
+                                 role="assistant",
+                                 tool_calls=None
+                             )
+
+                             # Create the choice object
+                             choice = Choice(
+                                 index=0,
+                                 delta=delta,
+                                 finish_reason=None,
+                                 logprobs=None
+                             )
+
+                             # Create the chunk object
+                             chunk = ChatCompletionChunk(
+                                 id=request_id,
+                                 choices=[choice],
+                                 created=created_time,
+                                 model=model,
+                                 system_fingerprint=None
+                             )
+
+                             # Convert chunk to dict using Pydantic's API
+                             if hasattr(chunk, "model_dump"):
+                                 chunk_dict = chunk.model_dump(exclude_none=True)
+                             else:
+                                 chunk_dict = chunk.dict(exclude_none=True)
+
+                             # Add usage information to match OpenAI format
+                             usage_dict = {
+                                 "prompt_tokens": prompt_tokens,
+                                 "completion_tokens": completion_tokens,
+                                 "total_tokens": total_tokens,
+                                 "estimated_cost": None
+                             }
+
+                             chunk_dict["usage"] = usage_dict
+
+                             # Return the chunk object for internal processing
+                             yield chunk
+                 except json.JSONDecodeError:
+                     continue
+                 except UnicodeDecodeError:
+                     continue
+
+             # Final chunk with finish_reason="stop"
+             delta = ChoiceDelta(
+                 content=None,
+                 role=None,
+                 tool_calls=None
+             )
+
+             choice = Choice(
+                 index=0,
+                 delta=delta,
+                 finish_reason="stop",
+                 logprobs=None
+             )
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 system_fingerprint=None
+             )
+
+             if hasattr(chunk, "model_dump"):
+                 chunk_dict = chunk.model_dump(exclude_none=True)
+             else:
+                 chunk_dict = chunk.dict(exclude_none=True)
+             chunk_dict["usage"] = {
+                 "prompt_tokens": prompt_tokens,
+                 "completion_tokens": completion_tokens,
+                 "total_tokens": total_tokens,
+                 "estimated_cost": None
+             }
+
+             yield chunk
+
+         except Exception as e:
+             print(f"Error during Venice stream request: {e}")
+             raise IOError(f"Venice request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> ChatCompletion:
+         try:
+             # For non-streaming, we still use streaming internally to collect the full response
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+
+             # Handle non-200 responses
+             if response.status_code != 200:
+                 raise IOError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             # Collect the full response
+             full_text = ""
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 try:
+                     # Decode bytes to string
+                     line_data = line.decode('utf-8').strip()
+                     if '"kind":"content"' in line_data:
+                         data = json.loads(line_data)
+                         if 'content' in data:
+                             content = data['content']
+                             full_text += content
+                 except json.JSONDecodeError:
+                     continue
+                 except UnicodeDecodeError:
+                     continue
+
+             # Format the text (replace escaped newlines)
+             full_text = self._client.format_text(full_text)
+
+             # Estimate token counts
+             prompt_tokens = 0
+             for msg in payload.get("prompt", []):
+                 prompt_tokens += count_tokens(msg.get("content", ""))
+             prompt_tokens += count_tokens(payload.get("systemPrompt", ""))
+             completion_tokens = count_tokens(full_text)
+             total_tokens = prompt_tokens + completion_tokens
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_text
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the usage object
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"Error during Venice non-stream request: {e}")
+             raise IOError(f"Venice request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'Venice'):
+         self.completions = Completions(client)
+
+ class Venice(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for Venice AI API.
+
+     Usage:
+         client = Venice()
+         response = client.chat.completions.create(
+             model="mistral-31-24b",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+     """
+
+     AVAILABLE_MODELS = [
+         "mistral-31-24b",
+         "llama-3.2-3b-akash",
+         "qwen2dot5-coder-32b",
+         "deepseek-coder-v2-lite",
+     ]
+
+     # No model mapping needed as we use the model names directly
+
+     def __init__(
+         self,
+         timeout: Optional[int] = None,
+         browser: str = "chrome"
+     ):
+         """
+         Initialize the Venice client.
+
+         Args:
+             timeout: Request timeout in seconds (None for no timeout)
+             browser: Browser to emulate in user agent
+         """
+         self.timeout = timeout
+         self.temperature = 0.8 # Default temperature
+         self.top_p = 0.9 # Default top_p
+         self.system_prompt = "You are a helpful AI assistant." # Default system prompt
+         self.api_endpoint = "https://venice.ai/api/inference/chat"
+         self.session = requests.Session()
+
+         # Initialize LitAgent for user agent generation
+         agent = LitAgent()
+         self.fingerprint = agent.generate_fingerprint(browser)
+
+         # Headers for the request
+         self.headers = {
+             "User-Agent": self.fingerprint["user_agent"],
+             "accept": self.fingerprint["accept"],
+             "accept-language": self.fingerprint["accept_language"],
+             "content-type": "application/json",
+             "origin": "https://venice.ai",
+             "referer": "https://venice.ai/chat/",
+             "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin"
+         }
+
+         self.session.headers.update(self.headers)
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def format_text(self, text: str) -> str:
+         """
+         Format text by replacing escaped newlines with actual newlines.
+
+         Args:
+             text: Text to format
+
+         Returns:
+             Formatted text
+         """
+         # Use a more comprehensive approach to handle all escape sequences
+         try:
+             # First handle double backslashes to avoid issues
+             text = text.replace('\\\\', '\\')
+
+             # Handle common escape sequences
+             text = text.replace('\\n', '\n')
+             text = text.replace('\\r', '\r')
+             text = text.replace('\\t', '\t')
+             text = text.replace('\\"', '"')
+             text = text.replace("\\'", "'")
+
+             # Handle any remaining escape sequences using JSON decoding
+             # This is a fallback in case there are other escape sequences
+             try:
+                 # Add quotes to make it a valid JSON string
+                 json_str = f'"{text}"'
+                 # Use json module to decode all escape sequences
+                 decoded = json.loads(json_str)
+                 return decoded
+             except json.JSONDecodeError:
+                 # If JSON decoding fails, return the text with the replacements we've already done
+                 return text
+         except Exception as e:
+             # If any error occurs, return the original text
+             print(f"Warning: Error formatting text: {e}")
+             return text
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Convert model names to ones supported by Venice.
+
+         Args:
+             model: Model name to convert
+
+         Returns:
+             Venice model name
+         """
+         # If the model is already a valid Venice model, return it
+         if model in self.AVAILABLE_MODELS:
+             return model
+
+         # Default to the most capable model
+         print(f"Warning: Unknown model '{model}'. Using 'mistral-31-24b' instead.")
+         return "mistral-31-24b"
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
+
+     @classmethod
+     def models(cls):
+         """Return the list of available models for Venice."""
+         return cls.AVAILABLE_MODELS
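
What changed in this file, in practice: create() now accepts per-request timeout and proxies arguments that fall back to the client-level values, prompt and completion tokens are estimated with the shared count_tokens helper instead of whitespace splitting, and chunks are serialized through Pydantic's model_dump()/dict() API rather than a custom to_dict(). A minimal usage sketch follows; the import path and the proxy URL are assumptions for illustration, not verified against the released wheel:

    # Minimal sketch based on the diff above. Assumes `Venice` is importable
    # from webscout/Provider/OPENAI/venice.py; the proxy URL is hypothetical.
    from webscout.Provider.OPENAI.venice import Venice

    client = Venice(timeout=60)  # client-level default timeout

    # Non-streaming call; per-request timeout/proxies (new in 8.3) override the client defaults.
    response = client.chat.completions.create(
        model="mistral-31-24b",
        messages=[{"role": "user", "content": "Hello!"}],
        timeout=15,
        proxies={"https": "http://127.0.0.1:8080"},  # hypothetical proxy
    )
    print(response.choices[0].message.content)

    # Streaming call; yields OpenAI-style ChatCompletionChunk objects,
    # ending with a chunk whose finish_reason is "stop".
    for chunk in client.chat.completions.create(
        model="mistral-31-24b",
        messages=[{"role": "user", "content": "Write a haiku."}],
        stream=True,
    ):
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")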