webscout 8.2.8__py3-none-any.whl → 8.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/venice.py
@@ -1,425 +1,431 @@
-import time
-import uuid
-import requests
-import json
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
-)
-
-# Attempt to import LitAgent, fallback if not available
-try:
-    from webscout.litagent import LitAgent
-except ImportError:
-    print("Warning: LitAgent not found. Some functionality may be limited.")
-
-# --- Venice Client ---
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'Venice'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = 2049,
-        stream: bool = False,
-        temperature: Optional[float] = 0.8,
-        top_p: Optional[float] = 0.9,
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        # Extract system message if present for systemPrompt parameter
-        system_prompt = self._client.system_prompt
-        for msg in messages:
-            if msg["role"] == "system":
-                system_prompt = msg["content"]
-                break
-
-        # Prepare the payload for Venice API
-        payload = {
-            "requestId": str(uuid.uuid4())[:7],
-            "modelId": self._client.convert_model_name(model),
-            "prompt": messages,
-            "systemPrompt": system_prompt,
-            "conversationType": "text",
-            "temperature": temperature if temperature is not None else self._client.temperature,
-            "webEnabled": True,
-            "topP": top_p if top_p is not None else self._client.top_p,
-            "includeVeniceSystemPrompt": False,
-            "isCharacter": False,
-            "clientProcessingTime": 2000
-        }
-
-        # Add optional parameters if provided
-        if max_tokens is not None and max_tokens > 0:
-            payload["max_tokens"] = max_tokens
-
-        # Add any additional parameters
-        for key, value in kwargs.items():
-            if key not in payload:
-                payload[key] = value
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        else:
-            return self._create_non_stream(request_id, created_time, model, payload)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            # Handle non-200 responses
-            if response.status_code != 200:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Track token usage across chunks
-            prompt_tokens = 0
-            completion_tokens = 0
-            total_tokens = 0
-
-            # Estimate prompt tokens based on message length
-            prompt_tokens = 0
-            for msg in payload.get("prompt", []):
-                prompt_tokens += len(msg.get("content", "").split())
-            prompt_tokens += len(payload.get("systemPrompt", "").split())
-
-            for line in response.iter_lines():
-                if not line:
-                    continue
-
-                try:
-                    # Decode bytes to string
-                    line_data = line.decode('utf-8').strip()
-                    if '"kind":"content"' in line_data:
-                        data = json.loads(line_data)
-                        if 'content' in data:
-                            content = data['content']
-
-                            # Format the content (replace escaped newlines)
-                            content = self._client.format_text(content)
-
-                            # Update token counts
-                            completion_tokens += 1
-                            total_tokens = prompt_tokens + completion_tokens
-
-                            # Create the delta object
-                            delta = ChoiceDelta(
-                                content=content,
-                                role="assistant",
-                                tool_calls=None
-                            )
-
-                            # Create the choice object
-                            choice = Choice(
-                                index=0,
-                                delta=delta,
-                                finish_reason=None,
-                                logprobs=None
-                            )
-
-                            # Create the chunk object
-                            chunk = ChatCompletionChunk(
-                                id=request_id,
-                                choices=[choice],
-                                created=created_time,
-                                model=model,
-                                system_fingerprint=None
-                            )
-
-                            # Convert to dict for proper formatting
-                            chunk_dict = chunk.to_dict()
-
-                            # Add usage information to match OpenAI format
-                            usage_dict = {
-                                "prompt_tokens": prompt_tokens,
-                                "completion_tokens": completion_tokens,
-                                "total_tokens": total_tokens,
-                                "estimated_cost": None
-                            }
-
-                            chunk_dict["usage"] = usage_dict
-
-                            # Return the chunk object for internal processing
-                            yield chunk
-                except json.JSONDecodeError:
-                    continue
-                except UnicodeDecodeError:
-                    continue
-
-            # Final chunk with finish_reason="stop"
-            delta = ChoiceDelta(
-                content=None,
-                role=None,
-                tool_calls=None
-            )
-
-            choice = Choice(
-                index=0,
-                delta=delta,
-                finish_reason="stop",
-                logprobs=None
-            )
-
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                system_fingerprint=None
-            )
-
-            chunk_dict = chunk.to_dict()
-            chunk_dict["usage"] = {
-                "prompt_tokens": prompt_tokens,
-                "completion_tokens": completion_tokens,
-                "total_tokens": total_tokens,
-                "estimated_cost": None
-            }
-
-            yield chunk
-
-        except Exception as e:
-            print(f"Error during Venice stream request: {e}")
-            raise IOError(f"Venice request failed: {e}") from e
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> ChatCompletion:
-        try:
-            # For non-streaming, we still use streaming internally to collect the full response
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            # Handle non-200 responses
-            if response.status_code != 200:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Collect the full response
-            full_text = ""
-            for line in response.iter_lines():
-                if not line:
-                    continue
-
-                try:
-                    # Decode bytes to string
-                    line_data = line.decode('utf-8').strip()
-                    if '"kind":"content"' in line_data:
-                        data = json.loads(line_data)
-                        if 'content' in data:
-                            content = data['content']
-                            full_text += content
-                except json.JSONDecodeError:
-                    continue
-                except UnicodeDecodeError:
-                    continue
-
-            # Format the text (replace escaped newlines)
-            full_text = self._client.format_text(full_text)
-
-            # Estimate token counts
-            prompt_tokens = 0
-            for msg in payload.get("prompt", []):
-                prompt_tokens += len(msg.get("content", "").split())
-            prompt_tokens += len(payload.get("systemPrompt", "").split())
-            completion_tokens = len(full_text.split())
-            total_tokens = prompt_tokens + completion_tokens
-
-            # Create the message object
-            message = ChatCompletionMessage(
-                role="assistant",
-                content=full_text
-            )
-
-            # Create the choice object
-            choice = Choice(
-                index=0,
-                message=message,
-                finish_reason="stop"
-            )
-
-            # Create the usage object
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=total_tokens
-            )
-
-            # Create the completion object
-            completion = ChatCompletion(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                usage=usage,
-            )
-
-            return completion
-
-        except Exception as e:
-            print(f"Error during Venice non-stream request: {e}")
-            raise IOError(f"Venice request failed: {e}") from e
-
-class Chat(BaseChat):
-    def __init__(self, client: 'Venice'):
-        self.completions = Completions(client)
-
-class Venice(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for Venice AI API.
-
-    Usage:
-        client = Venice()
-        response = client.chat.completions.create(
-            model="mistral-31-24b",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-    """
-
-    AVAILABLE_MODELS = [
-        "mistral-31-24b",
-        "llama-3.2-3b-akash",
-        "qwen2dot5-coder-32b",
-        "deepseek-coder-v2-lite",
-    ]
-
-    # No model mapping needed as we use the model names directly
-
-    def __init__(
-        self,
-        timeout: Optional[int] = None,
-        browser: str = "chrome"
-    ):
-        """
-        Initialize the Venice client.
-
-        Args:
-            timeout: Request timeout in seconds (None for no timeout)
-            browser: Browser to emulate in user agent
-        """
-        self.timeout = timeout
-        self.temperature = 0.8 # Default temperature
-        self.top_p = 0.9 # Default top_p
-        self.system_prompt = "You are a helpful AI assistant." # Default system prompt
-        self.api_endpoint = "https://venice.ai/api/inference/chat"
-        self.session = requests.Session()
-
-        # Initialize LitAgent for user agent generation
-        agent = LitAgent()
-        self.fingerprint = agent.generate_fingerprint(browser)
-
-        # Headers for the request
-        self.headers = {
-            "User-Agent": self.fingerprint["user_agent"],
-            "accept": self.fingerprint["accept"],
-            "accept-language": self.fingerprint["accept_language"],
-            "content-type": "application/json",
-            "origin": "https://venice.ai",
-            "referer": "https://venice.ai/chat/",
-            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin"
-        }
-
-        self.session.headers.update(self.headers)
-
-        # Initialize the chat interface
-        self.chat = Chat(self)
-
-    def format_text(self, text: str) -> str:
-        """
-        Format text by replacing escaped newlines with actual newlines.
-
-        Args:
-            text: Text to format
-
-        Returns:
-            Formatted text
-        """
-        # Use a more comprehensive approach to handle all escape sequences
-        try:
-            # First handle double backslashes to avoid issues
-            text = text.replace('\\\\', '\\')
-
-            # Handle common escape sequences
-            text = text.replace('\\n', '\n')
-            text = text.replace('\\r', '\r')
-            text = text.replace('\\t', '\t')
-            text = text.replace('\\"', '"')
-            text = text.replace("\\'", "'")
-
-            # Handle any remaining escape sequences using JSON decoding
-            # This is a fallback in case there are other escape sequences
-            try:
-                # Add quotes to make it a valid JSON string
-                json_str = f'"{text}"'
-                # Use json module to decode all escape sequences
-                decoded = json.loads(json_str)
-                return decoded
-            except json.JSONDecodeError:
-                # If JSON decoding fails, return the text with the replacements we've already done
-                return text
-        except Exception as e:
-            # If any error occurs, return the original text
-            print(f"Warning: Error formatting text: {e}")
-            return text
-
-    def convert_model_name(self, model: str) -> str:
-        """
-        Convert model names to ones supported by Venice.
-
-        Args:
-            model: Model name to convert
-
-        Returns:
-            Venice model name
-        """
-        # If the model is already a valid Venice model, return it
-        if model in self.AVAILABLE_MODELS:
-            return model
-
-        # Default to the most capable model
-        print(f"Warning: Unknown model '{model}'. Using 'mistral-31-24b' instead.")
-        return "mistral-31-24b"
-
-    @property
-    def models(self):
-        class _ModelList:
-            def list(inner_self):
-                return type(self).AVAILABLE_MODELS
-        return _ModelList()
-
-    @classmethod
-    def models(cls):
-        """Return the list of available models for Venice."""
-        return cls.AVAILABLE_MODELS
+import time
+import uuid
+import requests
+import json
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, count_tokens
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    print("Warning: LitAgent not found. Some functionality may be limited.")
+
+# --- Venice Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Venice'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = 0.8,
+        top_p: Optional[float] = 0.9,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Extract system message if present for systemPrompt parameter
+        system_prompt = self._client.system_prompt
+        for msg in messages:
+            if msg["role"] == "system":
+                system_prompt = msg["content"]
+                break
+
+        # Prepare the payload for Venice API
+        payload = {
+            "requestId": str(uuid.uuid4())[:7],
+            "modelId": self._client.convert_model_name(model),
+            "prompt": messages,
+            "systemPrompt": system_prompt,
+            "conversationType": "text",
+            "temperature": temperature if temperature is not None else self._client.temperature,
+            "webEnabled": True,
+            "topP": top_p if top_p is not None else self._client.top_p,
+            "includeVeniceSystemPrompt": False,
+            "isCharacter": False,
+            "clientProcessingTime": 2000
+        }
+
+        # Add optional parameters if provided
+        if max_tokens is not None and max_tokens > 0:
+            payload["max_tokens"] = max_tokens
+
+        # Add any additional parameters
+        for key, value in kwargs.items():
+            if key not in payload:
+                payload[key] = value
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            )
+
+            # Handle non-200 responses
+            if response.status_code != 200:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Track token usage across chunks
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            # Estimate prompt tokens based on message length
+            prompt_tokens = 0
+            for msg in payload.get("prompt", []):
+                prompt_tokens += count_tokens(msg.get("content", ""))
+            prompt_tokens += count_tokens(payload.get("systemPrompt", ""))
+
+            for line in response.iter_lines():
+                if not line:
+                    continue
+
+                try:
+                    # Decode bytes to string
+                    line_data = line.decode('utf-8').strip()
+                    if '"kind":"content"' in line_data:
+                        data = json.loads(line_data)
+                        if 'content' in data:
+                            content = data['content']
+
+                            # Format the content (replace escaped newlines)
+                            content = self._client.format_text(content)
+
+                            # Update token counts
+                            completion_tokens += 1
+                            total_tokens = prompt_tokens + completion_tokens
+
+                            # Create the delta object
+                            delta = ChoiceDelta(
+                                content=content,
+                                role="assistant",
+                                tool_calls=None
+                            )
+
+                            # Create the choice object
+                            choice = Choice(
+                                index=0,
+                                delta=delta,
+                                finish_reason=None,
+                                logprobs=None
+                            )
+
+                            # Create the chunk object
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                                system_fingerprint=None
+                            )
+
+                            # Convert chunk to dict using Pydantic's API
+                            if hasattr(chunk, "model_dump"):
+                                chunk_dict = chunk.model_dump(exclude_none=True)
+                            else:
+                                chunk_dict = chunk.dict(exclude_none=True)
+
+                            # Add usage information to match OpenAI format
+                            usage_dict = {
+                                "prompt_tokens": prompt_tokens,
+                                "completion_tokens": completion_tokens,
+                                "total_tokens": total_tokens,
+                                "estimated_cost": None
+                            }
+
+                            chunk_dict["usage"] = usage_dict
+
+                            # Return the chunk object for internal processing
+                            yield chunk
+                except json.JSONDecodeError:
+                    continue
+                except UnicodeDecodeError:
+                    continue
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            if hasattr(chunk, "model_dump"):
+                chunk_dict = chunk.model_dump(exclude_none=True)
+            else:
+                chunk_dict = chunk.dict(exclude_none=True)
+            chunk_dict["usage"] = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+
+            yield chunk
+
+        except Exception as e:
+            print(f"Error during Venice stream request: {e}")
+            raise IOError(f"Venice request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+    ) -> ChatCompletion:
+        try:
+            # For non-streaming, we still use streaming internally to collect the full response
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            )
+
+            # Handle non-200 responses
+            if response.status_code != 200:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Collect the full response
+            full_text = ""
+            for line in response.iter_lines():
+                if not line:
+                    continue
+
+                try:
+                    # Decode bytes to string
+                    line_data = line.decode('utf-8').strip()
+                    if '"kind":"content"' in line_data:
+                        data = json.loads(line_data)
+                        if 'content' in data:
+                            content = data['content']
+                            full_text += content
+                except json.JSONDecodeError:
+                    continue
+                except UnicodeDecodeError:
+                    continue
+
+            # Format the text (replace escaped newlines)
+            full_text = self._client.format_text(full_text)
+
+            # Estimate token counts
+            prompt_tokens = 0
+            for msg in payload.get("prompt", []):
+                prompt_tokens += count_tokens(msg.get("content", ""))
+            prompt_tokens += count_tokens(payload.get("systemPrompt", ""))
+            completion_tokens = count_tokens(full_text)
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"Error during Venice non-stream request: {e}")
+            raise IOError(f"Venice request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Venice'):
+        self.completions = Completions(client)
+
+class Venice(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for Venice AI API.
+
+    Usage:
+        client = Venice()
+        response = client.chat.completions.create(
+            model="mistral-31-24b",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+    """
+
+    AVAILABLE_MODELS = [
+        "mistral-31-24b",
+        "llama-3.2-3b-akash",
+        "qwen2dot5-coder-32b",
+        "deepseek-coder-v2-lite",
+    ]
+
+    # No model mapping needed as we use the model names directly
+
+    def __init__(
+        self,
+        timeout: Optional[int] = None,
+        browser: str = "chrome"
+    ):
+        """
+        Initialize the Venice client.
+
+        Args:
+            timeout: Request timeout in seconds (None for no timeout)
+            browser: Browser to emulate in user agent
+        """
+        self.timeout = timeout
+        self.temperature = 0.8 # Default temperature
+        self.top_p = 0.9 # Default top_p
+        self.system_prompt = "You are a helpful AI assistant." # Default system prompt
+        self.api_endpoint = "https://venice.ai/api/inference/chat"
+        self.session = requests.Session()
+
+        # Initialize LitAgent for user agent generation
+        agent = LitAgent()
+        self.fingerprint = agent.generate_fingerprint(browser)
+
+        # Headers for the request
+        self.headers = {
+            "User-Agent": self.fingerprint["user_agent"],
+            "accept": self.fingerprint["accept"],
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "origin": "https://venice.ai",
+            "referer": "https://venice.ai/chat/",
+            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin"
+        }
+
+        self.session.headers.update(self.headers)
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    def format_text(self, text: str) -> str:
+        """
+        Format text by replacing escaped newlines with actual newlines.
+
+        Args:
+            text: Text to format
+
+        Returns:
+            Formatted text
+        """
+        # Use a more comprehensive approach to handle all escape sequences
+        try:
+            # First handle double backslashes to avoid issues
+            text = text.replace('\\\\', '\\')
+
+            # Handle common escape sequences
+            text = text.replace('\\n', '\n')
+            text = text.replace('\\r', '\r')
+            text = text.replace('\\t', '\t')
+            text = text.replace('\\"', '"')
+            text = text.replace("\\'", "'")
+
+            # Handle any remaining escape sequences using JSON decoding
+            # This is a fallback in case there are other escape sequences
+            try:
+                # Add quotes to make it a valid JSON string
+                json_str = f'"{text}"'
+                # Use json module to decode all escape sequences
+                decoded = json.loads(json_str)
+                return decoded
+            except json.JSONDecodeError:
+                # If JSON decoding fails, return the text with the replacements we've already done
+                return text
+        except Exception as e:
+            # If any error occurs, return the original text
+            print(f"Warning: Error formatting text: {e}")
+            return text
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Convert model names to ones supported by Venice.
+
+        Args:
+            model: Model name to convert
+
+        Returns:
+            Venice model name
+        """
+        # If the model is already a valid Venice model, return it
+        if model in self.AVAILABLE_MODELS:
+            return model
+
+        # Default to the most capable model
+        print(f"Warning: Unknown model '{model}'. Using 'mistral-31-24b' instead.")
+        return "mistral-31-24b"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+    @classmethod
+    def models(cls):
+        """Return the list of available models for Venice."""
+        return cls.AVAILABLE_MODELS
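
For orientation, a minimal usage sketch of the updated provider follows. It assumes Venice is importable from webscout.Provider.OPENAI (the package groups its OpenAI-compatible providers there) and that usage fields are attribute-accessible; those two points are assumptions, while the call signatures come straight from the class above.

# Minimal sketch, assuming Venice is re-exported from webscout.Provider.OPENAI.
from webscout.Provider.OPENAI import Venice

client = Venice(timeout=60)

# Non-streaming: in 8.2.9 usage is estimated with count_tokens()
# rather than a whitespace split, so the counts are token-based.
response = client.chat.completions.create(
    model="mistral-31-24b",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
print(response.usage.total_tokens)  # assumed attribute access on CompletionUsage

# Streaming: chunks are ChatCompletionChunk objects; the final chunk
# arrives with finish_reason="stop" and an empty delta.
for chunk in client.chat.completions.create(
    model="mistral-31-24b",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)

The hasattr(chunk, "model_dump") branch that replaces chunk.to_dict() keeps chunk serialization working under both pydantic v1 (.dict()) and v2 (.model_dump()), which lines up with the pydantic_imports.py compatibility module added in this release.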