webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/wisecat.py
@@ -1,381 +1,387 @@
- import time
- import uuid
- import requests
- import re
- import json
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     # Define a dummy LitAgent if webscout is not installed or accessible
-     class LitAgent:
-         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
-             # Return minimal default headers if LitAgent is unavailable
-             print("Warning: LitAgent not found. Using default minimal headers.")
-             return {
-                 "accept": "*/*",
-                 "accept_language": "en-US,en;q=0.9",
-                 "platform": "Windows",
-                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
-                 "browser_type": browser,
-             }
-
- # --- WiseCat Client ---
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'WiseCat'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = 2049,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Prepare the payload for WiseCat API
-         payload = {
-             "id": "ephemeral",
-             "messages": messages,
-             "selectedChatModel": self._client.convert_model_name(model)
-         }
-
-         # Add optional parameters if provided
-         if max_tokens is not None and max_tokens > 0:
-             payload["max_tokens"] = max_tokens
-
-         if temperature is not None:
-             payload["temperature"] = temperature
-
-         if top_p is not None:
-             payload["top_p"] = top_p
-
-         # Add any additional parameters
-         payload.update(kwargs)
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, model, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             # Handle non-200 responses
-             if not response.ok:
-                 raise IOError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             # Track token usage across chunks
-             prompt_tokens = 0
-             completion_tokens = 0
-             total_tokens = 0
-
-             # Estimate prompt tokens based on message length
-             for msg in payload.get("messages", []):
-                 prompt_tokens += len(msg.get("content", "").split())
-
-             for line in response.iter_lines():
-                 if line:
-                     decoded_line = line.decode('utf-8').strip()
-
-                     # WiseCat uses a different format, so we need to extract the content
-                     match = re.search(r'0:"(.*?)"', decoded_line)
-                     if match:
-                         content = match.group(1)
-
-                         # Format the content (replace escaped newlines)
-                         content = self._client.format_text(content)
-
-                         # Update token counts
-                         completion_tokens += 1
-                         total_tokens = prompt_tokens + completion_tokens
-
-                         # Create the delta object
-                         delta = ChoiceDelta(
-                             content=content,
-                             role="assistant",
-                             tool_calls=None
-                         )
-
-                         # Create the choice object
-                         choice = Choice(
-                             index=0,
-                             delta=delta,
-                             finish_reason=None,
-                             logprobs=None
-                         )
-
-                         # Create the chunk object
-                         chunk = ChatCompletionChunk(
-                             id=request_id,
-                             choices=[choice],
-                             created=created_time,
-                             model=model,
-                             system_fingerprint=None
-                         )
-
-                         # Convert to dict for proper formatting
-                         chunk_dict = chunk.to_dict()
-
-                         # Add usage information to match OpenAI format
-                         usage_dict = {
-                             "prompt_tokens": prompt_tokens,
-                             "completion_tokens": completion_tokens,
-                             "total_tokens": total_tokens,
-                             "estimated_cost": None
-                         }
-
-                         chunk_dict["usage"] = usage_dict
-
-                         # Return the chunk object for internal processing
-                         yield chunk
-
-             # Final chunk with finish_reason="stop"
-             delta = ChoiceDelta(
-                 content=None,
-                 role=None,
-                 tool_calls=None
-             )
-
-             choice = Choice(
-                 index=0,
-                 delta=delta,
-                 finish_reason="stop",
-                 logprobs=None
-             )
-
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-
-             chunk_dict = chunk.to_dict()
-             chunk_dict["usage"] = {
-                 "prompt_tokens": prompt_tokens,
-                 "completion_tokens": completion_tokens,
-                 "total_tokens": total_tokens,
-                 "estimated_cost": None
-             }
-
-             yield chunk
-
-         except Exception as e:
-             print(f"Error during WiseCat stream request: {e}")
-             raise IOError(f"WiseCat request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         try:
-             # For non-streaming, we still use streaming internally to collect the full response
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             # Handle non-200 responses
-             if not response.ok:
-                 raise IOError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             # Collect the full response
-             full_text = ""
-             for line in response.iter_lines(decode_unicode=True):
-                 if line:
-                     match = re.search(r'0:"(.*?)"', line)
-                     if match:
-                         content = match.group(1)
-                         full_text += content
-
-             # Format the text (replace escaped newlines)
-             full_text = self._client.format_text(full_text)
-
-             # Estimate token counts
-             prompt_tokens = 0
-             for msg in payload.get("messages", []):
-                 prompt_tokens += len(msg.get("content", "").split())
-
-             completion_tokens = len(full_text.split())
-             total_tokens = prompt_tokens + completion_tokens
-
-             # Create the message object
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_text
-             )
-
-             # Create the choice object
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Create the usage object
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens
-             )
-
-             # Create the completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-
-             return completion
-
-         except Exception as e:
-             print(f"Error during WiseCat non-stream request: {e}")
-             raise IOError(f"WiseCat request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'WiseCat'):
-         self.completions = Completions(client)
-
- class WiseCat(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for WiseCat API.
-
-     Usage:
-         client = WiseCat()
-         response = client.chat.completions.create(
-             model="chat-model-large",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-     """
-
-     _base_models = ["chat-model-small", "chat-model-large", "chat-model-reasoning"]
-     # Create AVAILABLE_MODELS as a list with the format "WiseCat/model"
-     AVAILABLE_MODELS = [f"WiseCat/{model}" for model in _base_models]
-     # Create a mapping dictionary for internal use
-     _model_mapping = {model: f"WiseCat/{model}" for model in _base_models}
-
-     def __init__(
-         self,
-         timeout: Optional[int] = None,
-         browser: str = "chrome"
-     ):
-         """
-         Initialize the WiseCat client.
-
-         Args:
-             timeout: Request timeout in seconds (None for no timeout)
-             browser: Browser to emulate in user agent
-         """
-         self.timeout = timeout
-         self.api_endpoint = "https://wise-cat-groq.vercel.app/api/chat"
-         self.session = requests.Session()
-
-         # Initialize LitAgent for user agent generation
-         agent = LitAgent()
-         self.fingerprint = agent.generate_fingerprint(browser)
-
-         # Use the fingerprint for headers
-         self.headers = self.fingerprint
-
-         self.session.headers.update(self.headers)
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     def format_text(self, text: str) -> str:
-         """
-         Format text by replacing escaped newlines with actual newlines.
-
-         Args:
-             text: Text to format
-
-         Returns:
-             Formatted text
-         """
-         # Use a more comprehensive approach to handle all escape sequences
-         try:
-             # First handle double backslashes to avoid issues
-             text = text.replace('\\\\', '\\')
-
-             # Handle common escape sequences
-             text = text.replace('\\n', '\n')
-             text = text.replace('\\r', '\r')
-             text = text.replace('\\t', '\t')
-             text = text.replace('\\"', '"')
-             text = text.replace("\\'", "'")
-
-             # Handle any remaining escape sequences using JSON decoding
-             # This is a fallback in case there are other escape sequences
-             try:
-                 # Add quotes to make it a valid JSON string
-                 json_str = f'"{text}"'
-                 # Use json module to decode all escape sequences
-                 decoded = json.loads(json_str)
-                 return decoded
-             except json.JSONDecodeError:
-                 # If JSON decoding fails, return the text with the replacements we've already done
-                 return text
-         except Exception as e:
-             # If any error occurs, return the original text
-             print(f"Warning: Error formatting text: {e}")
-             return text
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Convert model names to ones supported by WiseCat. Accepts both 'WiseCat/model' and raw model names.
-         """
-         if model.startswith("WiseCat/"):
-             model_raw = model.replace("WiseCat/", "", 1)
-         else:
-             model_raw = model
-         if f"WiseCat/{model_raw}" in self.AVAILABLE_MODELS:
-             return model_raw
-         print(f"Warning: Unknown model '{model}'. Using 'chat-model-large' instead.")
-         return "chat-model-large"
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return WiseCat.AVAILABLE_MODELS
-         return _ModelList()
+ import time
+ import uuid
+ import requests
+ import re
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, count_tokens
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     # Define a dummy LitAgent if webscout is not installed or accessible
+     class LitAgent:
+         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
+             # Return minimal default headers if LitAgent is unavailable
+             print("Warning: LitAgent not found. Using default minimal headers.")
+             return {
+                 "accept": "*/*",
+                 "accept_language": "en-US,en;q=0.9",
+                 "platform": "Windows",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+                 "browser_type": browser,
+             }
+
+ # --- WiseCat Client ---
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'WiseCat'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 2049,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Prepare the payload for WiseCat API
+         payload = {
+             "id": "ephemeral",
+             "messages": messages,
+             "selectedChatModel": self._client.convert_model_name(model)
+         }
+
+         # Add optional parameters if provided
+         if max_tokens is not None and max_tokens > 0:
+             payload["max_tokens"] = max_tokens
+
+         if temperature is not None:
+             payload["temperature"] = temperature
+
+         if top_p is not None:
+             payload["top_p"] = top_p
+
+         # Add any additional parameters
+         payload.update(kwargs)
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+
+             # Handle non-200 responses
+             if not response.ok:
+                 raise IOError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             # Track token usage across chunks
+             prompt_tokens = 0
+             completion_tokens = 0
+             total_tokens = 0
+
+             # Estimate prompt tokens based on message length
+             for msg in payload.get("messages", []):
+                 prompt_tokens += count_tokens(msg.get("content", ""))
+
+             for line in response.iter_lines():
+                 if line:
+                     decoded_line = line.decode('utf-8').strip()
+
+                     # WiseCat uses a different format, so we need to extract the content
+                     match = re.search(r'0:"(.*?)"', decoded_line)
+                     if match:
+                         content = match.group(1)
+
+                         # Format the content (replace escaped newlines)
+                         content = self._client.format_text(content)
+
+                         # Update token counts
+                         completion_tokens += 1
+                         total_tokens = prompt_tokens + completion_tokens
+
+                         # Create the delta object
+                         delta = ChoiceDelta(
+                             content=content,
+                             role="assistant",
+                             tool_calls=None
+                         )
+
+                         # Create the choice object
+                         choice = Choice(
+                             index=0,
+                             delta=delta,
+                             finish_reason=None,
+                             logprobs=None
+                         )
+
+                         # Create the chunk object
+                         chunk = ChatCompletionChunk(
+                             id=request_id,
+                             choices=[choice],
+                             created=created_time,
+                             model=model,
+                             system_fingerprint=None
+                         )
+
+                         # Convert chunk to dict using Pydantic's API
+                         if hasattr(chunk, "model_dump"):
+                             chunk_dict = chunk.model_dump(exclude_none=True)
+                         else:
+                             chunk_dict = chunk.dict(exclude_none=True)
+
+                         # Add usage information to match OpenAI format
+                         usage_dict = {
+                             "prompt_tokens": prompt_tokens,
+                             "completion_tokens": completion_tokens,
+                             "total_tokens": total_tokens,
+                             "estimated_cost": None
+                         }
+
+                         chunk_dict["usage"] = usage_dict
+
+                         # Return the chunk object for internal processing
+                         yield chunk
+
+             # Final chunk with finish_reason="stop"
+             delta = ChoiceDelta(
+                 content=None,
+                 role=None,
+                 tool_calls=None
+             )
+
+             choice = Choice(
+                 index=0,
+                 delta=delta,
+                 finish_reason="stop",
+                 logprobs=None
+             )
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 system_fingerprint=None
+             )
+
+             if hasattr(chunk, "model_dump"):
+                 chunk_dict = chunk.model_dump(exclude_none=True)
+             else:
+                 chunk_dict = chunk.dict(exclude_none=True)
+             chunk_dict["usage"] = {
+                 "prompt_tokens": prompt_tokens,
+                 "completion_tokens": completion_tokens,
+                 "total_tokens": total_tokens,
+                 "estimated_cost": None
+             }
+
+             yield chunk
+
+         except Exception as e:
+             print(f"Error during WiseCat stream request: {e}")
+             raise IOError(f"WiseCat request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> ChatCompletion:
+         try:
+             # For non-streaming, we still use streaming internally to collect the full response
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+
+             # Handle non-200 responses
+             if not response.ok:
+                 raise IOError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             # Collect the full response
+             full_text = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     match = re.search(r'0:"(.*?)"', line)
+                     if match:
+                         content = match.group(1)
+                         full_text += content
+
+             # Format the text (replace escaped newlines)
+             full_text = self._client.format_text(full_text)
+
+             # Estimate token counts
+             prompt_tokens = 0
+             for msg in payload.get("messages", []):
+                 prompt_tokens += count_tokens(msg.get("content", ""))
+
+             completion_tokens = count_tokens(full_text)
+             total_tokens = prompt_tokens + completion_tokens
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_text
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the usage object
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"Error during WiseCat non-stream request: {e}")
+             raise IOError(f"WiseCat request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'WiseCat'):
+         self.completions = Completions(client)
+
+ class WiseCat(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for WiseCat API.
+
+     Usage:
+         client = WiseCat()
+         response = client.chat.completions.create(
+             model="chat-model-large",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+     """
+
+     _base_models = ["chat-model-small", "chat-model-large", "chat-model-reasoning"]
+     # Create AVAILABLE_MODELS as a list with the format "WiseCat/model"
+     AVAILABLE_MODELS = [f"WiseCat/{model}" for model in _base_models]
+     # Create a mapping dictionary for internal use
+     _model_mapping = {model: f"WiseCat/{model}" for model in _base_models}
+
+     def __init__(
+         self,
+         timeout: Optional[int] = None,
+         browser: str = "chrome"
+     ):
+         """
+         Initialize the WiseCat client.
+
+         Args:
+             timeout: Request timeout in seconds (None for no timeout)
+             browser: Browser to emulate in user agent
+         """
+         self.timeout = timeout
+         self.api_endpoint = "https://wise-cat-groq.vercel.app/api/chat"
+         self.session = requests.Session()
+
+         # Initialize LitAgent for user agent generation
+         agent = LitAgent()
+         self.fingerprint = agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = self.fingerprint
+
+         self.session.headers.update(self.headers)
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def format_text(self, text: str) -> str:
+         """
+         Format text by replacing escaped newlines with actual newlines.
+
+         Args:
+             text: Text to format
+
+         Returns:
+             Formatted text
+         """
+         # Use a more comprehensive approach to handle all escape sequences
+         try:
+             # First handle double backslashes to avoid issues
+             text = text.replace('\\\\', '\\')
+
+             # Handle common escape sequences
+             text = text.replace('\\n', '\n')
+             text = text.replace('\\r', '\r')
+             text = text.replace('\\t', '\t')
+             text = text.replace('\\"', '"')
+             text = text.replace("\\'", "'")
+
+             # Handle any remaining escape sequences using JSON decoding
+             # This is a fallback in case there are other escape sequences
+             try:
+                 # Add quotes to make it a valid JSON string
+                 json_str = f'"{text}"'
+                 # Use json module to decode all escape sequences
+                 decoded = json.loads(json_str)
+                 return decoded
+             except json.JSONDecodeError:
+                 # If JSON decoding fails, return the text with the replacements we've already done
+                 return text
+         except Exception as e:
+             # If any error occurs, return the original text
+             print(f"Warning: Error formatting text: {e}")
+             return text
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Convert model names to ones supported by WiseCat. Accepts both 'WiseCat/model' and raw model names.
+         """
+         if model.startswith("WiseCat/"):
+             model_raw = model.replace("WiseCat/", "", 1)
+         else:
+             model_raw = model
+         if f"WiseCat/{model_raw}" in self.AVAILABLE_MODELS:
+             return model_raw
+         print(f"Warning: Unknown model '{model}'. Using 'chat-model-large' instead.")
+         return "chat-model-large"
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return WiseCat.AVAILABLE_MODELS
+         return _ModelList()
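
The functional changes in wisecat.py between 8.2.8 and 8.3 are visible above: prompt and completion tokens are now estimated with the count_tokens helper instead of len(text.split()), and chunks are serialized through Pydantic's model_dump() with a dict() fallback for Pydantic v1. A minimal usage sketch of the client whose diff is shown; the direct module import path is assumed from the file layout, and the usage figures are client-side estimates, not provider-reported:

# Sketch only: assumes webscout 8.3 is installed and the module is importable
# at this path (per the "Files changed" list above).
from webscout.Provider.OPENAI.wisecat import WiseCat

client = WiseCat(timeout=30)

# Non-streaming: returns a ChatCompletion; usage counts are estimated
# via count_tokens() in 8.3 (previously a whitespace split() count).
response = client.chat.completions.create(
    model="WiseCat/chat-model-large",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
print(response.usage.total_tokens)  # estimate, not reported by the API

# Streaming: yields ChatCompletionChunk objects; the final chunk carries
# finish_reason="stop" with a None content delta.
for chunk in client.chat.completions.create(
    model="WiseCat/chat-model-reasoning",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)

Unknown model names fall back to chat-model-large with a printed warning, per convert_model_name above.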