webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/exaai.py
@@ -1,411 +1,420 @@
-import time
-import uuid
-import requests
-import json
-import re
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
-)
-
-# Attempt to import LitAgent, fallback if not available
-try:
-    from webscout.litagent import LitAgent
-except ImportError:
-    pass
-
-# --- ExaAI Client ---
-
-# ANSI escape codes for formatting
-BOLD = "\033[1m"
-RED = "\033[91m"
-RESET = "\033[0m"
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'ExaAI'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = None,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        # Filter out system messages and warn the user if any are present
-        filtered_messages = []
-        has_system_message = False
-
-        for msg in messages:
-            if msg["role"] == "system":
-                has_system_message = True
-                continue  # Skip system messages
-            filtered_messages.append(msg)
-
-        if has_system_message:
-            # Print warning in bold red
-            print(f"{BOLD}{RED}Warning: ExaAI does not support system messages, they will be ignored.{RESET}")
-
-        # If no messages left after filtering, raise an error
-        if not filtered_messages:
-            raise ValueError("At least one user message is required")
-
-        # Generate a unique ID for the conversation
-        conversation_id = uuid.uuid4().hex[:16]
-
-        # Prepare the payload for ExaAI API
-        payload = {
-            "id": conversation_id,
-            "messages": filtered_messages
-        }
-
-        # Add optional parameters if provided
-        if max_tokens is not None and max_tokens > 0:
-            payload["max_tokens"] = max_tokens
-
-        if temperature is not None:
-            payload["temperature"] = temperature
-
-        if top_p is not None:
-            payload["top_p"] = top_p
-
-        # Add any additional parameters
-        for key, value in kwargs.items():
-            if key not in payload:
-                payload[key] = value
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        else:
-            return self._create_non_stream(request_id, created_time, model, payload)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            # Handle non-200 responses
-            if not response.ok:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Track token usage across chunks
-            prompt_tokens = 0
-            completion_tokens = 0
-            total_tokens = 0
-
-            # Estimate prompt tokens based on message length
-            for msg in payload.get("messages", []):
-                prompt_tokens += len(msg.get("content", "").split())
-
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    match = re.search(r'0:"(.*?)"', line)
-                    if match:
-                        content = match.group(1)
-
-                        # Format the content (replace escaped newlines)
-                        content = self._client.format_text(content)
-
-                        # Update token counts
-                        completion_tokens += 1
-                        total_tokens = prompt_tokens + completion_tokens
-
-                        # Create the delta object
-                        delta = ChoiceDelta(
-                            content=content,
-                            role="assistant",
-                            tool_calls=None
-                        )
-
-                        # Create the choice object
-                        choice = Choice(
-                            index=0,
-                            delta=delta,
-                            finish_reason=None,
-                            logprobs=None
-                        )
-
-                        # Create the chunk object
-                        chunk = ChatCompletionChunk(
-                            id=request_id,
-                            choices=[choice],
-                            created=created_time,
-                            model=model,
-                            system_fingerprint=None
-                        )
-
-                        # Convert to dict for proper formatting
-                        chunk_dict = chunk.to_dict()
-
-                        # Add usage information to match OpenAI format
-                        usage_dict = {
-                            "prompt_tokens": prompt_tokens,
-                            "completion_tokens": completion_tokens,
-                            "total_tokens": total_tokens,
-                            "estimated_cost": None
-                        }
-
-                        chunk_dict["usage"] = usage_dict
-
-                        # Return the chunk object for internal processing
-                        yield chunk
-
-            # Final chunk with finish_reason="stop"
-            delta = ChoiceDelta(
-                content=None,
-                role=None,
-                tool_calls=None
-            )
-
-            choice = Choice(
-                index=0,
-                delta=delta,
-                finish_reason="stop",
-                logprobs=None
-            )
-
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                system_fingerprint=None
-            )
-
-            chunk_dict = chunk.to_dict()
-            chunk_dict["usage"] = {
-                "prompt_tokens": prompt_tokens,
-                "completion_tokens": completion_tokens,
-                "total_tokens": total_tokens,
-                "estimated_cost": None
-            }
-
-            yield chunk
-
-        except Exception as e:
-            print(f"Error during ExaAI stream request: {e}")
-            raise IOError(f"ExaAI request failed: {e}") from e
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> ChatCompletion:
-        try:
-            # For non-streaming, we still use streaming internally to collect the full response
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            # Handle non-200 responses
-            if not response.ok:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Collect the full response
-            full_text = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    match = re.search(r'0:"(.*?)"', line)
-                    if match:
-                        content = match.group(1)
-                        full_text += content
-
-            # Format the text (replace escaped newlines)
-            full_text = self._client.format_text(full_text)
-
-            # Estimate token counts
-            prompt_tokens = 0
-            for msg in payload.get("messages", []):
-                prompt_tokens += len(msg.get("content", "").split())
-
-            completion_tokens = len(full_text.split())
-            total_tokens = prompt_tokens + completion_tokens
-
-            # Create the message object
-            message = ChatCompletionMessage(
-                role="assistant",
-                content=full_text
-            )
-
-            # Create the choice object
-            choice = Choice(
-                index=0,
-                message=message,
-                finish_reason="stop"
-            )
-
-            # Create the usage object
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=total_tokens
-            )
-
-            # Create the completion object
-            completion = ChatCompletion(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                usage=usage,
-            )
-
-            return completion
-
-        except Exception as e:
-            print(f"Error during ExaAI non-stream request: {e}")
-            raise IOError(f"ExaAI request failed: {e}") from e
-
-class Chat(BaseChat):
-    def __init__(self, client: 'ExaAI'):
-        self.completions = Completions(client)
-
-class ExaAI(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for ExaAI API.
-
-    Usage:
-        client = ExaAI()
-        response = client.chat.completions.create(
-            model="O3-Mini",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-
-    Note:
-        ExaAI does not support system messages. Any system messages will be ignored.
-    """
-
-    AVAILABLE_MODELS = ["O3-Mini"]
-
-    def __init__(
-        self,
-        timeout: Optional[int] = None,
-        browser: str = "chrome"
-    ):
-        """
-        Initialize the ExaAI client.
-
-        Args:
-            timeout: Request timeout in seconds (None for no timeout)
-            browser: Browser to emulate in user agent
-        """
-        self.timeout = timeout
-        self.api_endpoint = "https://o3minichat.exa.ai/api/chat"
-        self.session = requests.Session()
-
-        # Initialize LitAgent for user agent generation
-        agent = LitAgent()
-        self.fingerprint = agent.generate_fingerprint(browser)
-
-        # Headers for the request
-        self.headers = {
-            "authority": "o3minichat.exa.ai",
-            "accept": self.fingerprint["accept"],
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": self.fingerprint["accept_language"],
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://o3minichat.exa.ai",
-            "priority": "u=1, i",
-            "referer": "https://o3minichat.exa.ai/",
-            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "sec-gpc": "1",
-            "user-agent": self.fingerprint["user_agent"]
-        }
-
-        self.session.headers.update(self.headers)
-
-        # Initialize the chat interface
-        self.chat = Chat(self)
-
-    def format_text(self, text: str) -> str:
-        """
-        Format text by replacing escaped newlines with actual newlines.
-
-        Args:
-            text: Text to format
-
-        Returns:
-            Formatted text
-        """
-        # Use a more comprehensive approach to handle all escape sequences
-        try:
-            # First handle double backslashes to avoid issues
-            text = text.replace('\\\\', '\\')
-
-            # Handle common escape sequences
-            text = text.replace('\\n', '\n')
-            text = text.replace('\\r', '\r')
-            text = text.replace('\\t', '\t')
-            text = text.replace('\\"', '"')
-            text = text.replace("\\'", "'")
-
-            # Handle any remaining escape sequences using JSON decoding
-            # This is a fallback in case there are other escape sequences
-            try:
-                # Add quotes to make it a valid JSON string
-                json_str = f'"{text}"'
-                # Use json module to decode all escape sequences
-                decoded = json.loads(json_str)
-                return decoded
-            except json.JSONDecodeError:
-                # If JSON decoding fails, return the text with the replacements we've already done
-                return text
-        except Exception as e:
-            # If any error occurs, return the original text
-            print(f"Warning: Error formatting text: {e}")
-            return text
-
-    def convert_model_name(self, model: str) -> str:
-        """
-        Convert model names to ones supported by ExaAI.
-
-        Args:
-            model: Model name to convert (ignored as ExaAI only supports O3-Mini)
-
-        Returns:
-            ExaAI model name
-        """
-        # ExaAI only supports O3-Mini, regardless of the input model
-        print(f"Note: ExaAI only supports O3-Mini model. Ignoring provided model '{model}'.")
-        return "O3-Mini"
-
-    @property
-    def models(self):
-        class _ModelList:
-            def list(inner_self):
-                return type(self).AVAILABLE_MODELS
-        return _ModelList()
+import time
+import uuid
+import requests
+import json
+import re
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, count_tokens
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+# --- ExaAI Client ---
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'ExaAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Filter out system messages and warn the user if any are present
+        filtered_messages = []
+        has_system_message = False
+
+        for msg in messages:
+            if msg["role"] == "system":
+                has_system_message = True
+                continue  # Skip system messages
+            filtered_messages.append(msg)
+
+        if has_system_message:
+            # Print warning in bold red
+            print(f"{BOLD}{RED}Warning: ExaAI does not support system messages, they will be ignored.{RESET}")
+
+        # If no messages left after filtering, raise an error
+        if not filtered_messages:
+            raise ValueError("At least one user message is required")
+
+        # Generate a unique ID for the conversation
+        conversation_id = uuid.uuid4().hex[:16]
+
+        # Prepare the payload for ExaAI API
+        payload = {
+            "id": conversation_id,
+            "messages": filtered_messages
+        }
+
+        # Add optional parameters if provided
+        if max_tokens is not None and max_tokens > 0:
+            payload["max_tokens"] = max_tokens
+
+        if temperature is not None:
+            payload["temperature"] = temperature
+
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        # Add any additional parameters
+        for key, value in kwargs.items():
+            if key not in payload:
+                payload[key] = value
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Track token usage across chunks
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            # Estimate prompt tokens based on message length
+            for msg in payload.get("messages", []):
+                prompt_tokens += count_tokens(msg.get("content", ""))
+
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+
+                        # Format the content (replace escaped newlines)
+                        content = self._client.format_text(content)
+
+                        # Update token counts
+                        completion_tokens += count_tokens(content)
+                        total_tokens = prompt_tokens + completion_tokens
+
+                        # Create the delta object
+                        delta = ChoiceDelta(
+                            content=content,
+                            role="assistant",
+                            tool_calls=None
+                        )
+
+                        # Create the choice object
+                        choice = Choice(
+                            index=0,
+                            delta=delta,
+                            finish_reason=None,
+                            logprobs=None
+                        )
+
+                        # Create the chunk object
+                        chunk = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                            system_fingerprint=None
+                        )
+
+                        # Convert chunk to dict using Pydantic's API
+                        if hasattr(chunk, "model_dump"):
+                            chunk_dict = chunk.model_dump(exclude_none=True)
+                        else:
+                            chunk_dict = chunk.dict(exclude_none=True)
+
+                        # Add usage information to match OpenAI format
+                        usage_dict = {
+                            "prompt_tokens": prompt_tokens,
+                            "completion_tokens": completion_tokens,
+                            "total_tokens": total_tokens,
+                            "estimated_cost": None
+                        }
+
+                        chunk_dict["usage"] = usage_dict
+
+                        # Return the chunk object for internal processing
+                        yield chunk
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            if hasattr(chunk, "model_dump"):
+                chunk_dict = chunk.model_dump(exclude_none=True)
+            else:
+                chunk_dict = chunk.dict(exclude_none=True)
+            chunk_dict["usage"] = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+
+            yield chunk
+
+        except Exception as e:
+            print(f"Error during ExaAI stream request: {e}")
+            raise IOError(f"ExaAI request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            # For non-streaming, we still use streaming internally to collect the full response
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+
+            # Handle non-200 responses
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            # Collect the full response
+            full_text = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        full_text += content
+
+            # Format the text (replace escaped newlines)
+            full_text = self._client.format_text(full_text)
+
+            # Estimate token counts
+            prompt_tokens = 0
+            for msg in payload.get("messages", []):
+                prompt_tokens += count_tokens(msg.get("content", ""))
+
+            completion_tokens = count_tokens(full_text)
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"Error during ExaAI non-stream request: {e}")
+            raise IOError(f"ExaAI request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'ExaAI'):
+        self.completions = Completions(client)
+
+class ExaAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for ExaAI API.
+
+    Usage:
+        client = ExaAI()
+        response = client.chat.completions.create(
+            model="O3-Mini",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+
+    Note:
+        ExaAI does not support system messages. Any system messages will be ignored.
+    """
+
+    AVAILABLE_MODELS = ["O3-Mini"]
+
+    def __init__(
+        self,
+        browser: str = "chrome"
+    ):
+        """
+        Initialize the ExaAI client.
+
+        Args:
+            browser: Browser to emulate in user agent
+        """
+        self.timeout = 60  # Default timeout in seconds
+        self.proxies = None  # Default proxies
+        self.api_endpoint = "https://o3minichat.exa.ai/api/chat"
+        self.session = requests.Session()
+
+        # Initialize LitAgent for user agent generation
+        agent = LitAgent()
+        self.fingerprint = agent.generate_fingerprint(browser)
+
+        # Headers for the request
+        self.headers = {
+            "authority": "o3minichat.exa.ai",
+            "accept": self.fingerprint["accept"],
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://o3minichat.exa.ai",
+            "priority": "u=1, i",
+            "referer": "https://o3minichat.exa.ai/",
+            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "sec-gpc": "1",
+            "user-agent": self.fingerprint["user_agent"]
+        }
+
+        self.session.headers.update(self.headers)
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    def format_text(self, text: str) -> str:
+        """
+        Format text by replacing escaped newlines with actual newlines.
+
+        Args:
+            text: Text to format
+
+        Returns:
+            Formatted text
+        """
+        # Use a more comprehensive approach to handle all escape sequences
+        try:
+            # First handle double backslashes to avoid issues
+            text = text.replace('\\\\', '\\')
+
+            # Handle common escape sequences
+            text = text.replace('\\n', '\n')
+            text = text.replace('\\r', '\r')
+            text = text.replace('\\t', '\t')
+            text = text.replace('\\"', '"')
+            text = text.replace("\\'", "'")
+
+            # Handle any remaining escape sequences using JSON decoding
+            # This is a fallback in case there are other escape sequences
+            try:
+                # Add quotes to make it a valid JSON string
+                json_str = f'"{text}"'
+                # Use json module to decode all escape sequences
+                decoded = json.loads(json_str)
+                return decoded
+            except json.JSONDecodeError:
+                # If JSON decoding fails, return the text with the replacements we've already done
+                return text
+        except Exception as e:
+            # If any error occurs, return the original text
+            print(f"Warning: Error formatting text: {e}")
+            return text
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Convert model names to ones supported by ExaAI.
+
+        Args:
+            model: Model name to convert (ignored as ExaAI only supports O3-Mini)
+
+        Returns:
+            ExaAI model name
+        """
+        # ExaAI only supports O3-Mini, regardless of the input model
+        print(f"Note: ExaAI only supports O3-Mini model. Ignoring provided model '{model}'.")
+        return "O3-Mini"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
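
The practical effect of this diff: in 8.3, timeout and proxies move from the ExaAI constructor to per-request arguments on create(), and usage estimates switch from whitespace splitting to count_tokens. Below is a minimal usage sketch of the new client under those assumptions; the import path is inferred from the file list above (webscout/Provider/OPENAI/exaai.py), not confirmed by the diff itself.

# Minimal usage sketch for the ExaAI client as of webscout 8.3 (assumed import path).
from webscout.Provider.OPENAI.exaai import ExaAI

client = ExaAI(browser="chrome")  # timeout/proxies are no longer constructor args; client defaults to 60s

# Non-streaming call: returns a ChatCompletion with estimated usage counts.
response = client.chat.completions.create(
    model="O3-Mini",  # the only model ExaAI exposes
    messages=[{"role": "user", "content": "Hello!"}],
    timeout=30,  # per-request override, new in 8.3
)
print(response.choices[0].message.content)

# Streaming call: yields ChatCompletionChunk objects until finish_reason="stop".
for chunk in client.chat.completions.create(
    model="O3-Mini",
    messages=[{"role": "user", "content": "Name three uses of a web scraper."}],
    stream=True,
):
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)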