webscout-8.2.8-py3-none-any.whl → webscout-8.2.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/exaai.py
@@ -1,411 +1,417 @@
- import time
- import uuid
- import requests
- import json
- import re
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     pass
-
- # --- ExaAI Client ---
-
- # ANSI escape codes for formatting
- BOLD = "\033[1m"
- RED = "\033[91m"
- RESET = "\033[0m"
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'ExaAI'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Filter out system messages and warn the user if any are present
-         filtered_messages = []
-         has_system_message = False
-
-         for msg in messages:
-             if msg["role"] == "system":
-                 has_system_message = True
-                 continue  # Skip system messages
-             filtered_messages.append(msg)
-
-         if has_system_message:
-             # Print warning in bold red
-             print(f"{BOLD}{RED}Warning: ExaAI does not support system messages, they will be ignored.{RESET}")
-
-         # If no messages left after filtering, raise an error
-         if not filtered_messages:
-             raise ValueError("At least one user message is required")
-
-         # Generate a unique ID for the conversation
-         conversation_id = uuid.uuid4().hex[:16]
-
-         # Prepare the payload for ExaAI API
-         payload = {
-             "id": conversation_id,
-             "messages": filtered_messages
-         }
-
-         # Add optional parameters if provided
-         if max_tokens is not None and max_tokens > 0:
-             payload["max_tokens"] = max_tokens
-
-         if temperature is not None:
-             payload["temperature"] = temperature
-
-         if top_p is not None:
-             payload["top_p"] = top_p
-
-         # Add any additional parameters
-         for key, value in kwargs.items():
-             if key not in payload:
-                 payload[key] = value
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, model, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             # Handle non-200 responses
-             if not response.ok:
-                 raise IOError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             # Track token usage across chunks
-             prompt_tokens = 0
-             completion_tokens = 0
-             total_tokens = 0
-
-             # Estimate prompt tokens based on message length
-             for msg in payload.get("messages", []):
-                 prompt_tokens += len(msg.get("content", "").split())
-
-             for line in response.iter_lines(decode_unicode=True):
-                 if line:
-                     match = re.search(r'0:"(.*?)"', line)
-                     if match:
-                         content = match.group(1)
-
-                         # Format the content (replace escaped newlines)
-                         content = self._client.format_text(content)
-
-                         # Update token counts
-                         completion_tokens += 1
-                         total_tokens = prompt_tokens + completion_tokens
-
-                         # Create the delta object
-                         delta = ChoiceDelta(
-                             content=content,
-                             role="assistant",
-                             tool_calls=None
-                         )
-
-                         # Create the choice object
-                         choice = Choice(
-                             index=0,
-                             delta=delta,
-                             finish_reason=None,
-                             logprobs=None
-                         )
-
-                         # Create the chunk object
-                         chunk = ChatCompletionChunk(
-                             id=request_id,
-                             choices=[choice],
-                             created=created_time,
-                             model=model,
-                             system_fingerprint=None
-                         )
-
-                         # Convert to dict for proper formatting
-                         chunk_dict = chunk.to_dict()
-
-                         # Add usage information to match OpenAI format
-                         usage_dict = {
-                             "prompt_tokens": prompt_tokens,
-                             "completion_tokens": completion_tokens,
-                             "total_tokens": total_tokens,
-                             "estimated_cost": None
-                         }
-
-                         chunk_dict["usage"] = usage_dict
-
-                         # Return the chunk object for internal processing
-                         yield chunk
-
-             # Final chunk with finish_reason="stop"
-             delta = ChoiceDelta(
-                 content=None,
-                 role=None,
-                 tool_calls=None
-             )
-
-             choice = Choice(
-                 index=0,
-                 delta=delta,
-                 finish_reason="stop",
-                 logprobs=None
-             )
-
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-
-             chunk_dict = chunk.to_dict()
-             chunk_dict["usage"] = {
-                 "prompt_tokens": prompt_tokens,
-                 "completion_tokens": completion_tokens,
-                 "total_tokens": total_tokens,
-                 "estimated_cost": None
-             }
-
-             yield chunk
-
-         except Exception as e:
-             print(f"Error during ExaAI stream request: {e}")
-             raise IOError(f"ExaAI request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         try:
-             # For non-streaming, we still use streaming internally to collect the full response
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             # Handle non-200 responses
-             if not response.ok:
-                 raise IOError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             # Collect the full response
-             full_text = ""
-             for line in response.iter_lines(decode_unicode=True):
-                 if line:
-                     match = re.search(r'0:"(.*?)"', line)
-                     if match:
-                         content = match.group(1)
-                         full_text += content
-
-             # Format the text (replace escaped newlines)
-             full_text = self._client.format_text(full_text)
-
-             # Estimate token counts
-             prompt_tokens = 0
-             for msg in payload.get("messages", []):
-                 prompt_tokens += len(msg.get("content", "").split())
-
-             completion_tokens = len(full_text.split())
-             total_tokens = prompt_tokens + completion_tokens
-
-             # Create the message object
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_text
-             )
-
-             # Create the choice object
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Create the usage object
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens
-             )
-
-             # Create the completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-
-             return completion
-
-         except Exception as e:
-             print(f"Error during ExaAI non-stream request: {e}")
-             raise IOError(f"ExaAI request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'ExaAI'):
-         self.completions = Completions(client)
-
- class ExaAI(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for ExaAI API.
-
-     Usage:
-         client = ExaAI()
-         response = client.chat.completions.create(
-             model="O3-Mini",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-
-     Note:
-         ExaAI does not support system messages. Any system messages will be ignored.
-     """
-
-     AVAILABLE_MODELS = ["O3-Mini"]
-
-     def __init__(
-         self,
-         timeout: Optional[int] = None,
-         browser: str = "chrome"
-     ):
-         """
-         Initialize the ExaAI client.
-
-         Args:
-             timeout: Request timeout in seconds (None for no timeout)
-             browser: Browser to emulate in user agent
-         """
-         self.timeout = timeout
-         self.api_endpoint = "https://o3minichat.exa.ai/api/chat"
-         self.session = requests.Session()
-
-         # Initialize LitAgent for user agent generation
-         agent = LitAgent()
-         self.fingerprint = agent.generate_fingerprint(browser)
-
-         # Headers for the request
-         self.headers = {
-             "authority": "o3minichat.exa.ai",
-             "accept": self.fingerprint["accept"],
-             "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": self.fingerprint["accept_language"],
-             "content-type": "application/json",
-             "dnt": "1",
-             "origin": "https://o3minichat.exa.ai",
-             "priority": "u=1, i",
-             "referer": "https://o3minichat.exa.ai/",
-             "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "same-origin",
-             "sec-gpc": "1",
-             "user-agent": self.fingerprint["user_agent"]
-         }
-
-         self.session.headers.update(self.headers)
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     def format_text(self, text: str) -> str:
-         """
-         Format text by replacing escaped newlines with actual newlines.
-
-         Args:
-             text: Text to format
-
-         Returns:
-             Formatted text
-         """
-         # Use a more comprehensive approach to handle all escape sequences
-         try:
-             # First handle double backslashes to avoid issues
-             text = text.replace('\\\\', '\\')
-
-             # Handle common escape sequences
-             text = text.replace('\\n', '\n')
-             text = text.replace('\\r', '\r')
-             text = text.replace('\\t', '\t')
-             text = text.replace('\\"', '"')
-             text = text.replace("\\'", "'")
-
-             # Handle any remaining escape sequences using JSON decoding
-             # This is a fallback in case there are other escape sequences
-             try:
-                 # Add quotes to make it a valid JSON string
-                 json_str = f'"{text}"'
-                 # Use json module to decode all escape sequences
-                 decoded = json.loads(json_str)
-                 return decoded
-             except json.JSONDecodeError:
-                 # If JSON decoding fails, return the text with the replacements we've already done
-                 return text
-         except Exception as e:
-             # If any error occurs, return the original text
-             print(f"Warning: Error formatting text: {e}")
-             return text
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Convert model names to ones supported by ExaAI.
-
-         Args:
-             model: Model name to convert (ignored as ExaAI only supports O3-Mini)
-
-         Returns:
-             ExaAI model name
-         """
-         # ExaAI only supports O3-Mini, regardless of the input model
-         print(f"Note: ExaAI only supports O3-Mini model. Ignoring provided model '{model}'.")
-         return "O3-Mini"
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
+ import time
+ import uuid
+ import requests
+ import json
+ import re
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, count_tokens
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     pass
+
+ # --- ExaAI Client ---
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'ExaAI'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Filter out system messages and warn the user if any are present
+         filtered_messages = []
+         has_system_message = False
+
+         for msg in messages:
+             if msg["role"] == "system":
+                 has_system_message = True
+                 continue  # Skip system messages
+             filtered_messages.append(msg)
+
+         if has_system_message:
+             # Print warning in bold red
+             print(f"{BOLD}{RED}Warning: ExaAI does not support system messages, they will be ignored.{RESET}")
+
+         # If no messages left after filtering, raise an error
+         if not filtered_messages:
+             raise ValueError("At least one user message is required")
+
+         # Generate a unique ID for the conversation
+         conversation_id = uuid.uuid4().hex[:16]
+
+         # Prepare the payload for ExaAI API
+         payload = {
+             "id": conversation_id,
+             "messages": filtered_messages
+         }
+
+         # Add optional parameters if provided
+         if max_tokens is not None and max_tokens > 0:
+             payload["max_tokens"] = max_tokens
+
+         if temperature is not None:
+             payload["temperature"] = temperature
+
+         if top_p is not None:
+             payload["top_p"] = top_p
+
+         # Add any additional parameters
+         for key, value in kwargs.items():
+             if key not in payload:
+                 payload[key] = value
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+
+             # Handle non-200 responses
+             if not response.ok:
+                 raise IOError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             # Track token usage across chunks
+             prompt_tokens = 0
+             completion_tokens = 0
+             total_tokens = 0
+
+             # Estimate prompt tokens based on message length
+             for msg in payload.get("messages", []):
+                 prompt_tokens += count_tokens(msg.get("content", ""))
+
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     match = re.search(r'0:"(.*?)"', line)
+                     if match:
+                         content = match.group(1)
+
+                         # Format the content (replace escaped newlines)
+                         content = self._client.format_text(content)
+
+                         # Update token counts
+                         completion_tokens += count_tokens(content)
+                         total_tokens = prompt_tokens + completion_tokens
+
+                         # Create the delta object
+                         delta = ChoiceDelta(
+                             content=content,
+                             role="assistant",
+                             tool_calls=None
+                         )
+
+                         # Create the choice object
+                         choice = Choice(
+                             index=0,
+                             delta=delta,
+                             finish_reason=None,
+                             logprobs=None
+                         )
+
+                         # Create the chunk object
+                         chunk = ChatCompletionChunk(
+                             id=request_id,
+                             choices=[choice],
+                             created=created_time,
+                             model=model,
+                             system_fingerprint=None
+                         )
+
+                         # Convert chunk to dict using Pydantic's API
+                         if hasattr(chunk, "model_dump"):
+                             chunk_dict = chunk.model_dump(exclude_none=True)
+                         else:
+                             chunk_dict = chunk.dict(exclude_none=True)
+
+                         # Add usage information to match OpenAI format
+                         usage_dict = {
+                             "prompt_tokens": prompt_tokens,
+                             "completion_tokens": completion_tokens,
+                             "total_tokens": total_tokens,
+                             "estimated_cost": None
+                         }
+
+                         chunk_dict["usage"] = usage_dict
+
+                         # Return the chunk object for internal processing
+                         yield chunk
+
+             # Final chunk with finish_reason="stop"
+             delta = ChoiceDelta(
+                 content=None,
+                 role=None,
+                 tool_calls=None
+             )
+
+             choice = Choice(
+                 index=0,
+                 delta=delta,
+                 finish_reason="stop",
+                 logprobs=None
+             )
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 system_fingerprint=None
+             )
+
+             if hasattr(chunk, "model_dump"):
+                 chunk_dict = chunk.model_dump(exclude_none=True)
+             else:
+                 chunk_dict = chunk.dict(exclude_none=True)
+             chunk_dict["usage"] = {
+                 "prompt_tokens": prompt_tokens,
+                 "completion_tokens": completion_tokens,
+                 "total_tokens": total_tokens,
+                 "estimated_cost": None
+             }
+
+             yield chunk
+
+         except Exception as e:
+             print(f"Error during ExaAI stream request: {e}")
+             raise IOError(f"ExaAI request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+     ) -> ChatCompletion:
+         try:
+             # For non-streaming, we still use streaming internally to collect the full response
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+
+             # Handle non-200 responses
+             if not response.ok:
+                 raise IOError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             # Collect the full response
+             full_text = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     match = re.search(r'0:"(.*?)"', line)
+                     if match:
+                         content = match.group(1)
+                         full_text += content
+
+             # Format the text (replace escaped newlines)
+             full_text = self._client.format_text(full_text)
+
+             # Estimate token counts
+             prompt_tokens = 0
+             for msg in payload.get("messages", []):
+                 prompt_tokens += count_tokens(msg.get("content", ""))
+
+             completion_tokens = count_tokens(full_text)
+             total_tokens = prompt_tokens + completion_tokens
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_text
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the usage object
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"Error during ExaAI non-stream request: {e}")
+             raise IOError(f"ExaAI request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'ExaAI'):
+         self.completions = Completions(client)
+
+ class ExaAI(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for ExaAI API.
+
+     Usage:
+         client = ExaAI()
+         response = client.chat.completions.create(
+             model="O3-Mini",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+
+     Note:
+         ExaAI does not support system messages. Any system messages will be ignored.
+     """
+
+     AVAILABLE_MODELS = ["O3-Mini"]
+
+     def __init__(
+         self,
+         timeout: Optional[int] = None,
+         browser: str = "chrome"
+     ):
+         """
+         Initialize the ExaAI client.
+
+         Args:
+             timeout: Request timeout in seconds (None for no timeout)
+             browser: Browser to emulate in user agent
+         """
+         self.timeout = timeout
+         self.api_endpoint = "https://o3minichat.exa.ai/api/chat"
+         self.session = requests.Session()
+
+         # Initialize LitAgent for user agent generation
+         agent = LitAgent()
+         self.fingerprint = agent.generate_fingerprint(browser)
+
+         # Headers for the request
+         self.headers = {
+             "authority": "o3minichat.exa.ai",
+             "accept": self.fingerprint["accept"],
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": self.fingerprint["accept_language"],
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://o3minichat.exa.ai",
+             "priority": "u=1, i",
+             "referer": "https://o3minichat.exa.ai/",
+             "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "sec-gpc": "1",
+             "user-agent": self.fingerprint["user_agent"]
+         }
+
+         self.session.headers.update(self.headers)
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def format_text(self, text: str) -> str:
+         """
+         Format text by replacing escaped newlines with actual newlines.
+
+         Args:
+             text: Text to format
+
+         Returns:
+             Formatted text
+         """
+         # Use a more comprehensive approach to handle all escape sequences
+         try:
+             # First handle double backslashes to avoid issues
+             text = text.replace('\\\\', '\\')
+
+             # Handle common escape sequences
+             text = text.replace('\\n', '\n')
+             text = text.replace('\\r', '\r')
+             text = text.replace('\\t', '\t')
+             text = text.replace('\\"', '"')
+             text = text.replace("\\'", "'")
+
+             # Handle any remaining escape sequences using JSON decoding
+             # This is a fallback in case there are other escape sequences
+             try:
+                 # Add quotes to make it a valid JSON string
+                 json_str = f'"{text}"'
+                 # Use json module to decode all escape sequences
+                 decoded = json.loads(json_str)
+                 return decoded
+             except json.JSONDecodeError:
+                 # If JSON decoding fails, return the text with the replacements we've already done
+                 return text
+         except Exception as e:
+             # If any error occurs, return the original text
+             print(f"Warning: Error formatting text: {e}")
+             return text
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Convert model names to ones supported by ExaAI.
+
+         Args:
+             model: Model name to convert (ignored as ExaAI only supports O3-Mini)
+
+         Returns:
+             ExaAI model name
+         """
+         # ExaAI only supports O3-Mini, regardless of the input model
+         print(f"Note: ExaAI only supports O3-Mini model. Ignoring provided model '{model}'.")
+         return "O3-Mini"
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+
          return _ModelList()
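
For orientation, ExaAI is one of webscout's OpenAI-compatible providers, so it is driven the same way as the official openai client. Below is a minimal usage sketch based on the class docstring above; the import path is assumed from the module's location in the file list (webscout/Provider/OPENAI/exaai.py) and is not confirmed by this diff.

# Minimal usage sketch for the ExaAI provider shown in this diff.
# Assumption: webscout 8.2.9 installed, class importable from its module path.
from webscout.Provider.OPENAI.exaai import ExaAI  # assumed import path

client = ExaAI(timeout=30)

# Non-streaming: returns a ChatCompletion object.
response = client.chat.completions.create(
    model="O3-Mini",  # the only model ExaAI serves
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
print(response.usage)  # prompt/completion/total token estimates

# Streaming: yields ChatCompletionChunk objects; the final chunk carries
# finish_reason="stop" and an empty content delta.
for chunk in client.chat.completions.create(
    model="O3-Mini",
    messages=[{"role": "user", "content": "Write a haiku."}],
    stream=True,
):
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)

Note that in 8.2.9 the usage figures come from the count_tokens helper newly imported from .utils, whereas 8.2.8 estimated prompt tokens with a whitespace split() and incremented completion_tokens by 1 per chunk, so reported token counts will differ between the two versions.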
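
The other behavioral change in this file is serialization: chunk.to_dict() is replaced by a hasattr check that prefers Pydantic v2's model_dump() and falls back to v1's dict(), which pairs with the new pydantic_imports.py module listed above. A stand-alone sketch of that pattern, with a hypothetical helper name:

from typing import Any, Dict

def dump_model(obj: Any) -> Dict[str, Any]:
    """Serialize a Pydantic v1 or v2 model to a plain dict."""
    if hasattr(obj, "model_dump"):
        return obj.model_dump(exclude_none=True)  # Pydantic v2
    return obj.dict(exclude_none=True)            # Pydantic v1

This keeps the provider working regardless of which Pydantic major version is installed alongside webscout.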