webscout-8.2.8-py3-none-any.whl → webscout-8.2.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. webscout/AIauto.py +32 -14
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +153 -35
  22. webscout/Provider/Deepinfra.py +339 -339
  23. webscout/Provider/ExaChat.py +358 -358
  24. webscout/Provider/Gemini.py +169 -169
  25. webscout/Provider/GithubChat.py +1 -2
  26. webscout/Provider/Glider.py +3 -3
  27. webscout/Provider/HeckAI.py +171 -81
  28. webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
  29. webscout/Provider/OPENAI/Cloudflare.py +7 -7
  30. webscout/Provider/OPENAI/FreeGemini.py +6 -5
  31. webscout/Provider/OPENAI/NEMOTRON.py +8 -20
  32. webscout/Provider/OPENAI/Qwen3.py +283 -0
  33. webscout/Provider/OPENAI/README.md +952 -1253
  34. webscout/Provider/OPENAI/TwoAI.py +357 -0
  35. webscout/Provider/OPENAI/__init__.py +5 -1
  36. webscout/Provider/OPENAI/ai4chat.py +40 -40
  37. webscout/Provider/OPENAI/api.py +808 -649
  38. webscout/Provider/OPENAI/c4ai.py +3 -3
  39. webscout/Provider/OPENAI/chatgpt.py +555 -555
  40. webscout/Provider/OPENAI/chatgptclone.py +493 -487
  41. webscout/Provider/OPENAI/chatsandbox.py +4 -3
  42. webscout/Provider/OPENAI/copilot.py +242 -0
  43. webscout/Provider/OPENAI/deepinfra.py +5 -2
  44. webscout/Provider/OPENAI/e2b.py +63 -5
  45. webscout/Provider/OPENAI/exaai.py +416 -410
  46. webscout/Provider/OPENAI/exachat.py +444 -443
  47. webscout/Provider/OPENAI/freeaichat.py +2 -2
  48. webscout/Provider/OPENAI/glider.py +5 -2
  49. webscout/Provider/OPENAI/groq.py +5 -2
  50. webscout/Provider/OPENAI/heckai.py +308 -307
  51. webscout/Provider/OPENAI/mcpcore.py +8 -2
  52. webscout/Provider/OPENAI/multichat.py +4 -4
  53. webscout/Provider/OPENAI/netwrck.py +6 -5
  54. webscout/Provider/OPENAI/oivscode.py +287 -0
  55. webscout/Provider/OPENAI/opkfc.py +496 -496
  56. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  57. webscout/Provider/OPENAI/scirachat.py +15 -9
  58. webscout/Provider/OPENAI/sonus.py +304 -303
  59. webscout/Provider/OPENAI/standardinput.py +433 -433
  60. webscout/Provider/OPENAI/textpollinations.py +4 -4
  61. webscout/Provider/OPENAI/toolbaz.py +413 -413
  62. webscout/Provider/OPENAI/typefully.py +3 -3
  63. webscout/Provider/OPENAI/typegpt.py +11 -5
  64. webscout/Provider/OPENAI/uncovrAI.py +463 -462
  65. webscout/Provider/OPENAI/utils.py +90 -79
  66. webscout/Provider/OPENAI/venice.py +431 -425
  67. webscout/Provider/OPENAI/wisecat.py +387 -381
  68. webscout/Provider/OPENAI/writecream.py +3 -3
  69. webscout/Provider/OPENAI/x0gpt.py +365 -378
  70. webscout/Provider/OPENAI/yep.py +39 -13
  71. webscout/Provider/TTI/README.md +55 -101
  72. webscout/Provider/TTI/__init__.py +4 -9
  73. webscout/Provider/TTI/aiarta.py +365 -0
  74. webscout/Provider/TTI/artbit.py +0 -0
  75. webscout/Provider/TTI/base.py +64 -0
  76. webscout/Provider/TTI/fastflux.py +200 -0
  77. webscout/Provider/TTI/magicstudio.py +201 -0
  78. webscout/Provider/TTI/piclumen.py +203 -0
  79. webscout/Provider/TTI/pixelmuse.py +225 -0
  80. webscout/Provider/TTI/pollinations.py +221 -0
  81. webscout/Provider/TTI/utils.py +11 -0
  82. webscout/Provider/TTS/__init__.py +2 -1
  83. webscout/Provider/TTS/base.py +159 -159
  84. webscout/Provider/TTS/openai_fm.py +129 -0
  85. webscout/Provider/TextPollinationsAI.py +308 -308
  86. webscout/Provider/TwoAI.py +239 -44
  87. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  88. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  89. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  90. webscout/Provider/Writecream.py +246 -246
  91. webscout/Provider/__init__.py +2 -0
  92. webscout/Provider/ai4chat.py +33 -8
  93. webscout/Provider/koala.py +169 -169
  94. webscout/Provider/oivscode.py +309 -0
  95. webscout/Provider/samurai.py +3 -2
  96. webscout/Provider/typegpt.py +3 -3
  97. webscout/Provider/uncovr.py +368 -368
  98. webscout/client.py +70 -0
  99. webscout/litprinter/__init__.py +58 -58
  100. webscout/optimizers.py +419 -419
  101. webscout/scout/README.md +3 -1
  102. webscout/scout/core/crawler.py +134 -64
  103. webscout/scout/core/scout.py +148 -109
  104. webscout/scout/element.py +106 -88
  105. webscout/swiftcli/Readme.md +323 -323
  106. webscout/swiftcli/plugins/manager.py +9 -2
  107. webscout/version.py +1 -1
  108. webscout/zeroart/__init__.py +134 -134
  109. webscout/zeroart/effects.py +100 -100
  110. webscout/zeroart/fonts.py +1238 -1238
  111. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
  112. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
  113. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
  114. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
  115. webscout/Litlogger/Readme.md +0 -175
  116. webscout/Litlogger/core/__init__.py +0 -6
  117. webscout/Litlogger/core/level.py +0 -23
  118. webscout/Litlogger/core/logger.py +0 -165
  119. webscout/Litlogger/handlers/__init__.py +0 -12
  120. webscout/Litlogger/handlers/console.py +0 -33
  121. webscout/Litlogger/handlers/file.py +0 -143
  122. webscout/Litlogger/handlers/network.py +0 -173
  123. webscout/Litlogger/styles/__init__.py +0 -7
  124. webscout/Litlogger/styles/colors.py +0 -249
  125. webscout/Litlogger/styles/formats.py +0 -458
  126. webscout/Litlogger/styles/text.py +0 -87
  127. webscout/Litlogger/utils/__init__.py +0 -6
  128. webscout/Litlogger/utils/detectors.py +0 -153
  129. webscout/Litlogger/utils/formatters.py +0 -200
  130. webscout/Provider/TTI/AiForce/README.md +0 -159
  131. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  132. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  133. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  134. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  135. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  136. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  137. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  138. webscout/Provider/TTI/ImgSys/README.md +0 -174
  139. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  140. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  141. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  142. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  143. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  144. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  145. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  146. webscout/Provider/TTI/Nexra/README.md +0 -155
  147. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  148. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  149. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  150. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  151. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  152. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  153. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  154. webscout/Provider/TTI/aiarta/README.md +0 -134
  155. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  156. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  157. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  158. webscout/Provider/TTI/artbit/README.md +0 -100
  159. webscout/Provider/TTI/artbit/__init__.py +0 -22
  160. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  161. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  162. webscout/Provider/TTI/fastflux/README.md +0 -129
  163. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  164. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  165. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  166. webscout/Provider/TTI/huggingface/README.md +0 -114
  167. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  168. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  169. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  170. webscout/Provider/TTI/piclumen/README.md +0 -161
  171. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  172. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  173. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  174. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  175. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  176. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  177. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  178. webscout/Provider/TTI/talkai/README.md +0 -139
  179. webscout/Provider/TTI/talkai/__init__.py +0 -4
  180. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  181. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  182. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  183. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
  184. {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/exachat.py
@@ -1,443 +1,444 @@
- import time
- import uuid
- import requests
- import json
- from typing import List, Dict, Optional, Union, Generator, Any
-
- from webscout.litagent import LitAgent
- from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
- from .utils import (
-     ChatCompletion,
-     ChatCompletionChunk,
-     Choice,
-     ChatCompletionMessage,
-     ChoiceDelta,
-     CompletionUsage,
-     format_prompt
- )
-
- # ANSI escape codes for formatting
- BOLD = "\033[1m"
- RED = "\033[91m"
- RESET = "\033[0m"
-
- # Model configurations
- MODEL_CONFIGS = {
-     "exaanswer": {
-         "endpoint": "https://ayle.chat/api/exaanswer",
-         "models": ["exaanswer"],
-     },
-     "gemini": {
-         "endpoint": "https://ayle.chat/api/gemini",
-         "models": [
-             "gemini-2.0-flash",
-             "gemini-2.0-flash-exp-image-generation",
-             "gemini-2.0-flash-thinking-exp-01-21",
-             "gemini-2.5-pro-exp-03-25",
-             "gemini-2.0-pro-exp-02-05",
-             "gemini-2.5-flash-preview-04-17",
-
-
-         ],
-     },
-     "openrouter": {
-         "endpoint": "https://ayle.chat/api/openrouter",
-         "models": [
-             "mistralai/mistral-small-3.1-24b-instruct:free",
-             "deepseek/deepseek-r1:free",
-             "deepseek/deepseek-chat-v3-0324:free",
-             "google/gemma-3-27b-it:free",
-             "meta-llama/llama-4-maverick:free",
-         ],
-     },
-     "groq": {
-         "endpoint": "https://ayle.chat/api/groq",
-         "models": [
-             "deepseek-r1-distill-llama-70b",
-             "deepseek-r1-distill-qwen-32b",
-             "gemma2-9b-it",
-             "llama-3.1-8b-instant",
-             "llama-3.2-1b-preview",
-             "llama-3.2-3b-preview",
-             "llama-3.2-90b-vision-preview",
-             "llama-3.3-70b-specdec",
-             "llama-3.3-70b-versatile",
-             "llama3-70b-8192",
-             "llama3-8b-8192",
-             "qwen-2.5-32b",
-             "qwen-2.5-coder-32b",
-             "qwen-qwq-32b",
-             "meta-llama/llama-4-scout-17b-16e-instruct"
-         ],
-     },
-     "cerebras": {
-         "endpoint": "https://ayle.chat/api/cerebras",
-         "models": [
-             "llama3.1-8b",
-             "llama-3.3-70b"
-         ],
-     },
-     "xai": {
-         "endpoint": "https://ayle.chat/api/xai",
-         "models": [
-             "grok-3-mini-beta"
-         ],
-     },
- }
-
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'ExaChat'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,  # Not used directly but kept for compatibility
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Format the messages using the format_prompt utility
-         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
-         question = format_prompt(messages, add_special_tokens=True, do_continue=True)
-
-         # Determine the provider based on the model
-         provider = self._client._get_provider_from_model(model)
-
-         # Build the appropriate payload based on the provider
-         if provider == "exaanswer":
-             payload = {
-                 "query": question,
-                 "messages": []
-             }
-         elif provider in ["gemini", "cerebras"]:
-             payload = {
-                 "query": question,
-                 "model": model,
-                 "messages": []
-             }
-         else:  # openrouter or groq
-             payload = {
-                 "query": question + "\n",  # Add newline for openrouter and groq models
-                 "model": model,
-                 "messages": []
-             }
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, provider, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, model, provider, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             endpoint = self._client._get_endpoint(provider)
-             response = self._client.session.post(
-                 endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             # Track token usage across chunks
-             completion_tokens = 0
-             streaming_text = ""
-
-             for line in response.iter_lines():
-                 if not line:
-                     continue
-
-                 try:
-                     data = json.loads(line.decode('utf-8'))
-                     if 'choices' in data and len(data['choices']) > 0:
-                         content = data['choices'][0].get('delta', {}).get('content', '')
-                         if content:
-                             streaming_text += content
-                             completion_tokens += len(content) // 4  # Rough estimate
-
-                             # Create a delta object for this chunk
-                             delta = ChoiceDelta(content=content)
-                             choice = Choice(index=0, delta=delta, finish_reason=None)
-
-                             chunk = ChatCompletionChunk(
-                                 id=request_id,
-                                 choices=[choice],
-                                 created=created_time,
-                                 model=model,
-                             )
-
-                             yield chunk
-                 except json.JSONDecodeError:
-                     continue
-
-             # Final chunk with finish_reason
-             delta = ChoiceDelta(content=None)
-             choice = Choice(index=0, delta=delta, finish_reason="stop")
-
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-             )
-
-             yield chunk
-
-         except requests.exceptions.RequestException as e:
-             print(f"{RED}Error during ExaChat stream request: {e}{RESET}")
-             raise IOError(f"ExaChat request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         try:
-             endpoint = self._client._get_endpoint(provider)
-             response = self._client.session.post(
-                 endpoint,
-                 headers=self._client.headers,
-                 json=payload,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             full_response = ""
-             for line in response.iter_lines():
-                 if line:
-                     try:
-                         data = json.loads(line.decode('utf-8'))
-                         if 'choices' in data and len(data['choices']) > 0:
-                             content = data['choices'][0].get('delta', {}).get('content', '')
-                             if content:
-                                 full_response += content
-                     except json.JSONDecodeError:
-                         continue
-
-             # Create usage statistics (estimated)
-             prompt_tokens = len(payload["query"]) // 4
-             completion_tokens = len(full_response) // 4
-             total_tokens = prompt_tokens + completion_tokens
-
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens
-             )
-
-             # Create the message object
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_response
-             )
-
-             # Create the choice object
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Create the completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-
-             return completion
-
-         except Exception as e:
-             print(f"{RED}Error during ExaChat non-stream request: {e}{RESET}")
-             raise IOError(f"ExaChat request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'ExaChat'):
-         self.completions = Completions(client)
-
- class ExaChat(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for ExaChat API.
-
-     Usage:
-         client = ExaChat()
-         response = client.chat.completions.create(
-             model="exaanswer",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-
-     AVAILABLE_MODELS = [
-         # ExaAnswer Models
-         "exaanswer",
-
-         # XAI Models
-         "grok-3-mini-beta",
-
-         # Gemini Models
-         "gemini-2.0-flash",
-         "gemini-2.0-flash-exp-image-generation",
-         "gemini-2.0-flash-thinking-exp-01-21",
-         "gemini-2.5-pro-exp-03-25",
-         "gemini-2.0-pro-exp-02-05",
-         "gemini-2.5-flash-preview-04-17",
-
-         # OpenRouter Models
-         "mistralai/mistral-small-3.1-24b-instruct:free",
-         "deepseek/deepseek-r1:free",
-         "deepseek/deepseek-chat-v3-0324:free",
-         "google/gemma-3-27b-it:free",
-         "meta-llama/llama-4-maverick:free",
-
-         # Groq Models
-         "deepseek-r1-distill-llama-70b",
-         "deepseek-r1-distill-qwen-32b",
-         "gemma2-9b-it",
-         "llama-3.1-8b-instant",
-         "llama-3.2-1b-preview",
-         "llama-3.2-3b-preview",
-         "llama-3.2-90b-vision-preview",
-         "llama-3.3-70b-specdec",
-         "llama-3.3-70b-versatile",
-         "llama3-70b-8192",
-         "llama3-8b-8192",
-         "qwen-2.5-32b",
-         "qwen-2.5-coder-32b",
-         "qwen-qwq-32b",
-         "meta-llama/llama-4-scout-17b-16e-instruct",
-
-
-         # Cerebras Models
-         "llama3.1-8b",
-         "llama-3.3-70b",
-
-     ]
-
-     def __init__(
-         self,
-         timeout: int = 30,
-         temperature: float = 0.5,
-         top_p: float = 1.0
-     ):
-         """
-         Initialize the ExaChat client.
-
-         Args:
-             timeout: Request timeout in seconds.
-             temperature: Temperature for response generation.
-             top_p: Top-p sampling parameter.
-         """
-         self.timeout = timeout
-         self.temperature = temperature
-         self.top_p = top_p
-
-         # Initialize LitAgent for user agent generation
-         agent = LitAgent()
-
-         self.headers = {
-             "accept": "*/*",
-             "accept-language": "en-US,en;q=0.9",
-             "content-type": "application/json",
-             "origin": "https://ayle.chat/",
-             "referer": "https://ayle.chat//",
-             "user-agent": agent.random(),
-         }
-
-         self.session = requests.Session()
-         self.session.headers.update(self.headers)
-         self.session.cookies.update({"session": uuid.uuid4().hex})
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
-     def _get_endpoint(self, provider: str) -> str:
-         """Get the API endpoint for the specified provider."""
-         return MODEL_CONFIGS[provider]["endpoint"]
-
-     def _get_provider_from_model(self, model: str) -> str:
-         """Determine the provider based on the model name."""
-         for provider, config in MODEL_CONFIGS.items():
-             if model in config["models"]:
-                 return provider
-
-         # If model not found, use a default model
-         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
-         return "exaanswer"
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Ensure the model name is in the correct format.
-         """
-         if model in self.AVAILABLE_MODELS:
-             return model
-
-         # Try to find a matching model
-         for available_model in self.AVAILABLE_MODELS:
-             if model.lower() in available_model.lower():
-                 return available_model
-
-         # Default to exaanswer if no match
-         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
-         return "exaanswer"
-
-
- # Simple test if run directly
- if __name__ == "__main__":
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     # Test a subset of models to avoid excessive API calls
-     test_models = [
-         "exaanswer",
-         "gemini-2.0-flash",
-         "deepseek/deepseek-r1:free",
-         "llama-3.1-8b-instant",
-         "llama3.1-8b"
-     ]
-
-     for model in test_models:
-         try:
-             client = ExaChat(timeout=60)
-             # Test with a simple conversation to demonstrate format_prompt usage
-             response = client.chat.completions.create(
-                 model=model,
-                 messages=[
-                     {"role": "system", "content": "You are a helpful assistant."},
-                     {"role": "user", "content": "Say 'Hello' in one word"},
-                 ],
-                 stream=False
-             )
-
-             if response and response.choices and response.choices[0].message.content:
-                 status = "✓"
-                 # Truncate response if too long
-                 display_text = response.choices[0].message.content.strip()
-                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-             print(f"{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"{model:<50} {'✗':<10} {str(e)}")
+ import time
+ import uuid
+ import requests
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.litagent import LitAgent
+ from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+ from .utils import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     Choice,
+     ChatCompletionMessage,
+     ChoiceDelta,
+     CompletionUsage,
+     format_prompt,
+     count_tokens
+ )
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ # Model configurations
+ MODEL_CONFIGS = {
+     "exaanswer": {
+         "endpoint": "https://ayle.chat/api/exaanswer",
+         "models": ["exaanswer"],
+     },
+     "gemini": {
+         "endpoint": "https://ayle.chat/api/gemini",
+         "models": [
+             "gemini-2.0-flash",
+             "gemini-2.0-flash-exp-image-generation",
+             "gemini-2.0-flash-thinking-exp-01-21",
+             "gemini-2.5-pro-exp-03-25",
+             "gemini-2.0-pro-exp-02-05",
+             "gemini-2.5-flash-preview-04-17",
+
+
+         ],
+     },
+     "openrouter": {
+         "endpoint": "https://ayle.chat/api/openrouter",
+         "models": [
+             "mistralai/mistral-small-3.1-24b-instruct:free",
+             "deepseek/deepseek-r1:free",
+             "deepseek/deepseek-chat-v3-0324:free",
+             "google/gemma-3-27b-it:free",
+             "meta-llama/llama-4-maverick:free",
+         ],
+     },
+     "groq": {
+         "endpoint": "https://ayle.chat/api/groq",
+         "models": [
+             "deepseek-r1-distill-llama-70b",
+             "deepseek-r1-distill-qwen-32b",
+             "gemma2-9b-it",
+             "llama-3.1-8b-instant",
+             "llama-3.2-1b-preview",
+             "llama-3.2-3b-preview",
+             "llama-3.2-90b-vision-preview",
+             "llama-3.3-70b-specdec",
+             "llama-3.3-70b-versatile",
+             "llama3-70b-8192",
+             "llama3-8b-8192",
+             "qwen-2.5-32b",
+             "qwen-2.5-coder-32b",
+             "qwen-qwq-32b",
+             "meta-llama/llama-4-scout-17b-16e-instruct"
+         ],
+     },
+     "cerebras": {
+         "endpoint": "https://ayle.chat/api/cerebras",
+         "models": [
+             "llama3.1-8b",
+             "llama-3.3-70b"
+         ],
+     },
+     "xai": {
+         "endpoint": "https://ayle.chat/api/xai",
+         "models": [
+             "grok-3-mini-beta"
+         ],
+     },
+ }
+
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'ExaChat'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,  # Not used directly but kept for compatibility
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Format the messages using the format_prompt utility
+         # This creates a conversation in the format: "User: message\nAssistant: response\nUser: message\nAssistant:"
+         question = format_prompt(messages, add_special_tokens=True, do_continue=True)
+
+         # Determine the provider based on the model
+         provider = self._client._get_provider_from_model(model)
+
+         # Build the appropriate payload based on the provider
+         if provider == "exaanswer":
+             payload = {
+                 "query": question,
+                 "messages": []
+             }
+         elif provider in ["gemini", "cerebras"]:
+             payload = {
+                 "query": question,
+                 "model": model,
+                 "messages": []
+             }
+         else:  # openrouter or groq
+             payload = {
+                 "query": question + "\n",  # Add newline for openrouter and groq models
+                 "model": model,
+                 "messages": []
+             }
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, provider, payload)
+         else:
+             return self._create_non_stream(request_id, created_time, model, provider, payload)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             endpoint = self._client._get_endpoint(provider)
+             response = self._client.session.post(
+                 endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self._client.timeout
+             )
+             response.raise_for_status()
+
+             # Track token usage across chunks
+             completion_tokens = 0
+             streaming_text = ""
+
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 try:
+                     data = json.loads(line.decode('utf-8'))
+                     if 'choices' in data and len(data['choices']) > 0:
+                         content = data['choices'][0].get('delta', {}).get('content', '')
+                         if content:
+                             streaming_text += content
+                             completion_tokens += count_tokens(content)
+
+                             # Create a delta object for this chunk
+                             delta = ChoiceDelta(content=content)
+                             choice = Choice(index=0, delta=delta, finish_reason=None)
+
+                             chunk = ChatCompletionChunk(
+                                 id=request_id,
+                                 choices=[choice],
+                                 created=created_time,
+                                 model=model,
+                             )
+
+                             yield chunk
+                 except json.JSONDecodeError:
+                     continue
+
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+
+             yield chunk
+
+         except requests.exceptions.RequestException as e:
+             print(f"{RED}Error during ExaChat stream request: {e}{RESET}")
+             raise IOError(f"ExaChat request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
+     ) -> ChatCompletion:
+         try:
+             endpoint = self._client._get_endpoint(provider)
+             response = self._client.session.post(
+                 endpoint,
+                 headers=self._client.headers,
+                 json=payload,
+                 timeout=self._client.timeout
+             )
+             response.raise_for_status()
+
+             full_response = ""
+             for line in response.iter_lines():
+                 if line:
+                     try:
+                         data = json.loads(line.decode('utf-8'))
+                         if 'choices' in data and len(data['choices']) > 0:
+                             content = data['choices'][0].get('delta', {}).get('content', '')
+                             if content:
+                                 full_response += content
+                     except json.JSONDecodeError:
+                         continue
+
+             # Create usage statistics (estimated)
+             prompt_tokens = count_tokens(payload.get("query", ""))
+             completion_tokens = count_tokens(full_response)
+             total_tokens = prompt_tokens + completion_tokens
+
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_response
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"{RED}Error during ExaChat non-stream request: {e}{RESET}")
+             raise IOError(f"ExaChat request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'ExaChat'):
+         self.completions = Completions(client)
+
+ class ExaChat(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for ExaChat API.
+
+     Usage:
+         client = ExaChat()
+         response = client.chat.completions.create(
+             model="exaanswer",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         # ExaAnswer Models
+         "exaanswer",
+
+         # XAI Models
+         "grok-3-mini-beta",
+
+         # Gemini Models
+         "gemini-2.0-flash",
+         "gemini-2.0-flash-exp-image-generation",
+         "gemini-2.0-flash-thinking-exp-01-21",
+         "gemini-2.5-pro-exp-03-25",
+         "gemini-2.0-pro-exp-02-05",
+         "gemini-2.5-flash-preview-04-17",
+
+         # OpenRouter Models
+         "mistralai/mistral-small-3.1-24b-instruct:free",
+         "deepseek/deepseek-r1:free",
+         "deepseek/deepseek-chat-v3-0324:free",
+         "google/gemma-3-27b-it:free",
+         "meta-llama/llama-4-maverick:free",
+
+         # Groq Models
+         "deepseek-r1-distill-llama-70b",
+         "deepseek-r1-distill-qwen-32b",
+         "gemma2-9b-it",
+         "llama-3.1-8b-instant",
+         "llama-3.2-1b-preview",
+         "llama-3.2-3b-preview",
+         "llama-3.2-90b-vision-preview",
+         "llama-3.3-70b-specdec",
+         "llama-3.3-70b-versatile",
+         "llama3-70b-8192",
+         "llama3-8b-8192",
+         "qwen-2.5-32b",
+         "qwen-2.5-coder-32b",
+         "qwen-qwq-32b",
+         "meta-llama/llama-4-scout-17b-16e-instruct",
+
+
+         # Cerebras Models
+         "llama3.1-8b",
+         "llama-3.3-70b",
+
+     ]
+
+     def __init__(
+         self,
+         timeout: int = 30,
+         temperature: float = 0.5,
+         top_p: float = 1.0
+     ):
+         """
+         Initialize the ExaChat client.
+
+         Args:
+             timeout: Request timeout in seconds.
+             temperature: Temperature for response generation.
+             top_p: Top-p sampling parameter.
+         """
+         self.timeout = timeout
+         self.temperature = temperature
+         self.top_p = top_p
+
+         # Initialize LitAgent for user agent generation
+         agent = LitAgent()
+
+         self.headers = {
+             "accept": "*/*",
+             "accept-language": "en-US,en;q=0.9",
+             "content-type": "application/json",
+             "origin": "https://ayle.chat/",
+             "referer": "https://ayle.chat//",
+             "user-agent": agent.random(),
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.cookies.update({"session": uuid.uuid4().hex})
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
+     def _get_endpoint(self, provider: str) -> str:
+         """Get the API endpoint for the specified provider."""
+         return MODEL_CONFIGS[provider]["endpoint"]
+
+     def _get_provider_from_model(self, model: str) -> str:
+         """Determine the provider based on the model name."""
+         for provider, config in MODEL_CONFIGS.items():
+             if model in config["models"]:
+                 return provider
+
+         # If model not found, use a default model
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
+         return "exaanswer"
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensure the model name is in the correct format.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+
+         # Try to find a matching model
+         for available_model in self.AVAILABLE_MODELS:
+             if model.lower() in available_model.lower():
+                 return available_model
+
+         # Default to exaanswer if no match
+         print(f"{BOLD}Warning: Model '{model}' not found, using default model 'exaanswer'{RESET}")
+         return "exaanswer"
+
+
+ # Simple test if run directly
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test a subset of models to avoid excessive API calls
+     test_models = [
+         "exaanswer",
+         "gemini-2.0-flash",
+         "deepseek/deepseek-r1:free",
+         "llama-3.1-8b-instant",
+         "llama3.1-8b"
+     ]
+
+     for model in test_models:
+         try:
+             client = ExaChat(timeout=60)
+             # Test with a simple conversation to demonstrate format_prompt usage
+             response = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": "You are a helpful assistant."},
+                     {"role": "user", "content": "Say 'Hello' in one word"},
+                 ],
+                 stream=False
+             )
+
+             if response and response.choices and response.choices[0].message.content:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response.choices[0].message.content.strip()
+                 display_text = display_text[:50] + "..." if len(display_text) > 50 else display_text
+             else:
+                 status = ""
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")
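The functional change in this file is the move from the rough `len(text) // 4` token estimate (removed lines) to the `count_tokens` helper now imported from `.utils` (added lines). As a minimal sketch of what that difference means, the stand-in below mirrors both behaviours; it is hypothetical, for illustration only, since the real `count_tokens` lives in `webscout/Provider/OPENAI/utils.py` and its exact counting method is not shown in this diff:

    # Hypothetical stand-in for webscout's count_tokens, for illustration only.
    def count_tokens(text: str) -> int:
        try:
            import tiktoken  # assumption: a tokenizer-backed count; not a stated webscout dependency
            return len(tiktoken.get_encoding("cl100k_base").encode(text))
        except ImportError:
            return len(text) // 4  # the old 8.2.8 heuristic: roughly 4 characters per token

Note also that 8.2.9 computes `prompt_tokens` from `payload.get("query", "")` rather than `payload["query"]`, so a payload without a "query" key no longer raises KeyError during usage accounting. And since `create(..., stream=True)` returns a `Generator[ChatCompletionChunk, None, None]`, a typical consumer loop looks like the sketch below (model availability against the ayle.chat endpoints is not guaranteed):

    from webscout.Provider.OPENAI.exachat import ExaChat

    client = ExaChat(timeout=60)
    # Each chunk carries one delta; the final chunk has finish_reason == "stop"
    # and a None content, which the `if piece` guard skips.
    for chunk in client.chat.completions.create(
        model="gemini-2.0-flash",
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,
    ):
        piece = chunk.choices[0].delta.content
        if piece:
            print(piece, end="", flush=True)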