webscout 8.2.8-py3-none-any.whl → 8.2.9-py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- webscout/AIauto.py +32 -14
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +153 -35
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +171 -81
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -735
- webscout/Provider/OPENAI/Cloudflare.py +7 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -5
- webscout/Provider/OPENAI/NEMOTRON.py +8 -20
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +5 -1
- webscout/Provider/OPENAI/ai4chat.py +40 -40
- webscout/Provider/OPENAI/api.py +808 -649
- webscout/Provider/OPENAI/c4ai.py +3 -3
- webscout/Provider/OPENAI/chatgpt.py +555 -555
- webscout/Provider/OPENAI/chatgptclone.py +493 -487
- webscout/Provider/OPENAI/chatsandbox.py +4 -3
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +5 -2
- webscout/Provider/OPENAI/e2b.py +63 -5
- webscout/Provider/OPENAI/exaai.py +416 -410
- webscout/Provider/OPENAI/exachat.py +444 -443
- webscout/Provider/OPENAI/freeaichat.py +2 -2
- webscout/Provider/OPENAI/glider.py +5 -2
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +308 -307
- webscout/Provider/OPENAI/mcpcore.py +8 -2
- webscout/Provider/OPENAI/multichat.py +4 -4
- webscout/Provider/OPENAI/netwrck.py +6 -5
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +15 -9
- webscout/Provider/OPENAI/sonus.py +304 -303
- webscout/Provider/OPENAI/standardinput.py +433 -433
- webscout/Provider/OPENAI/textpollinations.py +4 -4
- webscout/Provider/OPENAI/toolbaz.py +413 -413
- webscout/Provider/OPENAI/typefully.py +3 -3
- webscout/Provider/OPENAI/typegpt.py +11 -5
- webscout/Provider/OPENAI/uncovrAI.py +463 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +431 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +3 -3
- webscout/Provider/OPENAI/x0gpt.py +365 -378
- webscout/Provider/OPENAI/yep.py +39 -13
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -0
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/METADATA +159 -35
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/RECORD +116 -161
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/ExaChat.py
CHANGED
@@ -1,358 +1,358 @@
-from curl_cffi import CurlError
-from curl_cffi.requests import Session, Response  # Import Response
-import json
-import uuid
-from typing import Any, Dict, Union, Optional, List
-from datetime import datetime
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream  # Import sanitize_stream
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-# Model configurations
-MODEL_CONFIGS = {
-    "exaanswer": {
-        "endpoint": "https://ayle.chat/api/exaanswer",
-        "models": ["exaanswer"],
-    },
-    "gemini": {
-        "endpoint": "https://ayle.chat/api/gemini",
-        "models": [
-            "gemini-2.0-flash",
-            "gemini-2.0-flash-exp-image-generation",
-            "gemini-2.0-flash-thinking-exp-01-21",
-            "gemini-2.5-pro-exp-03-25",
-            "gemini-2.0-pro-exp-02-05",
-            "gemini-2.5-flash-preview-04-17",
-
-
-        ],
-    },
-    "openrouter": {
-        "endpoint": "https://ayle.chat/api/openrouter",
-        "models": [
-            "mistralai/mistral-small-3.1-24b-instruct:free",
-            "deepseek/deepseek-r1:free",
-            "deepseek/deepseek-chat-v3-0324:free",
-            "google/gemma-3-27b-it:free",
-            "meta-llama/llama-4-maverick:free",
-        ],
-    },
-    "groq": {
-        "endpoint": "https://ayle.chat/api/groq",
-        "models": [
-            "deepseek-r1-distill-llama-70b",
-            "deepseek-r1-distill-qwen-32b",
-            "gemma2-9b-it",
-            "llama-3.1-8b-instant",
-            "llama-3.2-1b-preview",
-            "llama-3.2-3b-preview",
-            "llama-3.2-90b-vision-preview",
-            "llama-3.3-70b-specdec",
-            "llama-3.3-70b-versatile",
-            "llama3-70b-8192",
-            "llama3-8b-8192",
-            "qwen-2.5-32b",
-            "qwen-2.5-coder-32b",
-            "qwen-qwq-32b",
-            "meta-llama/llama-4-scout-17b-16e-instruct"
-        ],
-    },
-    "cerebras": {
-        "endpoint": "https://ayle.chat/api/cerebras",
-        "models": [
-            "llama3.1-8b",
-            "llama-3.3-70b"
-        ],
-    },
-    "xai": {
-        "endpoint": "https://ayle.chat/api/xai",
-        "models": [
-            "grok-3-mini-beta"
-        ],
-    },
-}
-
-class ExaChat(Provider):
-    """
-    A class to interact with multiple AI APIs through the Exa Chat interface.
-    """
-    AVAILABLE_MODELS = [
-        # ExaAnswer Models
-        "exaanswer",
-
-        # XAI Models
-        "grok-3-mini-beta",
-
-        # Gemini Models
-        "gemini-2.0-flash",
-        "gemini-2.0-flash-exp-image-generation",
-        "gemini-2.0-flash-thinking-exp-01-21",
-        "gemini-2.5-pro-exp-03-25",
-        "gemini-2.0-pro-exp-02-05",
-        "gemini-2.5-flash-preview-04-17",
-
-        # OpenRouter Models
-        "mistralai/mistral-small-3.1-24b-instruct:free",
-        "deepseek/deepseek-r1:free",
-        "deepseek/deepseek-chat-v3-0324:free",
-        "google/gemma-3-27b-it:free",
-        "meta-llama/llama-4-maverick:free",
-
-        # Groq Models
-        "deepseek-r1-distill-llama-70b",
-        "deepseek-r1-distill-qwen-32b",
-        "gemma2-9b-it",
-        "llama-3.1-8b-instant",
-        "llama-3.2-1b-preview",
-        "llama-3.2-3b-preview",
-        "llama-3.2-90b-vision-preview",
-        "llama-3.3-70b-specdec",
-        "llama-3.3-70b-versatile",
-        "llama3-70b-8192",
-        "llama3-8b-8192",
-        "qwen-2.5-32b",
-        "qwen-2.5-coder-32b",
-        "qwen-qwq-32b",
-        "meta-llama/llama-4-scout-17b-16e-instruct",
-
-
-        # Cerebras Models
-        "llama3.1-8b",
-        "llama-3.3-70b",
-
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 4000,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "exaanswer",
-        system_prompt: str = "You are a friendly, helpful AI assistant.",
-        temperature: float = 0.5,
-        presence_penalty: int = 0,
-        frequency_penalty: int = 0,
-        top_p: float = 1
-    ):
-        """Initializes the ExaChat client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.session = Session()  # Use curl_cffi Session
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.system_prompt = system_prompt
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-
-        # Initialize LitAgent for user agent generation
-        self.agent = LitAgent()
-
-        self.headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "origin": "https://ayle.chat/",
-            "referer": "https://ayle.chat/",
-            "user-agent": self.agent.random(),
-        }
-
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies  # Assign proxies directly
-        self.session.cookies.update({"session": uuid.uuid4().hex})
-
-        self.__available_optimizers = (
-            method for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-        self.provider = self._get_provider_from_model(self.model)
-        self.model_name = self.model
-
-    def _get_endpoint(self) -> str:
-        """Get the API endpoint for the current provider."""
-        return MODEL_CONFIGS[self.provider]["endpoint"]
-
-    def _get_provider_from_model(self, model: str) -> str:
-        """Determine the provider based on the model name."""
-        for provider, config in MODEL_CONFIGS.items():
-            if model in config["models"]:
-                return provider
-
-        available_models = []
-        for provider, config in MODEL_CONFIGS.items():
-            for model_name in config["models"]:
-                available_models.append(f"{provider}/{model_name}")
-
-        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
-        raise ValueError(error_msg)
-
-    @staticmethod
-    def _exachat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from ExaChat stream JSON objects."""
-        if isinstance(chunk, dict):
-            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
-        return None
-
-    def _make_request(self, payload: Dict[str, Any]) -> Response:  # Change type hint to Response
-        """Make the API request with proper error handling."""
-        try:
-            response = self.session.post(
-                self._get_endpoint(),
-                headers=self.headers,
-                json=payload,
-                timeout=self.timeout,  # type: ignore
-                stream=True,  # Enable streaming for the request
-                impersonate="chrome120"  # Add impersonate
-            )
-            response.raise_for_status()
-            return response
-        except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e:  # Catch CurlError and others
-            raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
-
-    def _build_payload(self, conversation_prompt: str) -> Dict[str, Any]:
-        """Build the appropriate payload based on the provider."""
-        if self.provider == "exaanswer":
-            return {
-                "query": conversation_prompt,
-                "messages": []
-            }
-        elif self.provider == "gemini":
-            return {
-                "query": conversation_prompt,
-                "model": self.model,
-                "messages": []
-            }
-        elif self.provider == "cerebras":
-            return {
-                "query": conversation_prompt,
-                "model": self.model,
-                "messages": []
-            }
-        else:  # openrouter or groq
-            return {
-                "query": conversation_prompt + "\n",  # Add newline for openrouter and groq models
-                "model": self.model,
-                "messages": []
-            }
-
-    def ask(
-        self,
-        prompt: str,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Dict[str, Any]:
-        """Sends a prompt to the API and returns the response."""
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                error_msg = f"Optimizer is not one of {self.__available_optimizers}"
-                raise exceptions.FailedToGenerateResponseError(error_msg)
-
-        payload = self._build_payload(conversation_prompt)
-        response = self._make_request(payload)
-
-        try:
-            full_response = ""
-            # Use sanitize_stream to process the response
-            processed_stream = sanitize_stream(
-                data=response.iter_content(chunk_size=None),  # Pass byte iterator
-                intro_value=None,  # API doesn't seem to use 'data:' prefix
-                to_json=True,  # Stream sends JSON lines
-                content_extractor=self._exachat_extractor,  # Use the specific extractor
-                yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
-            )
-
-            for content_chunk in processed_stream:
-                # content_chunk is the string extracted by _exachat_extractor
-                if content_chunk and isinstance(content_chunk, str):
-                    full_response += content_chunk
-
-            self.last_response = {"text": full_response}
-            self.conversation.update_chat_history(prompt, full_response)
-            return self.last_response if not raw else full_response  # Return dict or raw string
-
-        except json.JSONDecodeError as e:
-            raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
-
-    def chat(
-        self,
-        prompt: str,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response."""
-        response = self.ask(
-            prompt, optimizer=optimizer, conversationally=conversationally
-        )
-        return self.get_message(response)
-
-    def get_message(self, response: Union[Dict[str, Any], str]) -> str:
-        """
-        Retrieves message from response.
-
-        Args:
-            response (Union[Dict[str, Any], str]): The response to extract the message from
-
-        Returns:
-            str: The extracted message text
-        """
-        if isinstance(response, dict):
-            return response.get("text", "")
-        return str(response)
-
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    # Test all available models
-    working = 0
-    total = len(ExaChat.AVAILABLE_MODELS)
-
-    for model in ExaChat.AVAILABLE_MODELS:
-        try:
-            test_ai = ExaChat(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
+from curl_cffi import CurlError
+from curl_cffi.requests import Session, Response  # Import Response
+import json
+import uuid
+from typing import Any, Dict, Union, Optional, List
+from datetime import datetime
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream  # Import sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+# Model configurations
+MODEL_CONFIGS = {
+    "exaanswer": {
+        "endpoint": "https://ayle.chat/api/exaanswer",
+        "models": ["exaanswer"],
+    },
+    "gemini": {
+        "endpoint": "https://ayle.chat/api/gemini",
+        "models": [
+            "gemini-2.0-flash",
+            "gemini-2.0-flash-exp-image-generation",
+            "gemini-2.0-flash-thinking-exp-01-21",
+            "gemini-2.5-pro-exp-03-25",
+            "gemini-2.0-pro-exp-02-05",
+            "gemini-2.5-flash-preview-04-17",
+
+
+        ],
+    },
+    "openrouter": {
+        "endpoint": "https://ayle.chat/api/openrouter",
+        "models": [
+            "mistralai/mistral-small-3.1-24b-instruct:free",
+            "deepseek/deepseek-r1:free",
+            "deepseek/deepseek-chat-v3-0324:free",
+            "google/gemma-3-27b-it:free",
+            "meta-llama/llama-4-maverick:free",
+        ],
+    },
+    "groq": {
+        "endpoint": "https://ayle.chat/api/groq",
+        "models": [
+            "deepseek-r1-distill-llama-70b",
+            "deepseek-r1-distill-qwen-32b",
+            "gemma2-9b-it",
+            "llama-3.1-8b-instant",
+            "llama-3.2-1b-preview",
+            "llama-3.2-3b-preview",
+            "llama-3.2-90b-vision-preview",
+            "llama-3.3-70b-specdec",
+            "llama-3.3-70b-versatile",
+            "llama3-70b-8192",
+            "llama3-8b-8192",
+            "qwen-2.5-32b",
+            "qwen-2.5-coder-32b",
+            "qwen-qwq-32b",
+            "meta-llama/llama-4-scout-17b-16e-instruct"
+        ],
+    },
+    "cerebras": {
+        "endpoint": "https://ayle.chat/api/cerebras",
+        "models": [
+            "llama3.1-8b",
+            "llama-3.3-70b"
+        ],
+    },
+    "xai": {
+        "endpoint": "https://ayle.chat/api/xai",
+        "models": [
+            "grok-3-mini-beta"
+        ],
+    },
+}
+
+class ExaChat(Provider):
+    """
+    A class to interact with multiple AI APIs through the Exa Chat interface.
+    """
+    AVAILABLE_MODELS = [
+        # ExaAnswer Models
+        "exaanswer",
+
+        # XAI Models
+        "grok-3-mini-beta",
+
+        # Gemini Models
+        "gemini-2.0-flash",
+        "gemini-2.0-flash-exp-image-generation",
+        "gemini-2.0-flash-thinking-exp-01-21",
+        "gemini-2.5-pro-exp-03-25",
+        "gemini-2.0-pro-exp-02-05",
+        "gemini-2.5-flash-preview-04-17",
+
+        # OpenRouter Models
+        "mistralai/mistral-small-3.1-24b-instruct:free",
+        "deepseek/deepseek-r1:free",
+        "deepseek/deepseek-chat-v3-0324:free",
+        "google/gemma-3-27b-it:free",
+        "meta-llama/llama-4-maverick:free",
+
+        # Groq Models
+        "deepseek-r1-distill-llama-70b",
+        "deepseek-r1-distill-qwen-32b",
+        "gemma2-9b-it",
+        "llama-3.1-8b-instant",
+        "llama-3.2-1b-preview",
+        "llama-3.2-3b-preview",
+        "llama-3.2-90b-vision-preview",
+        "llama-3.3-70b-specdec",
+        "llama-3.3-70b-versatile",
+        "llama3-70b-8192",
+        "llama3-8b-8192",
+        "qwen-2.5-32b",
+        "qwen-2.5-coder-32b",
+        "qwen-qwq-32b",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+
+
+        # Cerebras Models
+        "llama3.1-8b",
+        "llama-3.3-70b",
+
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 4000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "exaanswer",
+        system_prompt: str = "You are a friendly, helpful AI assistant.",
+        temperature: float = 0.5,
+        presence_penalty: int = 0,
+        frequency_penalty: int = 0,
+        top_p: float = 1
+    ):
+        """Initializes the ExaChat client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = Session()  # Use curl_cffi Session
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.temperature = temperature
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.top_p = top_p
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://ayle.chat/",
+            "referer": "https://ayle.chat/",
+            "user-agent": self.agent.random(),
+        }
+
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies  # Assign proxies directly
+        self.session.cookies.update({"session": uuid.uuid4().hex})
+
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        self.provider = self._get_provider_from_model(self.model)
+        self.model_name = self.model
+
+    def _get_endpoint(self) -> str:
+        """Get the API endpoint for the current provider."""
+        return MODEL_CONFIGS[self.provider]["endpoint"]
+
+    def _get_provider_from_model(self, model: str) -> str:
+        """Determine the provider based on the model name."""
+        for provider, config in MODEL_CONFIGS.items():
+            if model in config["models"]:
+                return provider
+
+        available_models = []
+        for provider, config in MODEL_CONFIGS.items():
+            for model_name in config["models"]:
+                available_models.append(f"{provider}/{model_name}")
+
+        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
+        raise ValueError(error_msg)
+
+    @staticmethod
+    def _exachat_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from ExaChat stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+        return None
+
+    def _make_request(self, payload: Dict[str, Any]) -> Response:  # Change type hint to Response
+        """Make the API request with proper error handling."""
+        try:
+            response = self.session.post(
+                self._get_endpoint(),
+                headers=self.headers,
+                json=payload,
+                timeout=self.timeout,  # type: ignore
+                stream=True,  # Enable streaming for the request
+                impersonate="chrome120"  # Add impersonate
+            )
+            response.raise_for_status()
+            return response
+        except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e:  # Catch CurlError and others
+            raise exceptions.FailedToGenerateResponseError(f"API request failed: {e}") from e
+
+    def _build_payload(self, conversation_prompt: str) -> Dict[str, Any]:
+        """Build the appropriate payload based on the provider."""
+        if self.provider == "exaanswer":
+            return {
+                "query": conversation_prompt,
+                "messages": []
+            }
+        elif self.provider == "gemini":
+            return {
+                "query": conversation_prompt,
+                "model": self.model,
+                "messages": []
+            }
+        elif self.provider == "cerebras":
+            return {
+                "query": conversation_prompt,
+                "model": self.model,
+                "messages": []
+            }
+        else:  # openrouter or groq
+            return {
+                "query": conversation_prompt + "\n",  # Add newline for openrouter and groq models
+                "model": self.model,
+                "messages": []
+            }
+
+    def ask(
+        self,
+        prompt: str,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """Sends a prompt to the API and returns the response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                error_msg = f"Optimizer is not one of {self.__available_optimizers}"
+                raise exceptions.FailedToGenerateResponseError(error_msg)
+
+        payload = self._build_payload(conversation_prompt)
+        response = self._make_request(payload)
+
+        try:
+            full_response = ""
+            # Use sanitize_stream to process the response
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                intro_value=None,  # API doesn't seem to use 'data:' prefix
+                to_json=True,  # Stream sends JSON lines
+                content_extractor=self._exachat_extractor,  # Use the specific extractor
+                yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
+            )
+
+            for content_chunk in processed_stream:
+                # content_chunk is the string extracted by _exachat_extractor
+                if content_chunk and isinstance(content_chunk, str):
+                    full_response += content_chunk
+
+            self.last_response = {"text": full_response}
+            self.conversation.update_chat_history(prompt, full_response)
+            return self.last_response if not raw else full_response  # Return dict or raw string
+
+        except json.JSONDecodeError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
+
+    def chat(
+        self,
+        prompt: str,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response."""
+        response = self.ask(
+            prompt, optimizer=optimizer, conversationally=conversationally
+        )
+        return self.get_message(response)
+
+    def get_message(self, response: Union[Dict[str, Any], str]) -> str:
+        """
+        Retrieves message from response.
+
+        Args:
+            response (Union[Dict[str, Any], str]): The response to extract the message from
+
+        Returns:
+            str: The extracted message text
+        """
+        if isinstance(response, dict):
+            return response.get("text", "")
+        return str(response)
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(ExaChat.AVAILABLE_MODELS)
+
+    for model in ExaChat.AVAILABLE_MODELS:
+        try:
+            test_ai = ExaChat(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
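For orientation, here is a minimal usage sketch of the `ExaChat` provider defined in the diff above. It is based solely on the class as it appears in this diff; the import path is inferred from the file location `webscout/Provider/ExaChat.py` and may differ from the package's public API, so treat it as an assumption.

```python
# Minimal usage sketch for the ExaChat provider shown in the diff above.
# Assumption: the class is importable from its file location
# (webscout/Provider/ExaChat.py); the package may also re-export it elsewhere.
from webscout.Provider.ExaChat import ExaChat

# Any entry of ExaChat.AVAILABLE_MODELS is accepted; "exaanswer" is the
# default, and an invalid name raises ValueError in __init__.
ai = ExaChat(model="gemini-2.0-flash", timeout=60)

# chat() wraps ask(), which streams the response via sanitize_stream and
# returns the accumulated text through get_message().
print(ai.chat("Say 'Hello' in one word"))
```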