webscout 8.3.1__py3-none-any.whl → 8.3.3__py3-none-any.whl
This diff shows the changes between package versions as published to their public registries. It is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +180 -78
- webscout/Bing_search.py +417 -0
- webscout/Extra/gguf.py +706 -177
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/genspark_search.py +7 -7
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/ExaChat.py +84 -58
- webscout/Provider/GeminiProxy.py +140 -0
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/MCPCore.py +78 -75
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +34 -51
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
- webscout/Provider/OPENAI/GeminiProxy.py +328 -0
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +32 -29
- webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +17 -1
- webscout/Provider/OPENAI/autoproxy.py +1067 -39
- webscout/Provider/OPENAI/base.py +17 -76
- webscout/Provider/OPENAI/deepinfra.py +42 -108
- webscout/Provider/OPENAI/e2b.py +0 -1
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/mcpcore.py +109 -70
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/scirachat.py +59 -51
- webscout/Provider/OPENAI/toolbaz.py +3 -9
- webscout/Provider/OPENAI/typegpt.py +1 -1
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OPENAI/xenai.py +514 -0
- webscout/Provider/OPENAI/yep.py +8 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/bing.py +231 -0
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TTS/speechma.py +45 -39
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +78 -70
- webscout/Provider/TogetherAI.py +350 -0
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/XenAI.py +324 -0
- webscout/Provider/__init__.py +10 -5
- webscout/Provider/deepseek_assistant.py +378 -0
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +159 -96
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +41 -46
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +33 -17
- webscout/Provider/yep.py +79 -96
- webscout/auth/__init__.py +55 -0
- webscout/auth/api_key_manager.py +189 -0
- webscout/auth/auth_system.py +100 -0
- webscout/auth/config.py +76 -0
- webscout/auth/database.py +400 -0
- webscout/auth/exceptions.py +67 -0
- webscout/auth/middleware.py +248 -0
- webscout/auth/models.py +130 -0
- webscout/auth/providers.py +279 -0
- webscout/auth/rate_limiter.py +254 -0
- webscout/auth/request_models.py +127 -0
- webscout/auth/request_processing.py +226 -0
- webscout/auth/routes.py +550 -0
- webscout/auth/schemas.py +103 -0
- webscout/auth/server.py +367 -0
- webscout/client.py +121 -70
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/scout/core/scout.py +104 -26
- webscout/scout/element.py +139 -18
- webscout/swiftcli/core/cli.py +14 -3
- webscout/swiftcli/decorators/output.py +59 -9
- webscout/update_checker.py +31 -49
- webscout/version.py +1 -1
- webscout/webscout_search.py +4 -12
- webscout/webscout_search_async.py +3 -10
- webscout/yep_search.py +2 -11
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/OPENAI/api.py +0 -1320
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/AISEARCH/genspark_search.py CHANGED

```diff
@@ -201,12 +201,12 @@ class Genspark(AISearch):
                 json={},
                 stream=True,
                 timeout=self.timeout,
-            ) as
-                if not
+            ) as resp:
+                if not resp.ok:
                     raise exceptions.APIConnectionError(
-                        f"Failed to generate SearchResponse - ({
+                        f"Failed to generate SearchResponse - ({resp.status_code}, {resp.reason}) - {resp.text}"
                     )
-                for line in
+                for line in resp.iter_lines(decode_unicode=True):
                     if not line or not line.startswith("data: "):
                         continue
                     try:
@@ -287,7 +287,7 @@ class Genspark(AISearch):
                         yield processed_event_payload
                     except json.JSONDecodeError:
                         continue
-        except cloudscraper.exceptions as e:
+        except cloudscraper.exceptions.CloudflareException as e:
             raise exceptions.APIConnectionError(f"Request failed due to Cloudscraper issue: {e}")
         except requests.exceptions.RequestException as e:
             raise exceptions.APIConnectionError(f"Request failed: {e}")
@@ -315,8 +315,8 @@ if __name__ == "__main__":
     from rich import print
     ai = Genspark()
     try:
-
-        for chunk in
+        search_result_stream = ai.search(input(">>> "), stream=True, raw=False)
+        for chunk in search_result_stream:
             print(chunk, end="", flush=True)
     except KeyboardInterrupt:
         print("\nSearch interrupted by user.")
```

Removed lines shown blank or cut short, here and in the hunks below, are truncated in the registry's diff view.
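Worth calling out in the second hunk: the old `except cloudscraper.exceptions as e:` names the exceptions *module*, not an exception class, so Python would raise a `TypeError` the moment an error actually reached that clause. Catching the package's `CloudflareException` base class restores the intended handling. A minimal sketch of the corrected pattern, using a placeholder URL rather than the Genspark endpoint:

```python
import cloudscraper
from cloudscraper.exceptions import CloudflareException  # base class for Cloudflare errors

scraper = cloudscraper.create_scraper()
try:
    # Placeholder URL for illustration; the real endpoint lives inside the provider.
    resp = scraper.get("https://example.com/api")
    resp.raise_for_status()
except CloudflareException as e:
    # Cloudflare challenge / anti-bot failures land here
    print(f"Cloudscraper issue: {e}")
except Exception as e:
    print(f"Request failed: {e}")
```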
webscout/Provider/AISEARCH/stellar_search.py NEW (+132)

```python
import requests
import re
from typing import Dict, Optional, Generator, Union, Any
from webscout.AIbase import AISearch, SearchResponse
from webscout import exceptions
from webscout.litagent import LitAgent
from webscout.AIutel import sanitize_stream

class Stellar(AISearch):
    """AI Search provider for stellar.chatastra.ai"""
    def __init__(self, timeout: int = 30, proxies: Optional[dict] = None):
        self.api_endpoint = "https://stellar.chatastra.ai/search/x1GUVzl"
        self.timeout = timeout
        self.proxies = proxies
        self.session = requests.Session()
        self.headers = {
            "accept": "text/x-component",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "multipart/form-data; boundary=----WebKitFormBoundaryQsWD5Qs3QqDkNBPH",
            "dnt": "1",
            "next-action": "efc2643ed9bafe182a010b58ebea17f068ad3985",
            "next-router-state-tree": "%5B%22%22%2C%7B%22children%22%3A%5B%22__PAGE__%22%2C%7B%7D%2C%22%2F%22%2C%22refresh%22%5D%7D%2Cnull%2Cnull%2Ctrue%5D",
            "origin": "https://stellar.chatastra.ai",
            "priority": "u=1, i",
            "referer": "https://stellar.chatastra.ai/search/x1GUVzl",
            "sec-ch-ua": '"Microsoft Edge";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": LitAgent().random(),
            "cookie": "__client_uat=0; __client_uat_K90aduOv=0",
        }
        self.session.headers.update(self.headers)
        if proxies:
            self.session.proxies = proxies

    def _make_payload(self, prompt: str) -> bytes:  # This is a static payload for the demo; in production, generate dynamically as needed
        boundary = "----WebKitFormBoundaryQsWD5Qs3QqDkNBPH"
        parts = [
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"1\"\r\n\r\n{{\"id\":\"71bb616ba5b7cbcac2308fe0c249a9f2d51825b7\",\"bound\":null}}\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"2\"\r\n\r\n{{\"id\":\"8bcca1d0cb933b14fefde88dacb2865be3d1d525\",\"bound\":null}}\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_input\"\r\n\r\n{prompt}\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_id\"\r\n\r\nx1GUVzl\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_userId\"\r\n\r\nnull\r\n",
            f"--{boundary}\r\nContent-Disposition: form-data; name=\"0\"\r\n\r\n[{{\"action\":\"$F1\",\"options\":{{\"onSetAIState\":\"$F2\"}}}},{{\"messages\":[],\"chatId\":\"\"}},\"$K3\"]\r\n",
            f"--{boundary}--\r\n"
        ]
        return "".join(parts).encode("utf-8")

    @staticmethod
    def _stellar_extractor(chunk: Union[str, bytes, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from the Stellar stream format with hex keys and diff arrays. Handles both str and bytes input."""
        if isinstance(chunk, bytes):
            try:
                chunk = chunk.decode('utf-8', errors='replace')
            except Exception:
                return None
        if not isinstance(chunk, str):
            return None
        # Match patterns like 6e:{"diff":[0," empathy"],"next":"$@6f"}
        pattern = r'[0-9a-f]+:\{"diff":\[0,"([^"\\]*)"\]'
        matches = re.findall(pattern, chunk)
        if matches:
            extracted_text = ''.join(matches)
            # Fix escaped newlines
            extracted_text = extracted_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
            return extracted_text if extracted_text.strip() else None
        return None

    def search(self, prompt: str, stream: bool = False, raw: bool = False) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse, str], None, None]]:
        payload = self._make_payload(prompt)
        try:
            response = self.session.post(
                self.api_endpoint,
                data=payload,
                timeout=self.timeout,
                proxies=self.proxies,
                stream=stream,
            )
            if not response.ok:
                raise exceptions.APIConnectionError(f"Failed to get response: {response.status_code} {response.text}")

            def _yield_stream():
                # Use sanitize_stream for real-time extraction from the response iterator
                processed_stream = sanitize_stream(
                    data=response.iter_lines(decode_unicode=True),
                    intro_value=None,
                    to_json=False,
                    content_extractor=self._stellar_extractor
                )
                full_response = ""
                for content in processed_stream:
                    if content and isinstance(content, str):
                        full_response += content
                        if raw:
                            yield {"text": content}
                        else:
                            yield content
                # Do NOT yield SearchResponse(full_response) in streaming mode to avoid duplicate output

            if stream:
                return _yield_stream()
            else:
                # Use sanitize_stream for the full response text
                processed_stream = sanitize_stream(
                    data=response.text.splitlines(),
                    intro_value=None,
                    to_json=False,
                    content_extractor=self._stellar_extractor
                )
                full_response = ""
                for content in processed_stream:
                    if content and isinstance(content, str):
                        full_response += content
                if raw:
                    return {"text": full_response}
                else:
                    return SearchResponse(full_response)
        except requests.RequestException as e:
            raise exceptions.APIConnectionError(f"Request failed: {e}")

if __name__ == "__main__":
    from rich import print
    ai = Stellar()
    user_query = input(">>> ")
    response = ai.search(user_query, stream=True, raw=False)
    for chunk in response:
        print(chunk, end="", flush=True)
```
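The `_stellar_extractor` regex does the real work here: stellar.chatastra.ai streams Next.js server-action frames keyed by hex ids, with text deltas packed into `diff` arrays. A standalone sketch of how that pattern behaves, run against frames shaped like the code comment's example (the sample text is illustrative, not captured traffic):

```python
import re

# Same pattern used by Stellar._stellar_extractor
pattern = r'[0-9a-f]+:\{"diff":\[0,"([^"\\]*)"\]'

# Illustrative frames in the documented shape: 6e:{"diff":[0," empathy"],"next":"$@6f"}
chunk = '6e:{"diff":[0," empathy"],"next":"$@6f"}\n6f:{"diff":[0," wins"],"next":"$@70"}'

# Each frame contributes its captured delta; joining them rebuilds the streamed text
print(''.join(re.findall(pattern, chunk)))  # prints " empathy wins"
```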
webscout/Provider/ExaChat.py CHANGED

```diff
@@ -2,7 +2,7 @@ from curl_cffi import CurlError
 from curl_cffi.requests import Session, Response # Import Response
 import json
 import uuid
-from typing import Any, Dict, Union, Optional, List
+from typing import Any, Dict, Union, Optional, List, Generator
 from datetime import datetime
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
@@ -264,10 +264,11 @@ class ExaChat(Provider):
     def ask(
         self,
         prompt: str,
+        stream: bool = False,
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Sends a prompt to the API and returns the response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -281,78 +282,103 @@ class ExaChat(Provider):
 
         payload = self._build_payload(conversation_prompt)
         response = self._make_request(payload)
-
-
+        processed_stream = sanitize_stream(
+            data=response.iter_content(chunk_size=None),
+            intro_value=None,
+            to_json=True,
+            content_extractor=self._exachat_extractor,
+            yield_raw_on_error=False,
+            raw=raw
+        )
+        if stream:
+            streaming_text = ""
+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                if raw:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk)
+            self.last_response = {"text": streaming_text}
+            self.conversation.update_chat_history(prompt, streaming_text)
+        else:
             full_response = ""
-        # Use sanitize_stream to process the response
-        processed_stream = sanitize_stream(
-            data=response.iter_content(chunk_size=None), # Pass byte iterator
-            intro_value=None, # API doesn't seem to use 'data:' prefix
-            to_json=True, # Stream sends JSON lines
-            content_extractor=self._exachat_extractor, # Use the specific extractor
-            yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
-        )
-
             for content_chunk in processed_stream:
-            # content_chunk is the string extracted by _exachat_extractor
                 if content_chunk and isinstance(content_chunk, str):
-
-
+                    content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                    if raw:
+                        if content_chunk and isinstance(content_chunk, str):
+                            full_response += content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            full_response += content_chunk
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
-        return self.last_response if not raw else full_response
-
-        except json.JSONDecodeError as e:
-            raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
+            return self.last_response if not raw else full_response
 
     def chat(
         self,
         prompt: str,
+        stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-
-
-
-
-
-
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
+        def for_non_stream():
+            result = self.ask(
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
+            )
+            if raw:
+                return result if isinstance(result, str) else str(result)
+            return self.get_message(result)
+        return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: Union[Dict[str, Any], str]) -> str:
-        """
-        Retrieves message from response.
-
-        Args:
-            response (Union[Dict[str, Any], str]): The response to extract the message from
-
-        Returns:
-            str: The extracted message text
-        """
         if isinstance(response, dict):
-
-
+            text = response.get("text", "")
+        else:
+            text = str(response)
+        return text.replace('\\\\', '\\').replace('\\"', '"')
 
 if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
 
-    # Test all available models
-    working = 0
-    total = len(ExaChat.AVAILABLE_MODELS)
+    # # Test all available models
+    # working = 0
+    # total = len(ExaChat.AVAILABLE_MODELS)
 
-    for model in ExaChat.AVAILABLE_MODELS:
-
-
-
+    # for model in ExaChat.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = ExaChat(model=model, timeout=60)
+    #         response = test_ai.chat("Say 'Hello' in one word")
+    #         response_text = response
 
-
-
-
-
-
-
-
-
-
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #         else:
+    #             status = "✗"
+    #             display_text = "Empty or invalid response"
+    #         print(f"{model:<50} {status:<10} {display_text}")
+    #     except Exception as e:
+    #         print(f"{model:<50} {'✗':<10} {str(e)}")
+    from rich import print
+    ai = ExaChat(model="gemini-2.0-flash")
+    response = ai.chat("tell me a joke", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end='', flush=True)
```
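The net effect for callers: `chat()` now mirrors the other providers' streaming contract, returning a generator when `stream=True` and a plain string otherwise. A hedged usage sketch (the model name comes from the diff's own `__main__` block; the import path is assumed to follow the file layout):

```python
from webscout.Provider.ExaChat import ExaChat

ai = ExaChat(model="gemini-2.0-flash")  # model taken from the diff's example

# Streaming: chat() yields text chunks as they arrive
for chunk in ai.chat("tell me a joke", stream=True):
    print(chunk, end="", flush=True)

# Non-streaming: chat() returns the complete reply as one string
print(ai.chat("summarize that joke in five words"))
```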
webscout/Provider/GeminiProxy.py NEW (+140)

```python
from typing import Any, Dict, Optional, Union, Generator
import requests
import base64
from webscout.litagent import LitAgent
from webscout.AIutel import Optimizers, AwesomePrompts
from webscout.AIutel import Conversation
from webscout.AIbase import Provider
from webscout import exceptions

class GeminiProxy(Provider):
    """
    GeminiProxy is a provider class for interacting with the Gemini API via a proxy endpoint.
    """
    AVAILABLE_MODELS = [
        "gemini-2.0-flash-lite",
        "gemini-2.0-flash",
        "gemini-2.5-pro-preview-06-05",
        "gemini-2.5-pro-preview-05-06",
        "gemini-2.5-flash-preview-04-17",
        "gemini-2.5-flash-preview-05-20",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "gemini-2.0-flash-lite",
        system_prompt: str = "You are a helpful assistant.",
        browser: str = "chrome"
    ):
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
        self.base_url = "https://us-central1-infinite-chain-295909.cloudfunctions.net/gemini-proxy-staging-v1"
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)
        self.headers = self.fingerprint.copy()
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def get_image(self, img_url):
        try:
            response = self.session.get(img_url, stream=True, timeout=self.timeout)
            response.raise_for_status()
            mime_type = response.headers.get("content-type", "application/octet-stream")
            data = base64.b64encode(response.content).decode("utf-8")
            return {"mime_type": mime_type, "data": data}
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Error fetching image: {e}")

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        img_url: Optional[str] = None,
    ) -> Union[Dict[str, Any], Generator]:
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
        parts = []
        if img_url:
            parts.append({"inline_data": self.get_image(img_url)})
        parts.append({"text": conversation_prompt})
        request_data = {
            "model": self.model,
            "contents": [{"parts": parts}]
        }
        def for_non_stream():
            try:
                response = self.session.post(self.base_url, json=request_data, headers=self.headers, timeout=self.timeout)
                response.raise_for_status()
                data = response.json()
                self.last_response = data
                self.conversation.update_chat_history(prompt, self.get_message(data))
                return data
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Error during chat request: {e}")
        # Gemini proxy does not support streaming, so only non-stream
        return for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        img_url: Optional[str] = None,
    ) -> str:
        data = self.ask(prompt, stream=stream, optimizer=optimizer, conversationally=conversationally, img_url=img_url)
        return self.get_message(data)

    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        try:
            return response['candidates'][0]['content']['parts'][0]['text']
        except Exception:
            return str(response)

if __name__ == "__main__":
    ai = GeminiProxy(timeout=30, model="gemini-2.5-flash-preview-05-20")
    response = ai.chat("write a poem about AI")
    print(response)
```
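Since `ask()` downloads any `img_url`, base64-encodes it, and attaches it as an `inline_data` part alongside the text, the new provider can also serve simple vision prompts. A usage sketch under the same caveats (placeholder image URL; assumed import path; `stream=True` is accepted but ignored, as the proxy only returns non-streamed responses):

```python
from webscout.Provider.GeminiProxy import GeminiProxy

ai = GeminiProxy(model="gemini-2.0-flash")

# Text-only prompt
print(ai.chat("write a haiku about proxies"))

# Multimodal prompt: the image is fetched and sent as an inline_data part
# (placeholder URL for illustration)
print(ai.chat("describe this image", img_url="https://example.com/cat.jpg"))
```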