webscout-8.2.4-py3-none-any.whl → webscout-8.2.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +112 -22
- webscout/AIutel.py +240 -344
- webscout/Extra/autocoder/autocoder.py +66 -5
- webscout/Extra/gguf.py +2 -0
- webscout/Provider/AISEARCH/scira_search.py +3 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +64 -67
- webscout/Provider/ChatGPTClone.py +33 -34
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +69 -56
- webscout/Provider/ElectronHub.py +48 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +24 -18
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +285 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +48 -20
- webscout/Provider/HeckAI.py +18 -36
- webscout/Provider/Jadve.py +30 -37
- webscout/Provider/LambdaChat.py +36 -59
- webscout/Provider/MCPCore.py +18 -21
- webscout/Provider/Marcus.py +23 -14
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +35 -26
- webscout/Provider/OPENAI/__init__.py +1 -1
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/scirachat.py +3 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/PI.py +22 -13
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +24 -12
- webscout/Provider/TextPollinationsAI.py +78 -76
- webscout/Provider/TwoAI.py +120 -88
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +24 -22
- webscout/Provider/VercelAI.py +31 -12
- webscout/Provider/WiseCat.py +1 -1
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/__init__.py +11 -13
- webscout/Provider/ai4chat.py +5 -3
- webscout/Provider/akashgpt.py +59 -66
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/elmo.py +38 -32
- webscout/Provider/freeaichat.py +57 -43
- webscout/Provider/granite.py +24 -21
- webscout/Provider/hermes.py +27 -20
- webscout/Provider/learnfastai.py +25 -20
- webscout/Provider/llmchatco.py +48 -78
- webscout/Provider/multichat.py +13 -3
- webscout/Provider/scira_chat.py +50 -30
- webscout/Provider/scnet.py +27 -21
- webscout/Provider/searchchat.py +16 -24
- webscout/Provider/sonus.py +37 -39
- webscout/Provider/toolbaz.py +24 -46
- webscout/Provider/turboseek.py +37 -41
- webscout/Provider/typefully.py +30 -22
- webscout/Provider/typegpt.py +47 -51
- webscout/Provider/uncovr.py +46 -40
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +305 -448
- webscout/exceptions.py +3 -0
- webscout/swiftcli/__init__.py +80 -794
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/METADATA +166 -45
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/RECORD +89 -89
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/WHEEL +1 -1
- webscout-8.2.6.dist-info/entry_points.txt +3 -0
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- inferno/lol.py +0 -589
- webscout/LLM.py +0 -442
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/PizzaGPT.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/tutorai.py +0 -270
- webscout-8.2.4.dist-info/entry_points.txt +0 -5
- {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/GithubChat.py
CHANGED
@@ -1,12 +1,13 @@
-import
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 import json
 import time
 from typing import Any, Dict, List, Optional, Union, Generator
 
 from webscout.AIutel import Conversation
 from webscout.AIutel import Optimizers
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
 
@@ -54,7 +55,7 @@ class GithubChat(Provider):
         self.url = "https://github.com/copilot"
         self.api_url = "https://api.individual.githubcopilot.com"
         self.cookie_path = cookie_path
-        self.session =
+        self.session = Session() # Use curl_cffi Session
         self.session.proxies.update(proxies)
 
         # Load cookies for authentication
@@ -158,8 +159,15 @@ class GithubChat(Provider):
 
             return self._access_token
 
-        except
-
+        except:
+            pass
+
+    @staticmethod
+    def _github_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from GitHub Copilot stream JSON objects."""
+        if isinstance(chunk, dict) and chunk.get("type") == "content":
+            return chunk.get("body")
+        return None
 
     def create_conversation(self):
         """Create a new conversation with GitHub Copilot."""
@@ -173,7 +181,10 @@ class GithubChat(Provider):
         headers["Authorization"] = f"GitHub-Bearer {access_token}"
 
         try:
-            response = self.session.post(
+            response = self.session.post(
+                url, headers=headers,
+                impersonate="chrome120" # Add impersonate
+            )
 
             if response.status_code == 401:
                 # Token might be expired, try refreshing
@@ -181,7 +192,10 @@ class GithubChat(Provider):
                 access_token = self.get_access_token()
                 headers["Authorization"] = f"GitHub-Bearer {access_token}"
                 response = self.session.post(url, headers=headers)
-
+
+            # Check status after potential retry
+            response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
+
             if response.status_code not in [200, 201]:
                 raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation: {response.status_code}")
 
@@ -192,40 +206,8 @@ class GithubChat(Provider):
                 raise exceptions.FailedToGenerateResponseError("Failed to extract conversation ID from response")
 
             return self._conversation_id
-
-        except requests.exceptions.RequestException as e:
+        except (CurlError, exceptions.FailedToGenerateResponseError, Exception) as e: # Catch CurlError and others
             raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation: {str(e)}")
-
-    def process_response(self, response, prompt: str):
-        """Process streaming response and extract content."""
-        full_text = ""
-
-        for line in response.iter_lines(decode_unicode=True):
-            if not line or not line.startswith("data: "):
-                continue
-
-            try:
-                # Parse each line (remove "data: " prefix)
-                json_str = line[6:]
-                if json_str == "[DONE]":
-                    break
-
-                data = json.loads(json_str)
-
-                # Handle different response types
-                if data.get("type") == "content":
-                    token = data.get("body", "")
-                    full_text += token
-                    resp = {"text": token}
-                    yield resp
-
-            except json.JSONDecodeError:
-                continue
-
-        # Update conversation history only for saving to file if needed
-        if full_text:
-            self.last_response = {"text": full_text}
-            self.conversation.update_chat_history(prompt, full_text)
 
     def ask(
         self,
@@ -275,12 +257,14 @@ class GithubChat(Provider):
             "mode": "immersive"
         }
 
+        streaming_text = "" # Initialize for history update
         def for_stream():
+            nonlocal streaming_text # Allow modification of outer scope variable
            try:
                response = self.session.post(
                    url,
                    json=request_data,
-                    headers=headers,
+                    headers=headers, # Use updated headers with Authorization
                    stream=True,
                    timeout=self.timeout
                )
@@ -292,21 +276,35 @@ class GithubChat(Provider):
                    headers["Authorization"] = f"GitHub-Bearer {access_token}"
                    response = self.session.post(
                        url,
-                        json=request_data,
+                        json=request_data, # Use original payload
                        headers=headers,
                        stream=True,
                        timeout=self.timeout
                    )
 
                # If still not successful, raise exception
-
-                    raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+                response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
 
                # Process the streaming response
-
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._github_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _github_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = {"text": content_chunk}
+                        yield resp if not raw else content_chunk
 
            except Exception as e:
-                if isinstance(e,
+                if isinstance(e, CurlError): # Check for CurlError
                    if hasattr(e, 'response') and e.response is not None:
                        status_code = e.response.status_code
                        if status_code == 401:
@@ -314,13 +312,18 @@ class GithubChat(Provider):
 
                # If anything else fails
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+            finally:
+                # Update history after stream finishes or fails (if text was generated)
+                if streaming_text:
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
 
        def for_non_stream():
            response_text = ""
            for response in for_stream():
                if "text" in response:
                    response_text += response["text"]
-            self.last_response
+            # self.last_response and history are updated in for_stream's finally block
            return self.last_response
 
        return for_stream() if stream else for_non_stream()
@@ -358,7 +361,7 @@ if __name__ == "__main__":
    from rich import print
 
    try:
-        ai = GithubChat()
+        ai = GithubChat("cookies.json")
        response = ai.chat("Python code to count r in strawberry", stream=True)
        for chunk in response:
            print(chunk, end="", flush=True)
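Note on the pattern above: the hand-rolled `process_response` SSE loop is deleted in favour of the shared `sanitize_stream` helper plus a small per-provider extractor. The diff shows only `sanitize_stream`'s call signature, not its internals, so the following is a minimal sketch of the equivalent behaviour; `sanitize_stream_sketch` is a hypothetical stand-in, while `_github_extractor` is copied verbatim from the diff.

```python
import json
from typing import Any, Dict, Optional, Union

def _github_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    # Same logic as the new static method in the diff.
    if isinstance(chunk, dict) and chunk.get("type") == "content":
        return chunk.get("body")
    return None

def sanitize_stream_sketch(lines, intro_value="data:", skip_markers=("[DONE]",)):
    # Hypothetical stand-in for webscout.AIutel.sanitize_stream:
    # strip the intro prefix, skip end markers, JSON-decode, apply the extractor.
    for line in lines:
        line = line.strip()
        if not line.startswith(intro_value):
            continue
        payload = line[len(intro_value):].strip()
        if payload in skip_markers:
            continue
        try:
            content = _github_extractor(json.loads(payload))
        except json.JSONDecodeError:
            continue  # mirrors yield_raw_on_error=False
        if content:
            yield content

sse_lines = [
    'data: {"type": "content", "body": "Hel"}',
    'data: {"type": "content", "body": "lo"}',
    "data: [DONE]",
]
print("".join(sanitize_stream_sketch(sse_lines)))  # -> Hello
```

Run against the sample SSE lines this prints `Hello`, the same tokens the old `process_response` loop yielded one by one.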
webscout/Provider/GizAI.py
ADDED
@@ -0,0 +1,285 @@
+import os
+import base64
+import random
+import json
+from typing import Union, Dict, Any, Optional
+from urllib import response
+
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+from curl_cffi.const import CurlHttpVersion
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class GizAI(Provider):
+    """
+    A class to interact with the GizAI API.
+
+    Attributes:
+        system_prompt (str): The system prompt to define the assistant's role.
+
+    Examples:
+        >>> from webscout.Provider.GizAI import GizAI
+        >>> ai = GizAI()
+        >>> response = ai.chat("What's the weather today?")
+        >>> print(response)
+    """
+
+    AVAILABLE_MODELS = [
+        "azure-gpt-4-1",
+        "chat-gpt4",
+        "chat-grok-2",
+        "chat-o4-mini",
+        "chat-o4-mini-high",
+        "chat-o4-mini-medium",
+        "claude-haiku",
+        "claude-sonnet",
+        "deepinfra-llama-4-maverick",
+        "deepseek",
+        "deepseek-r1-distill-llama-70b",
+        "gemini-2.0-flash-lite",
+        "gemini-2.5-flash",
+        "gemini-2.5-pro",
+        "gpt-4-1-mini",
+        "gpt-4-1-nano",
+        "gpt-4o-image",
+        "hyperbolic-deepseek-r1",
+        "llama-3-70b",
+        "llama-4-scout",
+        "o3",
+        "phi-4",
+        "qwq-32b"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gemini-2.0-flash-lite",
+        system_prompt: str = "You are a helpful assistant."
+    ):
+        """Initializes the GizAI API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.api_url = "https://app.giz.ai/api/data/users/inferenceServer.infer"
+
+        # Initialize LitAgent for user-agent generation
+        self.agent = LitAgent()
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+
+        # Set up the headers
+        self.headers = {
+            "accept": "application/json, text/plain, */*",
+            "content-type": "application/json",
+            "user-agent": self.agent.random(),
+            "origin": "https://app.giz.ai",
+            "referer": "https://app.giz.ai/",
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin"
+        }
+
+        # Update session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        # Store configuration
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def _generate_id(self, length: int = 21) -> str:
+        """Generates a random URL-safe base64 string."""
+        random_bytes = os.urandom(length * 2) # Generate more bytes initially
+        b64_encoded = base64.urlsafe_b64encode(random_bytes).decode('utf-8')
+        return b64_encoded[:length]
+
+    def _get_random_ip(self) -> str:
+        """Generates a random IPv4 address string."""
+        return f"{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False, # Parameter kept for compatibility but not used
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the GizAI API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Not supported by GizAI, kept for compatibility.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Dict[str, Any]: The API response.
+
+        Examples:
+            >>> ai = GizAI()
+            >>> response = ai.ask("Tell me a joke!")
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Generate random IDs for request
+        instance_id = self._generate_id()
+        subscribe_id = self._generate_id()
+        x_forwarded_for = self._get_random_ip()
+
+        # Set up request body - GizAI doesn't support streaming
+        request_body = {
+            "model": "chat",
+            "baseModel": self.model, # Use the specific model ID here
+            "input": {
+                "messages": [{
+                    "type": "human",
+                    "content": conversation_prompt
+                }],
+                "mode": "plan"
+            },
+            "noStream": True,
+            "instanceId": instance_id,
+            "subscribeId": subscribe_id
+        }
+
+        # Combine default headers with the dynamic x-forwarded-for header
+        request_headers = {**self.headers, "x-forwarded-for": x_forwarded_for}
+
+        try:
+            # Use curl_cffi session post with impersonate
+            response = self.session.post(
+                self.api_url,
+                headers=request_headers,
+                json=request_body,
+                timeout=self.timeout,
+                impersonate="chrome120", # Use a common impersonation profile
+                http_version=CurlHttpVersion.V2_0 # Use HTTP/2
+            )
+            response.raise_for_status() # Check for HTTP errors
+
+            # Process the response
+            try:
+                response_json = response.json()
+                # GizAI responses have "status" and "output" fields
+                if response_json.get("status") == "completed" and "output" in response_json:
+                    content = response_json["output"]
+                else:
+                    content = ""
+                    # Try to extract content from any available field that might contain the response
+                    for key, value in response_json.items():
+                        if isinstance(value, str) and len(value) > 10:
+                            content = value
+                            break
+            except json.JSONDecodeError:
+                # Handle case where response is not valid JSON
+                content = response.text
+
+            # Update conversation history
+            self.last_response = {"text": content}
+            self.conversation.update_chat_history(prompt, content)
+
+            return self.last_response if not raw else content
+
+        except CurlError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}")
+        except Exception as e:
+            error_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {error_text}")
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False, # Parameter kept for compatibility but not used
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the GizAI API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Not supported by GizAI, kept for compatibility.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            str: The API response text.
+
+        Examples:
+            >>> ai = GizAI()
+            >>> response = ai.chat("What's the weather today?")
+        """
+        # GizAI doesn't support streaming, so ignore the stream parameter
+        response_data = self.ask(
+            prompt, stream=False, raw=False,
+            optimizer=optimizer, conversationally=conversationally
+        )
+        return self.get_message(response_data)
+
+    def get_message(self, response: Union[dict, str]) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (Union[dict, str]): The API response.
+
+        Returns:
+            str: The message content.
+
+        Examples:
+            >>> ai = GizAI()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> message = ai.get_message(response)
+        """
+        if isinstance(response, str):
+            return response
+        assert isinstance(response, dict), "Response should be either dict or str"
+        return response.get("text", "")
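The new provider builds each request around two random IDs (`instanceId`, `subscribeId`) and a spoofed `x-forwarded-for` address. The helpers involved are self-contained; below is a standalone sketch with the logic copied from the new file, runnable without webscout installed.

```python
import os
import base64
import random

def generate_id(length: int = 21) -> str:
    # Mirrors GizAI._generate_id: over-generate random bytes,
    # URL-safe base64-encode, then truncate to the requested length.
    random_bytes = os.urandom(length * 2)
    b64_encoded = base64.urlsafe_b64encode(random_bytes).decode("utf-8")
    return b64_encoded[:length]

def get_random_ip() -> str:
    # Mirrors GizAI._get_random_ip: four random octets.
    return f"{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"

print(len(generate_id()))  # 21
print(get_random_ip())     # e.g. 203.44.7.191
```

Generating twice the bytes before truncating guarantees the base64 output is always at least `length` characters long (base64 expands 2n bytes to roughly 2.7n characters).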
webscout/Provider/Glider.py
CHANGED
@@ -1,8 +1,9 @@
-import
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
 import json
-from typing import Union, Any, Dict, Generator, Optional
+from typing import Union, Any, Dict, Generator, Optional, List
 
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as Lit
@@ -39,7 +40,7 @@ class GliderAI(Provider):
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
 
-        self.session =
+        self.session = Session() # Use curl_cffi Session
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://glider.so/api/chat"
@@ -57,7 +58,7 @@ class GliderAI(Provider):
            "user-agent": Lit().random(),
        }
        self.session.headers.update(self.headers)
-        self.session.proxies = proxies
+        self.session.proxies = proxies # Assign proxies directly
 
        self.__available_optimizers = (
            method for method in dir(Optimizers)
@@ -76,6 +77,14 @@ class GliderAI(Provider):
        )
        self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _glider_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Glider stream JSON objects."""
+        if isinstance(chunk, dict):
+            # Handle both standard and DeepSeek response formats within choices
+            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+        return None
+
    def ask(
        self,
        prompt: str,
@@ -113,30 +122,32 @@ class GliderAI(Provider):
        }
 
        def for_stream():
-            response = self.session.post(
-                self.api_endpoint, json=payload, stream=True, timeout=self.timeout
-            )
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
            streaming_text = ""
-            ... (16 lines of the old manual streaming loop; content not preserved in this diff view)
+            try:
+                response = self.session.post(
+                    self.api_endpoint, json=payload, stream=True, timeout=self.timeout,
+                    impersonate="chrome120" # Add impersonate
+                )
+                response.raise_for_status()
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    content_extractor=self._glider_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _glider_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield content_chunk if raw else {"text": content_chunk}
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
            self.last_response.update(dict(text=streaming_text))
            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
 
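The extractor added here walks the OpenAI-style `choices[0].delta.content` path with a default at each level. Its behaviour on a few inputs, with the logic copied from the diff:

```python
from typing import Any, Dict, Optional, Union

def _glider_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    # Defaults ({} / [{}]) turn missing keys into None instead of raising.
    if isinstance(chunk, dict):
        return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
    return None

print(_glider_extractor({"choices": [{"delta": {"content": "hi"}}]}))  # hi
print(_glider_extractor({}))                  # None (all defaults kick in)
print(_glider_extractor("data: raw string"))  # None (not a dict)
```

One caveat worth noting: the `[{}]` fallback only covers a *missing* `choices` key; a chunk carrying a present-but-empty `choices` list would still raise `IndexError` on the `[0]` subscript.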
webscout/Provider/Groq.py
CHANGED
@@ -9,7 +9,7 @@ from curl_cffi import CurlError
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 
@@ -178,6 +178,14 @@ class GROQ(Provider):
        # Set proxies for curl_cffi session
        self.session.proxies = proxies
 
+    @staticmethod
+    def _groq_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[Dict]:
+        """Extracts the 'delta' object from Groq stream JSON chunks."""
+        if isinstance(chunk, dict):
+            # Return the delta object itself, or None if not found
+            return chunk.get("choices", [{}])[0].get("delta")
+        return None
+
    @classmethod
    def update_available_models(cls, api_key=None):
        """Update the available models list from Groq API"""
@@ -262,24 +270,27 @@ class GROQ(Provider):
                    f"Failed to generate response - ({response.status_code}) - {response.text}"
                )
 
-            ... (18 lines of the old manual streaming loop; content not preserved in this diff view)
+            streaming_text = ""
+            # Use sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None), # Pass byte iterator
+                intro_value="data:",
+                to_json=True, # Stream sends JSON
+                content_extractor=self._groq_extractor, # Use the delta extractor
+                yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+            )
+
+            for delta in processed_stream:
+                # delta is the extracted 'delta' object or None
+                if delta and isinstance(delta, dict):
+                    content = delta.get("content")
+                    if content:
+                        streaming_text += content
+                        resp = {"text": content} # Yield only the new chunk text
+                        self.last_response = {"choices": [{"delta": {"content": streaming_text}}]} # Update last_response structure
+                        yield resp if not raw else content # Yield dict or raw string chunk
+            # Note: Tool calls in streaming delta are less common in OpenAI format, usually in final message
+
        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
        except Exception as e:
@@ -339,7 +350,24 @@ class GROQ(Provider):
                    # Removed response.reason_phrase
                    f"Failed to generate response - ({response.status_code}) - {response.text}"
                )
-
+
+            response_text = response.text # Get raw text
+
+            # Use sanitize_stream to parse the non-streaming JSON response
+            processed_stream = sanitize_stream(
+                data=response_text,
+                to_json=True, # Parse the whole text as JSON
+                intro_value=None,
+                # Extractor for non-stream structure (returns the whole parsed dict)
+                content_extractor=lambda chunk: chunk if isinstance(chunk, dict) else None,
+                yield_raw_on_error=False
+            )
+
+            # Extract the single result (the parsed JSON dictionary)
+            resp = next(processed_stream, None)
+            if resp is None:
+                raise exceptions.FailedToGenerateResponseError("Failed to parse non-stream JSON response")
+
        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"CurlError: {str(e)}")
        except Exception as e:
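In the non-streaming branch, `sanitize_stream` is repurposed as a one-shot JSON parser: the full response body goes in as a string, the lambda passes the parsed dict through unchanged, and `next(processed_stream, None)` pulls the single result. A minimal sketch of that pattern, assuming `sanitize_stream` with `to_json=True` and `intro_value=None` simply JSON-decodes the string and applies the extractor (only the call shape is confirmed by the diff; `one_shot_parse` is a hypothetical stand-in):

```python
import json
from typing import Any, Dict, Optional

def one_shot_parse(response_text: str) -> Optional[Dict[str, Any]]:
    # Stand-in for sanitize_stream(data=response_text, to_json=True,
    # intro_value=None, content_extractor=..., yield_raw_on_error=False).
    try:
        parsed = json.loads(response_text)
    except json.JSONDecodeError:
        return None  # mirrors yield_raw_on_error=False yielding nothing
    extractor = lambda chunk: chunk if isinstance(chunk, dict) else None
    return extractor(parsed)

body = '{"id": "cmpl-1", "choices": [{"message": {"content": "4"}}]}'
resp = one_shot_parse(body)
if resp is None:
    raise RuntimeError("Failed to parse non-stream JSON response")
print(resp["choices"][0]["message"]["content"])  # -> 4
```

Funnelling both the streaming and non-streaming paths through the same helper keeps error handling (`yield_raw_on_error=False`) consistent across the 40-odd providers touched in this release.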
|