webscout-8.2.5-py3-none-any.whl → webscout-8.2.6-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- webscout/AIauto.py +112 -22
- webscout/AIutel.py +240 -344
- webscout/Extra/autocoder/autocoder.py +66 -5
- webscout/Provider/AISEARCH/scira_search.py +2 -1
- webscout/Provider/GizAI.py +6 -4
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/OPENAI/scirachat.py +2 -1
- webscout/Provider/TeachAnything.py +8 -5
- webscout/Provider/WiseCat.py +1 -1
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/__init__.py +4 -6
- webscout/Provider/ai4chat.py +5 -3
- webscout/Provider/akashgpt.py +59 -66
- webscout/Provider/freeaichat.py +57 -43
- webscout/Provider/scira_chat.py +2 -1
- webscout/Provider/scnet.py +4 -1
- webscout/__init__.py +0 -1
- webscout/conversation.py +305 -446
- webscout/swiftcli/__init__.py +80 -794
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/METADATA +1 -1
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/RECORD +41 -28
- webscout/LLM.py +0 -442
- webscout/Provider/PizzaGPT.py +0 -228
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/tutorai.py +0 -270
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/WHEEL +0 -0
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -0
webscout/Extra/autocoder/autocoder.py
CHANGED

@@ -7,19 +7,16 @@ import queue
 import tempfile
 import threading
 import subprocess
+from typing import Optional, Generator, List, Tuple, Dict, Any, NamedTuple
 from rich.panel import Panel
 from rich.syntax import Syntax
-from rich.console import Console
+from rich.console import Console
 from rich.markdown import Markdown
 from rich.table import Table
 from rich.theme import Theme
 from rich.live import Live
-from rich.rule import Rule
 from rich.box import ROUNDED
-from typing import Optional, Generator, List, Tuple, Dict, Any
-from webscout.AIutel import run_system_command
 from .autocoder_utiles import get_intro_prompt
-
 # Initialize LitLogger with custom format and colors
 default_path = tempfile.mkdtemp(prefix="webscout_autocoder")

@@ -34,6 +31,70 @@ CUSTOM_THEME = Theme({
 })

 console = Console(theme=CUSTOM_THEME)
+class CommandResult(NamedTuple):
+    """Result of a system command execution."""
+    success: bool
+    stdout: str
+    stderr: str
+
+def run_system_command(
+    command: str,
+    exit_on_error: bool = False,
+    stdout_error: bool = False,
+    help: Optional[str] = None
+) -> Tuple[bool, CommandResult]:
+    """Execute a system command and return the result.
+
+    Args:
+        command (str): Command to execute
+        exit_on_error (bool): Whether to exit on error. Defaults to False.
+        stdout_error (bool): Whether to include stdout in error messages. Defaults to False.
+        help (str, optional): Help message for errors. Defaults to None.
+
+    Returns:
+        Tuple[bool, CommandResult]: Success status and command result containing stdout/stderr
+    """
+    try:
+        # Execute command and capture output
+        process = subprocess.Popen(
+            command,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            shell=True,
+            text=True
+        )
+
+        # Get stdout and stderr
+        stdout, stderr = process.communicate()
+        success = process.returncode == 0
+
+        # Create result object
+        result = CommandResult(
+            success=success,
+            stdout=stdout.strip() if stdout else "",
+            stderr=stderr.strip() if stderr else ""
+        )
+
+        # Handle errors if needed
+        if not success and exit_on_error:
+            error_msg = stderr if stderr else stdout if stdout_error else "Command failed"
+            if help:
+                error_msg += f"\n{help}"
+            sys.exit(error_msg)
+
+        return success, result
+
+    except Exception as e:
+        # Handle execution errors
+        error_msg = str(e)
+        if help:
+            error_msg += f"\n{help}"
+
+        if exit_on_error:
+            sys.exit(error_msg)
+
+        return False, CommandResult(success=False, stdout="", stderr=error_msg)
+

 class AutoCoder:
     """Generate and auto-execute Python scripts in the CLI with advanced error handling and retry logic.
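The hunks above drop the run_system_command import from webscout.AIutel and inline a local implementation that reports results through a CommandResult named tuple. A minimal usage sketch (hypothetical command; note the function calls sys.exit, so it assumes `import sys` appears elsewhere in autocoder.py, outside this hunk):

    # Hypothetical example, not part of the diff.
    from webscout.Extra.autocoder.autocoder import run_system_command

    ok, result = run_system_command("python --version")
    if ok:
        print(result.stdout)   # captured standard output, stripped
    else:
        print(result.stderr)   # stderr explains the failure, or is empty
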
webscout/Provider/AISEARCH/scira_search.py
CHANGED

@@ -70,7 +70,8 @@ class Scira(AISearch):

     AVAILABLE_MODELS = {
         "scira-default": "Grok3-mini", # thinking model
-        "scira-grok-3": "Grok3",
+        "scira-grok-3": "Grok3",
+        "scira-anthropic": "Sonnet 3.7 thinking",
         "scira-vision" : "Grok2-Vision", # vision model
         "scira-4.1-mini": "GPT4.1-mini",
         "scira-qwq": "QWQ-32B",
webscout/Provider/GizAI.py
CHANGED

@@ -264,12 +264,12 @@ class GizAI(Provider):
         )
         return self.get_message(response_data)

-    def get_message(self, response: dict) -> str:
+    def get_message(self, response: Union[dict, str]) -> str:
         """
         Extracts the message from the API response.

         Args:
-            response (dict): The API response.
+            response (Union[dict, str]): The API response.

         Returns:
             str: The message content.

@@ -279,5 +279,7 @@ class GizAI(Provider):
         >>> response = ai.ask("Tell me a joke!")
         >>> message = ai.get_message(response)
         """
-
-
+        if isinstance(response, str):
+            return response
+        assert isinstance(response, dict), "Response should be either dict or str"
+        return response.get("text", "")
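The rewritten get_message now accepts either a raw string (returned unchanged) or a response dict, with an empty-string fallback when the "text" key is missing. A sketch of the new behavior (hypothetical values; assumes GizAI() can be constructed with its defaults):

    # Hypothetical example, not part of the diff.
    from webscout.Provider.GizAI import GizAI

    ai = GizAI()
    ai.get_message("already plain text")   # returns the string unchanged
    ai.get_message({"text": "hello"})      # -> "hello"
    ai.get_message({})                     # -> "" (dict.get fallback)
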
webscout/Provider/Nemotron.py
ADDED

@@ -0,0 +1,218 @@
+import requests
+import json
+import random
+import datetime
+from typing import Any, Dict, Optional, Union, Generator
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class NEMOTRON(Provider):
+    """NEMOTRON provider for interacting with the nemotron.one API."""
+    url = "https://nemotron.one/api/chat"
+
+    AVAILABLE_MODELS = [
+        "gpt4o",
+        "nemotron70b",
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 8000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt4o"
+    ):
+        """Initialize NEMOTRON with configuration options."""
+        self.session = requests.Session()
+        self.max_tokens = max_tokens
+        self.is_conversation = is_conversation
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = self.get_model(model)
+
+        self.headers = {
+            "authority": "nemotron.one",
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://nemotron.one",
+            "referer": f"https://nemotron.one/chat/{self.model}",
+            "sec-ch-ua": '"Chromium";v="136", "Not.A/Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36"
+        }
+
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    @staticmethod
+    def _generate_random_email() -> str:
+        """Generate a random email address."""
+        random_letter = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+        random_string = ''.join(random.choice(random_letter) for _ in range(10))
+        return f"{random_string}@gmail.com"
+
+    @staticmethod
+    def _generate_random_id() -> str:
+        """Generate a random user ID."""
+        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
+        random_letter = "abcdefghijklmnopqrstuvwxyz0123456789"
+        random_string = ''.join(random.choice(random_letter) for _ in range(8))
+        return f"cm{random_string}{timestamp[:10]}"
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        """Resolve model name from alias."""
+        if model in cls.AVAILABLE_MODELS:
+            return model  # Simply return the model name if it's in the list
+        raise ValueError(f"Unknown model: {model}. Available models: {', '.join(cls.AVAILABLE_MODELS)}")
+
+    def _get_user_data(self) -> Dict[str, Any]:
+        """Generate user data for the request."""
+        current_time = datetime.datetime.now().isoformat()
+        return {
+            "name": "user",
+            "email": self._generate_random_email(),
+            "image": "https://lh3.googleusercontent.com/a/default-user=s96-c",
+            "id": self._generate_random_id(),
+            "password": None,
+            "emailVerified": None,
+            "credits": 100000000000,
+            "isPro": False,
+            "createdAt": current_time,
+            "updatedAt": current_time
+        }
+
+    def _make_request(
+        self,
+        message: str,
+        stream: bool = False
+    ) -> Generator[str, None, None]:
+        """Make request to NEMOTRON API."""
+        payload = {
+            "content": message,
+            "imageSrc": "",
+            "model": self.model,
+            "user": self._get_user_data(),
+            "conversationId": ""
+        }
+
+        try:
+            if stream:
+                with self.session.post(
+                    self.url,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout
+                ) as response:
+                    response.raise_for_status()
+                    yield from sanitize_stream(
+                        response.iter_content(chunk_size=1024),
+                        to_json=False,
+                    )
+            else:
+                response = self.session.post(
+                    self.url,
+                    headers=self.headers,
+                    json=payload,
+                    timeout=self.timeout
+                )
+                response.raise_for_status()
+                yield response.text
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.ProviderConnectionError(f"Connection error: {str(e)}")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, str], Generator[Dict[str, str], None, None]]:
+        """Send a prompt to NEMOTRON API and return the response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")
+
+        def for_stream():
+            for text in self._make_request(conversation_prompt, stream=True):
+                yield {"text": text}
+
+        def for_non_stream():
+            response_text = next(self._make_request(conversation_prompt, stream=False))
+            self.last_response = {"text": response_text}
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate response as string."""
+        def for_stream():
+            for response in self.ask(
+                prompt,
+                stream=True,
+                optimizer=optimizer,
+                conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    stream=False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Extract message from response dictionary."""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    # Example usage
+    nemotron = NEMOTRON()
+    response = nemotron.chat("Hello, how are you?", stream=False)
+    print(response)
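Since chat returns a generator when stream=True, the new provider can be consumed incrementally as well as in one shot. A usage sketch (hypothetical prompt; "nemotron70b" is the other entry in AVAILABLE_MODELS):

    # Hypothetical example, not part of the diff.
    from webscout.Provider.Nemotron import NEMOTRON

    bot = NEMOTRON(model="nemotron70b")
    for chunk in bot.chat("Summarize HTTP in one sentence.", stream=True):
        print(chunk, end="", flush=True)  # each chunk is a text piece yielded by ask()
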
webscout/Provider/OPENAI/scirachat.py
CHANGED

@@ -325,7 +325,8 @@ class SciraChat(OpenAICompatibleProvider):

     AVAILABLE_MODELS = {
         "scira-default": "Grok3-mini", # thinking model
-        "scira-grok-3": "Grok3",
+        "scira-grok-3": "Grok3",
+        "scira-anthropic": "Sonnet 3.7 thinking",
         "scira-vision" : "Grok2-Vision", # vision model
         "scira-4.1-mini": "GPT4.1-mini",
         "scira-qwq": "QWQ-32B",
webscout/Provider/TeachAnything.py
CHANGED

@@ -191,17 +191,20 @@ class TeachAnything(Provider):
         # If stream=False, return the full message directly
         return self.get_message(response_data)

-    def get_message(self, response: dict) -> str:
+    def get_message(self, response: Union[dict, str]) -> str:
         """Retrieves message only from response

         Args:
-            response (dict): Response generated by `self.ask`
+            response (Union[dict, str]): Response generated by `self.ask`

         Returns:
             str: Message extracted
         """
-
-
+        if isinstance(response, str):
+            return response
+        elif isinstance(response, dict):
+            return response["text"]
+        raise ValueError("Response must be either dict or str")


 if __name__ == '__main__':

@@ -227,4 +230,4 @@ if __name__ == '__main__':
     except exceptions.FailedToGenerateResponseError as e:
         print(f"\n[bold red]API Error:[/bold red] {e}")
     except Exception as e:
-        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
+        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
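Unlike GizAI's version above, this get_message indexes response["text"] directly, so a dict without that key raises KeyError, and any type other than str or dict raises ValueError. A sketch (hypothetical values; assumes TeachAnything() constructs with its defaults):

    # Hypothetical example, not part of the diff.
    from webscout.Provider.TeachAnything import TeachAnything

    bot = TeachAnything()
    bot.get_message("already text")    # returns the string unchanged
    bot.get_message({"text": "hi"})    # -> "hi"
    bot.get_message(42)                # raises ValueError("Response must be either dict or str")
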
webscout/Provider/WiseCat.py
CHANGED

@@ -33,7 +33,7 @@ class WiseCat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "chat-model-
+        model: str = "chat-model-small",
         system_prompt: str = "You are a helpful AI assistant."
     ):
         """Initializes the WiseCat API client."""