webscout: 8.2.5-py3-none-any.whl → 8.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (45)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Provider/AISEARCH/scira_search.py +2 -1
  5. webscout/Provider/GizAI.py +6 -4
  6. webscout/Provider/Nemotron.py +218 -0
  7. webscout/Provider/OPENAI/scirachat.py +2 -1
  8. webscout/Provider/TeachAnything.py +8 -5
  9. webscout/Provider/WiseCat.py +1 -1
  10. webscout/Provider/WrDoChat.py +370 -0
  11. webscout/Provider/__init__.py +4 -6
  12. webscout/Provider/ai4chat.py +5 -3
  13. webscout/Provider/akashgpt.py +59 -66
  14. webscout/Provider/freeaichat.py +57 -43
  15. webscout/Provider/scira_chat.py +2 -1
  16. webscout/Provider/scnet.py +4 -1
  17. webscout/__init__.py +0 -1
  18. webscout/conversation.py +305 -446
  19. webscout/swiftcli/__init__.py +80 -794
  20. webscout/swiftcli/core/__init__.py +7 -0
  21. webscout/swiftcli/core/cli.py +297 -0
  22. webscout/swiftcli/core/context.py +104 -0
  23. webscout/swiftcli/core/group.py +241 -0
  24. webscout/swiftcli/decorators/__init__.py +28 -0
  25. webscout/swiftcli/decorators/command.py +221 -0
  26. webscout/swiftcli/decorators/options.py +220 -0
  27. webscout/swiftcli/decorators/output.py +252 -0
  28. webscout/swiftcli/exceptions.py +21 -0
  29. webscout/swiftcli/plugins/__init__.py +9 -0
  30. webscout/swiftcli/plugins/base.py +135 -0
  31. webscout/swiftcli/plugins/manager.py +262 -0
  32. webscout/swiftcli/utils/__init__.py +59 -0
  33. webscout/swiftcli/utils/formatting.py +252 -0
  34. webscout/swiftcli/utils/parsing.py +267 -0
  35. webscout/version.py +1 -1
  36. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/METADATA +1 -1
  37. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/RECORD +41 -28
  38. webscout/LLM.py +0 -442
  39. webscout/Provider/PizzaGPT.py +0 -228
  40. webscout/Provider/promptrefine.py +0 -193
  41. webscout/Provider/tutorai.py +0 -270
  42. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/WHEEL +0 -0
  43. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/entry_points.txt +0 -0
  44. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
  45. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,7 @@
+ import re
  import requests
  import json
- import time
+ import uuid
  from typing import Any, Dict, Optional, Generator, Union

  from webscout.AIutel import Optimizers
@@ -27,6 +28,12 @@ class FreeAIChat(Provider):
  "O3 Mini",
  "O3 Mini High",
  "O3 Mini Low",
+ "O4 Mini",
+ "O4 Mini High",
+ "GPT 4.1",
+ "o3",
+ "GPT 4.1 Mini",
+

  # Anthropic Models
  "Claude 3.5 haiku",
@@ -74,8 +81,9 @@ class FreeAIChat(Provider):

  def __init__(
  self,
+ api_key: str,
  is_conversation: bool = True,
- max_tokens: int = 2049,
+ max_tokens: int = 150,
  timeout: int = 30,
  intro: str = None,
  filepath: str = None,
@@ -83,8 +91,9 @@ class FreeAIChat(Provider):
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
- model: str = "GPT-4o",
+ model: str = "GPT 4o",
  system_prompt: str = "You are a helpful AI assistant.",
+ temperature: float = 0.7,
  ):
  """Initializes the FreeAIChat API client."""
  if model not in self.AVAILABLE_MODELS:
@@ -105,11 +114,13 @@
  self.session.proxies.update(proxies)

  self.is_conversation = is_conversation
- self.max_tokens_to_sample = max_tokens
+ self.max_tokens = max_tokens
  self.timeout = timeout
  self.last_response = {}
  self.model = model
  self.system_prompt = system_prompt
+ self.temperature = temperature
+ self.api_key = api_key

  self.__available_optimizers = (
  method
@@ -125,10 +136,21 @@
  )

  self.conversation = Conversation(
- is_conversation, self.max_tokens_to_sample, filepath, update_file
+ is_conversation, self.max_tokens, filepath, update_file
  )
  self.conversation.history_offset = history_offset

+ @staticmethod
+ def _extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+ """Extracts content from the x0gpt stream format '0:"..."'."""
+ if isinstance(chunk, str):
+ match = re.search(r'0:"(.*?)"', chunk)
+ if match:
+ # Decode potential unicode escapes like \u00e9
+ content = match.group(1).encode().decode('unicode_escape')
+ return content.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes and quotes
+ return None
+
  def ask(
  self,
  prompt: str,
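
For illustration, here is a standalone sketch of what the new _extractor does with one chunk of the '0:"..."' stream format. The helper name and the sample chunk are invented for this example; the parsing logic mirrors the added method above:

    import re
    from typing import Optional

    def extract_chunk(chunk: str) -> Optional[str]:
        # Mirrors the _extractor logic above: pull the text out of a '0:"..."' line
        match = re.search(r'0:"(.*?)"', chunk)
        if match:
            # Decode unicode escapes such as \u00e9, then unescape \\ and \"
            content = match.group(1).encode().decode('unicode_escape')
            return content.replace('\\\\', '\\').replace('\\"', '"')
        return None

    print(extract_chunk('0:"Bonjour \\u00e9l\\u00e8ve"'))  # -> Bonjour élève
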
@@ -146,24 +168,19 @@ class FreeAIChat(Provider):
  else:
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

- messages = [
- {
- "id": str(int(time.time() * 1000)),
+ payload = {
+ "id": str(uuid.uuid4()),
+ "messages": [{
  "role": "user",
  "content": conversation_prompt,
- "model": {
- # "id": "14",
- "name": self.model,
- # "icon": "https://cdn-avatars.huggingface.co/v1/production/uploads/1620805164087-5ec0135ded25d76864d553f1.png",
- # "provider": "openAI",
- # "contextWindow": 63920
- }
- }
- ]
-
- payload = {
+ "parts": [{"type": "text", "text": conversation_prompt}]
+ }],
  "model": self.model,
- "messages": messages
+ "config": {
+ "temperature": self.temperature,
+ "maxTokens": self.max_tokens
+ },
+ "apiKey": self.api_key
  }

  def for_stream():
@@ -174,28 +191,25 @@ class FreeAIChat(Provider):
  f"Request failed with status code {response.status_code}"
  )

- streaming_text = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- line = line.strip()
- if line.startswith("data: "):
- json_str = line[6:] # Remove "data: " prefix
- if json_str == "[DONE]":
- break
- try:
- json_data = json.loads(json_str)
- if 'choices' in json_data:
- choice = json_data['choices'][0]
- if 'delta' in choice and 'content' in choice['delta']:
- content = choice['delta']['content']
- streaming_text += content
- resp = dict(text=content)
- yield resp if raw else resp
- except json.JSONDecodeError:
- pass
-
- self.conversation.update_chat_history(prompt, streaming_text)
-
+ streaming_response = ""
+ processed_stream = sanitize_stream(
+ data=response.iter_lines(decode_unicode=True),
+ intro_value=None,
+ to_json=False,
+ content_extractor=self._extractor,
+ skip_markers=None
+ )
+
+ for content_chunk in processed_stream:
+ if content_chunk and isinstance(content_chunk, str):
+ streaming_response += content_chunk
+ yield dict(text=content_chunk) if raw else dict(text=content_chunk)
+
+ self.last_response.update(dict(text=streaming_response))
+ self.conversation.update_chat_history(
+ prompt, self.get_message(self.last_response)
+ )
+
  except requests.RequestException as e:
  raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

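
A rough sketch of the new streaming path in isolation, assuming sanitize_stream accepts any iterable of strings as data (the call above passes response.iter_lines, so this seems reasonable); the fake chunks and expected output are purely illustrative:

    from webscout.AIutel import sanitize_stream
    from webscout.Provider.freeaichat import FreeAIChat  # module path inferred from the file list

    fake_stream = ['0:"Hello"', '0:" world"']  # invented chunks in the '0:"..."' format
    chunks = sanitize_stream(
        data=iter(fake_stream),
        intro_value=None,
        to_json=False,
        content_extractor=FreeAIChat._extractor,
        skip_markers=None,
    )
    print("".join(c for c in chunks if isinstance(c, str)))  # expected: Hello world
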
@@ -268,4 +282,4 @@ if __name__ == "__main__":
  display_text = "Empty or invalid response"
  print(f"\r{model:<50} {status:<10} {display_text}")
  except Exception as e:
- print(f"\r{model:<50} {'✗':<10} {str(e)}")
+ print(f"\r{model:<50} {'✗':<10} {str(e)}")
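
Taken together, the FreeAIChat changes make api_key a required argument, rename the default model from "GPT-4o" to "GPT 4o", drop the default max_tokens from 2049 to 150, and add a temperature parameter. A minimal usage sketch under those assumptions (the key value is a placeholder and the import path is inferred from the file list):

    from webscout.Provider.freeaichat import FreeAIChat

    ai = FreeAIChat(
        api_key="YOUR_FREEAICHAT_API_KEY",  # newly required in 8.2.6
        model="GPT 4o",                     # was "GPT-4o" in 8.2.5
        max_tokens=150,                     # new default, down from 2049
        temperature=0.7,                    # new parameter
    )
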
@@ -19,7 +19,8 @@ class SciraAI(Provider):

  AVAILABLE_MODELS = {
  "scira-default": "Grok3-mini", # thinking model
- "scira-grok-3": "Grok3",
+ "scira-grok-3": "Grok3",
+ "scira-anthropic": "Sonnet 3.7 thinking",
  "scira-vision" : "Grok2-Vision", # vision model
  "scira-4.1-mini": "GPT4.1-mini",
  "scira-qwq": "QWQ-32B",
@@ -29,7 +29,10 @@ class SCNet(Provider):
  is_conversation: bool = True,
  max_tokens: int = 2048, # Note: max_tokens is not used by this API
  timeout: int = 30,
- intro: Optional[str] = None,
+ intro: Optional[str] = ("You are a helpful, advanced LLM assistant. "
+ "You must always answer in English, regardless of the user's language. "
+ "If the user asks in another language, politely respond in English only. "
+ "Be clear, concise, and helpful."),
  filepath: Optional[str] = None,
  update_file: bool = True,
  proxies: Optional[dict] = None,
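
The SCNet default intro is no longer None but a hard-coded English-only system prompt. Callers who want the previous behaviour can still pass intro explicitly, e.g. (sketch only, import path inferred from the file list):

    from webscout.Provider.scnet import SCNet

    bot = SCNet(intro=None)  # opt out of the new English-only default prompt
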
webscout/__init__.py CHANGED
@@ -5,7 +5,6 @@ from .webscout_search_async import AsyncWEBS
  from .version import __version__
  from .DWEBS import *
  from .tempid import *
- from .LLM import VLM, LLM
  from .Provider import *
  from .Provider.TTI import *
  from .Provider.TTS import *
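
Because webscout/LLM.py is deleted in this release (see the files-changed list) and its re-export is removed here, an import that worked in 8.2.5 now fails:

    from webscout import LLM, VLM  # raises ImportError in 8.2.6
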