webscout-8.3.3-py3-none-any.whl → webscout-8.3.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (46)
  1. webscout/AIutel.py +221 -4
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/scira_search.py +24 -11
  4. webscout/Provider/Deepinfra.py +75 -57
  5. webscout/Provider/ExaChat.py +9 -5
  6. webscout/Provider/Flowith.py +1 -1
  7. webscout/Provider/FreeGemini.py +2 -2
  8. webscout/Provider/Gemini.py +3 -10
  9. webscout/Provider/GeminiProxy.py +31 -5
  10. webscout/Provider/LambdaChat.py +39 -31
  11. webscout/Provider/Netwrck.py +5 -8
  12. webscout/Provider/OLLAMA.py +8 -9
  13. webscout/Provider/OPENAI/README.md +1 -1
  14. webscout/Provider/OPENAI/__init__.py +1 -1
  15. webscout/Provider/OPENAI/autoproxy.py +1 -1
  16. webscout/Provider/OPENAI/copilot.py +73 -26
  17. webscout/Provider/OPENAI/deepinfra.py +54 -24
  18. webscout/Provider/OPENAI/exachat.py +9 -5
  19. webscout/Provider/OPENAI/monochat.py +3 -3
  20. webscout/Provider/OPENAI/netwrck.py +4 -7
  21. webscout/Provider/OPENAI/qodo.py +630 -0
  22. webscout/Provider/OPENAI/scirachat.py +82 -49
  23. webscout/Provider/OPENAI/textpollinations.py +13 -12
  24. webscout/Provider/OPENAI/typegpt.py +3 -3
  25. webscout/Provider/Qodo.py +454 -0
  26. webscout/Provider/TTI/monochat.py +3 -3
  27. webscout/Provider/TextPollinationsAI.py +13 -12
  28. webscout/Provider/__init__.py +4 -4
  29. webscout/Provider/copilot.py +58 -61
  30. webscout/Provider/freeaichat.py +64 -55
  31. webscout/Provider/monochat.py +275 -0
  32. webscout/Provider/scira_chat.py +111 -21
  33. webscout/Provider/typegpt.py +2 -2
  34. webscout/Provider/x0gpt.py +325 -315
  35. webscout/__init__.py +7 -2
  36. webscout/auth/routes.py +20 -3
  37. webscout/version.py +1 -1
  38. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/METADATA +1 -2
  39. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/RECORD +43 -43
  40. webscout/Provider/AI21.py +0 -177
  41. webscout/Provider/HuggingFaceChat.py +0 -469
  42. webscout/Provider/OPENAI/freeaichat.py +0 -363
  43. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  44. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  46. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/monochat.py (new file)
@@ -0,0 +1,275 @@
+from typing import Generator, Optional, Union, Any, Dict
+from uuid import uuid4
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+import re
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class MonoChat(Provider):
+    """
+    MonoChat provider for interacting with the gg.is-a-furry.dev API (OpenAI-compatible).
+    """
+    AVAILABLE_MODELS = [
+        "deepseek-r1",
+        "deepseek-v3",
+        "uncensored-r1-32b",
+        "o3-pro",
+        "o4-mini",
+        "o3",
+        "gpt-4.5-preview",
+        "gpt-4.1",
+        "gpt-4.1-mini",
+        "gpt-4.1-nano",
+        "gpt-4o",
+        "gpt-4o-mini",
+        "gpt-4o-search-preview",
+        "gpt-4o-mini-search-preview",
+        "gpt-4-turbo"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4.1",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome"
+    ):
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+        self.session = Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://gg.is-a-furry.dev/api/chat"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "origin": "https://gg.is-a-furry.dev",
+            "referer": "https://gg.is-a-furry.dev/",
+            "user-agent": self.fingerprint["user_agent"]
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.headers.update({
+            "accept-language": self.fingerprint["accept_language"],
+            "user-agent": self.fingerprint["user_agent"]
+        })
+        self.session.headers.update(self.headers)
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """
+        Sends a prompt to the gg.is-a-furry.dev API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Dict[str, Any]: The API response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "model": self.model,
+            "max_tokens": self.max_tokens_to_sample
+        }
+
+        def for_stream():
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout
+                )
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                streaming_response = ""
+                # Use sanitize_stream with regex-based extraction and filtering (like x0gpt)
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value=None,
+                    to_json=False,
+                    extract_regexes=[r'0:"(.*?)"'],
+                    skip_regexes=[
+                        r'^f:',
+                        r'^e:',
+                        r'^d:',
+                        r'^\s*$',
+                        r'data:\s*\[DONE\]',
+                        r'event:\s*',
+                        r'^\d+:\s*$',
+                        r'^:\s*$',
+                        r'^\s*[\x00-\x1f]+\s*$',
+                    ],
+                    raw=raw
+                )
+
+                for content_chunk in processed_stream:
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            try:
+                                clean_content = content_chunk.encode().decode('unicode_escape')
+                                clean_content = clean_content.replace('\\\\', '\\').replace('\\"', '"')
+                                streaming_response += clean_content
+                                yield dict(text=clean_content)
+                            except (UnicodeDecodeError, UnicodeEncodeError):
+                                streaming_response += content_chunk
+                                yield dict(text=content_chunk)
+
+                self.last_response.update(dict(text=streaming_response))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+        def for_non_stream():
+            if stream:
+                return for_stream()
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Generates a response from the MonoChat API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+            raw (bool): Whether to return raw response chunks.
+
+        Returns:
+            str: The API response.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
+
+        def for_non_stream():
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (dict): The API response.
+
+        Returns:
+            str: The message content.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        text = response.get("text", "")
+        return text
+
+if __name__ == "__main__":
+    from rich import print
+    ai = MonoChat(timeout=60)
+    response = ai.chat("In points tell me about humans", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end="", flush=True)
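Note: the new MonoChat class follows the same Provider interface as the rest of the package: chat() returns the reply as one string, or a generator of decoded text chunks when stream=True. A minimal usage sketch based on the code above; the import path is inferred from the file list, the prompts are placeholders, and the gg.is-a-furry.dev endpoint must be reachable for this to return anything:

    from webscout.Provider.monochat import MonoChat  # path assumed from the file list

    ai = MonoChat(model="gpt-4o-mini", timeout=60)

    # Non-streaming: chat() returns the full reply as a string.
    print(ai.chat("Name three uses of HTTP headers.", stream=False))

    # Streaming: chat() yields decoded text chunks as they arrive.
    for chunk in ai.chat("Summarize TCP slow start.", stream=True):
        print(chunk, end="", flush=True)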
webscout/Provider/scira_chat.py
@@ -17,19 +17,74 @@ class SciraAI(Provider):
     A class to interact with the Scira AI chat API.
     """
 
-    AVAILABLE_MODELS = {
-        "scira-default": "Grok3-mini", # thinking model
-        "scira-grok-3": "Grok3",
-        "scira-anthropic": "Claude 4 Sonnet",
-        "scira-anthropic-thinking": "Claude 4 Sonnet Thinking", # thinking model
-        "scira-vision" : "Grok2-Vision", # vision model
-        "scira-4o": "GPT4o",
-        "scira-qwq": "QWQ-32B",
-        "scira-o4-mini": "o4-mini",
-        "scira-google": "gemini 2.5 flash Thinking", # thinking model
-        "scira-google-pro": "gemini 2.5 pro",
-        "scira-llama-4": "llama 4 Maverick",
+    # Model mapping: actual model names to Scira API format
+    MODEL_MAPPING = {
+        "grok-3-mini": "scira-default",
+        "grok-3-mini-fast": "scira-x-fast-mini",
+        "grok-3-fast": "scira-x-fast",
+        "gpt-4.1-nano": "scira-nano",
+        "grok-3": "scira-grok-3",
+        "grok-4": "scira-grok-4",
+        "grok-2-vision-1212": "scira-vision",
+        "grok-2-latest": "scira-g2",
+        "gpt-4o-mini": "scira-4o-mini",
+        "o4-mini-2025-04-16": "scira-o4-mini",
+        "o3": "scira-o3",
+        "qwen/qwen3-32b": "scira-qwen-32b",
+        "qwen3-30b-a3b": "scira-qwen-30b",
+        "deepseek-v3-0324": "scira-deepseek-v3",
+        "claude-3-5-haiku-20241022": "scira-haiku",
+        "mistral-small-latest": "scira-mistral",
+        "gemini-2.5-flash-lite-preview-06-17": "scira-google-lite",
+        "gemini-2.5-flash": "scira-google",
+        "gemini-2.5-pro": "scira-google-pro",
+        "claude-sonnet-4-20250514": "scira-anthropic",
+        "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
+        "claude-4-opus-20250514": "scira-opus",
+        "claude-4-opus-20250514-pro": "scira-opus-pro",
+        "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
     }
+
+    # Reverse mapping: Scira format to actual model names
+    SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
+    # Add special cases for aliases and duplicate mappings
+    SCIRA_TO_MODEL["scira-anthropic-thinking"] = "claude-sonnet-4-20250514"
+    SCIRA_TO_MODEL["scira-opus-pro"] = "claude-4-opus-20250514"
+    SCIRA_TO_MODEL["scira-x-fast"] = "grok-3-fast"
+    SCIRA_TO_MODEL["scira-x-fast-mini"] = "grok-3-mini-fast"
+    SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
+    SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
+    SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
+    SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
+    SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
+    MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
+    # Available models list (actual model names + scira aliases)
+    AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
+
+    @classmethod
+    def _resolve_model(cls, model: str) -> str:
+        """
+        Resolve a model name to its Scira API format.
+
+        Args:
+            model: Either an actual model name or a Scira alias
+
+        Returns:
+            The Scira API format model name
+
+        Raises:
+            ValueError: If the model is not supported
+        """
+        # If it's already a Scira format, return as-is
+        if model in cls.SCIRA_TO_MODEL:
+            return model
+
+        # If it's an actual model name, convert to Scira format
+        if model in cls.MODEL_MAPPING:
+            return cls.MODEL_MAPPING[model]
+
+        # Model not found
+        raise ValueError(f"Invalid model: {model}. Choose from: {cls.AVAILABLE_MODELS}")
 
     def __init__(
         self,
@@ -42,7 +97,7 @@ class SciraAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "scira-default",
+        model: str = "grok-3-mini",
         chat_id: str = None,
         user_id: str = None,
         browser: str = "chrome",
@@ -67,9 +122,9 @@ class SciraAI(Provider):
             system_prompt (str): System prompt for the AI.
 
         """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
+        # Resolve the model to Scira format
+        self.model = self._resolve_model(model)
+
         self.url = "https://scira.ai/api/search"
 
         # Initialize LitAgent for user agent generation
@@ -103,7 +158,6 @@ class SciraAI(Provider):
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
         self.last_response = {}
-        self.model = model
         self.chat_id = chat_id or str(uuid.uuid4())
         self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
 
@@ -357,7 +411,43 @@ class SciraAI(Provider):
         return response.get("text", "")
 
 if __name__ == "__main__":
-    ai = SciraAI()
-    resp = ai.chat("What is the capital of France?", stream=True, raw=False)
-    for chunk in resp:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(SciraAI.AVAILABLE_MODELS)
+
+    for model in SciraAI.AVAILABLE_MODELS:
+        try:
+            test_ai = SciraAI(model=model, timeout=60)
+            # Test stream first
+            response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
+            for chunk in response_stream:
+                response_text += chunk
+                # Optional: print chunks as they arrive for visual feedback
+                # print(chunk, end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip()  # Already decoded in get_message
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗ (Stream)"
+                display_text = "Empty or invalid stream response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+
+            # Optional: Add non-stream test if needed, but stream test covers basic functionality
+            # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
+            # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
+            # if not response_non_stream or len(response_non_stream.strip()) == 0:
+            #     print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
+
+
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/typegpt.py
@@ -19,9 +19,9 @@ class TypeGPT(Provider):
         # "gpt-4o-mini-2024-07-18",
         "gpt-4o-mini",
         "chatgpt-4o-latest",
-        "deepseek-r1",
+        # "deepseek-r1",
         "deepseek-v3",
-        "uncensored-r1",
+        # "uncensored-r1",
         # "Image-Generator",
     ]