webscout-8.3.1-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
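
Because both wheels are publicly available, a diff like this one can be reproduced locally. The sketch below is illustrative only and is not part of webscout; it assumes PyPI's JSON API (https://pypi.org/pypi/{name}/{version}/json) and that each release ships a pure-Python wheel. It downloads both wheels and compares their file lists.

import io
import json
import urllib.request
import zipfile

def fetch_wheel(name: str, version: str) -> zipfile.ZipFile:
    # PyPI's JSON API lists the files of a release; pick the wheel.
    meta_url = f"https://pypi.org/pypi/{name}/{version}/json"
    with urllib.request.urlopen(meta_url) as resp:
        meta = json.load(resp)
    wheel_url = next(u["url"] for u in meta["urls"] if u["filename"].endswith(".whl"))
    with urllib.request.urlopen(wheel_url) as resp:
        return zipfile.ZipFile(io.BytesIO(resp.read()))

old, new = fetch_wheel("webscout", "8.3.1"), fetch_wheel("webscout", "8.3.3")
old_files, new_files = set(old.namelist()), set(new.namelist())
print("added:", sorted(new_files - old_files))
print("removed:", sorted(old_files - new_files))
# Per-file line diffs (such as the two removed modules shown at the end of this
# page) can then be produced with difflib.unified_diff on the decoded contents.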

Potentially problematic release: this version of webscout might be problematic.

Files changed (114)
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/TTI/fastflux.py (removed)
@@ -1,233 +0,0 @@
- import requests
- import os
- import tempfile
- import time
- import base64
- from typing import Optional, List, Dict, Any
- from webscout.Provider.TTI.utils import (
-     ImageData,
-     ImageResponse
- )
- from webscout.Provider.TTI.base import TTICompatibleProvider, BaseImages
- from io import BytesIO
- from webscout.litagent import LitAgent
-
- try:
-     from PIL import Image
- except ImportError:
-     Image = None
-
-
- class Images(BaseImages):
-     def __init__(self, client):
-         self._client = client
-
-     def create(
-         self,
-         model: str = "flux_1_schnell",
-         prompt: str = None,
-         n: int = 1,
-         size: str = "1_1",
-         response_format: str = "url",
-         user: Optional[str] = None,
-         style: str = "none",
-         aspect_ratio: str = "1:1",
-         timeout: int = 60,
-         image_format: str = "png",
-         is_public: bool = False,
-         **kwargs,
-     ) -> ImageResponse:
-         if not prompt:
-             raise ValueError("Prompt is required!")
-         agent = LitAgent()
-         images = []
-         urls = []
-         api_url = self._client.api_endpoint
-         payload = {
-             "prompt": prompt,
-             "model": model,
-             "size": size,
-             "isPublic": is_public,
-         }
-         for _ in range(n):
-             resp = self._client.session.post(
-                 api_url,
-                 json=payload,
-                 timeout=timeout,
-             )
-             resp.raise_for_status()
-             result = resp.json()
-             if result and "result" in result:
-                 image_data = result["result"]
-                 base64_data = image_data.split(",")[1]
-                 img_bytes = base64.b64decode(base64_data)
-                 # Convert to png or jpeg in memory if needed
-                 if Image is not None:
-                     with BytesIO(img_bytes) as input_io:
-                         with Image.open(input_io) as im:
-                             out_io = BytesIO()
-                             if image_format.lower() == "jpeg":
-                                 im = im.convert("RGB")
-                                 im.save(out_io, format="JPEG")
-                             else:
-                                 im.save(out_io, format="PNG")
-                             img_bytes = out_io.getvalue()
-                 images.append(img_bytes)
-                 if response_format == "url":
-
-                     def upload_file_with_retry(img_bytes, image_format, max_retries=3):
-                         ext = "jpg" if image_format.lower() == "jpeg" else "png"
-                         for attempt in range(max_retries):
-                             tmp_path = None
-                             try:
-                                 with tempfile.NamedTemporaryFile(
-                                     suffix=f".{ext}", delete=False
-                                 ) as tmp:
-                                     tmp.write(img_bytes)
-                                     tmp.flush()
-                                     tmp_path = tmp.name
-                                 with open(tmp_path, "rb") as f:
-                                     files = {
-                                         "fileToUpload": (
-                                             f"image.{ext}",
-                                             f,
-                                             f"image/{ext}",
-                                         )
-                                     }
-                                     data = {"reqtype": "fileupload", "json": "true"}
-                                     headers = {"User-Agent": agent.random()}
-                                     if attempt > 0:
-                                         headers["Connection"] = "close"
-                                     resp = requests.post(
-                                         "https://catbox.moe/user/api.php",
-                                         files=files,
-                                         data=data,
-                                         headers=headers,
-                                         timeout=timeout,
-                                     )
-                                     if resp.status_code == 200 and resp.text.strip():
-                                         text = resp.text.strip()
-                                         if text.startswith("http"):
-                                             return text
-                                         try:
-                                             result = resp.json()
-                                             if "url" in result:
-                                                 return result["url"]
-                                         except Exception:
-                                             if "http" in text:
-                                                 return text
-                             except Exception:
-                                 if attempt < max_retries - 1:
-                                     time.sleep(1 * (attempt + 1))
-                             finally:
-                                 if tmp_path and os.path.isfile(tmp_path):
-                                     try:
-                                         os.remove(tmp_path)
-                                     except Exception:
-                                         pass
-                         return None
-
-                     def upload_file_alternative(img_bytes, image_format):
-                         try:
-                             ext = "jpg" if image_format.lower() == "jpeg" else "png"
-                             with tempfile.NamedTemporaryFile(
-                                 suffix=f".{ext}", delete=False
-                             ) as tmp:
-                                 tmp.write(img_bytes)
-                                 tmp.flush()
-                                 tmp_path = tmp.name
-                             try:
-                                 if not os.path.isfile(tmp_path):
-                                     return None
-                                 with open(tmp_path, "rb") as img_file:
-                                     files = {"file": img_file}
-                                     response = requests.post(
-                                         "https://0x0.st", files=files
-                                     )
-                                     response.raise_for_status()
-                                     image_url = response.text.strip()
-                                     if not image_url.startswith("http"):
-                                         return None
-                                     return image_url
-                             except Exception:
-                                 return None
-                             finally:
-                                 try:
-                                     os.remove(tmp_path)
-                                 except Exception:
-                                     pass
-                         except Exception:
-                             return None
-
-                     uploaded_url = upload_file_with_retry(img_bytes, image_format)
-                     if not uploaded_url:
-                         uploaded_url = upload_file_alternative(img_bytes, image_format)
-                     if uploaded_url:
-                         urls.append(uploaded_url)
-                     else:
-                         raise RuntimeError(
-                             "Failed to upload image to catbox.moe using all available methods"
-                         )
-             else:
-                 raise RuntimeError("No image data received from FastFlux API")
-         result_data = []
-         if response_format == "url":
-             for url in urls:
-                 result_data.append(ImageData(url=url))
-         elif response_format == "b64_json":
-             import base64
-
-             for img in images:
-                 b64 = base64.b64encode(img).decode("utf-8")
-                 result_data.append(ImageData(b64_json=b64))
-         else:
-             raise ValueError("response_format must be 'url' or 'b64_json'")
-         from time import time as _time
-
-         return ImageResponse(created=int(_time()), data=result_data)
-
-
- class FastFluxAI(TTICompatibleProvider):
-     AVAILABLE_MODELS = [
-         "flux_1_schnell",
-     ]
-
-     def __init__(self, api_key: str = None):
-         self.api_endpoint = "https://api.freeflux.ai/v1/images/generate"
-         self.session = requests.Session()
-         self.user_agent = LitAgent().random()
-         self.api_key = api_key
-         self.headers = {
-             "accept": "application/json, text/plain, */*",
-             "accept-language": "en-US,en;q=0.9",
-             "content-type": "application/json",
-             "origin": "https://freeflux.ai",
-             "referer": "https://freeflux.ai/",
-             "user-agent": self.user_agent,
-         }
-         if self.api_key:
-             self.headers["authorization"] = f"Bearer {self.api_key}"
-         self.session.headers.update(self.headers)
-         self.images = Images(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-
-         return _ModelList()
-
-
- if __name__ == "__main__":
-     from rich import print
-
-     client = FastFluxAI()
-     response = client.images.create(
-         model="flux_1_schnell",
-         prompt="A cool cyberpunk city at night",
-         response_format="url",
-         n=2,
-         timeout=30,
-     )
-     print(response)
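
For reference, the core image-handling step in the removed FastFlux provider is the conversion of the API's base64 data URI into PNG bytes. A standalone sketch of just that step is shown below; it is illustrative only, `data_uri` is a hypothetical "data:image/...;base64,..." string, and Pillow is assumed to be installed.

import base64
from io import BytesIO

from PIL import Image

def data_uri_to_png(data_uri: str) -> bytes:
    b64_payload = data_uri.split(",", 1)[1]  # drop the "data:image/...;base64," prefix
    raw = base64.b64decode(b64_payload)      # raw image bytes in whatever format the API returned
    with Image.open(BytesIO(raw)) as im:     # Pillow detects the source format from the bytes
        out = BytesIO()
        im.save(out, format="PNG")           # normalize to PNG, as the provider did by default
        return out.getvalue()
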
webscout/Provider/Writecream.py (removed)
@@ -1,246 +0,0 @@
- from curl_cffi import CurlError
- from curl_cffi.requests import Session # Keep Session import
- import json
- from typing import Any, Dict, Optional, Generator, Union
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent
-
- class Writecream(Provider):
-     """
-     A class to interact with the Writecream API.
-     """
-
-     AVAILABLE_MODELS = ["writecream-gpt"]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         system_prompt: str = "You are a helpful and informative AI assistant.",
-         base_url: str = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat",
-         referer: str = "https://www.writecream.com/chatgpt-chat/",
-         link: str = "writecream.com",
-         model: str = "writecream-gpt"
-     ):
-         """
-         Initializes the Writecream API with given parameters.
-         """
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         # Initialize curl_cffi Session
-         self.session = Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.base_url = base_url
-         self.timeout = timeout
-         self.last_response = {}
-         self.system_prompt = system_prompt
-         self.model = model
-         # Initialize LitAgent
-         self.agent = LitAgent()
-         self.referer = referer
-         self.link = link
-
-         self.headers = {
-             # Use LitAgent for User-Agent
-             "User-Agent": self.agent.random(),
-             "Referer": self.referer
-             # Add other headers if needed by curl_cffi impersonation or API
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies.update(proxies)
-
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[Dict[str, Any], Generator]:
-         """
-         Sends a message to the Writecream API and returns the response.
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-         Returns:
-             Union[Dict[str, Any], Generator]: Response from the API.
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         final_query = [
-             {"role": "system", "content": self.system_prompt},
-             {"role": "user", "content": conversation_prompt}
-         ]
-
-         params = {
-             "query": json.dumps(final_query),
-             "link": self.link
-         }
-
-         def for_non_stream():
-             try:
-                 # Use curl_cffi session.get with impersonate
-                 response = self.session.get(
-                     self.base_url,
-                     params=params,
-                     timeout=self.timeout,
-                     impersonate="chrome120" # Add impersonate
-                 )
-                 response.raise_for_status()
-                 response_text = response.text # Get the raw text
-
-                 # Use sanitize_stream to process the non-streaming text
-                 # It will try to parse the whole text as JSON because to_json=True
-                 processed_stream = sanitize_stream(
-                     data=response_text,
-                     to_json=True, # Attempt to parse the whole response text as JSON
-                     intro_value=None, # No prefix expected on the full response
-                     content_extractor=lambda chunk: chunk.get("response", chunk.get("response_content", "")) if isinstance(chunk, dict) else None
-                 )
-
-                 # Extract the single result from the generator
-                 response_content = ""
-                 for content in processed_stream:
-                     response_content = content if isinstance(content, str) else ""
-
-                 # Update conversation history
-                 self.last_response = {"text": response_content}
-                 self.conversation.update_chat_history(prompt, response_content)
-
-                 return {"text": response_content}
-             except CurlError as e: # Catch CurlError
-                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-             except Exception as e:
-                 # Include original exception type
-                 raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}")
-
-         # Currently, Writecream API doesn't support streaming, so we always return non-streaming response
-         return for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Union[str, Generator[str, None, None]]:
-         """
-         Generates a response from the Writecream API.
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-         Returns:
-             Union[str, Generator[str, None, None]]: Response from the API.
-         """
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     stream=False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         if stream:
-             # For compatibility with AUTO streaming interface, yield a dict
-             response_dict = self.ask(
-                 prompt,
-                 stream=False,
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-             )
-             yield response_dict
-         else:
-             return for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """
-         Retrieves message only from response.
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
-
- if __name__ == "__main__":
-     # Ensure curl_cffi is installed
-     print("-" * 80)
-     print(f"{'Model':<30} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     try:
-         test_api = Writecream(timeout=60)
-         prompt = "Say 'Hello' in one word"
-         response = test_api.chat(prompt)
-
-         if response and len(response.strip()) > 0:
-             status = "✓"
-             # Clean and truncate response
-             clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
-             display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-         else:
-             status = "✗"
-             display_text = "Empty or invalid response"
-
-         print(f"{test_api.model:<30} {status:<10} {display_text}")
-     except Exception as e:
-         print(f"{Writecream.AVAILABLE_MODELS[0]:<30} {'✗':<10} {str(e)}")
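
The request the removed Writecream provider made reduces to a single impersonated GET with the chat history JSON-encoded into a query parameter. The minimal sketch below is based purely on the deleted module above; the endpoint, parameters, impersonation target, and response keys are copied from that code and may no longer work.

import json

from curl_cffi.requests import Session

messages = [
    {"role": "system", "content": "You are a helpful and informative AI assistant."},
    {"role": "user", "content": "Say 'Hello' in one word"},
]
session = Session()
resp = session.get(
    "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat",
    params={"query": json.dumps(messages), "link": "writecream.com"},
    impersonate="chrome120",  # browser TLS/HTTP fingerprint, as in the original code
    timeout=30,
)
resp.raise_for_status()
data = resp.json()
# The deleted provider looked for "response" first, then "response_content".
print(data.get("response") or data.get("response_content"))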