webscout 7.3__py3-none-any.whl → 7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (62) hide show
  1. webscout/Provider/AISEARCH/__init__.py +4 -3
  2. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  3. webscout/Provider/AllenAI.py +282 -0
  4. webscout/Provider/C4ai.py +414 -0
  5. webscout/Provider/Cloudflare.py +18 -21
  6. webscout/Provider/DeepSeek.py +3 -32
  7. webscout/Provider/Deepinfra.py +52 -44
  8. webscout/Provider/ElectronHub.py +634 -0
  9. webscout/Provider/GithubChat.py +362 -0
  10. webscout/Provider/Glider.py +7 -41
  11. webscout/Provider/HeckAI.py +217 -0
  12. webscout/Provider/HuggingFaceChat.py +462 -0
  13. webscout/Provider/Jadve.py +49 -63
  14. webscout/Provider/Marcus.py +7 -50
  15. webscout/Provider/Netwrck.py +6 -53
  16. webscout/Provider/PI.py +106 -93
  17. webscout/Provider/Perplexitylabs.py +395 -0
  18. webscout/Provider/Phind.py +29 -3
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  22. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  23. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +3 -2
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/Youchat.py +1 -1
  32. webscout/Provider/__init__.py +25 -2
  33. webscout/Provider/akashgpt.py +315 -0
  34. webscout/Provider/chatglm.py +5 -5
  35. webscout/Provider/copilot.py +416 -0
  36. webscout/Provider/flowith.py +181 -0
  37. webscout/Provider/freeaichat.py +251 -221
  38. webscout/Provider/granite.py +17 -53
  39. webscout/Provider/koala.py +9 -1
  40. webscout/Provider/llamatutor.py +6 -46
  41. webscout/Provider/llmchat.py +7 -46
  42. webscout/Provider/multichat.py +29 -91
  43. webscout/Provider/yep.py +4 -24
  44. webscout/exceptions.py +19 -9
  45. webscout/update_checker.py +55 -93
  46. webscout/version.py +1 -1
  47. webscout-7.5.dist-info/LICENSE.md +146 -0
  48. {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
  49. {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
  50. webscout/Local/__init__.py +0 -10
  51. webscout/Local/_version.py +0 -3
  52. webscout/Local/formats.py +0 -747
  53. webscout/Local/model.py +0 -1368
  54. webscout/Local/samplers.py +0 -125
  55. webscout/Local/thread.py +0 -539
  56. webscout/Local/ui.py +0 -401
  57. webscout/Local/utils.py +0 -388
  58. webscout/Provider/dgaf.py +0 -214
  59. webscout-7.3.dist-info/LICENSE.md +0 -211
  60. {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  61. {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  62. {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,315 @@
1
+ from typing import Any, Dict, Generator
2
+ from uuid import uuid4
3
+ import requests
4
+ import re
5
+ import json
6
+ import time
7
+
8
+ from webscout.AIutel import Optimizers
9
+ from webscout.AIutel import Conversation
10
+ from webscout.AIutel import AwesomePrompts
11
+ from webscout.AIbase import Provider
12
+ from webscout import exceptions
13
+ from webscout.litagent import LitAgent
14
+
15
class AkashGPT(Provider):
    """
    A class to interact with the Akash Network Chat API.

    Attributes:
        system_prompt (str): The system prompt to define the assistant's role.
        model (str): The model to use for generation.

    Examples:
        >>> from webscout.Provider.akashgpt import AkashGPT
        >>> ai = AkashGPT()
        >>> response = ai.chat("What's the weather today?")
        >>> print(response)
        'The weather today depends on your location. I don't have access to real-time weather data.'
    """

    AVAILABLE_MODELS = [
        "Meta-Llama-3-3-70B-Instruct",
        "DeepSeek-R1",
        "Meta-Llama-3-1-405B-Instruct-FP8",
        # "Meta-Llama-3-2-3B-Instruct",
        # "Meta-Llama-3-1-8B-Instruct-FP8",
        # "mistral",
        # "nous-hermes2-mixtral",
        # "dolphin-mixtral",
        "Qwen-QwQ-32B",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful assistant.",
        model: str = "Meta-Llama-3-3-70B-Instruct",
        temperature: float = 0.6,
        top_p: float = 0.9,
        session_token: str = None,
    ):
        """
        Initializes the AkashGPT API with given parameters.

        Args:
            is_conversation (bool): Whether the provider is in conversation mode.
            max_tokens (int): Maximum number of tokens to sample.
            timeout (int): Timeout for API requests.
            intro (str): Introduction message for the conversation.
            filepath (str): Filepath for storing conversation history.
            update_file (bool): Whether to update the conversation history file.
            proxies (dict): Proxies for the API requests. Defaults to no proxies.
            history_offset (int): Offset for conversation history.
            act (str): Act for the conversation.
            system_prompt (str): The system prompt to define the assistant's role.
            model (str): The model to use for generation.
            temperature (float): Controls randomness in generation.
            top_p (float): Controls diversity via nucleus sampling.
            session_token (str): Session token for authentication. If None, auto-generates one.

        Raises:
            ValueError: If *model* is not one of AVAILABLE_MODELS.
        """
        # Validate model choice
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chat.akash.network/api/chat"
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.model = model
        self.temperature = temperature
        self.top_p = top_p

        # Generate session token if not provided (opaque random token; the
        # server only needs it to be present, not to follow a specific format)
        if not session_token:
            self.session_token = str(uuid4()).replace("-", "") + str(int(time.time()))
        else:
            self.session_token = session_token

        self.agent = LitAgent()

        self.headers = {
            "authority": "chat.akash.network",
            "method": "POST",
            "path": "/api/chat",
            "scheme": "https",
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat.akash.network",
            "priority": "u=1, i",
            "referer": "https://chat.akash.network/",
            "sec-ch-ua": '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "user-agent": self.agent.random(),
        }

        # Set cookies with the session token
        self.session.cookies.set("session_token", self.session_token, domain="chat.akash.network")

        # NOTE: materialized as a tuple (not a generator) so membership tests
        # in ask() work on every call instead of exhausting after the first.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        # Avoid the shared-mutable-default pitfall: default to a fresh dict.
        self.session.proxies = proxies if proxies is not None else {}

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """
        Sends a prompt to the Akash Network API and returns the response.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Whether to stream the response.
            raw (bool): Whether to return the raw response.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            Dict[str, Any]: The API response (or a generator of chunks when
            *stream* is True).

        Raises:
            exceptions.FailedToGenerateResponseError: On a non-2xx HTTP status.

        Examples:
            >>> ai = AkashGPT()
            >>> response = ai.ask("Tell me a joke!")
            >>> print(response)
            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "id": str(uuid4()),  # Generate a unique request ID
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "model": self.model,
            "temperature": self.temperature,
            "system": self.system_prompt,
            "topP": self.top_p
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint, headers=self.headers, json=payload,
                stream=True, timeout=self.timeout
            )
            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            streaming_response = ""
            message_id = None

            # The API emits a Vercel-AI-style line protocol:
            #   f:{...}  metadata (contains messageId)
            #   0:"..."  a JSON-encoded text chunk
            #   e:/d:    end-of-stream markers
            for line in response.iter_lines(decode_unicode=True):
                if not line:
                    continue

                # Parse message ID from the f: line
                if line.startswith('f:'):
                    try:
                        f_data = json.loads(line[2:])
                        message_id = f_data.get("messageId")
                        continue
                    except json.JSONDecodeError:
                        pass

                # Parse content chunks
                if line.startswith('0:'):
                    try:
                        # Chunks are JSON string literals; json.loads both
                        # removes the surrounding quotes AND unescapes
                        # sequences such as \n and \" (a plain strip('"')
                        # would leave literal backslash escapes in the text).
                        content = json.loads(line[2:])
                        if not isinstance(content, str):
                            continue
                    except json.JSONDecodeError:
                        # Malformed chunk: fall back to naive quote stripping.
                        content = line[2:].strip('"')
                    streaming_response += content
                    yield content if raw else dict(text=content)

                # End of stream
                if line.startswith('e:') or line.startswith('d:'):
                    break

            self.last_response.update(dict(text=streaming_response, message_id=message_id))
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            # Drain the stream to populate self.last_response.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """
        Generates a response from the AkashGPT API.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Whether to stream the response.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            str: The API response (or a generator of strings when *stream* is True).

        Examples:
            >>> ai = AkashGPT()
            >>> response = ai.chat("What's the weather today?")
            >>> print(response)
            'The weather today depends on your location. I don't have access to real-time weather data.'
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """
        Extracts the message from the API response.

        Args:
            response (dict): The API response.

        Returns:
            str: The message content (empty string if no text is present).

        Examples:
            >>> ai = AkashGPT()
            >>> response = ai.ask("Tell me a joke!")
            >>> message = ai.get_message(response)
            >>> print(message)
            'Why did the scarecrow win an award? Because he was outstanding in his field!'
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")

310
if __name__ == "__main__":
    from rich import print

    # Quick manual smoke test: stream a reply to stdout chunk by chunk.
    bot = AkashGPT()
    for chunk in bot.chat("Tell me a joke!", stream=True):
        print(chunk, end="", flush=True)
@@ -7,7 +7,7 @@ from webscout.AIutel import Optimizers
7
7
  from webscout.AIutel import Conversation
8
8
  from webscout.AIutel import AwesomePrompts
9
9
  from webscout.AIbase import Provider
10
- from webscout import exceptions
10
+ from webscout import exceptions, LitAgent
11
11
 
12
12
 
13
13
  class ChatGLM(Provider):
@@ -26,7 +26,7 @@ class ChatGLM(Provider):
26
26
  proxies: dict = {},
27
27
  history_offset: int = 10250,
28
28
  act: str = None,
29
- model: str = "all-tools-230b",
29
+ plus_model: bool = True,
30
30
  ):
31
31
  """Initializes the ChatGLM API client."""
32
32
  self.session = requests.Session()
@@ -36,14 +36,14 @@ class ChatGLM(Provider):
36
36
  self.stream_chunk_size = 64
37
37
  self.timeout = timeout
38
38
  self.last_response = {}
39
- self.model = model
39
+ self.plus_model = plus_model
40
40
  self.headers = {
41
41
  'Accept-Language': 'en-US,en;q=0.9',
42
42
  'App-Name': 'chatglm',
43
43
  'Authorization': 'undefined',
44
44
  'Content-Type': 'application/json',
45
45
  'Origin': 'https://chatglm.cn',
46
- 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
46
+ 'User-Agent': LitAgent().random(),
47
47
  'X-App-Platform': 'pc',
48
48
  'X-App-Version': '0.0.1',
49
49
  'X-Device-Id': '', #Will be generated each time
@@ -102,7 +102,7 @@ class ChatGLM(Provider):
102
102
  "assistant_id": "65940acff94777010aa6b796",
103
103
  "conversation_id": "",
104
104
  "meta_data": {
105
- "if_plus_model": False,
105
+ "if_plus_model": self.plus_model,
106
106
  "is_test": False,
107
107
  "input_question_type": "xxxx",
108
108
  "channel": "",