webscout 6.1__py3-none-any.whl → 6.2__py3-none-any.whl

This diff shows the content changes between publicly released versions of the package as they appear in their respective public registries. The information in this diff is provided for informational purposes only.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (48):
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/functioncall.py +2 -2
  3. webscout/Extra/autollama.py +37 -20
  4. webscout/Local/formats.py +4 -2
  5. webscout/Local/utils.py +37 -12
  6. webscout/Provider/Amigo.py +50 -37
  7. webscout/Provider/Deepseek.py +7 -6
  8. webscout/Provider/EDITEE.py +2 -2
  9. webscout/Provider/GPTWeb.py +1 -1
  10. webscout/Provider/Llama3.py +1 -1
  11. webscout/Provider/NinjaChat.py +2 -2
  12. webscout/Provider/OLLAMA.py +1 -1
  13. webscout/Provider/Perplexity.py +1 -1
  14. webscout/Provider/Reka.py +12 -5
  15. webscout/Provider/TTI/AIuncensored.py +103 -0
  16. webscout/Provider/TTI/__init__.py +3 -2
  17. webscout/Provider/TTI/talkai.py +116 -0
  18. webscout/Provider/TeachAnything.py +0 -3
  19. webscout/Provider/__init__.py +8 -11
  20. webscout/Provider/cerebras.py +143 -123
  21. webscout/Provider/cleeai.py +1 -1
  22. webscout/Provider/felo_search.py +1 -1
  23. webscout/Provider/gaurish.py +41 -2
  24. webscout/Provider/geminiprorealtime.py +1 -1
  25. webscout/Provider/genspark.py +1 -1
  26. webscout/Provider/julius.py +4 -3
  27. webscout/Provider/learnfastai.py +1 -1
  28. webscout/Provider/{aigames.py → llmchat.py} +74 -84
  29. webscout/Provider/promptrefine.py +3 -1
  30. webscout/Provider/talkai.py +196 -0
  31. webscout/Provider/turboseek.py +3 -8
  32. webscout/Provider/tutorai.py +1 -1
  33. webscout/__init__.py +2 -43
  34. webscout/tempid.py +4 -73
  35. webscout/version.py +1 -1
  36. webscout/webai.py +1 -1
  37. {webscout-6.1.dist-info → webscout-6.2.dist-info}/METADATA +44 -128
  38. {webscout-6.1.dist-info → webscout-6.2.dist-info}/RECORD +42 -45
  39. webscout/Provider/BasedGPT.py +0 -214
  40. webscout/Provider/ChatHub.py +0 -209
  41. webscout/Provider/TTI/amigo.py +0 -148
  42. webscout/Provider/bixin.py +0 -264
  43. webscout/Provider/xdash.py +0 -182
  44. webscout/websx_search.py +0 -19
  45. {webscout-6.1.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
  46. {webscout-6.1.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
  47. {webscout-6.1.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
  48. {webscout-6.1.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
@@ -1,22 +1,24 @@
1
- import json
1
+ import re
2
2
  import requests
3
+ import json
4
+ import os
5
+ from typing import Any, Dict, Optional, Generator, List, Union
3
6
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
4
7
  from webscout.AIbase import Provider
5
8
  from webscout import exceptions
6
- from typing import Dict, Any
9
+ from fake_useragent import UserAgent
10
+ from cerebras.cloud.sdk import Cerebras as CerebrasSDK
11
+
7
12
 
8
13
  class Cerebras(Provider):
9
14
  """
10
- A class to interact with the Cerebras AI API.
15
+ A class to interact with the Cerebras API using a cookie for authentication.
11
16
  """
12
17
 
13
- AVAILABLE_MODELS = ["llama3.1-8b", "llama3.1-70b"]
14
-
15
18
  def __init__(
16
19
  self,
17
- api_key: str,
18
20
  is_conversation: bool = True,
19
- max_tokens: int = 4096,
21
+ max_tokens: int = 2049,
20
22
  timeout: int = 30,
21
23
  intro: str = None,
22
24
  filepath: str = None,
@@ -24,47 +26,36 @@ class Cerebras(Provider):
24
26
  proxies: dict = {},
25
27
  history_offset: int = 10250,
26
28
  act: str = None,
29
+ cookie_path: str = "cookie.json",
27
30
  model: str = "llama3.1-8b",
28
- system_prompt: str = "Please try to provide useful, helpful and actionable answers.",
31
+ system_prompt: str = "You are a helpful assistant.",
29
32
  ):
30
- """
31
- Initializes the Cerebras AI API with given parameters.
32
- """
33
- if model not in self.AVAILABLE_MODELS:
34
- raise ValueError(f"Invalid model: {model}. Available models are: {', '.join(self.AVAILABLE_MODELS)}")
35
-
36
- self.session = requests.Session()
37
- self.is_conversation = is_conversation
38
- self.max_tokens_to_sample = max_tokens
39
- self.api_endpoint = "https://api.cerebras.ai/v1/chat/completions"
33
+ # Initialize basic settings first
40
34
  self.timeout = timeout
41
- self.last_response = {}
42
35
  self.model = model
43
36
  self.system_prompt = system_prompt
44
- self.headers = {
45
- "accept": "application/json",
46
- "accept-encoding": "gzip, deflate, br, zstd",
47
- "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
48
- "authorization": f"Bearer {api_key}",
49
- "content-type": "application/json",
50
- "dnt": "1",
51
- "origin": "https://inference.cerebras.ai",
52
- "referer": "https://inference.cerebras.ai/",
53
- "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
54
- "sec-ch-ua-mobile": "?0",
55
- "sec-ch-ua-platform": '"Windows"',
56
- "sec-fetch-dest": "empty",
57
- "sec-fetch-mode": "cors",
58
- "sec-fetch-site": "same-site",
59
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
60
- }
37
+ self.is_conversation = is_conversation
38
+ self.max_tokens_to_sample = max_tokens
39
+ self.last_response = {}
61
40
 
41
+ # Get API key first
42
+ try:
43
+ self.api_key = self.get_demo_api_key(cookie_path)
44
+ # Set environment variable for the SDK
45
+ os.environ["CEREBRAS_API_KEY"] = self.api_key
46
+ # Initialize the client with the API key
47
+ self.client = CerebrasSDK(api_key=self.api_key)
48
+ except Exception as e:
49
+ raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
50
+
51
+ # Initialize optimizers
62
52
  self.__available_optimizers = (
63
53
  method
64
54
  for method in dir(Optimizers)
65
55
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
66
56
  )
67
- self.session.headers.update(self.headers)
57
+
58
+ # Initialize conversation settings
68
59
  Conversation.intro = (
69
60
  AwesomePrompts().get_act(
70
61
  act, raise_not_found=True, default=None, case_insensitive=True
@@ -76,7 +67,59 @@ class Cerebras(Provider):
76
67
  is_conversation, self.max_tokens_to_sample, filepath, update_file
77
68
  )
78
69
  self.conversation.history_offset = history_offset
79
- self.session.proxies = proxies
70
+
71
+ @staticmethod
72
+ def extract_query(text: str) -> str:
73
+ """Extracts the first code block from the given text."""
74
+ pattern = r"```(.*?)```"
75
+ matches = re.findall(pattern, text, re.DOTALL)
76
+ return matches[0].strip() if matches else text.strip()
77
+
78
+ @staticmethod
79
+ def refiner(text: str) -> str:
80
+ """Refines the input text by removing surrounding quotes."""
81
+ return text.strip('"')
82
+
83
+ def get_demo_api_key(self, cookie_path: str) -> str:
84
+ """Retrieves the demo API key using the provided cookie."""
85
+ try:
86
+ with open(cookie_path, "r") as file:
87
+ cookies = {item["name"]: item["value"] for item in json.load(file)}
88
+ except FileNotFoundError:
89
+ raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
90
+ except json.JSONDecodeError:
91
+ raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)
92
+
93
+ headers = {
94
+ "Accept": "*/*",
95
+ "Accept-Language": "en-US,en;q=0.9",
96
+ "Content-Type": "application/json",
97
+ "Origin": "https://inference.cerebras.ai",
98
+ "Referer": "https://inference.cerebras.ai/",
99
+ "user-agent": UserAgent().random,
100
+ }
101
+
102
+ json_data = {
103
+ "operationName": "GetMyDemoApiKey",
104
+ "variables": {},
105
+ "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
106
+ }
107
+
108
+ try:
109
+ response = requests.post(
110
+ "https://inference.cerebras.ai/api/graphql",
111
+ cookies=cookies,
112
+ headers=headers,
113
+ json=json_data,
114
+ timeout=self.timeout,
115
+ )
116
+ response.raise_for_status()
117
+ api_key = response.json()["data"]["GetMyDemoApiKey"]
118
+ return api_key
119
+ except requests.exceptions.RequestException as e:
120
+ raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
121
+ except KeyError:
122
+ raise exceptions.InvalidResponseError("API key not found in response.")
80
123
 
81
124
  def ask(
82
125
  self,
@@ -85,7 +128,8 @@ class Cerebras(Provider):
85
128
  raw: bool = False,
86
129
  optimizer: str = None,
87
130
  conversationally: bool = False,
88
- ) -> Dict[str, Any]:
131
+ ) -> Union[Dict, Generator]:
132
+ """Send a prompt to the model and get a response."""
89
133
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
90
134
  if optimizer:
91
135
  if optimizer in self.__available_optimizers:
@@ -93,62 +137,51 @@ class Cerebras(Provider):
93
137
  conversation_prompt if conversationally else prompt
94
138
  )
95
139
  else:
96
- raise Exception(
97
- f"Optimizer is not one of {self.__available_optimizers}"
98
- )
99
-
100
- payload = {
101
- "messages": [
102
- {"role": "system", "content": self.system_prompt},
103
- {"role": "user", "content": conversation_prompt},
104
- ],
105
- "model": self.model,
106
- "stream": True,
107
- "temperature": 0.2,
108
- "top_p": 1,
109
- "max_tokens": self.max_tokens_to_sample
110
- }
111
-
112
- def for_stream():
113
- response = self.session.post(
114
- self.api_endpoint, json=payload, stream=True, timeout=self.timeout
140
+ raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
141
+
142
+ messages = [
143
+ {"content": self.system_prompt, "role": "system"},
144
+ {"content": conversation_prompt, "role": "user"},
145
+ ]
146
+
147
+ try:
148
+ if stream:
149
+ return self._handle_stream_response(messages)
150
+ return self._handle_normal_response(messages)
151
+ except Exception as e:
152
+ raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
153
+
154
+ def _handle_stream_response(self, messages: List[Dict]) -> Generator:
155
+ """Handle streaming response from the model."""
156
+ try:
157
+ response = self.client.chat.completions.create(
158
+ messages=messages,
159
+ model=self.model,
160
+ stream=True
115
161
  )
116
-
117
- if not response.ok:
118
- raise exceptions.FailedToGenerateResponseError(
119
- f"Failed to generate response - ({response.status_code}, {response.reason})"
120
- )
121
-
122
- full_response = ""
123
- for line in response.iter_lines():
124
- if line:
125
- line_data = line.decode('utf-8').strip()
126
- if line_data.startswith("data: "):
127
- json_str = line_data[6:]
128
- if json_str != "[DONE]":
129
- chunk = json.loads(json_str)
130
- if 'choices' in chunk and 'delta' in chunk['choices'][0]:
131
- content = chunk['choices'][0]['delta'].get('content', '')
132
- full_response += content
133
- yield content if raw else dict(text=content)
134
- else:
135
- break
136
-
137
- self.last_response.update(dict(text=full_response))
138
- self.conversation.update_chat_history(
139
- prompt, self.get_message(self.last_response)
162
+
163
+ for choice in response.choices:
164
+ if hasattr(choice, 'delta') and hasattr(choice.delta, 'content') and choice.delta.content:
165
+ yield dict(text=choice.delta.content)
166
+
167
+ # Update last response with the complete message
168
+ if hasattr(response.choices[0], 'message'):
169
+ self.last_response.update({"text": response.choices[0].message.content})
170
+
171
+ except Exception as e:
172
+ raise exceptions.FailedToGenerateResponseError(f"Error during streaming: {e}")
173
+
174
+ def _handle_normal_response(self, messages: List[Dict]) -> Dict:
175
+ """Handle normal (non-streaming) response from the model."""
176
+ try:
177
+ response = self.client.chat.completions.create(
178
+ messages=messages,
179
+ model=self.model
140
180
  )
141
-
142
- def for_non_stream():
143
- full_response = ""
144
- for chunk in for_stream():
145
- if isinstance(chunk, dict):
146
- full_response += chunk['text']
147
- else:
148
- full_response += chunk
149
- return dict(text=full_response)
150
-
151
- return for_stream() if stream else for_non_stream()
181
+ self.last_response.update({"text": response.choices[0].message.content})
182
+ return self.last_response
183
+ except Exception as e:
184
+ raise exceptions.FailedToGenerateResponseError(f"Error during response: {e}")
152
185
 
153
186
  def chat(
154
187
  self,
@@ -156,44 +189,31 @@ class Cerebras(Provider):
156
189
  stream: bool = False,
157
190
  optimizer: str = None,
158
191
  conversationally: bool = False,
159
- ) -> str:
160
- def for_stream():
161
- for response in self.ask(
162
- prompt, True, optimizer=optimizer, conversationally=conversationally
163
- ):
164
- yield self.get_message(response)
165
-
166
- def for_non_stream():
167
- return self.get_message(
168
- self.ask(
169
- prompt,
170
- False,
171
- optimizer=optimizer,
172
- conversationally=conversationally,
173
- )
192
+ ) -> Union[str, Generator]:
193
+ """High-level method to chat with the model."""
194
+ return self.get_message(
195
+ self.ask(
196
+ prompt, stream, optimizer=optimizer, conversationally=conversationally
174
197
  )
175
-
176
- return for_stream() if stream else for_non_stream()
198
+ )
177
199
 
178
200
  def get_message(self, response: dict) -> str:
179
- """Retrieves message only from response
180
-
181
- Args:
182
- response (dict): Response generated by `self.ask`
183
-
184
- Returns:
185
- str: Message extracted
186
- """
201
+ """Retrieves message from response."""
187
202
  assert isinstance(response, dict), "Response should be of dict data-type only"
188
203
  return response["text"]
189
204
 
190
- if __name__ == '__main__':
205
+
206
+ if __name__ == "__main__":
191
207
  from rich import print
192
208
 
193
- # You can replace this with your actual API key
194
- api_key = "YOUR_API_KEY_HERE"
209
+ # Example usage
210
+ cerebras = Cerebras(
211
+ cookie_path='cookie.json',
212
+ model='llama3.1-8b',
213
+ system_prompt="You are a helpful AI assistant."
214
+ )
195
215
 
196
- ai = Cerebras(api_key=api_key)
197
- response = ai.chat(input(">>> "), stream=True)
216
+ # Test with streaming
217
+ response = cerebras.chat("What is the meaning of life?", stream=True)
198
218
  for chunk in response:
199
219
  print(chunk, end="", flush=True)
@@ -207,6 +207,6 @@ class Cleeai(Provider):
207
207
  if __name__ == "__main__":
208
208
  from rich import print
209
209
  ai = Cleeai(timeout=5000)
210
- response = ai.chat("write a poem about AI", stream=True)
210
+ response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
211
211
  for chunk in response:
212
212
  print(chunk, end="", flush=True)
@@ -175,6 +175,6 @@ class Felo(Provider):
175
175
  if __name__ == '__main__':
176
176
  from rich import print
177
177
  ai = Felo()
178
- response = ai.chat("tell me about HelpingAI-9B", stream=True)
178
+ response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
179
179
  for chunk in response:
180
180
  print(chunk, end="", flush=True)
@@ -2,6 +2,7 @@ import requests
2
2
  import json
3
3
  import os
4
4
  from typing import Any, Dict, Optional, Generator, List, Union
5
+ import uuid
5
6
 
6
7
  from webscout.AIutel import Optimizers
7
8
  from webscout.AIutel import Conversation
@@ -31,9 +32,47 @@ class GaurishCerebras(Provider):
31
32
  """Initializes the Gaurish Cerebras API client."""
32
33
  self.url = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"
33
34
  self.headers = {
34
- "Authorization": "Bearer 123",
35
35
  "Content-Type": "application/json",
36
36
  "Accept": "text/event-stream",
37
+ "access-control-allow-credentials": "true",
38
+ "access-control-allow-headers": "*",
39
+ "access-control-allow-methods": "*",
40
+ "access-control-allow-origin": "*",
41
+ "cache-control": "public, max-age=0, must-revalidate",
42
+ "referrer-policy": "strict-origin-when-cross-origin",
43
+ "content-type": "text/event-stream; charset=utf-8",
44
+ "strict-transport-security": "max-age=3600; includeSubDomains",
45
+ "x-content-type-options": "nosniff",
46
+ "x-matched-path": "/api/cerebras/[...path]",
47
+ "x-ratelimit-limit-requests-day": "30000",
48
+ "x-ratelimit-limit-tokens-minute": "60000",
49
+ "x-ratelimit-remaining-requests-day": "29984",
50
+ "x-ratelimit-remaining-tokens-minute": "60000",
51
+ "x-ratelimit-reset-requests-day": "24092.23299384117",
52
+ "x-ratelimit-reset-tokens-minute": "32.232993841171265",
53
+ "x-request-id": "0vWYzSEvd9Ytk5Zvl8NGRfT_Ekjm0ErInwwxlihBPyqUBAjJpyXwCg==",
54
+ "x-vercel-id": "bom1::nsbfd-1729703907288-16e74bb1db50",
55
+ "accept": "application/json",
56
+ "accept-encoding": "gzip, deflate, br, zstd",
57
+ "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
58
+ "dnt": "1",
59
+ "origin": "https://chat.gaurish.xyz",
60
+ "priority": "u=1, i",
61
+ "referer": "https://chat.gaurish.xyz/",
62
+ "sec-ch-ua": "\"Chromium\";v=\"130\", \"Microsoft Edge\";v=\"130\", \"Not?A_Brand\";v=\"99\"",
63
+ "sec-ch-ua-mobile": "?0",
64
+ "sec-ch-ua-platform": "\"Windows\"",
65
+ "sec-fetch-dest": "empty",
66
+ "sec-fetch-mode": "cors",
67
+ "sec-fetch-site": "same-site",
68
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0",
69
+ "x-stainless-arch": "unknown",
70
+ "x-stainless-lang": "js",
71
+ "x-stainless-os": "Unknown",
72
+ "x-stainless-package-version": "4.67.3",
73
+ "x-stainless-retry-count": "0",
74
+ "x-stainless-runtime": "browser:chrome",
75
+ "x-stainless-runtime-version": "130.0.0",
37
76
  }
38
77
  self.session = requests.Session()
39
78
  self.session.headers.update(self.headers)
@@ -165,4 +204,4 @@ if __name__ == "__main__":
165
204
  for chunk in response:
166
205
  print(chunk, end="", flush=True)
167
206
  except Exception as e:
168
- print(f"An error occurred: {e}")
207
+ print(f"An error occurred: {e}")
@@ -153,7 +153,7 @@ if __name__ == "__main__":
153
153
  from rich import print
154
154
  bot = GeminiPro()
155
155
  try:
156
- response = bot.chat("tell me about Gpt canvas", stream=True)
156
+ response = bot.chat("hi", stream=True)
157
157
  for chunk in response:
158
158
  print(chunk, end="", flush=True)
159
159
  except Exception as e:
@@ -220,6 +220,6 @@ class Genspark(Provider):
220
220
  if __name__ == '__main__':
221
221
  from rich import print
222
222
  ai = Genspark()
223
- response = ai.chat(input(">>> "))
223
+ response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
224
224
  for chunk in response:
225
225
  print(chunk, end="", flush=True)
@@ -32,6 +32,7 @@ class Julius(Provider):
32
32
  ]
33
33
  def __init__(
34
34
  self,
35
+ api_key: str,
35
36
  is_conversation: bool = True,
36
37
  max_tokens: int = 600,
37
38
  timeout: int = 30,
@@ -69,12 +70,12 @@ class Julius(Provider):
69
70
  self.timeout = timeout
70
71
  self.last_response = {}
71
72
  self.model = model
73
+ self.api_key = api_key
72
74
  self.headers = {
73
- "authorization": "Bearer",
75
+ "authorization": f"Bearer {self.api_key}",
74
76
  "content-type": "application/json",
75
77
  "conversation-id": str(uuid.uuid4()),
76
78
  "interactive-charts": "true",
77
- "is-demo": "temp_14aabbb1-95bc-4203-a678-596258d6fdf3",
78
79
  "is-native": "false",
79
80
  "orient-split": "true",
80
81
  "request-id": str(uuid.uuid4()),
@@ -216,7 +217,7 @@ class Julius(Provider):
216
217
  return response["text"]
217
218
  if __name__ == '__main__':
218
219
  from rich import print
219
- ai = Julius(timeout=5000)
220
+ ai = Julius(api_key="",timeout=5000)
220
221
  response = ai.chat("write a poem about AI", stream=True)
221
222
  for chunk in response:
222
223
  print(chunk, end="", flush=True)
@@ -248,6 +248,6 @@ class LearnFast(Provider):
248
248
  if __name__ == "__main__":
249
249
  from rich import print
250
250
  ai = LearnFast()
251
- response = ai.chat(input(">>> "), image_path="photo_2024-07-06_22-19-42.jpg")
251
+ response = ai.chat(input(">>> "), image_path=None)
252
252
  for chunk in response:
253
253
  print(chunk, end="", flush=True)