webscout 6.1-py3-none-any.whl → 6.2b0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (44)
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/functioncall.py +2 -2
  3. webscout/Extra/autollama.py +37 -20
  4. webscout/Local/utils.py +37 -12
  5. webscout/Provider/Amigo.py +50 -37
  6. webscout/Provider/Deepseek.py +7 -6
  7. webscout/Provider/EDITEE.py +2 -2
  8. webscout/Provider/GPTWeb.py +1 -1
  9. webscout/Provider/NinjaChat.py +2 -2
  10. webscout/Provider/OLLAMA.py +1 -1
  11. webscout/Provider/Perplexity.py +1 -1
  12. webscout/Provider/Reka.py +12 -5
  13. webscout/Provider/TTI/AIuncensored.py +103 -0
  14. webscout/Provider/TTI/__init__.py +2 -2
  15. webscout/Provider/TeachAnything.py +0 -3
  16. webscout/Provider/__init__.py +4 -10
  17. webscout/Provider/cerebras.py +125 -118
  18. webscout/Provider/cleeai.py +1 -1
  19. webscout/Provider/felo_search.py +1 -1
  20. webscout/Provider/gaurish.py +41 -2
  21. webscout/Provider/geminiprorealtime.py +1 -1
  22. webscout/Provider/genspark.py +1 -1
  23. webscout/Provider/julius.py +4 -3
  24. webscout/Provider/learnfastai.py +1 -1
  25. webscout/Provider/promptrefine.py +3 -1
  26. webscout/Provider/turboseek.py +3 -8
  27. webscout/Provider/tutorai.py +1 -1
  28. webscout/__init__.py +2 -43
  29. webscout/tempid.py +4 -73
  30. webscout/version.py +1 -1
  31. webscout/webai.py +1 -1
  32. {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/METADATA +36 -119
  33. {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/RECORD +37 -43
  34. webscout/Provider/BasedGPT.py +0 -214
  35. webscout/Provider/ChatHub.py +0 -209
  36. webscout/Provider/TTI/amigo.py +0 -148
  37. webscout/Provider/aigames.py +0 -213
  38. webscout/Provider/bixin.py +0 -264
  39. webscout/Provider/xdash.py +0 -182
  40. webscout/websx_search.py +0 -19
  41. {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/LICENSE.md +0 -0
  42. {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/WHEEL +0 -0
  43. {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/entry_points.txt +0 -0
  44. {webscout-6.1.dist-info → webscout-6.2b0.dist-info}/top_level.txt +0 -0
webscout/Provider/cerebras.py CHANGED
@@ -1,22 +1,27 @@
- import json
+ import re
  import requests
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
- from webscout.AIbase import Provider
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, List, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
- from typing import Dict, Any
+ from fake_useragent import UserAgent
+ from cerebras.cloud.sdk import Cerebras
+

  class Cerebras(Provider):
      """
-     A class to interact with the Cerebras AI API.
+     A class to interact with the Cerebras API using a cookie for authentication.
      """

-     AVAILABLE_MODELS = ["llama3.1-8b", "llama3.1-70b"]
-
      def __init__(
          self,
-         api_key: str,
          is_conversation: bool = True,
-         max_tokens: int = 4096,
+         max_tokens: int = 2049,
          timeout: int = 30,
          intro: str = None,
          filepath: str = None,
@@ -24,47 +29,39 @@ class Cerebras(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "llama3.1-8b",
-         system_prompt: str = "Please try to provide useful, helpful and actionable answers.",
+         cookie_path: str = "cookie.json",  # Path to cookie file
+         model: str = "llama3.1-8b",  # Default model
+         system_prompt: str = "You are a helpful assistant.",
      ):
          """
-         Initializes the Cerebras AI API with given parameters.
+         Initializes the Cerebras client with the provided cookie.
+
+         Args:
+             cookie_path (str): Path to the cookie JSON file.
+             model (str, optional): Model name to use. Defaults to 'llama3.1-8b'.
+             system_prompt (str, optional): The system prompt to send with every request. Defaults to "You are a helpful assistant.".
+
+         Raises:
+             FileNotFoundError: If the cookie file is not found.
+             json.JSONDecodeError: If the cookie file has an invalid JSON format.
+             requests.exceptions.RequestException: If there's an error retrieving the API key.
          """
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Available models are: {', '.join(self.AVAILABLE_MODELS)}")
+         self.api_key = self.get_demo_api_key(cookie_path)
+         self.client = Cerebras(api_key=self.api_key)
+         self.model = model
+         self.system_prompt = system_prompt

-         self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://api.cerebras.ai/v1/chat/completions"
          self.timeout = timeout
          self.last_response = {}
-         self.model = model
-         self.system_prompt = system_prompt
-         self.headers = {
-             "accept": "application/json",
-             "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-             "authorization": f"Bearer {api_key}",
-             "content-type": "application/json",
-             "dnt": "1",
-             "origin": "https://inference.cerebras.ai",
-             "referer": "https://inference.cerebras.ai/",
-             "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": '"Windows"',
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "same-site",
-             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
-         }

          self.__available_optimizers = (
              method
              for method in dir(Optimizers)
              if callable(getattr(Optimizers, method)) and not method.startswith("__")
          )
-         self.session.headers.update(self.headers)
+
          Conversation.intro = (
              AwesomePrompts().get_act(
                  act, raise_not_found=True, default=None, case_insensitive=True
@@ -76,7 +73,63 @@ class Cerebras(Provider):
              is_conversation, self.max_tokens_to_sample, filepath, update_file
          )
          self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
+
+
+     @staticmethod
+     def extract_query(text: str) -> str:
+         """
+         Extracts the first code block from the given text.
+         """
+         pattern = r"```(.*?)```"
+         matches = re.findall(pattern, text, re.DOTALL)
+         return matches[0].strip() if matches else text.strip()
+
+     @staticmethod
+     def refiner(text: str) -> str:
+         """Refines the input text by removing surrounding quotes."""
+         return text.strip('"')
+
+     def get_demo_api_key(self, cookie_path: str) -> str:
+         """Retrieves the demo API key using the provided cookie."""
+         try:
+             with open(cookie_path, "r") as file:
+                 cookies = {item["name"]: item["value"] for item in json.load(file)}
+         except FileNotFoundError:
+             raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
+         except json.JSONDecodeError:
+             raise json.JSONDecodeError("Invalid JSON format in the cookie file.")
+
+         headers = {
+             "Accept": "*/*",
+             "Accept-Language": "en-US,en;q=0.9",
+             "Content-Type": "application/json",
+             "Origin": "https://inference.cerebras.ai",
+             "Referer": "https://inference.cerebras.ai/",
+             "user-agent": UserAgent().random,
+         }
+
+         json_data = {
+             "operationName": "GetMyDemoApiKey",
+             "variables": {},
+             "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
+         }
+
+         try:
+             response = requests.post(
+                 "https://inference.cerebras.ai/api/graphql",
+                 cookies=cookies,
+                 headers=headers,
+                 json=json_data,
+                 timeout=self.timeout,
+             )
+             response.raise_for_status()
+             api_key = response.json()["data"]["GetMyDemoApiKey"]
+             return api_key
+         except requests.exceptions.RequestException as e:
+             raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
+         except KeyError:
+             raise exceptions.InvalidResponseError("API key not found in response.")
+

      def ask(
          self,
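For context: the new `get_demo_api_key` flow reads `cookie.json` as a browser-style cookie export, a JSON array of `{"name": ..., "value": ...}` objects, which the dict comprehension above flattens into the cookie mapping passed to `requests`. A minimal sketch of producing and consuming such a file (cookie names and values here are placeholders, not the real cookies set by inference.cerebras.ai):

import json

# Hypothetical cookie export; real names/values would come from a logged-in
# browser session on inference.cerebras.ai (e.g. via a cookie-export extension).
cookies_export = [
    {"name": "example-session-cookie", "value": "example-value"},
    {"name": "another-cookie", "value": "..."},
]
with open("cookie.json", "w") as f:
    json.dump(cookies_export, f, indent=2)

# The same flattening the provider performs before posting to the GraphQL endpoint:
with open("cookie.json") as f:
    cookies = {item["name"]: item["value"] for item in json.load(f)}
print(cookies)  # {'example-session-cookie': 'example-value', 'another-cookie': '...'}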
@@ -85,7 +138,8 @@ class Cerebras(Provider):
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> Dict[str, Any]:
+     ) -> Union[Dict, Generator]:
+
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
@@ -93,60 +147,35 @@ class Cerebras(Provider):
                      conversation_prompt if conversationally else prompt
                  )
              else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

-         payload = {
-             "messages": [
-                 {"role": "system", "content": self.system_prompt},
-                 {"role": "user", "content": conversation_prompt},
-             ],
-             "model": self.model,
-             "stream": True,
-             "temperature": 0.2,
-             "top_p": 1,
-             "max_tokens": self.max_tokens_to_sample
-         }
+         messages = [
+             {"content": self.system_prompt, "role": "system"},
+             {"content": conversation_prompt, "role": "user"},
+         ]

          def for_stream():
-             response = self.session.post(
-                 self.api_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+             try:
+                 response = self.client.chat.completions.create(
+                     model=self.model, messages=messages, stream=True
                  )
+                 for choice in response.choices:
+                     if choice.delta.content:
+                         yield dict(text=choice.delta.content)
+                 self.last_response.update({"text": response.choices[0].message.content})

-             full_response = ""
-             for line in response.iter_lines():
-                 if line:
-                     line_data = line.decode('utf-8').strip()
-                     if line_data.startswith("data: "):
-                         json_str = line_data[6:]
-                         if json_str != "[DONE]":
-                             chunk = json.loads(json_str)
-                             if 'choices' in chunk and 'delta' in chunk['choices'][0]:
-                                 content = chunk['choices'][0]['delta'].get('content', '')
-                                 full_response += content
-                                 yield content if raw else dict(text=content)
-                     else:
-                         break
-
-             self.last_response.update(dict(text=full_response))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Error during stream: {e}")

          def for_non_stream():
-             full_response = ""
-             for chunk in for_stream():
-                 if isinstance(chunk, dict):
-                     full_response += chunk['text']
-                 else:
-                     full_response += chunk
-             return dict(text=full_response)
+             try:
+                 response = self.client.chat.completions.create(
+                     model=self.model, messages=messages
+                 )
+                 self.last_response.update({"text": response.choices[0].message.content})
+                 return self.last_response
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Error during non-stream: {e}")

          return for_stream() if stream else for_non_stream()

@@ -156,44 +185,22 @@ class Cerebras(Provider):
          stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> str:
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
+     ) -> Union[str, Generator]:
+         return self.get_message(
+             self.ask(
+                 prompt, stream, optimizer=optimizer, conversationally=conversationally
              )
-
-         return for_stream() if stream else for_non_stream()
+         )

      def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
+         """Retrieves message only from response"""
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]

- if __name__ == '__main__':
+
+ if __name__ == "__main__":
      from rich import print
-
-     # You can replace this with your actual API key
-     api_key = "YOUR_API_KEY_HERE"
-
-     ai = Cerebras(api_key=api_key)
-     response = ai.chat(input(">>> "), stream=True)
+     cerebras = Cerebras(cookie_path='cookie.json', model='llama3.1-8b', system_prompt="You are a helpful AI assistant.")
+     response = cerebras.chat("What is the meaning of life?", sys_prompt='', stream=True)
      for chunk in response:
-         print(chunk, end="", flush=True)
+         print(chunk, end="", flush=True)
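Stepping back from the cerebras.py hunks above: the rewritten `for_stream` delegates SSE handling to the `cerebras.cloud.sdk` client instead of hand-parsing `data:` lines. For reference, OpenAI-style SDK clients of this kind conventionally return an iterator of chunk objects when `stream=True`, each carrying an incremental `delta`. A hedged sketch of that conventional consumption pattern (not code from the package; assumes `client` is a configured `cerebras.cloud.sdk.Cerebras` instance following the usual streaming shape):

def collect_stream(client, model: str, messages: list) -> str:
    """Consume an OpenAI-style streaming chat completion into one string."""
    text = ""
    stream = client.chat.completions.create(model=model, messages=messages, stream=True)
    for chunk in stream:  # one chunk per incremental token batch
        delta = chunk.choices[0].delta.content
        if delta:
            text += delta
            print(delta, end="", flush=True)
    return text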
webscout/Provider/cleeai.py CHANGED
@@ -207,6 +207,6 @@ class Cleeai(Provider):
  if __name__ == "__main__":
      from rich import print
      ai = Cleeai(timeout=5000)
-     response = ai.chat("write a poem about AI", stream=True)
+     response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/felo_search.py CHANGED
@@ -175,6 +175,6 @@ class Felo(Provider):
  if __name__ == '__main__':
      from rich import print
      ai = Felo()
-     response = ai.chat("tell me about HelpingAI-9B", stream=True)
+     response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/gaurish.py CHANGED
@@ -2,6 +2,7 @@ import requests
  import json
  import os
  from typing import Any, Dict, Optional, Generator, List, Union
+ import uuid

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
@@ -31,9 +32,47 @@ class GaurishCerebras(Provider):
          """Initializes the Gaurish Cerebras API client."""
          self.url = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"
          self.headers = {
-             "Authorization": "Bearer 123",
              "Content-Type": "application/json",
              "Accept": "text/event-stream",
+             "access-control-allow-credentials": "true",
+             "access-control-allow-headers": "*",
+             "access-control-allow-methods": "*",
+             "access-control-allow-origin": "*",
+             "cache-control": "public, max-age=0, must-revalidate",
+             "referrer-policy": "strict-origin-when-cross-origin",
+             "content-type": "text/event-stream; charset=utf-8",
+             "strict-transport-security": "max-age=3600; includeSubDomains",
+             "x-content-type-options": "nosniff",
+             "x-matched-path": "/api/cerebras/[...path]",
+             "x-ratelimit-limit-requests-day": "30000",
+             "x-ratelimit-limit-tokens-minute": "60000",
+             "x-ratelimit-remaining-requests-day": "29984",
+             "x-ratelimit-remaining-tokens-minute": "60000",
+             "x-ratelimit-reset-requests-day": "24092.23299384117",
+             "x-ratelimit-reset-tokens-minute": "32.232993841171265",
+             "x-request-id": "0vWYzSEvd9Ytk5Zvl8NGRfT_Ekjm0ErInwwxlihBPyqUBAjJpyXwCg==",
+             "x-vercel-id": "bom1::nsbfd-1729703907288-16e74bb1db50",
+             "accept": "application/json",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "dnt": "1",
+             "origin": "https://chat.gaurish.xyz",
+             "priority": "u=1, i",
+             "referer": "https://chat.gaurish.xyz/",
+             "sec-ch-ua": "\"Chromium\";v=\"130\", \"Microsoft Edge\";v=\"130\", \"Not?A_Brand\";v=\"99\"",
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": "\"Windows\"",
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-site",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0",
+             "x-stainless-arch": "unknown",
+             "x-stainless-lang": "js",
+             "x-stainless-os": "Unknown",
+             "x-stainless-package-version": "4.67.3",
+             "x-stainless-retry-count": "0",
+             "x-stainless-runtime": "browser:chrome",
+             "x-stainless-runtime-version": "130.0.0",
          }
          self.session = requests.Session()
          self.session.headers.update(self.headers)
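Worth noting: many of the headers added above (`x-ratelimit-*`, `x-request-id`, `x-vercel-id`, `cache-control`) are values one would normally see in a proxy's response rather than in a request, apparently captured wholesale from a browser session. For an OpenAI-compatible chat-completions endpoint of this shape, a minimal request typically needs far less; a hypothetical sketch (payload shape and model name assumed, not taken from the package):

import requests

url = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"  # endpoint from the diff above
payload = {
    "model": "llama3.1-8b",  # hypothetical; whichever model the proxy accepts
    "messages": [{"role": "user", "content": "hi"}],
    "stream": False,
}
headers = {"Content-Type": "application/json", "Accept": "application/json"}

resp = requests.post(url, json=payload, headers=headers, timeout=30)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])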
@@ -165,4 +204,4 @@ if __name__ == "__main__":
          for chunk in response:
              print(chunk, end="", flush=True)
      except Exception as e:
-         print(f"An error occurred: {e}")
+         print(f"An error occurred: {e}")
webscout/Provider/geminiprorealtime.py CHANGED
@@ -153,7 +153,7 @@ if __name__ == "__main__":
      from rich import print
      bot = GeminiPro()
      try:
-         response = bot.chat("tell me about Gpt canvas", stream=True)
+         response = bot.chat("hi", stream=True)
          for chunk in response:
              print(chunk, end="", flush=True)
      except Exception as e:
webscout/Provider/genspark.py CHANGED
@@ -220,6 +220,6 @@ class Genspark(Provider):
  if __name__ == '__main__':
      from rich import print
      ai = Genspark()
-     response = ai.chat(input(">>> "))
+     response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/julius.py CHANGED
@@ -32,6 +32,7 @@ class Julius(Provider):
      ]
      def __init__(
          self,
+         api_key: str,
          is_conversation: bool = True,
          max_tokens: int = 600,
          timeout: int = 30,
@@ -69,12 +70,12 @@ class Julius(Provider):
          self.timeout = timeout
          self.last_response = {}
          self.model = model
+         self.api_key = api_key
          self.headers = {
-             "authorization": "Bearer",
+             "authorization": f"Bearer {self.api_key}",
              "content-type": "application/json",
              "conversation-id": str(uuid.uuid4()),
              "interactive-charts": "true",
-             "is-demo": "temp_14aabbb1-95bc-4203-a678-596258d6fdf3",
              "is-native": "false",
              "orient-split": "true",
              "request-id": str(uuid.uuid4()),
@@ -216,7 +217,7 @@ class Julius(Provider):
          return response["text"]
  if __name__ == '__main__':
      from rich import print
-     ai = Julius(timeout=5000)
+     ai = Julius(api_key="",timeout=5000)
      response = ai.chat("write a poem about AI", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/learnfastai.py CHANGED
@@ -248,6 +248,6 @@ class LearnFast(Provider):
  if __name__ == "__main__":
      from rich import print
      ai = LearnFast()
-     response = ai.chat(input(">>> "), image_path="photo_2024-07-06_22-19-42.jpg")
+     response = ai.chat(input(">>> "), image_path=None)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/promptrefine.py CHANGED
@@ -12,7 +12,7 @@ class PromptRefine(Provider):
      """
      A class to interact with the PromptRefine API.
      """
-
+     AVAILABLE_MODELS = ["openai/gpt-4", "openai/gpt-4o", "openai/gpt-4-1106-preview"]
      def __init__(
          self,
          is_conversation: bool = True,
@@ -76,6 +76,8 @@ class PromptRefine(Provider):
          )
          self.conversation.history_offset = history_offset
          self.session.proxies = proxies
+         if self.model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {self.model}. Available models: {', '.join(self.AVAILABLE_MODELS)}")

      def ask(
          self,
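With this guard, an unsupported model now fails fast at construction time instead of at request time. A small sketch of the expected behavior (assuming the constructor exposes a `model` keyword, as the check on `self.model` suggests; the model string here is a hypothetical unsupported one):

from webscout.Provider.promptrefine import PromptRefine  # module path from the file list above

try:
    ai = PromptRefine(model="openai/gpt-3.5-turbo")  # not in AVAILABLE_MODELS
except ValueError as e:
    print(e)  # Invalid model: openai/gpt-3.5-turbo. Available models: openai/gpt-4, ...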
webscout/Provider/turboseek.py CHANGED
@@ -1,6 +1,4 @@
  import requests
-
-
  import json

  from webscout.AIutel import Optimizers
@@ -138,16 +136,13 @@ class TurboSeek(Provider):
                  raise exceptions.FailedToGenerateResponseError(
                      f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                  )
-             print(response.text)
              streaming_text = ""
              for value in response.iter_lines(
-                 decode_unicode=True,
                  chunk_size=self.stream_chunk_size,
-                 delimiter="\n",
              ):
                  try:
-                     if bool(value) and value.startswith("data: "):
-                         data = json.loads(value[6:])
+                     if value and value.startswith(b"data: "): #Check for bytes and decode
+                         data = json.loads(value[6:].decode('utf-8')) # Decode manually
                          if "text" in data:
                              streaming_text += data["text"]
                              resp = dict(text=streaming_text)
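The `iter_lines` change above follows from `requests` behavior: without `decode_unicode=True`, `Response.iter_lines()` yields `bytes`, so the SSE prefix must be matched as `b"data: "` and the payload decoded before `json.loads`. A self-contained illustration of the distinction:

import json

line = b'data: {"text": "hello"}'  # what iter_lines() yields when decode_unicode is off

try:
    line.startswith("data: ")  # comparing a str prefix against bytes
except TypeError as e:
    print(f"str prefix fails on bytes: {e}")

if line.startswith(b"data: "):  # bytes prefix + manual decode, as in the new code
    data = json.loads(line[6:].decode("utf-8"))
    print(data["text"])  # -> hello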
@@ -217,4 +212,4 @@ if __name__ == '__main__':
      ai = TurboSeek()
      response = ai.chat("hi")
      for chunk in response:
-         print(chunk, end="", flush=True)
+         print(chunk, end="", flush=True)
webscout/Provider/tutorai.py CHANGED
@@ -349,6 +349,6 @@ if __name__ == "__main__":
      from rich import print

      ai = TutorAI()
-     response = ai.chat(input(">>> "), attachment_path='photo_2024-07-06_22-19-42.jpg')
+     response = ai.chat(input(">>> "), attachment_path=None)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/__init__.py CHANGED
@@ -5,60 +5,19 @@ from .DWEBS import *
  from .transcriber import *
  from .requestsHTMLfix import *
  from .tempid import *
- from .websx_search import WEBSX
  from .LLM import VLM, LLM
  from .YTdownloader import *
  from .Bing_search import *
- import g4f
  from .YTdownloader import *
  from .Provider import *
  from .Provider.TTI import *
  from .Provider.TTS import *
- from .Extra import gguf
- from .Extra import autollama
- from .Extra import weather_ascii, weather
+ from .Extra import *
+
  from .Agents import *

  __repo__ = "https://github.com/OE-LUCIFER/Webscout"

- webai = [
-     "leo",
-     "openai",
-     "opengpt",
-     "koboldai",
-     "gemini",
-     "phind",
-     "blackboxai",
-     "g4fauto",
-     "perplexity",
-     "groq",
-     "reka",
-     "cohere",
-     "yepchat",
-     "you",
-     "xjai",
-     "thinkany",
-     "berlin4h",
-     "chatgptuk",
-     "auto",
-     "poe",
-     "basedgpt",
-     "deepseek",
-     "deepinfra",
-     "vtlchat",
-     "geminiflash",
-     "geminipro",
-     "ollama",
-     "andi",
-     "llama3"
- ]
-
- gpt4free_providers = [
-     provider.__name__ for provider in g4f.Provider.__providers__ # if provider.working
- ]
-
- available_providers = webai + gpt4free_providers
-

  import logging
  logging.getLogger("webscout").addHandler(logging.NullHandler())