webscout 5.2__py3-none-any.whl → 5.3__py3-none-any.whl

This diff shows the contents of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of webscout might be problematic.

@@ -4,7 +4,7 @@ import pygame
 import time
 from pathlib import Path
 from typing import Generator
-
+from playsound import playsound
 from webscout import exceptions
 from webscout.AIbase import TTSProvider
 
@@ -91,13 +91,17 @@ class Voicepods(TTSProvider):
             raise exceptions.FailedToGenerateResponseError(f"Error saving audio: {e}")
 
     def play_audio(self, filename: str):
-        """Plays the audio file using pygame."""
+        """
+        Plays an audio file using playsound.
+
+        Args:
+            filename (str): The path to the audio file.
+
+        Raises:
+            RuntimeError: If there is an error playing the audio.
+        """
         try:
-            pygame.mixer.init()
-            pygame.mixer.music.load(filename)
-            pygame.mixer.music.play()
-            while pygame.mixer.music.get_busy():
-                pygame.time.Clock().tick(10)
+            playsound(filename)
         except Exception as e:
            raise RuntimeError(f"Error playing audio: {e}")
 
@@ -20,7 +20,6 @@ from .Phind import Phindv2
 from .Phind import AsyncPhindv2
 from .ai4chat import *
 from .Gemini import GEMINI
-from .Berlin4h import Berlin4h
 from .Poe import POE
 from .BasedGPT import BasedGPT
 from .Deepseek import DeepSeek
@@ -34,7 +33,6 @@ from .DARKAI import *
 from .koala import *
 from .RUBIKSAI import *
 from .meta import *
-from .liaobots import *
 from .DiscordRocks import *
 from .felo_search import *
 from .xdash import *
@@ -47,6 +45,9 @@ from .NetFly import *
 from .EDITEE import *
 from .TeachAnything import *
 from .AI21 import *
+from .Chatify import *
+from .x0gpt import *
+from .cerebras import *
 __all__ = [
     'ThinkAnyAI',
     'Farfalle',
@@ -66,7 +67,6 @@ __all__ = [
     'AsyncPhindSearch',
     'Felo',
     'GEMINI',
-    'Berlin4h',
     'POE',
     'BasedGPT',
     'DeepSeek',
@@ -84,7 +84,6 @@ __all__ = [
     'KOALA',
     'RUBIKSAI',
     'Meta',
-    'LiaoBots',
     'DiscordRocks',
     'PiAI',
     'XDASH',
@@ -97,4 +96,7 @@ __all__ = [
     'Editee',
     'TeachAnything',
     'AI21',
+    'Chatify',
+    'X0GPT',
+    'Cerebras'
 ]
@@ -1,7 +1,7 @@
 import requests
 import json
 import html
-from re import sub
+import re
 from typing import Any, Dict
 
 from webscout.AIutel import Optimizers
@@ -94,7 +94,7 @@ class AI4Chat(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False, # Streaming is not supported by AI4Chat
+        stream: bool = False,
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -137,9 +137,17 @@ class AI4Chat(Provider):
         response_data = response.json()
         message_content = response_data.get('message', 'No message found')
 
-        # Decode HTML entities and remove HTML tags
+        # Decode HTML entities
         decoded_message = html.unescape(message_content)
-        cleaned_text = sub('<[^<]+?>', '', decoded_message)
+
+        # Remove HTML tags while preserving newlines and list structure
+        cleaned_text = re.sub(r'<p>(.*?)</p>', r'\1\n\n', decoded_message)
+        cleaned_text = re.sub(r'<ol>|</ol>', '', cleaned_text)
+        cleaned_text = re.sub(r'<li><p>(.*?)</p></li>', r'• \1\n', cleaned_text)
+        cleaned_text = re.sub(r'</?[^>]+>', '', cleaned_text)
+
+        # Remove extra newlines
+        cleaned_text = re.sub(r'\n{3,}', '\n\n', cleaned_text.strip())
 
         self.last_response.update(dict(text=cleaned_text))
         self.conversation.update_chat_history(prompt, cleaned_text)
@@ -183,11 +191,9 @@ class AI4Chat(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
+
 if __name__ == "__main__":
     from rich import print
-
     ai = AI4Chat()
-    # Stream the response
     response = ai.chat(input(">>> "))
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print(response)
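
The new tag-stripping logic is order-sensitive, so a quick offline sketch may help; the sample HTML below is made up, and the snippet simply replays the substitution chain added above outside the class:

import html
import re

# Made-up response HTML, shaped like the list markup the cleanup targets.
message_content = "<p>Hello &amp; welcome</p><ol><li><p>First</p></li><li><p>Second</p></li></ol>"

# Same chain as in ask(): unescape entities, then strip tags while keeping structure.
decoded_message = html.unescape(message_content)
cleaned_text = re.sub(r'<p>(.*?)</p>', r'\1\n\n', decoded_message)
cleaned_text = re.sub(r'<ol>|</ol>', '', cleaned_text)
cleaned_text = re.sub(r'<li><p>(.*?)</p></li>', r'• \1\n', cleaned_text)
cleaned_text = re.sub(r'</?[^>]+>', '', cleaned_text)
cleaned_text = re.sub(r'\n{3,}', '\n\n', cleaned_text.strip())

print(cleaned_text)  # "Hello & welcome", "First", "Second", each separated by a blank line

Note that the first substitution already consumes every <p>…</p> pair, so in this ordering the <li><p>…</p></li> bullet rule rarely has anything left to match; it is the generic tag-removal pass that actually drops the <li> markers.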
@@ -0,0 +1,199 @@
+import json
+import requests
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from typing import Dict, Any
+
+class Cerebras(Provider):
+    """
+    A class to interact with the Cerebras AI API.
+    """
+
+    AVAILABLE_MODELS = ["llama3.1-8b", "llama3.1-70b"]
+
+    def __init__(
+        self,
+        api_key: str,
+        is_conversation: bool = True,
+        max_tokens: int = 4096,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "llama3.1-8b",
+        system_prompt: str = "Please try to provide useful, helpful and actionable answers.",
+    ):
+        """
+        Initializes the Cerebras AI API with given parameters.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Available models are: {', '.join(self.AVAILABLE_MODELS)}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://api.cerebras.ai/v1/chat/completions"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.headers = {
+            "accept": "application/json",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "authorization": f"Bearer {api_key}",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://inference.cerebras.ai",
+            "referer": "https://inference.cerebras.ai/",
+            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "model": self.model,
+            "stream": True,
+            "temperature": 0.2,
+            "top_p": 1,
+            "max_tokens": self.max_tokens_to_sample
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                )
+
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    line_data = line.decode('utf-8').strip()
+                    if line_data.startswith("data: "):
+                        json_str = line_data[6:]
+                        if json_str != "[DONE]":
+                            chunk = json.loads(json_str)
+                            if 'choices' in chunk and 'delta' in chunk['choices'][0]:
+                                content = chunk['choices'][0]['delta'].get('content', '')
+                                full_response += content
+                                yield content if raw else dict(text=content)
+                        else:
+                            break
+
+            self.last_response.update(dict(text=full_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            full_response = ""
+            for chunk in for_stream():
+                if isinstance(chunk, dict):
+                    full_response += chunk['text']
+                else:
+                    full_response += chunk
+            return dict(text=full_response)
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == '__main__':
+    from rich import print
+
+    # You can replace this with your actual API key
+    api_key = "YOUR_API_KEY_HERE"
+
+    ai = Cerebras(api_key=api_key)
+    response = ai.chat(input(">>> "), stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
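
Two details of the new Cerebras provider are easy to miss: ask() always sends "stream": True and assembles the text locally for the non-streaming path, and the response is parsed as OpenAI-style server-sent events terminated by a [DONE] sentinel. A minimal offline sketch of that parsing, using canned lines rather than a real API call:

import json

# Canned SSE lines imitating the framing the provider reads; content is made up.
canned_lines = [
    b'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    b'data: {"choices": [{"delta": {"content": "lo"}}]}',
    b'data: [DONE]',
]

full_response = ""
for line in canned_lines:
    line_data = line.decode("utf-8").strip()
    if line_data.startswith("data: "):
        json_str = line_data[6:]
        if json_str == "[DONE]":
            break
        chunk = json.loads(json_str)
        if "choices" in chunk and "delta" in chunk["choices"][0]:
            full_response += chunk["choices"][0]["delta"].get("content", "")

print(full_response)  # -> Hello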
@@ -41,19 +41,6 @@ class Felo(Provider):
         history_offset: int = 10250,
         act: str = None,
     ):
-        """Instantiates Felo
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-        """
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -106,22 +93,6 @@ class Felo(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -156,28 +127,31 @@ class Felo(Provider):
             )
 
             streaming_text = ""
-            for value in response.iter_lines(
-                decode_unicode=True,
-                chunk_size=self.stream_chunk_size,
-                delimiter="\n",
-            ):
-                try:
-                    if bool(value) and value.startswith('data:'):
-                        data = json.loads(value[len('data:'):].strip())
-                        if data['type'] == 'a':
-                            streaming_text += data['data']['k']
-                            resp = dict(text=streaming_text)
-                            self.last_response.update(resp)
-                            yield value if raw else resp
-                except json.decoder.JSONDecodeError:
-                    pass
+            for line in response.iter_lines(decode_unicode=True):
+                if line.startswith('data:'):
+                    try:
+                        data = json.loads(line[5:].strip())
+                        if data['type'] == 'answer' and 'text' in data['data']:
+                            new_text = data['data']['text']
+                            if len(new_text) > len(streaming_text):
+                                delta = new_text[len(streaming_text):]
+                                streaming_text = new_text
+                                resp = dict(text=delta)
+                                self.last_response.update(dict(text=streaming_text))
+                                yield line if raw else resp
+                    except json.JSONDecodeError:
+                        pass
+
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
            )
 
         def for_non_stream():
-            for _ in for_stream():
-                pass
+            full_response = ""
+            for chunk in for_stream():
+                if not raw:
+                    full_response += chunk['text']
+            self.last_response = dict(text=full_response)
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
@@ -189,16 +163,6 @@ class Felo(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
@@ -218,21 +182,17 @@ class Felo(Provider):
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
         assert isinstance(response, dict), "Response should be of dict data-type only"
 
-        text = re.sub(r'\[\[\d+\]\]', '', response["text"])
-        return text
+        if "text" in response:
+            text = re.sub(r'\[\[\d+\]\]', '', response["text"])
+            return text
+        else:
+            return ""  # Return an empty string if no text is found
+
 if __name__ == '__main__':
     from rich import print
     ai = Felo()
-    response = ai.chat(input(">>> "))
+    response = ai.chat(input(">>> "), stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
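
The reworked Felo stream handler no longer accumulates 'k' fragments; judging from the new parsing code, each 'answer' event carries the full text so far, and the provider now yields only the newly appended part. A small offline sketch of that delta logic, with invented snapshots standing in for successive data['data']['text'] values:

# Cumulative snapshots, as the new handler appears to receive them.
snapshots = ["How", "How may I", "How may I assist you today?"]

streaming_text = ""
for new_text in snapshots:
    if len(new_text) > len(streaming_text):
        delta = new_text[len(streaming_text):]  # only the newly added tail
        streaming_text = new_text
        print(delta, end="")
print()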
@@ -0,0 +1,181 @@
+from typing import Any, Dict
+from uuid import uuid4
+import requests
+import json
+import re
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class X0GPT(Provider):
+    """
+    A class to interact with the x0-gpt.devwtf.in API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """
+        Initializes the X0GPT API with given parameters.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "authority": "x0-gpt.devwtf.in",
+            "method": "POST",
+            "path": "/api/stream/reply",
+            "scheme": "https",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-length": "114",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://x0-gpt.devwtf.in",
+            "priority": "u=1, i",
+            "referer": "https://x0-gpt.devwtf.in/chat",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the x0-gpt.devwtf.in API and returns the response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {
+                    "role": "user",
+                    "content": conversation_prompt
+                }
+            ],
+            "chatId": uuid4().hex,
+            "namespace": None
+        }
+
+        def for_stream():
+            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            streaming_response = ""
+            for line in response.iter_lines(decode_unicode=True, chunk_size=64):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        streaming_response += content
+                        yield content if raw else dict(text=streaming_response)
+            self.last_response.update(dict(text=streaming_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the X0GPT API.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response).replace("\n", "\n\n")
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            ).replace("\n", "\n\n")
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    from rich import print
+    ai = X0GPT()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
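
Unlike the other providers, X0GPT never parses JSON out of the stream: each line that carries text is framed as 0:"<chunk>" (a format that resembles the numbered-part streams emitted by some Next.js chat backends), and a regex pulls the chunk out. An offline sketch of that extraction with made-up lines:

import re

# Made-up stream lines in the 0:"..." framing the provider matches.
lines = ['0:"Hello"', '0:" world"', 'e:{"finishReason":"stop"}']

streaming_response = ""
for line in lines:
    match = re.search(r'0:"(.*?)"', line)
    if match:
        streaming_response += match.group(1)

print(streaming_response)  # -> Hello world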
webscout/__init__.py CHANGED
@@ -2,8 +2,8 @@ from .webscout_search import WEBS
 from .webscout_search_async import AsyncWEBS
 from .version import __version__
 from .DWEBS import *
-from .transcriber import transcriber
-from .voice import play_audio
+from .transcriber import *
+from .tempid import *
 from .websx_search import WEBSX
 from .LLM import VLM, LLM
 from .YTdownloader import *
webscout/exceptions.py CHANGED
@@ -21,4 +21,5 @@ class FacebookInvalidCredentialsException(Exception):
 
 
 class FacebookRegionBlocked(Exception):
-    pass
+    pass
+