webscout 5.9__py3-none-any.whl → 6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -160,11 +160,10 @@ class YouChat(Provider):
                     token = data.get('youChatToken', '')
                     if token:
                         streaming_text += token
-                        resp = dict(text=streaming_text)
-                        self.last_response.update(resp)
-                        yield value if raw else resp
+                        yield token if raw else dict(text=token)
                 except json.decoder.JSONDecodeError:
                     pass
+            self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
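The hunk above shows the pattern repeated across most providers in 6.0: while streaming, `ask` now yields only the newly received delta (`dict(text=token)`) instead of the whole accumulated text, and `last_response` is updated once after the loop. A minimal consumer sketch of the new behavior — the import path and prompt are illustrative, not taken from the diff:

    # Sketch: consuming a 6.0-style stream; the caller accumulates deltas itself.
    from webscout.Provider import YouChat  # assumed import path

    ai = YouChat(timeout=5000)
    full = ""
    for chunk in ai.ask("write a poem about AI", stream=True):
        delta = chunk["text"]   # 6.0: only the new token(s), not the running text
        full += delta
        print(delta, end="", flush=True)
    # Under 5.9, chunk["text"] held the entire response so far, so printing
    # chunks back to back duplicated earlier output.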
@@ -224,7 +223,7 @@ class YouChat(Provider):
         return response["text"]
 if __name__ == '__main__':
     from rich import print
-    ai = YouChat()
-    response = ai.chat("hi")
+    ai = YouChat(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/__init__.py CHANGED
@@ -16,10 +16,9 @@ from .Phind import PhindSearch
 from .Phind import Phindv2
 from .ai4chat import *
 from .Gemini import GEMINI
-from .Poe import POE
 from .BasedGPT import BasedGPT
 from .Deepseek import DeepSeek
-from .Deepinfra import DeepInfra, VLM, AsyncDeepInfra
+from .Deepinfra import DeepInfra
 from .Farfalle import *
 from .cleeai import *
 from .OLLAMA import OLLAMA
@@ -61,6 +60,8 @@ from .bixin import *
 from .ChatGPTES import *
 from .Amigo import *
 from .prefind import *
+from .bagoodex import *
+from .ChatHub import *
 __all__ = [
     'Farfalle',
     'LLAMA',
@@ -78,12 +79,9 @@ __all__ = [
     'PhindSearch',
     'Felo',
     'GEMINI',
-    'POE',
     'BasedGPT',
     'DeepSeek',
     'DeepInfra',
-    'VLM',
-    'AsyncDeepInfra',
     'AI4Chat',
     'Phindv2',
     'OLLAMA',
@@ -126,6 +124,8 @@ __all__ = [
     'ChatGPTES',
     'AmigoChat',
     'PrefindAI',
+    'Bagoodex',
+    'ChatHub',
     # 'LearnFast',
 
 
@@ -195,5 +195,6 @@ class AI4Chat(Provider):
 if __name__ == "__main__":
     from rich import print
     ai = AI4Chat()
-    response = ai.chat(input(">>> "))
-    print(response)
+    response = ai.chat("write me poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
webscout/Provider/bagoodex.py ADDED
@@ -0,0 +1,145 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+class Bagoodex(Provider):
+    """
+    A class to interact with the Bagoodex API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,  # Set a reasonable default
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Initializes the Bagoodex API client."""
+        self.url = "https://bagoodex.io/front-api/chat"
+        self.headers = {"Content-Type": "application/json"}
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)  # Use provided proxies
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator:
+        """Sends a chat completion request to the Bagoodex API."""
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+
+        payload = {
+            "prompt": "You are AI",  # This seems to be required by the API
+            "messages": [{"content": "Hi, this is chatgpt, let's talk", "role": "assistant"}],
+            "input": conversation_prompt,
+        }
+
+        def for_stream():
+            try:
+                response = self.session.post(self.url, json=payload, headers=self.headers, timeout=self.timeout)
+                response.raise_for_status()
+                text = response.text
+                self.last_response.update({"text": text})
+
+                if stream:
+                    for char in text:
+                        yield {"text": char}  # Yielding one character at a time for streaming
+                else:
+                    yield {"text": text}
+
+            except (requests.exceptions.RequestException, json.JSONDecodeError) as e:  # Catch JSON errors too
+                raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+
+        def for_non_stream():
+            for _ in for_stream(): pass
+            return self.last_response
+
+
+        return for_stream() if stream else for_non_stream()
+
+
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = Bagoodex()
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
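Note that despite the streaming interface, `Bagoodex.ask` issues a single blocking POST and, when `stream=True`, replays the already-downloaded body one character at a time, so `chat(..., stream=True)` yields single characters. A short sketch, assuming the import path:

    # Sketch: Bagoodex "streams" by re-chunking a completed response per character.
    from webscout.Provider import Bagoodex  # assumed import path

    ai = Bagoodex()
    pieces = list(ai.chat("hello", stream=True))  # each piece is one character
    print("".join(pieces))  # same text a non-streaming call would return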
@@ -174,7 +174,7 @@ class Bixin(Provider):
                 if text.startswith(previous_text):
                     new_text = text[len(previous_text):]
                     full_response += new_text
-                    yield new_text if raw else dict(text=full_response)
+                    yield new_text if raw else dict(text=new_text)
                     previous_text = text
                 else:
                     full_response += text
@@ -258,7 +258,7 @@ class Bixin(Provider):
 if __name__ == "__main__":
     from rich import print
 
-    ai = Bixin()
-    response = ai.chat(input(">>> "))
+    ai = Bixin(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -142,7 +142,7 @@ class Cleeai(Provider):
             full_response = ''
             for chunk in response.iter_content(chunk_size=self.stream_chunk_size):
                 full_response += chunk.decode('utf-8')
-                yield chunk.decode('utf-8') if raw else dict(text=full_response)
+                yield chunk.decode('utf-8') if raw else dict(text=chunk.decode('utf-8'))
 
             self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
@@ -206,7 +206,7 @@ class Cleeai(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    ai = Cleeai()
-    response = ai.chat("hi")
+    ai = Cleeai(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/elmo.py CHANGED
@@ -1,7 +1,4 @@
 import requests
-import json
-import textwrap
-
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
@@ -169,7 +166,7 @@ class Elmo(Provider):
                     )
                     full_response += formatted_output
                     self.last_response.update(dict(text=full_response))
-                    yield formatted_output if raw else dict(text=full_response)
+                    yield formatted_output if raw else dict(text=formatted_output)
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
@@ -232,6 +229,6 @@ class Elmo(Provider):
 if __name__ == "__main__":
     from rich import print
     ai = Elmo()
-    response = ai.chat("hi")
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -66,22 +66,13 @@ class Julius(Provider):
         self.last_response = {}
         self.model = model
         self.headers = {
-            "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "authorization": "Bearer",
-            "content-length": "206",
             "content-type": "application/json",
             "conversation-id": str(uuid.uuid4()),
-            "dnt": "1",
             "interactive-charts": "true",
             "is-demo": "temp_14aabbb1-95bc-4203-a678-596258d6fdf3",
             "is-native": "false",
             "orient-split": "true",
-            "origin": "https://julius.ai",
-            "platform": "undefined",
-            "priority": "u=1, i",
-            "referer": "https://julius.ai/",
             "request-id": str(uuid.uuid4()),
             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
             "visitor-id": str(uuid.uuid4())
@@ -165,7 +156,7 @@ class Julius(Provider):
                     json_line = json.loads(line)
                     content = json_line['content']
                     streaming_response += content
-                    yield content if raw else dict(text=streaming_response)
+                    yield content if raw else dict(text=content)
                 except:
                     continue
             self.last_response.update(dict(text=streaming_response))
@@ -174,30 +165,13 @@ class Julius(Provider):
             )
 
         def for_non_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, headers=self.headers, timeout=self.timeout
-            )
 
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason})"
-                )
-            full_content = ""
-            for line in response.text.splitlines():
-                try:
-                    data = json.loads(line)
-                    if "content" in data:
-                        full_content += data['content']
-                except json.JSONDecodeError:
-                    pass
-            self.last_response.update(dict(text=full_content))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            for _ in for_stream():
+                pass
             return self.last_response
 
-        return for_stream() if stream else for_non_stream()
 
+        return for_stream() if stream else for_non_stream()
     def chat(
         self,
         prompt: str,
@@ -234,19 +208,11 @@ class Julius(Provider):
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 if __name__ == '__main__':
     from rich import print
-    ai = Julius()
-    response = ai.chat("hi")
+    ai = Julius(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -147,7 +147,7 @@ class LlamaTutor(Provider):
                         json_data = json.loads(decoded_line[6:])
                         if "text" in json_data:
                             full_response += json_data["text"]
-                            yield json_data["text"] if raw else dict(text=full_response)
+                            yield json_data["text"] if raw else dict(text=json_data["text"])
 
             self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
@@ -217,6 +217,6 @@ if __name__ == "__main__":
     from rich import print
 
     ai = LlamaTutor()
-    response = ai.chat(input(">>> "))
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -161,8 +161,8 @@ class PrefindAI(Provider):
                    (self.model == "claude" and model == 'OPENROUTER_CLAUDE'):
                     content = data['chunk']['content']
                     if content:
-                        streaming_text += content + ("\n" if stream else "")
-                        resp = dict(text=streaming_text)
+                        streaming_text += content #+ ("\n" if stream else "")
+                        resp = dict(text=content)
                         self.last_response.update(resp)
                         yield resp if raw else resp
             self.conversation.update_chat_history(
@@ -227,6 +227,6 @@ class PrefindAI(Provider):
 if __name__ == '__main__':
     from rich import print
     ai = PrefindAI(model="claude")
-    response = ai.chat(input(">>> "))
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -134,7 +134,7 @@ class PromptRefine(Provider):
             for line in response.iter_lines(decode_unicode=True):
                 if line:
                     full_response += line  # No need to decode here
-                    yield full_response if raw else dict(text=full_response)
+                    yield full_response if raw else dict(text=line)
             self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -185,7 +185,7 @@ class PromptRefine(Provider):
 
 if __name__ == '__main__':
     from rich import print
-    ai = PromptRefine()
-    response = ai.chat(input(">>> "))
+    ai = PromptRefine(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -138,7 +138,7 @@ class TurboSeek(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
-
+            print(response.text)
             streaming_text = ""
             for value in response.iter_lines(
                 decode_unicode=True,
@@ -149,45 +149,31 @@ class AIUncensored(Provider):
 
 
         def for_stream():
-
-            try:
-                with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
-
-                    if response.status_code == 200:
-                        full_content = ''
-                        for line in response.iter_lines():
-                            decoded_line = line.decode('utf-8').strip()
-                            if decoded_line:
-
-                                if decoded_line == "data: [DONE]":
-
-                                    break
-                                if decoded_line.startswith("data: "):
-                                    data_str = decoded_line[len("data: "):]
-                                    try:
-                                        data_json = json.loads(data_str)
-                                        content = data_json.get("data", "")
-                                        if content:
-                                            full_content += content
-
-                                            yield content if raw else {"text": full_content}
-                                    except json.JSONDecodeError:
-                                        if data_str != "[DONE]":
-                                            return None
-                    else:
-
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code: {response.status_code}"
-                        )
-                self.last_response = {"text": full_content}
-
+            with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+
+                if response.status_code == 200:
+                    full_content = ''
+                    for line in response.iter_lines():
+                        decoded_line = line.decode('utf-8').strip()
+                        if decoded_line:
+
+                            if decoded_line == "data: [DONE]":
+
+                                break
+                            if decoded_line.startswith("data: "):
+                                data_str = decoded_line[len("data: "):]
+                                try:
+                                    data_json = json.loads(data_str)
+                                    content = data_json.get("data", "")
+                                    if content:
+                                        full_content += content
+                                        yield content if raw else dict(text=content)
+                                except json.JSONDecodeError:
+                                    raise Exception
+            self.last_response.update(dict(text=full_content))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
-            except requests.exceptions.RequestException as e:
-
-                raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
-
         def for_non_stream():
 
             for _ in for_stream():
@@ -252,9 +238,7 @@ class AIUncensored(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    ai = AIUncensored()
-    user_input = 'hi'
-    response = ai.chat(user_input)
+    ai = AIUncensored(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
-        print() # For a newline after streaming completes
+        print(chunk, end="", flush=True)
@@ -159,7 +159,7 @@ class Upstage(Provider):
                     content = json_data['choices'][0]['delta'].get('content', '')
                     if content:
                         streaming_response += content
-                        yield content if raw else dict(text=streaming_response)
+                        yield content if raw else dict(text=content)
                 except json.JSONDecodeError:
                     print(f"Error decoding JSON: {data}")
 
@@ -224,7 +224,7 @@ class Upstage(Provider):
         return response["text"]
 if __name__ == '__main__':
     from rich import print
-    ai = Upstage()
-    response = ai.chat("hi")
+    ai = Upstage(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -118,13 +118,13 @@ class X0GPT(Provider):
                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
             )
         streaming_response = ""
-        for line in response.iter_lines(decode_unicode=True, chunk_size=64):
+        for line in response.iter_lines(decode_unicode=True):
             if line:
                 match = re.search(r'0:"(.*?)"', line)
                 if match:
                     content = match.group(1)
                     streaming_response += content
-                    yield content if raw else dict(text=streaming_response)
+                    yield content if raw else dict(text=content)
         self.last_response.update(dict(text=streaming_response))
         self.conversation.update_chat_history(
             prompt, self.get_message(self.last_response)
@@ -152,7 +152,7 @@ class X0GPT(Provider):
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response).replace("\n", "\n\n")
+                yield self.get_message(response)
 
         def for_non_stream():
             return self.get_message(
@@ -162,7 +162,7 @@ class X0GPT(Provider):
                     optimizer=optimizer,
                     conversationally=conversationally,
                 )
-            ).replace("\n", "\n\n")
+            )
 
         return for_stream() if stream else for_non_stream()
 
@@ -176,7 +176,7 @@ class X0GPT(Provider):
 
 if __name__ == "__main__":
     from rich import print
-    ai = X0GPT()
-    response = ai.chat("hi")
+    ai = X0GPT(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
-__version__ = "5.9"
+__version__ = "6.0"
 __prog__ = "webscout"