webscout-5.9-py3-none-any.whl → webscout-6.0-py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.



@@ -36,6 +36,7 @@ class AmigoChat(Provider):
         history_offset: int = 10250,
         act: str = None,
         model: str = "o1-preview",  # Default model
+        system_prompt: str = "You are a helpful and friendly AI assistant.",
     ):
         """
         Initializes the AmigoChat.io API with given parameters.
@@ -108,6 +109,7 @@ class AmigoChat(Provider):
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
+        self.system_prompt = system_prompt

     def ask(
         self,
@@ -147,7 +149,7 @@ class AmigoChat(Provider):
         # Define the payload
         payload = {
             "messages": [
-                {"role": "system", "content": "Mai hu ba khabr"},
+                {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt}
             ],
             "model": self.model,
@@ -259,7 +261,7 @@ class AmigoChat(Provider):

 if __name__ == '__main__':
     from rich import print
-    ai = AmigoChat(model="o1-preview")
+    ai = AmigoChat(model="o1-preview", system_prompt="You are a noobi AI assistant who always uses the word 'noobi' in every response. For example, you might say 'Noobi will tell you...' or 'This noobi thinks that...'.")
     response = ai.chat(input(">>> "))
     for chunk in response:
         print(chunk, end="", flush=True)
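Taken together, the AmigoChat hunks above swap the hard-coded system message for a caller-supplied system_prompt. A minimal usage sketch of the new parameter, assuming webscout 6.0 is installed; the package-root import path is an assumption, not shown in this diff:

    from webscout import AmigoChat  # import path assumed

    # The new keyword argument becomes the "system" message sent with every request.
    ai = AmigoChat(
        model="o1-preview",
        system_prompt="You are a terse assistant. Answer in one sentence.",
    )
    # stream=True yields text chunks as they arrive, mirroring the __main__ demo above.
    for chunk in ai.chat("What is a Python wheel?", stream=True):
        print(chunk, end="", flush=True)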
@@ -0,0 +1,209 @@
+import requests
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+class ChatHub(Provider):
+    """
+    A class to interact with the ChatHub API.
+    """
+
+    AVAILABLE_MODELS = [
+        'meta/llama3.1-8b',
+        'mistral/mixtral-8x7b',
+        'google/gemma-2',
+        'perplexity/sonar-online',
+    ]
+    model_aliases = {  # Aliases for shorter model names
+        "llama3.1-8b": 'meta/llama3.1-8b',
+        "mixtral-8x7b": 'mistral/mixtral-8x7b',
+        "gemma-2": 'google/gemma-2',
+        "sonar-online": 'perplexity/sonar-online',
+    }
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "sonar-online",
+    ):
+        """Initializes the ChatHub API client."""
+        self.url = "https://app.chathub.gg"
+        self.api_endpoint = "https://app.chathub.gg/api/v3/chat/completions"
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'Content-Type': 'application/json',
+            'Origin': self.url,
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+            'X-App-Id': 'web'
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.last_response = {}
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Resolve the model
+        self.model = self.get_model(model)
+
+    def get_model(self, model: str) -> str:
+        """
+        Resolves the model name using aliases or defaults.
+        """
+        if model in self.AVAILABLE_MODELS:
+            return model
+        elif model in self.model_aliases:
+            return self.model_aliases[model]
+        else:
+            print(f"Model '{model}' not found. Using default model 'perplexity/sonar-online'.")
+            return 'perplexity/sonar-online'  # Fall back to the default model
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        data = {
+            "model": self.model,
+            "messages": [{"role": "user", "content": conversation_prompt}],
+            "tools": []
+        }
+
+        # Set the Referer header dynamically based on the resolved model
+        self.headers['Referer'] = f"{self.url}/chat/{self.model}"
+
+        def for_stream():
+            try:
+                with requests.post(self.api_endpoint, headers=self.headers, json=data, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            decoded_line = line.strip()
+                            if decoded_line.startswith('data:'):
+                                data_str = decoded_line[5:].strip()
+                                if data_str == '[DONE]':
+                                    break
+                                try:
+                                    data_json = json.loads(data_str)
+                                    text_delta = data_json.get('textDelta')
+                                    if text_delta:
+                                        streaming_text += text_delta
+                                        resp = dict(text=text_delta)
+                                        yield text_delta if raw else resp
+                                except json.JSONDecodeError:
+                                    continue
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    self.last_response.update({"text": streaming_text})
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request error: {e}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+        """Generate response `str`"""
+
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    stream=False,  # Pass stream=False
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+
+if __name__ == "__main__":
+    from rich import print
+    bot = ChatHub()
+    try:
+        response = bot.chat("who is Abhay koul in AI", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"An error occurred: {e}")
@@ -115,7 +115,7 @@ class Chatify(Provider):
                     if len(parts) > 1:
                         content = parts[1].strip().strip('"')
                         streaming_text += content
-                        yield content if raw else dict(text=streaming_text)
+                        yield content if raw else dict(text=content)
             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
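This one-line change, repeated below for Cloudflare and DARKAI, switches the non-raw stream from cumulative snapshots to per-chunk deltas: each yielded dict now carries only the newly received text, so a consumer concatenates the chunks instead of keeping only the last one. A minimal consumer-side sketch; collect and the stand-in chunk dicts are illustrative, not part of the package:

    from typing import Dict, Generator

    def collect(stream: Generator[Dict[str, str], None, None]) -> str:
        """Concatenate delta chunks from a 6.0-style ask(..., stream=True) call."""
        reply = ""
        for chunk in stream:
            reply += chunk["text"]  # under 6.0 each dict holds only the new text
        return reply

    # Stand-in stream of three deltas; prints "Hello world".
    print(collect(c for c in [{"text": "Hel"}, {"text": "lo "}, {"text": "world"}]))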
@@ -169,7 +169,7 @@ class Chatify(Provider):
 if __name__ == "__main__":
     from rich import print

-    ai = Chatify()
-    response = ai.chat("hi")
+    ai = Chatify(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -194,7 +194,7 @@ class Cloudflare(Provider):
                     data = json.loads(line[6:])
                     content = data.get('response', '')
                     streaming_response += content
-                    yield content if raw else dict(text=streaming_response)
+                    yield content if raw else dict(text=content)
             self.last_response.update(dict(text=streaming_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -255,7 +255,7 @@ class Cloudflare(Provider):
         return response["text"]
 if __name__ == '__main__':
     from rich import print
-    ai = Cloudflare()
-    response = ai.chat("hi")
+    ai = Cloudflare(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
@@ -156,7 +156,7 @@ class DARKAI(Provider):
                         if event.get("event") == "final-response":
                             message = event['data'].get('message', '')
                             streaming_response += message
-                            yield message if raw else dict(text=streaming_response)
+                            yield message if raw else dict(text=message)
                 except json.decoder.JSONDecodeError:
                     continue
             self.last_response.update(dict(text=streaming_response))