webscout 6.2b0__py3-none-any.whl → 6.3__py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

@@ -0,0 +1,203 @@
+import requests
+import json
+from typing import Any, Dict, Optional, Generator, List
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class LLMChat(Provider):
+    """
+    A class to interact with the LLMChat API.
+    """
+
+    AVAILABLE_MODELS = [
+        "@cf/meta/llama-3.1-70b-instruct",
+        "@cf/meta/llama-3.1-8b-instruct",
+        "@cf/meta/llama-3.2-3b-instruct",
+        "@cf/meta/llama-3.2-1b-instruct"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "@cf/meta/llama-3.1-70b-instruct", # Default model
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        """
+        Initializes the LLMChat API with given parameters.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://llmchat.in/inference/stream"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "*/*",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0",
+            "Origin": "https://llmchat.in",
+            "Referer": "https://llmchat.in/"
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """Chat with LLMChat
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict: Response dictionary.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        url = f"{self.api_endpoint}?model={self.model}"
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "max_tokens": self.max_tokens_to_sample,
+            "stream": stream
+        }
+
+        def for_stream():
+            try:
+                with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    full_response = ""
+                    for line in response.iter_lines():
+                        if line:
+                            line = line.decode('utf-8')
+                            if line.startswith('data: '):
+                                try:
+                                    data = json.loads(line[6:])
+                                    if data.get('response'):
+                                        response_text = data['response']
+                                        full_response += response_text
+                                        yield response_text if raw else dict(text=response_text)
+                                except json.JSONDecodeError:
+                                    if line.strip() != 'data: [DONE]':
+                                        print(f"Failed to parse line: {line}")
+                                    continue
+                    self.last_response.update(dict(text=full_response))
+                    self.conversation.update_chat_history(
+                        prompt, self.get_message(self.last_response)
+                    )
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            full_response = ""
+            for line in for_stream():
+                full_response += line['text'] if not raw else line
+            return dict(text=full_response)
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Retrieves message only from response.
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
+    response = ai.chat("What's the meaning of life?", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
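
The new file above already ends with a streaming demo under `if __name__ == "__main__":`. For reference, a minimal non-streaming sketch follows; the `from webscout import LLMChat` import path is an assumption about how the package re-exports the provider and is not confirmed by this diff.

    # Hypothetical usage sketch; the import path is assumed, not shown in the diff.
    from webscout import LLMChat

    bot = LLMChat(model="@cf/meta/llama-3.1-8b-instruct", timeout=30)
    # With stream=False, chat() drains the same SSE generator used for streaming
    # and returns the whole reply as a single string.
    reply = bot.chat("Summarize the CAP theorem in one sentence.")
    print(reply)
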
@@ -0,0 +1,196 @@
+import uuid
+import requests
+import json
+from typing import Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class Talkai(Provider):
+    """
+    A class to interact with the Talkai.info API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4o-mini", # Default model
+    ):
+        """
+        Initializes the Talkai.info API with given parameters.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://talkai.info/chat/send/"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.headers = {
+            'Accept': 'application/json, text/event-stream',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'Origin': 'https://talkai.info',
+            'Referer': 'https://talkai.info/chat/',
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0',
+            'sec-ch-ua': '"Chromium";v="130", "Microsoft Edge";v="130", "Not?A_Brand";v="99"',
+            'sec-ch-ua-platform': '"Windows"'
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """Chat with Talkai
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict: Response dictionary.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "type": "chat",
+            "messagesHistory": [
+                {
+                    "id": str(uuid.uuid4()),
+                    "from": "you",
+                    "content": conversation_prompt
+                }
+            ],
+            "settings": {
+                "model": self.model
+            }
+        }
+
+        def for_stream():
+            try:
+                with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+
+                    full_response = ""
+                    for line in response.iter_lines():
+                        if line:
+                            decoded_line = line.decode('utf-8')
+                            if 'event: trylimit' in decoded_line:
+                                break # Stop if trylimit event is encountered
+                            if decoded_line.startswith('data: '):
+                                data = decoded_line[6:] # Remove 'data: ' prefix
+                                full_response += data
+                                yield data if raw else dict(text=data)
+
+                    self.last_response.update(dict(text=full_response))
+                    self.conversation.update_chat_history(
+                        prompt, self.get_message(self.last_response)
+                    )
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            full_response = ""
+            for line in for_stream():
+                full_response += line['text'] if not raw else line
+            return dict(text=full_response)
+
+        return for_stream() if stream else for_non_stream()
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Retrieves message only from response.
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+if __name__ == "__main__":
+    t = Talkai()
+    resp = t.chat("write me about AI", stream=True)
+    for chunk in resp:
+        print(chunk, end="", flush=True)
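
As with the previous file, a short sketch of how the streaming `ask()` interface behaves, assuming the same kind of package-root import (an assumption, not shown in this diff): with `raw=False`, each SSE "data:" chunk is yielded as a `{"text": ...}` dict, which is what `chat()` then feeds through `get_message()`.

    # Hypothetical usage sketch; the import path is assumed, not shown in the diff.
    from webscout import Talkai

    t = Talkai(model="gpt-4o-mini")
    # ask(stream=True) yields a dict like {"text": "<chunk>"} per SSE data line.
    for chunk in t.ask("What are server-sent events?", stream=True):
        print(chunk["text"], end="", flush=True)
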
@@ -41,7 +41,8 @@ class AIUncensored(Provider):
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = 'https://twitterclone-i0wr.onrender.com/api/chat'
+        self.api_endpoint = ['https://twitterclone-i0wr.onrender.com/api/chat', 'https://twitterclone-4e8t.onrender.com/api/chat', 'https://twitterclone-8wd1.onrender.com/api/chat']
+        self.endpoint_index = 0
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
@@ -149,10 +150,10 @@ class AIUncensored(Provider):


         def for_stream():
-            with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+            full_content = ''
+            with requests.post(self.api_endpoint[self.endpoint_index], headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:

                 if response.status_code == 200:
-                    full_content = ''
                     for line in response.iter_lines():
                         decoded_line = line.decode('utf-8').strip()
                         if decoded_line:
@@ -174,11 +175,11 @@ class AIUncensored(Provider):
                     self.conversation.update_chat_history(
                         prompt, self.get_message(self.last_response)
                     )
+            self.endpoint_index = (self.endpoint_index + 1) % len(self.api_endpoint)
         def for_non_stream():
-
+            full_content = ''
             for _ in for_stream():
                 pass
-
             return self.last_response

         return for_stream() if stream else for_non_stream()
@@ -241,4 +242,4 @@ if __name__ == "__main__":
     ai = AIUncensored(timeout=5000)
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
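
The AIUncensored change swaps the single endpoint string for a list of mirrors plus a rotating index, advancing the index after each streamed request. A stand-alone sketch of the same round-robin pattern is shown below; the URLs are the ones listed in the diff, while the helper name is illustrative.

    # Round-robin endpoint selection, mirroring the pattern introduced in the diff.
    endpoints = [
        "https://twitterclone-i0wr.onrender.com/api/chat",
        "https://twitterclone-4e8t.onrender.com/api/chat",
        "https://twitterclone-8wd1.onrender.com/api/chat",
    ]
    index = 0

    def next_endpoint() -> str:
        """Return the current mirror, then advance to the next one."""
        global index
        url = endpoints[index]
        index = (index + 1) % len(endpoints)  # wrap around, as in the diff
        return url

    for _ in range(4):
        print(next_endpoint())  # i0wr, 4e8t, 8wd1, then back to i0wr
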