webscout 5.8-py3-none-any.whl → 5.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of webscout has been flagged as a potentially problematic release.

@@ -59,6 +59,8 @@ from .twitterclone import *
 from .tutorai import *
 from .bixin import *
 from .ChatGPTES import *
+from .Amigo import *
+from .prefind import *
 __all__ = [
     'Farfalle',
     'LLAMA',
@@ -122,6 +124,9 @@ __all__ = [
     'TutorAI',
     'Bixin',
     'ChatGPTES',
+    'AmigoChat',
+    'PrefindAI',
+    # 'LearnFast',
 
 
 ]
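
Net effect of these hunks: two new providers are wired in (`AmigoChat` from `.Amigo`, `PrefindAI` from `.prefind`), while `LearnFast` ships in this release but stays commented out of `__all__`. A quick smoke test of the new surface, as a sketch only — it assumes the installed 5.9 wheel re-exports provider names from the package root the way the existing providers are exposed:

import webscout  # assumes webscout==5.9 is installed

# Names this diff adds to __all__; the hasattr checks assume root re-exports.
for name in ("AmigoChat", "PrefindAI"):
    print(name, "available:", hasattr(webscout, name))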
@@ -0,0 +1,253 @@
+import os
+import json
+from typing import Optional
+import uuid
+import requests
+import cloudscraper
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+
+class LearnFast(Provider):
+    """
+    A class to interact with the LearnFast.ai API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful AI assistant.",
+    ):
+        """
+        Initializes the LearnFast.ai API with given parameters.
+        """
+        self.session = cloudscraper.create_scraper()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = 'https://autosite.erweima.ai/api/v1/chat'
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.headers = {
+            "authority": "autosite.erweima.ai",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "authorization": "",  # Always empty
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://learnfast.ai",
+            "priority": "u=1, i",
+            "referer": "https://learnfast.ai/",
+            "sec-ch-ua": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def generate_unique_id(self) -> str:
+        """Generate a 32-character hexadecimal unique ID."""
+        return uuid.uuid4().hex
+
+    def generate_session_id(self) -> str:
+        """Generate a 32-character hexadecimal session ID."""
+        return uuid.uuid4().hex
+
+    def upload_image_to_0x0(self, image_path: str) -> str:
+        """
+        Uploads an image to 0x0.st and returns the public URL.
+        """
+        if not os.path.isfile(image_path):
+            raise FileNotFoundError(f"The file '{image_path}' does not exist.")
+
+        with open(image_path, "rb") as img_file:
+            files = {"file": img_file}
+            try:
+                response = requests.post("https://0x0.st", files=files)
+                response.raise_for_status()
+                image_url = response.text.strip()
+                if not image_url.startswith("http"):
+                    raise ValueError("Received an invalid URL from 0x0.st.")
+                return image_url
+            except requests.exceptions.RequestException as e:
+                raise Exception(f"Failed to upload image to 0x0.st: {e}") from e
+
+    def create_payload(
+        self,
+        session_id: str,
+        conversation_prompt: str,
+        image_url: Optional[str] = None
+    ) -> dict:
+        """
+        Creates the JSON payload for the request.
+        """
+        payload = {
+            "prompt": conversation_prompt,
+            "sessionId": session_id,
+        }
+        if image_url:
+            payload["attachments"] = [
+                {
+                    "fileType": "image/jpeg",
+                    "file": {},
+                    "fileContent": image_url
+                }
+            ]
+        return payload
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        image_path: Optional[str] = None,
+    ) -> dict:
+        """Chat with LearnFast
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            image_path (Optional[str], optional): Path to the image to be uploaded. Defaults to None.
+
+        Returns:
+            dict : {}
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        # Generate unique ID and session ID
+        unique_id = self.generate_unique_id()
+        session_id = self.generate_session_id()
+
+        # Update headers with the unique ID
+        self.headers["uniqueid"] = unique_id
+
+        # Upload image and get URL if image_path is provided
+        image_url = None
+        if image_path:
+            try:
+                image_url = self.upload_image_to_0x0(image_path)
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Error uploading image: {e}") from e
+
+        # Create the payload
+        payload = self.create_payload(session_id, conversation_prompt, image_url)
+
+        # Convert the payload to a JSON string
+        data = json.dumps(payload)
+
+        try:
+            # Send the POST request with streaming enabled
+            response = self.session.post(self.api_endpoint, headers=self.headers, data=data, stream=True, timeout=self.timeout)
+            response.raise_for_status()  # Check for HTTP errors
+
+            # Process the streamed response
+            full_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.strip() == "[DONE]":
+                        break
+                    try:
+                        json_response = json.loads(line)
+                        message = json_response.get('data', {}).get('message', '')
+                        if message:
+                            full_response += message
+                            # print(message, end='', flush=True)
+                    except json.JSONDecodeError:
+                        print(f"\nFailed to parse JSON: {line}")
+            self.last_response.update({"text": full_response})
+            self.conversation.update_chat_history(prompt, full_response)
+
+            return self.last_response
+        except requests.exceptions.RequestException as e:
+            raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        image_path: Optional[str] = None,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            image_path (Optional[str], optional): Path to the image to be uploaded. Defaults to None.
+        Returns:
+            str: Response generated
+        """
+        response = self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally, image_path=image_path)
+        return self.get_message(response)
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = LearnFast()
+    response = ai.chat(input(">>> "), image_path="photo_2024-07-06_22-19-42.jpg")
+    for chunk in response:
+        print(chunk, end="", flush=True)
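
For orientation: the new module above wraps the LearnFast.ai chat endpoint in a cloudscraper session and, when given an image, first uploads it to 0x0.st and attaches the returned public URL to the JSON payload. A minimal usage sketch follows; the import path is a guess, since this diff omits the new file's name, and `LearnFast` is not exported through `__all__` in 5.9:

from webscout.Provider.Learnfast import LearnFast  # hypothetical path; the diff does not show the filename

ai = LearnFast(timeout=60)
# Pass image_path="..." to have the file uploaded to 0x0.st and attached.
print(ai.chat("Summarize the HTTP request/response cycle in two sentences."))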
@@ -0,0 +1,232 @@
+import requests
+import json
+import uuid
+import os
+from typing import Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+
+class PrefindAI(Provider):
+    """
+    A class to interact with the Prefind AI API.
+    """
+
+    AVAILABLE_MODELS = ["llama", "claude"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "claude",
+    ):
+        """
+        Initializes the Prefind AI API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): The AI model to use for text generation. Defaults to "claude".
+                Options: "llama", "claude".
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://api.prefind.ai/api/search/v1"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.device_token = self.get_device_token()
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(
+            {
+                "Content-Type": "application/json",
+                "Accept": "text/event-stream",
+            }
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def get_device_token(self) -> str:
+        """
+        Retrieves a device token from the Prefind AI API.
+        """
+        device_token_url = "https://api.prefind.ai/api/auth/device-token/create"
+        headers = {"Content-Type": "application/json; charset=utf-8"}
+        data = {}
+        response = requests.post(
+            device_token_url, headers=headers, data=json.dumps(data)
+        )
+
+        if response.status_code == 200:
+            device_token_data = response.json()
+            return device_token_data["sessionToken"]
+        else:
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to get device token - ({response.status_code}, {response.reason}) - {response.text}"
+            )
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+            ```json
+            {
+                "text" : "How may I assist you today?"
+            }
+            ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        search_data = {"query": conversation_prompt, "deviceToken": self.device_token}
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, json=search_data, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            streaming_text = ""
+            for line in response.iter_lines(decode_unicode=True):  # iter_lines is already decoding
+                if line:
+                    # line = line.decode('utf-8').strip()  # unnecessary: the line is already decoded
+                    if line.startswith("event: "):
+                        event = line[7:]
+                    elif line.startswith("data: "):
+                        data_str = line[6:]
+                        if event == "received":
+                            data = json.loads(data_str)
+                            if data['type'] == 'chunk':
+                                model = data['model']
+                                if (self.model == "llama" and model == 'OPENROUTER_LLAMA_3') or \
+                                   (self.model == "claude" and model == 'OPENROUTER_CLAUDE'):
+                                    content = data['chunk']['content']
+                                    if content:
+                                        streaming_text += content + ("\n" if stream else "")
+                                        resp = dict(text=streaming_text)
+                                        self.last_response.update(resp)
+                                        yield resp if raw else resp
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            # let's make use of stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == '__main__':
+    from rich import print
+    ai = PrefindAI(model="claude")
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
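
`PrefindAI` first mints a device token from api.prefind.ai, then streams server-sent events from the search endpoint, keeping only chunks whose model field matches the chosen backend ("llama" → OPENROUTER_LLAMA_3, "claude" → OPENROUTER_CLAUDE). A short usage sketch, assuming the name is re-exported from the package root per the `__all__` change above:

from webscout import PrefindAI  # assumes root re-export, per the __all__ hunk above

ai = PrefindAI(model="claude", timeout=60)
# Non-streaming call; pass stream=True to chat() to iterate cumulative text snapshots instead.
print(ai.chat("What are server-sent events?"))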
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
-__version__ = "5.8"
+__version__ = "5.9"
 __prog__ = "webscout"