webscout 6.2__py3-none-any.whl → 6.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of webscout might be problematic.

webscout/LLM.py CHANGED
@@ -1,100 +1,279 @@
-import requests
-import base64
-from typing import List, Dict, Union
-import json
-import requests
-import base64
-from typing import List, Dict, Union
-
-class LLM:
-    def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-        self.model = model
-        self.conversation_history = [{"role": "system", "content": system_message}]
-
-    def chat(self, messages: List[Dict[str, str]]) -> Union[str, None]:
-        url = "https://api.deepinfra.com/v1/openai/chat/completions"
-        headers = {
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://deepinfra.com',
-            'Pragma': 'no-cache',
-            'Referer': 'https://deepinfra.com/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'X-Deepinfra-Source': 'web-embed',
-            'accept': 'text/event-stream',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"'
-        }
-        data = json.dumps(
-            {
-                'model': self.model,
-                'messages': messages,
-                'temperature': 0.7,
-                'max_tokens': 8028,
-                'stop': [],
-                'stream': False #dont change it
-            }, separators=(',', ':')
-        )
-        try:
-            result = requests.post(url=url, data=data, headers=headers)
-            return result.json()['choices'][0]['message']['content']
-        except:
-            return None
-# def main():
-#     llm = LLM(model="meta-llama/Meta-Llama-3-70B-Instruct")
-#     messages = [
-#         {"role": "user", "content": "Hello, how are you?"}
-#     ]
-#     response = llm.chat(messages)
-#     print(response)
-
-# if __name__ == "__main__":
-#     main()
-
-
-class VLM:
-    def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-        self.model = model
-        self.conversation_history = [{"role": "system", "content": system_message}]
-
-    def chat(self, messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]]) -> Union[str, None]:
-        api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
-        headers = {
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://deepinfra.com',
-            'Pragma': 'no-cache',
-            'Referer': 'https://deepinfra.com/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'X-Deepinfra-Source': 'web-embed',
-            'accept': 'text/event-stream',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"'
-        }
-        payload = {
-            "model": self.model,
-            "messages": messages,
-            "stream": False
-        }
-        try:
-            response = requests.post(api_url, headers=headers, json=payload)
-            return response.json()['choices'][0]['message']['content']
-        except Exception as e:
-            print(f"An error occurred: {e}")
-            return None
-
-def encode_image_to_base64(image_path: str) -> str:
-    with open(image_path, "rb") as image_file:
-        return base64.b64encode(image_file.read()).decode("utf-8")
+import requests
+import base64
+import json
+from typing import List, Dict, Union, Generator, Optional, Any
+
+class LLMError(Exception):
+    """Custom exception for LLM API errors"""
+    pass
+
+class LLM:
+    """A class for interacting with the DeepInfra LLM API."""
+
+    def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
+        """
+        Initialize the LLM client.
+
+        Args:
+            model: The model identifier (e.g., "meta-llama/Meta-Llama-3-70B-Instruct")
+            system_message: The system message to use for the conversation
+        """
+        self.model = model
+        self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
+        self.conversation_history = [{"role": "system", "content": system_message}]
+        self.headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'Cache-Control': 'no-cache',
+            'Connection': 'keep-alive',
+            'Content-Type': 'application/json',
+            'Origin': 'https://deepinfra.com',
+            'Pragma': 'no-cache',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'X-Deepinfra-Source': 'web-embed',
+            'accept': 'text/event-stream',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"'
+        }
+
+    def _prepare_payload(
+        self,
+        messages: List[Dict[str, str]],
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int = 8028,
+        stop: Optional[List[str]] = None,
+    ) -> Dict[str, Any]:
+        """Prepare the API request payload."""
+        return {
+            'model': self.model,
+            'messages': messages,
+            'temperature': temperature,
+            'max_tokens': max_tokens,
+            'stop': stop or [],
+            'stream': stream
+        }
+
+    def chat(
+        self,
+        messages: List[Dict[str, str]],
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int = 8028,
+        stop: Optional[List[str]] = None,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Send a chat request to the DeepInfra API.
+
+        Args:
+            messages: List of message dictionaries with 'role' and 'content'
+            stream: Whether to stream the response
+            temperature: Sampling temperature (0-1)
+            max_tokens: Maximum tokens to generate
+            stop: Optional list of stop sequences
+
+        Returns:
+            Either a string response or a generator for streaming
+
+        Raises:
+            LLMError: If the API request fails
+        """
+        payload = self._prepare_payload(messages, stream, temperature, max_tokens, stop)
+
+        try:
+            if stream:
+                return self._stream_response(payload)
+            else:
+                return self._send_request(payload)
+        except Exception as e:
+            raise LLMError(f"API request failed: {str(e)}")
+
+    def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
+        """Stream the chat response."""
+        try:
+            with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
+                response.raise_for_status()
+                for line in response.iter_lines():
+                    if line:
+                        if line.strip() == b'data: [DONE]':
+                            break
+                        if line.startswith(b'data: '):
+                            try:
+                                chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
+                                if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
+                                    yield content
+                            except json.JSONDecodeError:
+                                continue
+        except requests.RequestException as e:
+            raise LLMError(f"Stream request failed: {str(e)}")
+
+    def _send_request(self, payload: Dict[str, Any]) -> str:
+        """Send a non-streaming chat request."""
+        try:
+            response = requests.post(self.api_url, json=payload, headers=self.headers)
+            response.raise_for_status()
+            result = response.json()
+            return result['choices'][0]['message']['content']
+        except requests.RequestException as e:
+            raise LLMError(f"Request failed: {str(e)}")
+        except (KeyError, IndexError) as e:
+            raise LLMError(f"Invalid response format: {str(e)}")
+        except json.JSONDecodeError as e:
+            raise LLMError(f"Invalid JSON response: {str(e)}")
+
+
+class VLM:
+    """A class for interacting with the DeepInfra VLM (Vision Language Model) API."""
+
+    def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
+        """
+        Initialize the VLM client.
+
+        Args:
+            model: The model identifier
+            system_message: The system message to use for the conversation
+        """
+        self.model = model
+        self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
+        self.conversation_history = [{"role": "system", "content": system_message}]
+        self.headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'Cache-Control': 'no-cache',
+            'Connection': 'keep-alive',
+            'Content-Type': 'application/json',
+            'Origin': 'https://deepinfra.com',
+            'Pragma': 'no-cache',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'X-Deepinfra-Source': 'web-embed',
+            'accept': 'text/event-stream',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"'
+        }
+
+    def chat(
+        self,
+        messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
+        stream: bool = False,
+        temperature: float = 0.7,
+        max_tokens: int = 8028,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Send a chat request with image support to the DeepInfra API.
+
+        Args:
+            messages: List of message dictionaries that may include image data
+            stream: Whether to stream the response
+            temperature: Sampling temperature (0-1)
+            max_tokens: Maximum tokens to generate
+
+        Returns:
+            Either a string response or a generator for streaming
+
+        Raises:
+            LLMError: If the API request fails
+        """
+        payload = {
+            "model": self.model,
+            "messages": messages,
+            "stream": stream,
+            "temperature": temperature,
+            "max_tokens": max_tokens
+        }
+
+        try:
+            if stream:
+                return self._stream_response(payload)
+            else:
+                return self._send_request(payload)
+        except Exception as e:
+            raise LLMError(f"VLM API request failed: {str(e)}")
+
+    def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
+        """Stream the VLM chat response."""
+        try:
+            with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
+                response.raise_for_status()
+                for line in response.iter_lines():
+                    if line:
+                        if line.strip() == b'data: [DONE]':
+                            break
+                        if line.startswith(b'data: '):
+                            try:
+                                chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
+                                if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
+                                    yield content
+                            except json.JSONDecodeError:
+                                continue
+        except requests.RequestException as e:
+            raise LLMError(f"VLM stream request failed: {str(e)}")
+
+    def _send_request(self, payload: Dict[str, Any]) -> str:
+        """Send a non-streaming VLM chat request."""
+        try:
+            response = requests.post(self.api_url, json=payload, headers=self.headers)
+            response.raise_for_status()
+            result = response.json()
+            return result['choices'][0]['message']['content']
+        except requests.RequestException as e:
+            raise LLMError(f"VLM request failed: {str(e)}")
+        except (KeyError, IndexError) as e:
+            raise LLMError(f"Invalid VLM response format: {str(e)}")
+        except json.JSONDecodeError as e:
+            raise LLMError(f"Invalid VLM JSON response: {str(e)}")
+
+
+def encode_image_to_base64(image_path: str) -> str:
+    """
+    Encode an image file to base64 string.
+
+    Args:
+        image_path: Path to the image file
+
+    Returns:
+        Base64 encoded string of the image
+
+    Raises:
+        IOError: If the image file cannot be read
+    """
+    try:
+        with open(image_path, "rb") as image_file:
+            return base64.b64encode(image_file.read()).decode("utf-8")
+    except IOError as e:
+        raise LLMError(f"Failed to read image file: {str(e)}")
+
+
+if __name__ == "__main__":
+    # Example usage
+    try:
+        # Initialize LLM with Llama 3 model
+        llm = LLM(model="Qwen/Qwen2.5-Coder-32B-Instruct")
+
+        # Example messages
+        messages = [
+            {"role": "user", "content": "Write a short poem about AI."}
+        ]
+
+        # Example 1: Non-streaming response
+        print("\nNon-streaming response:")
+        response = llm.chat(messages, stream=False)
+        print(response)
+
+        # Example 2: Streaming response
+        print("\nStreaming response:")
+        for chunk in llm.chat(messages, stream=True):
+            print(chunk, end='', flush=True)
+        print("\n")
+
+    except LLMError as e:
+        print(f"Error: {str(e)}")
+    except KeyboardInterrupt:
+        print("\nOperation cancelled by user")
webscout/Provider/AmigoChat.py CHANGED
@@ -16,12 +16,17 @@ class AmigoChat(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", # Llama 3
-        "o1-mini", # OpenAI O1 Mini
-        "claude-3-sonnet-20240229", # Claude Sonnet
-        "gemini-1.5-pro", # Gemini Pro
-        "gemini-1-5-flash", # Gemini Flash
-        "o1-preview", # OpenAI O1 Preview
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", # Llama 3
+        "o1-mini", # OpenAI O1 Mini
+        "claude-3-sonnet-20240229", # Claude Sonnet
+        "gemini-1.5-pro", # Gemini Pro
+        "gemini-1-5-flash", # Gemini Flash
+        "o1-preview", # OpenAI O1 Preview
+        "claude-3-5-sonnet-20241022", # Claude 3.5 Sonnet
+        "Qwen/Qwen2.5-72B-Instruct-Turbo", # Qwen 2.5
+        "gpt-4o" # OpenAI GPT-4o
+        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo" # Llama 3.2
+
     ]
 
     def __init__(
@@ -29,13 +34,15 @@ class AmigoChat(Provider):
         is_conversation: bool = True,
         max_tokens: int = 600,
         timeout: int = 30,
+        temperature: float = 1,
         intro: str = None,
         filepath: str = None,
+        top_p: float = 0.95,
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "o1-preview", # Default model
+        model: str = "Qwen/Qwen2.5-72B-Instruct-Turbo", # Default model
         system_prompt: str = "You are a helpful and friendly AI assistant.",
     ):
         """
@@ -68,8 +75,10 @@ class AmigoChat(Provider):
         self.api_endpoint = "https://api.amigochat.io/v1/chat/completions"
         self.stream_chunk_size = 64
         self.timeout = timeout
+        self.temperature = temperature
         self.last_response = {}
         self.model = model
+        self.top_p = top_p
         self.headers = {
             "Accept": "*/*",
             "Accept-Encoding": "gzip, deflate, br, zstd",
@@ -158,11 +167,11 @@ class AmigoChat(Provider):
             ],
             "model": self.model,
             "frequency_penalty": 0,
-            "max_tokens": 4000,
+            "max_tokens": self.max_tokens_to_sample,
             "presence_penalty": 0,
             "stream": stream,
-            "temperature": 0.5,
-            "top_p": 0.95
+            "temperature":self.temperature,
+            "top_p": self.top_p
         }
 
         def for_stream():
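The AmigoChat changes above surface temperature and top_p as constructor parameters, honor max_tokens in the request payload, and switch the default model to Qwen/Qwen2.5-72B-Instruct-Turbo. A small sketch of how the new knobs would be passed follows; the top-level import and the Provider-style chat(prompt) call are assumptions borrowed from other webscout providers such as AndiSearch below.

```python
# Hedged sketch (not part of the diff): driving the new AmigoChat parameters.
# Assumptions: AmigoChat is importable from the webscout package root and
# exposes the usual Provider chat(prompt) method.
from webscout import AmigoChat

bot = AmigoChat(
    model="Qwen/Qwen2.5-72B-Instruct-Turbo",  # new default model in 6.3
    max_tokens=600,
    temperature=1,   # new in 6.3, forwarded as "temperature" in the payload
    top_p=0.95,      # new in 6.3, forwarded as "top_p" in the payload
)

print(bot.chat("In one sentence, what does top_p control?"))
```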
webscout/Provider/Andi.py CHANGED
@@ -92,40 +92,7 @@ class AndiSearch(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with AI
 
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-            "object": "chat.completion",
-            "created": 1704623244,
-            "model": "gpt-3.5-turbo",
-            "usage": {
-                "prompt_tokens": 0,
-                "completion_tokens": 0,
-                "total_tokens": 0
-            },
-            "choices": [
-                {
-                    "message": {
-                        "role": "assistant",
-                        "content": "Hello! How can I assist you today?"
-                    },
-                    "finish_reason": "stop",
-                    "index": 0
-                }
-            ]
-        }
-        ```
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers: