webscout-7.0-py3-none-any.whl → webscout-7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

@@ -8,10 +8,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
+from webscout import LitAgent
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
 
 class DeepInfra(Provider):
     """
-    A class to interact with the DeepInfra API.
+    A class to interact with the DeepInfra API with logging and LitAgent user-agent.
     """
 
     def __init__(
@@ -25,13 +27,29 @@ class DeepInfra(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "Qwen/Qwen2.5-72B-Instruct",
+        model: str = "Qwen/Qwen2.5-72B-Instruct",
+        logging: bool = False
     ):
-        """Initializes the DeepInfra API client."""
+        """Initializes the DeepInfra API client with logging support."""
         self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
+        # Use LitAgent for user-agent instead of hardcoded string.
         self.headers = {
-            "Accept": "text/event-stream, application/json",
-
+            'User-Agent': LitAgent().random(),
+            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'Cache-Control': 'no-cache',
+            'Connection': 'keep-alive',
+            'Content-Type': 'application/json',
+            'Origin': 'https://deepinfra.com',
+            'Pragma': 'no-cache',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'X-Deepinfra-Source': 'web-embed',
+            'accept': 'text/event-stream',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"'
         }
         self.session = requests.Session()
         self.session.headers.update(self.headers)
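
The notable change in this hunk is that the single hardcoded Accept header gives way to a full set of browser-like headers with a randomized User-Agent from LitAgent. A minimal sketch of the resulting behaviour, assuming only what the diff shows (that `LitAgent().random()` returns a browser-like user-agent string):

```python
# Sketch: each DeepInfra instance gets a fresh, randomized User-Agent.
from webscout import LitAgent

agent = LitAgent()
headers = {
    "User-Agent": agent.random(),   # differs from instance to instance
    "Content-Type": "application/json",
    "accept": "text/event-stream",  # the API streams server-sent events
}
print(headers["User-Agent"])
```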
@@ -61,6 +79,16 @@ class DeepInfra(Provider):
         )
         self.conversation.history_offset = history_offset
 
+        # Initialize logger if enabled
+        self.logger = LitLogger(
+            name="DeepInfra",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("DeepInfra initialized successfully")
+
     def ask(
         self,
         prompt: str,
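
Both providers touched in this release adopt the same pattern: construct a LitLogger only when `logging=True`, otherwise keep `self.logger` as `None` and guard every call site. A standalone sketch of the pattern, using the LitLogger constructor arguments shown in the diff (everything else is illustrative):

```python
from webscout.Litlogger import LitLogger, LogFormat, ColorScheme

class Example:
    def __init__(self, logging: bool = False):
        # Logger stays None unless explicitly enabled, so disabled logging costs nothing.
        self.logger = LitLogger(
            name="Example",
            format=LogFormat.MODERN_EMOJI,
            color_scheme=ColorScheme.CYBERPUNK,
        ) if logging else None

    def work(self):
        if self.logger:  # every call site guards on None
            self.logger.debug("working...")

Example(logging=True).work()
```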
@@ -69,14 +97,17 @@ class DeepInfra(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         # Payload construction
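
The payload itself is elided by the diff, but the URL (`/v1/openai/chat/completions`) marks this as an OpenAI-compatible endpoint. A representative sketch of what such a request body looks like; the exact fields webscout sends are not shown in the diff, so treat the values as illustrative:

```python
import json

# OpenAI-style chat payload for an OpenAI-compatible endpoint (fields illustrative).
payload = {
    "model": "Qwen/Qwen2.5-72B-Instruct",  # the provider's default model
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": True,                         # requests an SSE stream back
}
print(json.dumps(payload, indent=2))
```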
@@ -90,17 +121,23 @@ class DeepInfra(Provider):
         }
 
         def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to DeepInfra API...")
             try:
                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
                     if response.status_code != 200:
-                        raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+                        if self.logger:
+                            self.logger.error(f"Request failed with status code {response.status_code}")
 
+                        raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+                    if self.logger:
+                        self.logger.debug(response.text)
                     streaming_text = ""
-                    for line in response.iter_lines(decode_unicode=True): # Decode lines
+                    for line in response.iter_lines(decode_unicode=True):
                         if line:
                             line = line.strip()
                             if line.startswith("data: "):
-                                json_str = line[6:] #Remove "data: " prefix
+                                json_str = line[6:]  # Remove "data: " prefix
                                 if json_str == "[DONE]":
                                     break
                                 try:
@@ -110,28 +147,27 @@ class DeepInfra(Provider):
                                     if 'delta' in choice and 'content' in choice['delta']:
                                         content = choice['delta']['content']
                                         streaming_text += content
-
-                                        # Yield ONLY the new content:
-                                        resp = dict(text=content)
+                                        resp = dict(text=content)
                                         yield resp if raw else resp
                                 except json.JSONDecodeError:
-                                    pass # Or handle the error as needed
-                self.conversation.update_chat_history(prompt, streaming_text) # Update history *after* streaming
+                                    if self.logger:
+                                        self.logger.error("JSON decode error in streaming data")
+                                    pass
+                self.conversation.update_chat_history(prompt, streaming_text)
+                if self.logger:
+                    self.logger.info("Streaming response completed successfully")
             except requests.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request failed: {e}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
-
         def for_non_stream():
-            # let's make use of stream
             for _ in for_stream():
                 pass
             return self.last_response
 
-
         return for_stream() if stream else for_non_stream()
 
-
-
     def chat(
         self,
         prompt: str,
@@ -139,34 +175,22 @@ class DeepInfra(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-
         def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
-
         def for_non_stream():
             return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
             )
-
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
-
-
 if __name__ == "__main__":
     from rich import print
-    ai = DeepInfra(timeout=5000)
+    ai = DeepInfra(timeout=5000, logging=True)
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
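
The streaming loop above, and the near-identical one in the GliderAI provider whose diff follows, boils down to the same steps: read the SSE body line by line, strip the `data: ` prefix, stop at the `[DONE]` sentinel, and JSON-decode each chunk. A self-contained sketch of that loop over made-up input, with no network involved (the sample lines are hypothetical):

```python
import json

# Hypothetical SSE lines, shaped like the chunks the endpoint streams back.
lines = [
    'data: {"choices": [{"delta": {"content": "Hello"}}]}',
    'data: {"choices": [{"delta": {"content": ", world"}}]}',
    "data: [DONE]",
]

streaming_text = ""
for line in lines:
    line = line.strip()
    if not line.startswith("data: "):
        continue
    json_str = line[6:]            # drop the "data: " prefix
    if json_str == "[DONE]":       # end-of-stream sentinel
        break
    try:
        delta = json.loads(json_str)["choices"][0].get("delta", {})
        streaming_text += delta.get("content", "")
    except json.JSONDecodeError:
        pass                       # the providers skip malformed chunks

print(streaming_text)  # -> "Hello, world"
```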
@@ -2,49 +2,52 @@ import requests
 import json
 from typing import Any, Dict, Generator, Optional
 
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
 from webscout import LitAgent as Lit
+
 class GliderAI(Provider):
     """
-    A class to interact with the Glider.so API.
+    A class to interact with the Glider.so API with comprehensive logging.
     """
 
-    AVAILABLE_MODELS = [
+    AVAILABLE_MODELS = {
         "chat-llama-3-1-70b",
         "chat-llama-3-1-8b",
         "chat-llama-3-2-3b",
         "deepseek-ai/DeepSeek-R1",
-    ]
-
-    model_aliases = {
-        "llama-3.1-70b": "chat-llama-3-1-70b",
-        "llama-3.1-8b": "chat-llama-3-1-8b",
-        "llama-3.2-3b": "chat-llama-3-2-3b",
-        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
     }
 
-
     def __init__(
         self,
         is_conversation: bool = True,
         max_tokens: int = 600,
         timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
+        intro: Optional[str] = None,
+        filepath: Optional[str] = None,
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
-        act: str = None,
-        model: str = "llama-3.1-70b",
+        act: Optional[str] = None,
+        model: str = "chat-llama-3-1-70b",
         system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
     ):
-        """Initializes the GliderAI API client."""
-        if model not in self.AVAILABLE_MODELS and model not in self.model_aliases:
+        """Initializes the GliderAI API client with logging capabilities."""
+        if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
+
+        self.logger = LitLogger(
+            name="GliderAI",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing GliderAI with model: {model}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
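
Note the breaking change in this hunk: the `model_aliases` table is gone, so the convenience names (`llama-3.1-70b` and friends) no longer resolve, and `AVAILABLE_MODELS` is now a set, making the membership check O(1). A quick sketch of the behaviour change (the import path is assumed; adjust to your install):

```python
from webscout import GliderAI  # top-level re-export assumed

# 7.1: exact model IDs only -- set membership, no alias resolution.
ai = GliderAI(model="chat-llama-3-1-70b")   # accepted

try:
    GliderAI(model="llama-3.1-70b")         # worked in 7.0 via model_aliases
except ValueError as err:
    print(err)  # Invalid model: llama-3.1-70b. Choose from: ...
```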
@@ -52,7 +55,7 @@ class GliderAI(Provider):
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.model = self.model_aliases.get(model,model)
+        self.model = model
         self.system_prompt = system_prompt
         self.headers = {
             "accept": "*/*",
@@ -66,10 +69,10 @@ class GliderAI(Provider):
         self.session.proxies = proxies
 
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -82,40 +85,44 @@ class GliderAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+        if self.logger:
+            self.logger.info("GliderAI initialized successfully")
+
     def ask(
         self,
         prompt: str,
         stream: bool = False,
         raw: bool = False,
-        optimizer: str = None,
+        optimizer: Optional[str] = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any] | Generator:
-        """Chat with AI
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """Chat with AI with logging capabilities.
 
         Args:
-            prompt (str): Prompt to be send.
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            raw (bool, optional): Return raw response chunks instead of dict. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversationally modified prompt when optimizer specified. Defaults to False.
         Returns:
-            dict : {}
-            ```json
-            {
-                "text" : "How may I assist you today?"
-            }
-            ```
+            dict or Generator[dict, None, None]: The response from the API.
         """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
         payload = {
             "messages": [
@@ -126,14 +133,19 @@ class GliderAI(Provider):
         }
 
         def for_stream():
+            if self.logger:
+                self.logger.debug("Initiating streaming request to API")
             response = self.session.post(
                 self.api_endpoint, json=payload, stream=True, timeout=self.timeout
             )
             if not response.ok:
+                if self.logger:
+                    self.logger.error(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
-
             streaming_text = ""
             for value in response.iter_lines(decode_unicode=True):
                 if value:
@@ -143,46 +155,50 @@ class GliderAI(Provider):
                         content = data['choices'][0].get('delta', {}).get("content", "")
                         if content:
                             streaming_text += content
-                            yield content if raw else dict(text=content)
+                            yield content if raw else {"text": content}
                     except json.JSONDecodeError:
-                        if "stop" in value :
-                            break
-
+                        if "stop" in value:
+                            break
             self.last_response.update(dict(text=streaming_text))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+            if self.logger:
+                self.logger.debug("Response processing completed")
+
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
 
-
     def chat(
         self,
         prompt: str,
         stream: bool = False,
-        optimizer: str = None,
+        optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generate response `str`
+        """Generate response as a string with logging.
+
        Args:
-            prompt (str): Prompt to be send.
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversationally modified prompt when optimizer specified. Defaults to False.
         Returns:
-            str: Response generated
+            str or Generator[str, None, None]: The response generated.
         """
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                yield self.get_message(response)
         def for_non_stream():
-            return self.get_message(
+            return self.get_message(
                 self.ask(
                     prompt,
                     False,
@@ -192,16 +208,15 @@ class GliderAI(Provider):
             )
         return for_stream() if stream else for_non_stream()
 
-
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response"""
+        """Retrieves message only from response."""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
-
 if __name__ == "__main__":
     from rich import print
-    ai = GliderAI(model="llama-3.1-70b")
+    # For testing with logging enabled
+    ai = GliderAI(model="chat-llama-3-1-70b", logging=True)
     response = ai.chat("Meaning of Life", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
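
One behavioural detail worth calling out from the GliderAI diff: `ask()` now yields `{"text": ...}` dicts by default and bare content strings when `raw=True`. A hedged usage sketch (the import path is assumed; adjust to your install):

```python
from webscout import GliderAI  # top-level re-export assumed

ai = GliderAI(model="chat-llama-3-1-8b")

# Default: ask() yields {"text": "..."} dicts; chat() unwraps them to plain str.
for chunk in ai.ask("Say hi", stream=True):
    print(chunk["text"], end="", flush=True)

# raw=True switches ask() to yielding the bare content strings instead.
for chunk in ai.ask("Say hi again", stream=True, raw=True):
    print(chunk, end="", flush=True)
```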
webscout/Provider/Groq.py CHANGED
@@ -16,18 +16,22 @@ class GROQ(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "llama-3.1-405b-reasoning",
-        "llama-3.1-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama3-groq-70b-8192-tool-use-preview",
-        "llama3-groq-8b-8192-tool-use-preview",
-        "llama-guard-3-8b",
+        # "whisper-large-v3",
         "llama3-70b-8192",
+        "llama-3.2-3b-preview",
+        "gemma2-9b-it",
+        "llama-3.2-11b-vision-preview",
         "llama3-8b-8192",
+        "llama-3.3-70b-versatile",
+        "deepseek-r1-distill-llama-70b",
+        # "distil-whisper-large-v3-en",
         "mixtral-8x7b-32768",
-        "gemma-7b-it",
-        "gemma2-9b-it",
-        "whisper-large-v3"
+        "llama-3.3-70b-specdec",
+        "llama-3.2-90b-vision-preview",
+        "llama-3.2-1b-preview",
+        # "whisper-large-v3-turbo",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b"
     ]
 
     def __init__(
@@ -337,18 +341,22 @@ class AsyncGROQ(AsyncProvider):
     """
 
     AVAILABLE_MODELS = [
-        "llama-3.1-405b-reasoning",
-        "llama-3.1-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama3-groq-70b-8192-tool-use-preview",
-        "llama3-groq-8b-8192-tool-use-preview",
-        "llama-guard-3-8b",
+        # "whisper-large-v3",
         "llama3-70b-8192",
+        "llama-3.2-3b-preview",
+        "gemma2-9b-it",
+        "llama-3.2-11b-vision-preview",
         "llama3-8b-8192",
+        "llama-3.3-70b-versatile",
+        "deepseek-r1-distill-llama-70b",
+        # "distil-whisper-large-v3-en",
         "mixtral-8x7b-32768",
-        "gemma-7b-it",
-        "gemma2-9b-it",
-        "whisper-large-v3"
+        "llama-3.3-70b-specdec",
+        "llama-3.2-90b-vision-preview",
+        "llama-3.2-1b-preview",
+        # "whisper-large-v3-turbo",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b"
     ]
 
     def __init__(
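
Since the model roster changed identically in both GROQ and AsyncGROQ (the 3.1-era and groq-tool-use previews give way to Llama 3.2/3.3 variants and a DeepSeek-R1 distill), code pinned to a removed ID such as `llama-3.1-70b-versatile` will now fail validation. A defensive sketch; `AVAILABLE_MODELS` is the class attribute shown in the diff, while the import path and fallback choice are assumptions:

```python
from webscout import GROQ  # top-level re-export assumed

preferred = "llama-3.1-70b-versatile"   # removed in 7.1
fallback = "llama-3.3-70b-versatile"    # its closest 7.1 replacement (illustrative)

# AVAILABLE_MODELS is a plain class attribute, so it can be consulted
# before constructing the provider (and before spending an API call).
model = preferred if preferred in GROQ.AVAILABLE_MODELS else fallback
print(f"Using model: {model}")
```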