webscout 7.0-py3-none-any.whl → 7.1-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

@@ -1,3 +1,4 @@
+
 import requests
 import json
 from typing import Any, Dict, Optional, Generator, List
@@ -7,10 +8,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
 from webscout import LitAgent as Lit
+
 class LLMChat(Provider):
     """
-    A class to interact with the LLMChat API.
+    A class to interact with the LLMChat API with comprehensive logging.
     """
 
     AVAILABLE_MODELS = [
@@ -33,13 +36,25 @@ class LLMChat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "@cf/meta/llama-3.1-70b-instruct", # Default model
+        model: str = "@cf/meta/llama-3.1-70b-instruct",
         system_prompt: str = "You are a helpful assistant.",
+        logging: bool = False
     ):
         """
-        Initializes the LLMChat API with given parameters.
+        Initializes the LLMChat API with given parameters and logging capabilities.
         """
+        self.logger = LitLogger(
+            name="LLMChat",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing LLMChat with model: {model}")
+
         if model not in self.AVAILABLE_MODELS:
+            if self.logger:
+                self.logger.error(f"Invalid model selected: {model}")
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
         self.session = requests.Session()
@@ -50,6 +65,7 @@ class LLMChat(Provider):
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+
         self.headers = {
             "Content-Type": "application/json",
             "Accept": "*/*",
@@ -57,11 +73,13 @@ class LLMChat(Provider):
             "Origin": "https://llmchat.in",
             "Referer": "https://llmchat.in/"
         }
+
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -69,12 +87,16 @@ class LLMChat(Provider):
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
+        if self.logger:
+            self.logger.info("LLMChat initialized successfully")
+
     def ask(
         self,
         prompt: str,
@@ -83,24 +105,22 @@ class LLMChat(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any]:
-        """Chat with LLMChat
-
-        Args:
-            prompt (str): Prompt to be sent.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict: Response dictionary.
-        """
+        """Chat with LLMChat with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
@@ -117,8 +137,15 @@ class LLMChat(Provider):
 
         def for_stream():
             try:
+                if self.logger:
+                    self.logger.debug("Initiating streaming request to API")
+
                 with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
                     response.raise_for_status()
+
+                    if self.logger:
+                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
                     full_response = ""
                     for line in response.iter_lines():
                         if line:
@@ -132,19 +159,31 @@ class LLMChat(Provider):
                                 yield response_text if raw else dict(text=response_text)
                             except json.JSONDecodeError:
                                 if line.strip() != 'data: [DONE]':
-                                    print(f"Failed to parse line: {line}")
+                                    if self.logger:
+                                        self.logger.warning(f"Failed to parse line: {line}")
                                 continue
+
                     self.last_response.update(dict(text=full_response))
                     self.conversation.update_chat_history(
                         prompt, self.get_message(self.last_response)
                     )
+
             except requests.exceptions.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"API request failed: {str(e)}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
+
             full_response = ""
             for line in for_stream():
                 full_response += line['text'] if not raw else line
+
+            if self.logger:
+                self.logger.debug("Response processing completed")
+
             return dict(text=full_response)
 
         return for_stream() if stream else for_non_stream()
@@ -156,15 +195,9 @@ class LLMChat(Provider):
         optimizer: str = None,
        conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
+        """Generate response with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
 
         def for_stream():
             for response in self.ask(
@@ -185,21 +218,14 @@ class LLMChat(Provider):
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: Dict[str, Any]) -> str:
-        """Retrieves message only from response.
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted.
-        """
+        """Retrieves message from response with validation"""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
-
 if __name__ == "__main__":
     from rich import print
-    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
+    # Enable logging for testing
+    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct', logging=True)
     response = ai.chat("What's the meaning of life?", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
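
The 7.1 change threads one pattern through LLMChat: a logger that is constructed only when the new `logging` flag is set, with every log call guarded by `if self.logger:` so the default path stays silent. Below is a minimal sketch of that pattern, using the stdlib `logging` module as a stand-in for webscout's LitLogger; the `DemoClient` class and its names are hypothetical.

import logging

class DemoClient:
    def __init__(self, model: str, enable_logging: bool = False):
        # Build the logger only on request; the default path stays silent.
        self.logger = logging.getLogger("DemoClient") if enable_logging else None
        if self.logger:
            logging.basicConfig(level=logging.DEBUG)
            self.logger.info("Initializing with model: %s", model)
        self.model = model

    def ask(self, prompt: str) -> str:
        # Every call site guards its log line, mirroring the `if self.logger:`
        # checks added throughout LLMChat.ask() and LLMChat.chat().
        if self.logger:
            self.logger.debug("Processing request - Prompt: %.50s...", prompt)
        return f"[{self.model}] echo: {prompt}"

client = DemoClient(model="demo-model", enable_logging=True)
print(client.ask("What's the meaning of life?"))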
webscout/Provider/meta.py CHANGED
@@ -146,7 +146,7 @@ def get_fb_session(email, password, proxies=None):
         "sec-fetch-site": "same-origin",
         "sec-fetch-user": "?1",
         "upgrade-insecure-requests": "1",
-        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+        "user-agent": Lit().random(),
         "viewport-width": "1728",
     }
 
@@ -191,7 +191,8 @@ def get_fb_session(email, password, proxies=None):
         "referer": "https://www.meta.ai/",
         "sec-fetch-mode": "cors",
         "sec-fetch-site": "same-origin",
-        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+        "user-agent": Lit().random(),
+        "viewport-width": "1728",
     }
 
     response = requests.request("POST", url, headers=headers, data=payload, proxies=proxies)
@@ -211,7 +212,7 @@ def get_fb_session(email, password, proxies=None):
         "sec-fetch-site": "cross-site",
         "sec-fetch-user": "?1",
         "upgrade-insecure-requests": "1",
-        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+        "user-agent": Lit().random(),
     }
     session = requests.session()
     session.proxies = proxies
@@ -223,7 +224,7 @@ def get_fb_session(email, password, proxies=None):
 
     payload = {}
     headers = {
-        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:125.0) Gecko/20100101 Firefox/125.0",
+        "User-Agent": Lit().random(),
         "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
         "Accept-Language": "en-US,en;q=0.5",
         "Accept-Encoding": "gzip, deflate, br",
@@ -330,8 +331,7 @@ class Meta(Provider):
         self.session = requests.Session()
         self.session.headers.update(
             {
-                "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 "
-                "(KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+                "user-agent": Lit().random(),
             }
         )
         self.access_token = None
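
The meta.py changes all make the same swap: each hardcoded browser user-agent string becomes `Lit().random()`, so every session presents a freshly picked user-agent instead of a pinned Chrome or Firefox build. A minimal sketch of that rotation, with a small hand-rolled pool standing in for webscout's LitAgent:

import random
import requests

# Hypothetical stand-in pool for illustration; LitAgent supplies its own
# user-agent strings via Lit().random().
USER_AGENTS = [
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:125.0) "
    "Gecko/20100101 Firefox/125.0",
]

def random_user_agent() -> str:
    # Pick a user-agent per session rather than pinning one browser build.
    return random.choice(USER_AGENTS)

session = requests.Session()
session.headers.update({"user-agent": random_user_agent()})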