webscout 6.9-py3-none-any.whl → 7.1-py3-none-any.whl

This diff compares publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (51)
  1. webscout/AIbase.py +12 -2
  2. webscout/DWEBS.py +38 -22
  3. webscout/Extra/autocoder/autocoder_utiles.py +68 -7
  4. webscout/Extra/autollama.py +0 -16
  5. webscout/Extra/gguf.py +0 -13
  6. webscout/LLM.py +1 -1
  7. webscout/Provider/AISEARCH/DeepFind.py +251 -0
  8. webscout/Provider/AISEARCH/__init__.py +2 -2
  9. webscout/Provider/AISEARCH/felo_search.py +167 -118
  10. webscout/Provider/Blackboxai.py +136 -137
  11. webscout/Provider/Cloudflare.py +92 -78
  12. webscout/Provider/Deepinfra.py +59 -35
  13. webscout/Provider/Glider.py +222 -0
  14. webscout/Provider/Groq.py +26 -18
  15. webscout/Provider/HF_space/__init__.py +0 -0
  16. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  17. webscout/Provider/Jadve.py +108 -77
  18. webscout/Provider/Llama3.py +117 -94
  19. webscout/Provider/Marcus.py +65 -10
  20. webscout/Provider/Netwrck.py +61 -49
  21. webscout/Provider/PI.py +77 -122
  22. webscout/Provider/PizzaGPT.py +129 -82
  23. webscout/Provider/TextPollinationsAI.py +229 -0
  24. webscout/Provider/Youchat.py +28 -22
  25. webscout/Provider/__init__.py +12 -4
  26. webscout/Provider/askmyai.py +2 -2
  27. webscout/Provider/chatglm.py +205 -0
  28. webscout/Provider/dgaf.py +215 -0
  29. webscout/Provider/gaurish.py +106 -66
  30. webscout/Provider/hermes.py +219 -0
  31. webscout/Provider/llamatutor.py +72 -62
  32. webscout/Provider/llmchat.py +62 -35
  33. webscout/Provider/meta.py +6 -6
  34. webscout/Provider/multichat.py +205 -104
  35. webscout/Provider/typegpt.py +26 -23
  36. webscout/Provider/yep.py +3 -3
  37. webscout/litagent/__init__.py +3 -146
  38. webscout/litagent/agent.py +120 -0
  39. webscout/litagent/constants.py +31 -0
  40. webscout/tempid.py +0 -4
  41. webscout/version.py +1 -1
  42. webscout/webscout_search.py +1141 -1140
  43. webscout/webscout_search_async.py +635 -635
  44. {webscout-6.9.dist-info → webscout-7.1.dist-info}/METADATA +37 -33
  45. {webscout-6.9.dist-info → webscout-7.1.dist-info}/RECORD +49 -41
  46. {webscout-6.9.dist-info → webscout-7.1.dist-info}/WHEEL +1 -1
  47. webscout/Provider/AISEARCH/ooai.py +0 -155
  48. webscout/Provider/RUBIKSAI.py +0 -272
  49. {webscout-6.9.dist-info → webscout-7.1.dist-info}/LICENSE.md +0 -0
  50. {webscout-6.9.dist-info → webscout-7.1.dist-info}/entry_points.txt +0 -0
  51. {webscout-6.9.dist-info → webscout-7.1.dist-info}/top_level.txt +0 -0
webscout/Provider/hermes.py (new file)
@@ -0,0 +1,219 @@
+ import requests
+ import json
+ from typing import Any, Dict, Generator, Optional
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class NousHermes(Provider):
+     """
+     A class to interact with the Hermes API.
+     """
+
+     AVAILABLE_MODELS = ["Hermes-3-Llama-3.1-70B", "Hermes-3-Llama-3.1-8B"]
+
+     def __init__(
+         self,
+         cookies_path: str,
+         is_conversation: bool = True,
+         max_tokens: int = 8000,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Hermes-3-Llama-3.1-70B",
+         system_prompt: str = "You are a helpful AI assistant.",
+         temperature: float = 0.7,
+         top_p: float = 0.9,
+     ):
+         """Initializes the Hermes API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(
+                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
+             )
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.api_endpoint = "https://hermes.nousresearch.com/api/chat"
+         self.temperature = temperature
+         self.top_p = top_p
+         self.cookies_path = cookies_path
+         self.cookies = self._load_cookies()
+         self.headers = {
+             'accept': '*/*',
+             'accept-language': 'en-US,en;q=0.9',
+             'content-type': 'application/json',
+             'origin': 'https://hermes.nousresearch.com',
+             'referer': 'https://hermes.nousresearch.com/',
+             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
+             'cookie': self.cookies
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def _load_cookies(self) -> Optional[str]:
+         """Load cookies from a JSON file and convert them to a string."""
+         try:
+             with open(self.cookies_path, 'r') as f:
+                 cookies_data = json.load(f)
+             return '; '.join([f"{cookie['name']}={cookie['value']}" for cookie in cookies_data])
+         except FileNotFoundError:
+             print("Error: cookies.json file not found!")
+             return None
+         except json.JSONDecodeError:
+             print("Error: Invalid JSON format in cookies.json!")
+             return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+         """Chat with AI
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict|AsyncGenerator : ai content
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "messages": [{"role": "system", "content": self.system_prompt}, {"role": "user", "content": conversation_prompt}],
+             "model": self.model,
+             "max_tokens": self.max_tokens_to_sample,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+         }
+         def for_stream():
+             response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+             full_response = ""
+             for line in response.iter_lines():
+                 if line:
+                     decoded_line = line.decode('utf-8').replace('data: ', '')
+                     try:
+                         data = json.loads(decoded_line)
+                         if data['type'] == 'llm_response':
+                             content = data['content']
+                             full_response += content
+                             yield content if raw else dict(text=content)
+                     except json.JSONDecodeError:
+                         continue
+             self.last_response.update(dict(text=full_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str | Generator[str, None, None]:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = NousHermes(cookies_path="cookies.json")
+     response = ai.chat(input(">>> "), stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
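
The new NousHermes provider authenticates with a browser cookie export rather than an API key: _load_cookies() reads a JSON list of {"name": ..., "value": ...} objects and joins them into a Cookie header. A minimal usage sketch, assuming webscout 7.1 is installed and you have a valid hermes.nousresearch.com session cookie; the import path mirrors the module path in the file list, and the cookie name and value below are placeholders.

import json

from webscout.Provider.hermes import NousHermes  # import path assumed from the file list

# _load_cookies() expects a JSON list of {"name": ..., "value": ...} objects;
# the cookie below is a placeholder, not a real credential.
with open("cookies.json", "w") as f:
    json.dump([{"name": "session", "value": "<your-cookie-value>"}], f)

ai = NousHermes(cookies_path="cookies.json", model="Hermes-3-Llama-3.1-8B")
for chunk in ai.chat("Hello!", stream=True):
    print(chunk, end="", flush=True)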
webscout/Provider/llamatutor.py
@@ -1,3 +1,4 @@
+
  import requests
  import json

@@ -7,9 +8,11 @@ from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
  from webscout import LitAgent as Lit
+ from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+
  class LlamaTutor(Provider):
      """
-     A class to interact with the LlamaTutor API (Together.ai).
+     A class to interact with the LlamaTutor API (Together.ai) with comprehensive logging.
      """

      def __init__(
@@ -24,23 +27,20 @@ class LlamaTutor(Provider):
          history_offset: int = 10250,
          act: str = None,
          system_prompt: str = "You are a helpful AI assistant.",
+         logging: bool = False
      ):
          """
-         Initializes the LlamaTutor API with given parameters.
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             system_prompt (str, optional): System prompt for LlamaTutor.
-                 Defaults to "You are a helpful AI assistant.".
+         Initializes the LlamaTutor API with given parameters and logging capabilities.
          """
+         self.logger = LitLogger(
+             name="LlamaTutor",
+             format=LogFormat.MODERN_EMOJI,
+             color_scheme=ColorScheme.CYBERPUNK
+         ) if logging else None
+
+         if self.logger:
+             self.logger.info("Initializing LlamaTutor API")
+
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
@@ -49,6 +49,7 @@ class LlamaTutor(Provider):
          self.timeout = timeout
          self.last_response = {}
          self.system_prompt = system_prompt
+
          self.headers = {
              "Content-Type": "application/json",
              "Accept": "*/*",
@@ -71,7 +72,12 @@ class LlamaTutor(Provider):
              for method in dir(Optimizers)
              if callable(getattr(Optimizers, method)) and not method.startswith("__")
          )
+
          self.session.headers.update(self.headers)
+
+         if self.logger:
+             self.logger.debug("Headers configured and session updated")
+
          Conversation.intro = (
              AwesomePrompts().get_act(
                  act, raise_not_found=True, default=None, case_insensitive=True
@@ -79,12 +85,16 @@ class LlamaTutor(Provider):
              if act
              else intro or Conversation.intro
          )
+
          self.conversation = Conversation(
              is_conversation, self.max_tokens_to_sample, filepath, update_file
          )
          self.conversation.history_offset = history_offset
          self.session.proxies = proxies

+         if self.logger:
+             self.logger.info("LlamaTutor initialized successfully")
+
      def ask(
          self,
          prompt: str,
@@ -93,32 +103,23 @@ class LlamaTutor(Provider):
          optimizer: str = None,
          conversationally: bool = False,
      ) -> dict:
-         """Chat with LlamaTutor
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "How may I assist you today?"
-         }
-         ```
-         """
+         """Chat with LlamaTutor with logging capabilities"""
+         if self.logger:
+             self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+             self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
                  conversation_prompt = getattr(Optimizers, optimizer)(
                      conversation_prompt if conversationally else prompt
                  )
+                 if self.logger:
+                     self.logger.debug(f"Applied optimizer: {optimizer}")
              else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
+                 if self.logger:
+                     self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

          payload = {
              "messages": [
@@ -135,19 +136,35 @@ class LlamaTutor(Provider):

          def for_stream():
              try:
-                 response = requests.post(self.api_endpoint, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout)
+                 if self.logger:
+                     self.logger.debug("Initiating streaming request to API")
+
+                 response = requests.post(
+                     self.api_endpoint,
+                     headers=self.headers,
+                     data=json.dumps(payload),
+                     stream=True,
+                     timeout=self.timeout
+                 )
                  response.raise_for_status()

-                 # Stream and process the response line by line
+                 if self.logger:
+                     self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
                  full_response = ''
                  for line in response.iter_lines(decode_unicode=True):
                      if line:
-                         decoded_line = line.decode('utf-8')
-                         if decoded_line.startswith("data: "):
-                             json_data = json.loads(decoded_line[6:])
-                             if "text" in json_data:
-                                 full_response += json_data["text"]
-                                 yield json_data["text"] if raw else dict(text=json_data["text"])
+                         try:
+                             decoded_line = line.decode('utf-8')
+                             if decoded_line.startswith("data: "):
+                                 json_data = json.loads(decoded_line[6:])
+                                 if "text" in json_data:
+                                     full_response += json_data["text"]
+                                     yield json_data["text"] if raw else dict(text=json_data["text"])
+                         except json.JSONDecodeError as e:
+                             if self.logger:
+                                 self.logger.warning(f"Failed to parse response line: {e}")
+                             continue

                  self.last_response.update(dict(text=full_response))
                  self.conversation.update_chat_history(
@@ -155,11 +172,17 @@ class LlamaTutor(Provider):
                  )

              except requests.exceptions.HTTPError as http_err:
+                 if self.logger:
+                     self.logger.error(f"HTTP error occurred: {http_err}")
                  raise exceptions.FailedToGenerateResponseError(f"HTTP error occurred: {http_err}")
              except requests.exceptions.RequestException as err:
+                 if self.logger:
+                     self.logger.error(f"Request error occurred: {err}")
                  raise exceptions.FailedToGenerateResponseError(f"An error occurred: {err}")

          def for_non_stream():
+             if self.logger:
+                 self.logger.debug("Processing non-streaming request")
              for _ in for_stream():
                  pass
              return self.last_response
@@ -173,15 +196,9 @@ class LlamaTutor(Provider):
          optimizer: str = None,
          conversationally: bool = False,
      ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
+         """Generate response with logging capabilities"""
+         if self.logger:
+             self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")

          def for_stream():
              for response in self.ask(
@@ -202,21 +219,14 @@ class LlamaTutor(Provider):
          return for_stream() if stream else for_non_stream()

      def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
+         """Retrieves message from response with validation"""
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]

  if __name__ == "__main__":
      from rich import print
-
-     ai = LlamaTutor()
-     response = ai.chat("write a poem about AI", stream=True)
+     # Enable logging for testing
+     ai = LlamaTutor(logging=True)
+     response = ai.chat("Write a poem about AI", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/llmchat.py
@@ -1,3 +1,4 @@
+
  import requests
  import json
  from typing import Any, Dict, Optional, Generator, List
@@ -7,10 +8,12 @@ from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
+ from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
  from webscout import LitAgent as Lit
+
  class LLMChat(Provider):
      """
-     A class to interact with the LLMChat API.
+     A class to interact with the LLMChat API with comprehensive logging.
      """

      AVAILABLE_MODELS = [
@@ -19,6 +22,7 @@ class LLMChat(Provider):
          "@cf/meta/llama-3.2-3b-instruct",
          "@cf/meta/llama-3.2-1b-instruct"
          "@cf/meta/llama-3.3-70b-instruct-fp8-fast"
+         "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b"
      ]

      def __init__(
@@ -32,13 +36,25 @@ class LLMChat(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "@cf/meta/llama-3.1-70b-instruct", # Default model
+         model: str = "@cf/meta/llama-3.1-70b-instruct",
          system_prompt: str = "You are a helpful assistant.",
+         logging: bool = False
      ):
          """
-         Initializes the LLMChat API with given parameters.
+         Initializes the LLMChat API with given parameters and logging capabilities.
          """
+         self.logger = LitLogger(
+             name="LLMChat",
+             format=LogFormat.MODERN_EMOJI,
+             color_scheme=ColorScheme.CYBERPUNK
+         ) if logging else None
+
+         if self.logger:
+             self.logger.info(f"Initializing LLMChat with model: {model}")
+
          if model not in self.AVAILABLE_MODELS:
+             if self.logger:
+                 self.logger.error(f"Invalid model selected: {model}")
              raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

          self.session = requests.Session()
@@ -49,6 +65,7 @@ class LLMChat(Provider):
          self.last_response = {}
          self.model = model
          self.system_prompt = system_prompt
+
          self.headers = {
              "Content-Type": "application/json",
              "Accept": "*/*",
@@ -56,11 +73,13 @@ class LLMChat(Provider):
              "Origin": "https://llmchat.in",
              "Referer": "https://llmchat.in/"
          }
+
          self.__available_optimizers = (
              method
              for method in dir(Optimizers)
              if callable(getattr(Optimizers, method)) and not method.startswith("__")
          )
+
          Conversation.intro = (
              AwesomePrompts().get_act(
                  act, raise_not_found=True, default=None, case_insensitive=True
@@ -68,12 +87,16 @@ class LLMChat(Provider):
              if act
              else intro or Conversation.intro
          )
+
          self.conversation = Conversation(
              is_conversation, self.max_tokens_to_sample, filepath, update_file
          )
          self.conversation.history_offset = history_offset
          self.session.proxies = proxies

+         if self.logger:
+             self.logger.info("LLMChat initialized successfully")
+
      def ask(
          self,
          prompt: str,
@@ -82,24 +105,22 @@ class LLMChat(Provider):
          optimizer: str = None,
          conversationally: bool = False,
      ) -> Dict[str, Any]:
-         """Chat with LLMChat
-
-         Args:
-             prompt (str): Prompt to be sent.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict: Response dictionary.
-         """
+         """Chat with LLMChat with logging capabilities"""
+         if self.logger:
+             self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+             self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
                  conversation_prompt = getattr(Optimizers, optimizer)(
                      conversation_prompt if conversationally else prompt
                  )
+                 if self.logger:
+                     self.logger.debug(f"Applied optimizer: {optimizer}")
              else:
+                 if self.logger:
+                     self.logger.error(f"Invalid optimizer requested: {optimizer}")
                  raise exceptions.FailedToGenerateResponseError(
                      f"Optimizer is not one of {self.__available_optimizers}"
                  )
@@ -116,8 +137,15 @@ class LLMChat(Provider):

          def for_stream():
              try:
+                 if self.logger:
+                     self.logger.debug("Initiating streaming request to API")
+
                  with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
                      response.raise_for_status()
+
+                     if self.logger:
+                         self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
                      full_response = ""
                      for line in response.iter_lines():
                          if line:
@@ -131,19 +159,31 @@ class LLMChat(Provider):
                                      yield response_text if raw else dict(text=response_text)
                              except json.JSONDecodeError:
                                  if line.strip() != 'data: [DONE]':
-                                     print(f"Failed to parse line: {line}")
+                                     if self.logger:
+                                         self.logger.warning(f"Failed to parse line: {line}")
                                  continue
+
                  self.last_response.update(dict(text=full_response))
                  self.conversation.update_chat_history(
                      prompt, self.get_message(self.last_response)
                  )
+
              except requests.exceptions.RequestException as e:
+                 if self.logger:
+                     self.logger.error(f"API request failed: {str(e)}")
                  raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

          def for_non_stream():
+             if self.logger:
+                 self.logger.debug("Processing non-streaming request")
+
              full_response = ""
              for line in for_stream():
                  full_response += line['text'] if not raw else line
+
+             if self.logger:
+                 self.logger.debug("Response processing completed")
+
              return dict(text=full_response)

          return for_stream() if stream else for_non_stream()
@@ -155,15 +195,9 @@ class LLMChat(Provider):
          optimizer: str = None,
          conversationally: bool = False,
      ) -> str | Generator[str, None, None]:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
+         """Generate response with logging capabilities"""
+         if self.logger:
+             self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")

          def for_stream():
              for response in self.ask(
@@ -184,21 +218,14 @@ class LLMChat(Provider):
          return for_stream() if stream else for_non_stream()

      def get_message(self, response: Dict[str, Any]) -> str:
-         """Retrieves message only from response.
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted.
-         """
+         """Retrieves message from response with validation"""
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]

-
  if __name__ == "__main__":
      from rich import print
-     ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
+     # Enable logging for testing
+     ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct', logging=True)
      response = ai.chat("What's the meaning of life?", stream=True)
      for chunk in response:
-         print(chunk, end="", flush=True)
+         print(chunk, end="", flush=True)
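
Both the LlamaTutor and LLMChat diffs above follow the same pattern in 7.1: an opt-in logging constructor flag that builds a LitLogger (MODERN_EMOJI format, CYBERPUNK colour scheme) and guards every log call behind if self.logger:, so behaviour is unchanged unless logging is requested. A minimal sketch of enabling it, assuming webscout 7.1 is installed; the import path mirrors the module path in the file list.

from webscout.Provider.llmchat import LLMChat  # import path assumed from the file list

# logging defaults to False, so existing callers see no change;
# LlamaTutor accepts the same keyword argument.
ai = LLMChat(model="@cf/meta/llama-3.1-70b-instruct", logging=True)
for chunk in ai.chat("What's the meaning of life?", stream=True):
    print(chunk, end="", flush=True)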