webscout 6.9-py3-none-any.whl → 7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as potentially problematic by the registry.

Files changed (51)
  1. webscout/AIbase.py +12 -2
  2. webscout/DWEBS.py +38 -22
  3. webscout/Extra/autocoder/autocoder_utiles.py +68 -7
  4. webscout/Extra/autollama.py +0 -16
  5. webscout/Extra/gguf.py +0 -13
  6. webscout/LLM.py +1 -1
  7. webscout/Provider/AISEARCH/DeepFind.py +251 -0
  8. webscout/Provider/AISEARCH/__init__.py +2 -2
  9. webscout/Provider/AISEARCH/felo_search.py +167 -118
  10. webscout/Provider/Blackboxai.py +136 -137
  11. webscout/Provider/Cloudflare.py +92 -78
  12. webscout/Provider/Deepinfra.py +59 -35
  13. webscout/Provider/Glider.py +222 -0
  14. webscout/Provider/Groq.py +26 -18
  15. webscout/Provider/HF_space/__init__.py +0 -0
  16. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  17. webscout/Provider/Jadve.py +108 -77
  18. webscout/Provider/Llama3.py +117 -94
  19. webscout/Provider/Marcus.py +65 -10
  20. webscout/Provider/Netwrck.py +61 -49
  21. webscout/Provider/PI.py +77 -122
  22. webscout/Provider/PizzaGPT.py +129 -82
  23. webscout/Provider/TextPollinationsAI.py +229 -0
  24. webscout/Provider/Youchat.py +28 -22
  25. webscout/Provider/__init__.py +12 -4
  26. webscout/Provider/askmyai.py +2 -2
  27. webscout/Provider/chatglm.py +205 -0
  28. webscout/Provider/dgaf.py +215 -0
  29. webscout/Provider/gaurish.py +106 -66
  30. webscout/Provider/hermes.py +219 -0
  31. webscout/Provider/llamatutor.py +72 -62
  32. webscout/Provider/llmchat.py +62 -35
  33. webscout/Provider/meta.py +6 -6
  34. webscout/Provider/multichat.py +205 -104
  35. webscout/Provider/typegpt.py +26 -23
  36. webscout/Provider/yep.py +3 -3
  37. webscout/litagent/__init__.py +3 -146
  38. webscout/litagent/agent.py +120 -0
  39. webscout/litagent/constants.py +31 -0
  40. webscout/tempid.py +0 -4
  41. webscout/version.py +1 -1
  42. webscout/webscout_search.py +1141 -1140
  43. webscout/webscout_search_async.py +635 -635
  44. {webscout-6.9.dist-info → webscout-7.1.dist-info}/METADATA +37 -33
  45. {webscout-6.9.dist-info → webscout-7.1.dist-info}/RECORD +49 -41
  46. {webscout-6.9.dist-info → webscout-7.1.dist-info}/WHEEL +1 -1
  47. webscout/Provider/AISEARCH/ooai.py +0 -155
  48. webscout/Provider/RUBIKSAI.py +0 -272
  49. {webscout-6.9.dist-info → webscout-7.1.dist-info}/LICENSE.md +0 -0
  50. {webscout-6.9.dist-info → webscout-7.1.dist-info}/entry_points.txt +0 -0
  51. {webscout-6.9.dist-info → webscout-7.1.dist-info}/top_level.txt +0 -0
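
The Provider/__init__.py change (+12 -4) together with the new modules listed above (Glider.py, TextPollinationsAI.py, chatglm.py, dgaf.py, hermes.py, HF_space/qwen_qwen2.py, AISEARCH/DeepFind.py) suggests 7.1 mainly adds providers. A minimal usage sketch, assuming the new classes are exported from webscout.Provider under the same names as their modules (an assumption; this diff does not show the __init__.py contents):

    # Hypothetical: class names and export path are assumed, not shown in this diff.
    from webscout.Provider import Glider, TextPollinationsAI

    ai = Glider()             # assumed to follow the common Provider constructor
    print(ai.chat("Hello"))   # Provider subclasses expose chat()/ask(), as in PI.py below
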
webscout/Provider/PI.py CHANGED
@@ -1,3 +1,4 @@
+
 import cloudscraper
 import json
 import re
@@ -7,7 +8,7 @@ from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
-from typing import Dict, Union, Any
+from typing import Dict, Union, Any, Optional
 from webscout import LitAgent
 from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
 
@@ -35,26 +36,6 @@ class PiAI(Provider):
     ):
         """
         Initializes the PiAI provider with specified parameters.
-
-        Args:
-            is_conversation (bool): Whether to maintain conversation history
-            max_tokens (int): Maximum number of tokens in response
-            timeout (int): Request timeout in seconds
-            intro (str): Custom introduction message
-            filepath (str): Path to save conversation history
-            update_file (bool): Whether to update conversation history file
-            proxies (dict): Proxy configuration
-            history_offset (int): Conversation history limit
-            act (str): Custom personality/act for the AI
-            logging (bool): Enable debug logging
-
-        Examples:
-            >>> ai = PiAI(logging=True)
-            >>> ai.ask("What's the weather today?", "Alice")
-            Sends a prompt to Pi.ai and returns the response.
-
-            >>> ai.chat("Tell me a joke", voice_name="William")
-            Initiates a chat with Pi.ai using the provided prompt.
         """
         self.scraper = cloudscraper.create_scraper()
         self.url = 'https://pi.ai/api/chat'
@@ -115,30 +96,19 @@ class PiAI(Provider):
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
-        # Initialize logger
         self.logger = LitLogger(name="PiAI", format=LogFormat.MODERN_EMOJI, color_scheme=ColorScheme.CYBERPUNK) if logging else None
 
         self.knowledge_cutoff = "December 2023"
 
-        # Initialize conversation ID
         if self.is_conversation:
             self.start_conversation()
 
         if self.logger:
-            self.logger.debug("PiAI instance initialized")
+            self.logger.info("PiAI instance initialized successfully")
 
     def start_conversation(self) -> str:
         """
         Initializes a new conversation and returns the conversation ID.
-
-        Returns:
-            str: The conversation ID from Pi.ai
-
-        Examples:
-            >>> ai = PiAI()
-            >>> conversation_id = ai.start_conversation()
-            >>> print(conversation_id)
-            'abc123xyz'
         """
         if self.logger:
             self.logger.debug("Starting new conversation")
@@ -151,52 +121,34 @@ class PiAI(Provider):
             timeout=self.timeout
         )
 
-        if not response.ok and self.logger:
-            self.logger.error(f"Failed to start conversation: {response.status_code}")
+        if not response.ok:
+            if self.logger:
+                self.logger.error(f"Failed to start conversation. Status code: {response.status_code}")
+            raise Exception(f"Failed to start conversation: {response.status_code}")
 
         data = response.json()
         self.conversation_id = data['conversations'][0]['sid']
 
         if self.logger:
-            self.logger.debug(f"Conversation started with ID: {self.conversation_id}")
+            self.logger.info(f"Conversation started successfully with ID: {self.conversation_id}")
 
         return self.conversation_id
 
     def ask(
         self,
         prompt: str,
-        voice_name:str,
+        voice_name: Optional[str] = None,
         stream: bool = False,
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-        verbose:bool = None,
-        output_file:str = None
+        output_file: str = None
     ) -> dict:
         """
         Interact with Pi.ai by sending a prompt and receiving a response.
-
-        Args:
-            prompt (str): The prompt to be sent to Pi.ai
-            voice_name (str): The name of the voice to use for audio responses
-            stream (bool): Flag for streaming response
-            raw (bool): If True, returns the raw response as received
-            optimizer (str): Name of the prompt optimizer to use
-            conversationally (bool): If True, chat conversationally when using optimizer
-            verbose (bool): If True, provides detailed output
-            output_file (str): File path to save the output
-
-        Returns:
-            dict: A dictionary containing the AI's response
-
-        Examples:
-            >>> ai = PiAI(logging=True)
-            >>> response = ai.ask("Hello!", "Alice", verbose=True)
-            >>> print(response['text'])
-            'Hi! How can I help you today?'
         """
         if self.logger:
-            self.logger.debug(f"ask() called with prompt: {prompt}, voice: {voice_name}")
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}... Voice: {voice_name}")
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -206,7 +158,7 @@ class PiAI(Provider):
                 )
             else:
                 if self.logger:
-                    self.logger.error(f"Invalid optimizer: {optimizer}")
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
@@ -217,25 +169,44 @@ class PiAI(Provider):
         }
 
         def for_stream():
-            response = self.scraper.post(self.url, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout)
+            response = self.scraper.post(
+                self.url,
+                headers=self.headers,
+                cookies=self.cookies,
+                json=data,
+                stream=True,
+                timeout=self.timeout
+            )
+
+            if not response.ok:
+                if self.logger:
+                    self.logger.error(f"API request failed. Status code: {response.status_code}")
+                raise Exception(f"API request failed: {response.status_code}")
+
             output_str = response.content.decode('utf-8')
             sids = re.findall(r'"sid":"(.*?)"', output_str)
             second_sid = sids[1] if len(sids) >= 2 else None
-            #Start the audio download in a separate thread
-            threading.Thread(target=self.download_audio_threaded, args=(voice_name, second_sid, verbose, output_file)).start()
+
+            if voice_name and second_sid:
+                threading.Thread(
+                    target=self.download_audio_threaded,
+                    args=(voice_name, second_sid, output_file)
+                ).start()
 
             streaming_text = ""
             for line in response.iter_lines(decode_unicode=True):
                 if line.startswith("data: "):
-                    json_data = line[6:]
                     try:
-                        parsed_data = json.loads(json_data)
+                        parsed_data = json.loads(line[6:])
                         if 'text' in parsed_data:
                             streaming_text += parsed_data['text']
                             resp = dict(text=streaming_text)
                             self.last_response.update(resp)
                             yield parsed_data if raw else resp
-                    except:continue
+                    except json.JSONDecodeError:
+                        if self.logger:
+                            self.logger.warning("Failed to parse JSON from stream")
+                        continue
 
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -251,44 +222,30 @@ class PiAI(Provider):
     def chat(
         self,
         prompt: str,
-        voice_name: str = "Alice",
+        voice_name: Optional[str] = None,
        stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-        verbose:bool = True,
-        output_file:str = "PiAi.mp3"
+        output_file: str = "PiAi.mp3"
     ) -> str:
         """
         Generates a response based on the provided prompt.
-
-        Args:
-            prompt (str): Input prompt for generating response
-            voice_name (str): Voice to use for audio response
-            stream (bool): Enable response streaming
-            optimizer (str): Prompt optimizer to use
-            conversationally (bool): Enable conversational mode with optimizer
-            verbose (bool): Enable verbose output
-            output_file (str): Audio output file path
-
-        Returns:
-            str: The generated response
-
-        Examples:
-            >>> ai = PiAI(logging=True)
-            >>> response = ai.chat("Tell me a joke", voice_name="William")
-            >>> print(response)
-            'Why did the scarecrow win an award? Because he was outstanding in his field!'
         """
         if self.logger:
-            self.logger.debug(f"chat() called with prompt: {prompt}, voice: {voice_name}")
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+
+        if voice_name and voice_name not in self.AVAILABLE_VOICES:
+            if self.logger:
+                self.logger.error(f"Invalid voice requested: {voice_name}")
+            raise ValueError(f"Voice '{voice_name}' not one of [{', '.join(self.AVAILABLE_VOICES.keys())}]")
 
-        assert (
-            voice_name in self.AVAILABLE_VOICES
-        ), f"Voice '{voice_name}' not one of [{', '.join(self.AVAILABLE_VOICES.keys())}]"
         def for_stream():
             for response in self.ask(
-                prompt, voice_name, True, optimizer=optimizer, conversationally=conversationally,
-                verbose=verbose,
+                prompt,
+                voice_name,
+                True,
+                optimizer=optimizer,
+                conversationally=conversationally,
                 output_file=output_file
             ):
                 yield self.get_message(response).encode('utf-8').decode('utf-8')
@@ -301,7 +258,6 @@ class PiAI(Provider):
                     False,
                     optimizer=optimizer,
                     conversationally=conversationally,
-                    verbose=verbose,
                     output_file=output_file
                 )
             ).encode('utf-8').decode('utf-8')
@@ -309,51 +265,50 @@ class PiAI(Provider):
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
+        """Retrieves message only from response"""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
-    def download_audio_threaded(self, voice_name: str, second_sid: str, verbose:bool, output_file:str) -> None:
-        """Downloads audio in a separate thread.
-
-        Args:
-            voice_name (str): The name of the desired voice.
-            second_sid (str): The message SID for the audio request.
-            verbose (bool): Flag to indicate if verbose output is desired.
-            output_file (str): The file path where the audio will be saved.
-        """
+    def download_audio_threaded(self, voice_name: str, second_sid: str, output_file: str) -> None:
+        """Downloads audio in a separate thread."""
         if self.logger:
-            self.logger.debug(f"Downloading audio with voice: {voice_name}")
+            self.logger.debug(f"Starting audio download - Voice: {voice_name}, SID: {second_sid}")
 
         params = {
             'mode': 'eager',
             'voice': f'voice{self.AVAILABLE_VOICES[voice_name]}',
             'messageSid': second_sid,
         }
+
         try:
-            audio_response = self.scraper.get('https://pi.ai/api/chat/voice', params=params, cookies=self.cookies, headers=self.headers, timeout=self.timeout)
-            if not audio_response.ok and self.logger:
-                self.logger.error(f"Audio download failed: {audio_response.status_code}")
+            audio_response = self.scraper.get(
                'https://pi.ai/api/chat/voice',
+                params=params,
+                cookies=self.cookies,
+                headers=self.headers,
+                timeout=self.timeout
+            )
+
+            if not audio_response.ok:
+                if self.logger:
+                    self.logger.error(f"Audio download failed. Status code: {audio_response.status_code}")
+                return
 
-            audio_response.raise_for_status() # Raise an exception for bad status codes
+            audio_response.raise_for_status()
+
             with open(output_file, "wb") as file:
                 file.write(audio_response.content)
-            if verbose:print("\nAudio file downloaded successfully.")
+
+            if self.logger:
+                self.logger.info(f"Audio file successfully downloaded to: {output_file}")
+
         except requests.exceptions.RequestException as e:
             if self.logger:
-                self.logger.error(f"Audio download failed: {e}")
-            if verbose:print(f"\nFailed to download audio file. Error: {e}")
+                self.logger.error(f"Audio download failed: {str(e)}")
 
 if __name__ == '__main__':
     from rich import print
-    ai = PiAI()
-    response = ai.chat(input(">>> "), stream=True, verbose=False)
+    ai = PiAI(logging=True)
+    response = ai.chat(input(">>> "), stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
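
For callers, the net effect of the PI.py changes: voice_name is now optional, the verbose flag is gone (diagnostics go through LitLogger instead of print), audio is only downloaded when a voice is requested, and an invalid voice raises ValueError rather than tripping an assert. A minimal usage sketch against the 7.1 signatures shown above (prompt text is illustrative):

    from webscout.Provider.PI import PiAI

    ai = PiAI(logging=True)

    # Text-only streaming chat: voice_name defaults to None, so no audio thread starts.
    for chunk in ai.chat("Summarize this diff", stream=True):
        print(chunk, end="", flush=True)

    # Voice output: "William" is one of the voices referenced in the old docstring examples.
    reply = ai.chat("Tell me a joke", voice_name="William", output_file="PiAi.mp3")
    print(reply)
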
webscout/Provider/PizzaGPT.py CHANGED
@@ -1,11 +1,9 @@
 import requests
-from typing import Any, AsyncGenerator, Dict, Optional, Union, Generator
 import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
+import re
+from typing import Any, Dict, Optional, Union, Generator
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout import LitAgent as Lit
 from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
@@ -13,9 +11,7 @@ from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
 class PIZZAGPT(Provider):
     """
     PIZZAGPT is a provider class for interacting with the PizzaGPT API.
-
-    Attributes:
-        knowledge_cutoff (str): The knowledge cutoff date for the model
+    Supports web search integration and handles responses using regex.
     """
 
     def __init__(
@@ -30,18 +26,9 @@ class PIZZAGPT(Provider):
         history_offset: int = 10250,
         act: str = None,
         logging: bool = False,
+        model: str = "gpt-4o-mini"
     ) -> None:
-        """
-        Initializes the PizzaGPT provider with the specified parameters.
-
-        Examples:
-            >>> ai = PIZZAGPT(logging=True)
-            >>> ai.ask("What's the weather today?")
-            Sends a prompt to the PizzaGPT API and returns the response.
-
-            >>> ai.chat("Tell me a joke")
-            Initiates a chat with the PizzaGPT API using the provided prompt.
-        """
+        """Initialize PizzaGPT with enhanced configuration options."""
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -49,31 +36,23 @@ class PIZZAGPT(Provider):
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
+        self.model = model
+
         self.headers = {
             "accept": "application/json",
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-length": "17",
+            "accept-language": "en-US,en;q=0.9",
             "content-type": "application/json",
-            "dnt": "1",
             "origin": "https://www.pizzagpt.it",
-            "priority": "u=1, i",
             "referer": "https://www.pizzagpt.it/en",
-            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
             "user-agent": Lit().random(),
             "x-secret": "Marinara"
         }
 
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
         self.session.headers.update(self.headers)
         Conversation.intro = (
             AwesomePrompts().get_act(
@@ -82,15 +61,57 @@ class PIZZAGPT(Provider):
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
-        # Initialize logger
-        self.logger = LitLogger(name="PIZZAGPT", format=LogFormat.MODERN_EMOJI, color_scheme=ColorScheme.CYBERPUNK) if logging else None
+        self.logger = LitLogger(
+            name="PIZZAGPT",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
 
+        if self.logger:
+            self.logger.info(f"PIZZAGPT initialized with model: {self.model}")
+
+    def _extract_content(self, text: str) -> Dict[str, Any]:
+        """
+        Extract content from response text using regex.
+        """
+        if self.logger:
+            self.logger.debug("Extracting content from response text")
+
+        try:
+            # Look for content pattern
+            content_match = re.search(r'"content"\s*:\s*"(.*?)"(?=\s*[,}])', text, re.DOTALL)
+            if not content_match:
+                if self.logger:
+                    self.logger.error("Content pattern not found in response")
+                raise exceptions.FailedToGenerateResponseError("Content not found in response")
+
+            content = content_match.group(1)
+            # Unescape special characters
+            content = content.encode().decode('unicode_escape')
+
+            # Look for citations if present
+            citations = []
+            citations_match = re.search(r'"citations"\s*:\s*\[(.*?)\]', text, re.DOTALL)
+            if citations_match:
+                citations_text = citations_match.group(1)
+                citations = re.findall(r'"(.*?)"', citations_text)
+
+            return {
+                "content": content,
+                "citations": citations
+            }
+
+        except Exception as e:
+            if self.logger:
+                self.logger.error(f"Failed to extract content: {str(e)}")
+            raise exceptions.FailedToGenerateResponseError(f"Failed to extract content: {str(e)}")
 
     def ask(
         self,
@@ -99,15 +120,14 @@ class PIZZAGPT(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        web_search: bool = False,
     ) -> Dict[str, Any]:
         """
-        Sends a prompt to the PizzaGPT API and returns the response.
-
-        Examples:
-            >>> ai = PIZZAGPT()
-            >>> ai.ask("What's the weather today?")
-            Returns the response from the PizzaGPT API.
+        Send a prompt to PizzaGPT API with optional web search capability.
         """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Web search enabled: {web_search}")
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -115,38 +135,66 @@ class PIZZAGPT(Provider):
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
                 if self.logger:
                     self.logger.error(f"Invalid optimizer: {optimizer}")
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
-        self.session.headers.update(self.headers)
-        payload = {"question": conversation_prompt}
+        payload = {
+            "question": conversation_prompt,
+            "model": self.model,
+            "searchEnabled": web_search
+        }
+
+        if self.logger:
+            self.logger.debug(f"Sending payload: {json.dumps(payload, indent=2)}")
 
         try:
             response = self.session.post(
-                self.api_endpoint, json=payload, timeout=self.timeout
+                self.api_endpoint,
+                json=payload,
+                timeout=self.timeout
             )
+
             if self.logger:
-                self.logger.debug(response)
+                self.logger.debug(f"Response status: {response.status_code}")
+
             if not response.ok:
                 if self.logger:
-                    self.logger.error(f"Failed to generate response: {response.status_code} {response.reason}")
+                    self.logger.error(f"API request failed: {response.status_code} - {response.reason}")
                 raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
                 )
 
-            resp = response.json()
-            if self.logger:
-                self.logger.debug(resp)
-            self.last_response.update(dict(text=resp['content']))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-            return self.last_response
+            response_text = response.text
+            if not response_text:
+                if self.logger:
+                    self.logger.error("Empty response received from API")
+                raise exceptions.FailedToGenerateResponseError("Empty response received from API")
 
-        except Exception as e:
-            raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+            try:
+                resp = self._extract_content(response_text)
+                if self.logger:
+                    self.logger.debug("Response parsed successfully")
+
+                self.last_response.update(dict(text=resp['content']))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+                return self.last_response
+
+            except Exception as e:
+                if self.logger:
+                    self.logger.error(f"Failed to parse response: {str(e)}")
+                    self.logger.debug(f"Raw response text: {response_text[:500]}")
+                raise exceptions.FailedToGenerateResponseError(f"Failed to parse response: {str(e)}")
+
+        except requests.exceptions.RequestException as e:
+            if self.logger:
+                self.logger.error(f"Request failed: {str(e)}")
+            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
 
     def chat(
         self,
@@ -154,40 +202,39 @@ class PIZZAGPT(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        web_search: bool = False,
     ) -> str:
         """
-        Initiates a chat with the PizzaGPT API using the provided prompt.
-
-        Examples:
-            >>> ai = PIZZAGPT()
-            >>> ai.chat("Tell me a joke")
-            Returns the chat response from the PizzaGPT API.
+        Chat with PizzaGPT with optional web search capability.
         """
-
-        return self.get_message(
-            self.ask(
+        if self.logger:
+            self.logger.debug(f"Chat request initiated with web_search={web_search}")
+
+        try:
+            response = self.ask(
                 prompt,
                 optimizer=optimizer,
                 conversationally=conversationally,
+                web_search=web_search
             )
-        )
+            return self.get_message(response)
+        except Exception as e:
+            if self.logger:
+                self.logger.error(f"Chat failed: {str(e)}")
+            raise
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
+        """Extract message from response dictionary."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response.get("text", "")
+
 if __name__ == "__main__":
     from rich import print
-
-    ai = PIZZAGPT(logging=True)
-    # Stream the response
-    response = ai.chat("hi")
-    for chunk in response:
-        print(chunk, end="", flush=True)
+
+    # Example usage with web search enabled
+    ai = PIZZAGPT(logging=True)
+    try:
+        response = ai.chat("Who is Founder and CEO of HelpingAI??", web_search=True)
+        print(response)
+    except Exception as e:
+        print(f"Error: {str(e)}")