webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/PI.py CHANGED
@@ -1,15 +1,16 @@
1
1
  from uuid import uuid4
2
- import cloudscraper
2
+ from curl_cffi.requests import Session
3
+ from curl_cffi import CurlError
3
4
  import json
4
5
  import re
5
6
  import threading
6
- import requests
7
7
  from webscout.AIutel import Optimizers
8
- from webscout.AIutel import Conversation
8
+ from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
9
9
  from webscout.AIutel import AwesomePrompts
10
10
  from webscout.AIbase import Provider
11
11
  from typing import Dict, Union, Any, Optional
12
12
  from webscout.litagent import LitAgent
13
+ from webscout import exceptions
13
14
 
14
15
  class PiAI(Provider):
15
16
  """
@@ -35,7 +36,7 @@ class PiAI(Provider):
35
36
  def __init__(
36
37
  self,
37
38
  is_conversation: bool = True,
38
- max_tokens: int = 2048,
39
+ max_tokens: int = 2048, # Note: max_tokens is not used by this API
39
40
  timeout: int = 30,
40
41
  intro: str = None,
41
42
  filepath: str = None,
@@ -46,7 +47,7 @@ class PiAI(Provider):
46
47
  voice: bool = False,
47
48
  voice_name: str = "voice3",
48
49
  output_file: str = "PiAI.mp3",
49
- model: str = "inflection_3_pi",
50
+ model: str = "inflection_3_pi", # Note: model is not used by this API
50
51
  ):
51
52
  """
52
53
  Initializes PiAI with voice support.
@@ -64,8 +65,8 @@ class PiAI(Provider):
64
65
  if voice and voice_name and voice_name not in self.AVAILABLE_VOICES:
65
66
  raise ValueError(f"Voice '{voice_name}' not available. Choose from: {list(self.AVAILABLE_VOICES.keys())}")
66
67
 
67
- # Initialize other attributes
68
- self.scraper = cloudscraper.create_scraper()
68
+ # Initialize curl_cffi Session instead of cloudscraper/requests
69
+ self.session = Session()
69
70
  self.primary_url = 'https://pi.ai/api/chat'
70
71
  self.fallback_url = 'https://pi.ai/api/v2/chat'
71
72
  self.url = self.primary_url
@@ -77,9 +78,6 @@ class PiAI(Provider):
77
78
  'DNT': '1',
78
79
  'Origin': 'https://pi.ai',
79
80
  'Referer': 'https://pi.ai/talk',
80
- 'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
81
- 'Sec-CH-UA-Mobile': '?0',
82
- 'Sec-CH-UA-Platform': '"Windows"',
83
81
  'Sec-Fetch-Dest': 'empty',
84
82
  'Sec-Fetch-Mode': 'cors',
85
83
  'Sec-Fetch-Site': 'same-origin',
@@ -90,9 +88,12 @@ class PiAI(Provider):
90
88
  '__cf_bm': uuid4().hex
91
89
  }
92
90
 
93
- self.session = requests.Session()
91
+ # Update curl_cffi session headers, proxies, and cookies
94
92
  self.session.headers.update(self.headers)
95
- self.session.proxies = proxies
93
+ self.session.proxies = proxies # Assign proxies directly
94
+ # Set cookies on the session object for curl_cffi
95
+ for name, value in self.cookies.items():
96
+ self.session.cookies.set(name, value)
96
97
 
97
98
  self.is_conversation = is_conversation
98
99
  self.max_tokens_to_sample = max_tokens
@@ -121,25 +122,44 @@ class PiAI(Provider):
121
122
  if self.is_conversation:
122
123
  self.start_conversation()
123
124
 
125
+ @staticmethod
126
+ def _pi_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
127
+ """Extracts text content from PiAI stream JSON objects."""
128
+ if isinstance(chunk, dict) and 'text' in chunk and chunk['text'] is not None:
129
+ return chunk.get("text")
130
+ return None
131
+
124
132
  def start_conversation(self) -> str:
125
133
  """
126
134
  Initializes a new conversation and returns the conversation ID.
127
135
  """
128
- response = self.scraper.post(
129
- "https://pi.ai/api/chat/start",
130
- headers=self.headers,
131
- cookies=self.cookies,
132
- json={},
133
- timeout=self.timeout
134
- )
135
-
136
- if not response.ok:
137
- raise Exception(f"Failed to start conversation: {response.status_code}")
136
+ try:
137
+ # Use curl_cffi session post with impersonate
138
+ # Cookies are handled by the session
139
+ response = self.session.post(
140
+ "https://pi.ai/api/chat/start",
141
+ # headers are set on the session
142
+ # cookies=self.cookies, # Handled by session
143
+ json={},
144
+ timeout=self.timeout,
145
+ # proxies are set on the session
146
+ impersonate="chrome110" # Use a common impersonation profile
147
+ )
148
+ response.raise_for_status() # Check for HTTP errors
138
149
 
139
- data = response.json()
140
- self.conversation_id = data['conversations'][0]['sid']
150
+ data = response.json()
151
+ # Ensure the expected structure before accessing
152
+ if 'conversations' in data and data['conversations'] and 'sid' in data['conversations'][0]:
153
+ self.conversation_id = data['conversations'][0]['sid']
154
+ return self.conversation_id
155
+ else:
156
+ raise exceptions.FailedToGenerateResponseError(f"Unexpected response structure from start API: {data}")
141
157
 
142
- return self.conversation_id
158
+ except CurlError as e: # Catch CurlError
159
+ raise exceptions.FailedToGenerateResponseError(f"Failed to start conversation (CurlError): {e}") from e
160
+ except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
161
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
162
+ raise exceptions.FailedToGenerateResponseError(f"Failed to start conversation ({type(e).__name__}): {e} - {err_text}") from e
143
163
 
144
164
  def ask(
145
165
  self,
@@ -188,65 +208,103 @@ class PiAI(Provider):
188
208
  }
189
209
 
190
210
  def process_stream():
191
- # Try primary URL first
192
- response = self.scraper.post(
193
- self.url,
194
- headers=self.headers,
195
- cookies=self.cookies,
196
- json=data,
197
- stream=True,
198
- timeout=self.timeout
199
- )
200
-
201
- # If primary URL fails, try fallback URL
202
- if not response.ok and self.url == self.primary_url:
203
- self.url = self.fallback_url
204
- response = self.scraper.post(
205
- self.url,
206
- headers=self.headers,
207
- cookies=self.cookies,
211
+ try: # Add outer try block for error handling
212
+ # Try primary URL first
213
+ current_url = self.url
214
+ response = self.session.post(
215
+ current_url,
216
+ # headers are set on the session
217
+ # cookies are handled by the session
208
218
  json=data,
209
219
  stream=True,
210
- timeout=self.timeout
220
+ timeout=self.timeout,
221
+ # proxies are set on the session
222
+ impersonate="chrome110" # Use a common impersonation profile
211
223
  )
212
224
 
213
- if not response.ok:
214
- raise Exception(f"API request failed: {response.status_code}")
215
-
216
- output_str = response.content.decode('utf-8')
217
- sids = re.findall(r'"sid":"(.*?)"', output_str)
218
- second_sid = sids[1] if len(sids) >= 2 else None
219
-
220
- if voice and voice_name and second_sid:
221
- threading.Thread(
222
- target=self.download_audio_threaded,
223
- args=(voice_name, second_sid, output_file)
224
- ).start()
225
-
226
- streaming_text = ""
227
- for line in response.iter_lines(decode_unicode=True):
228
- if line.startswith("data: "):
229
- try:
230
- parsed_data = json.loads(line[6:])
231
- if 'text' in parsed_data:
232
- streaming_text += parsed_data['text']
233
- resp = dict(text=streaming_text)
234
- self.last_response.update(resp)
235
- yield parsed_data if raw else resp
236
- except json.JSONDecodeError:
237
- continue
238
-
239
- self.conversation.update_chat_history(
240
- prompt, self.get_message(self.last_response)
241
- )
225
+ # If primary URL fails, try fallback URL
226
+ if not response.ok and current_url == self.primary_url:
227
+ current_url = self.fallback_url
228
+ response = self.session.post(
229
+ current_url,
230
+ # headers are set on the session
231
+ # cookies are handled by the session
232
+ json=data,
233
+ stream=True,
234
+ timeout=self.timeout,
235
+ # proxies are set on the session
236
+ impersonate="chrome110" # Use a common impersonation profile
237
+ )
238
+
239
+ response.raise_for_status() # Check for HTTP errors after potential fallback
240
+
241
+ # --- Process response content ---
242
+ # Note: curl_cffi's response.content might behave differently for streams.
243
+ # It's often better to iterate directly.
244
+ # output_str = response.content.decode('utf-8') # Avoid reading full content at once for streams
245
+
246
+ sids = []
247
+ streaming_text = ""
248
+ full_raw_data_for_sids = "" # Accumulate raw data to find SIDs later
249
+
250
+ # Iterate over bytes and decode manually
251
+ for line_bytes in response.iter_lines():
252
+ if line_bytes:
253
+ line = line_bytes.decode('utf-8')
254
+ full_raw_data_for_sids += line + "\n" # Accumulate for SID extraction
255
+
256
+ if line.startswith("data: "):
257
+ json_line_str = line[6:] # Get the JSON part as string
258
+ try:
259
+ # Process this single JSON line string with sanitize_stream
260
+ processed_gen = sanitize_stream(
261
+ data=json_line_str,
262
+ to_json=True,
263
+ content_extractor=self._pi_extractor
264
+ )
265
+ chunk_text = next(processed_gen, None) # Get the single extracted text item
266
+ if chunk_text and isinstance(chunk_text, str):
267
+ streaming_text += chunk_text
268
+ yield {"text": streaming_text} # Always yield dict with aggregated text
269
+ except (StopIteration, json.JSONDecodeError, UnicodeDecodeError):
270
+ continue # Skip if sanitize_stream fails or yields nothing
271
+ # Extract SIDs after processing the stream
272
+ sids = re.findall(r'"sid":"(.*?)"', full_raw_data_for_sids)
273
+ second_sid = sids[1] if len(sids) >= 2 else None
274
+
275
+ if voice and voice_name and second_sid:
276
+ threading.Thread(
277
+ target=self.download_audio_threaded,
278
+ args=(voice_name, second_sid, output_file)
279
+ ).start()
280
+
281
+ # Update history and last response after stream finishes
282
+ self.last_response = dict(text=streaming_text)
283
+ self.conversation.update_chat_history(
284
+ prompt, streaming_text
285
+ )
286
+
287
+ except CurlError as e: # Catch CurlError
288
+ raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
289
+ except Exception as e: # Catch other potential exceptions (like HTTPError)
290
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
291
+ raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
292
+
242
293
 
243
294
  if stream:
244
295
  return process_stream()
245
296
  else:
246
297
  # For non-stream, collect all responses and return the final one
298
+ final_text = ""
299
+ # process_stream always yields dicts now
247
300
  for res in process_stream():
248
- pass
249
- return self.last_response
301
+ if isinstance(res, dict) and "text" in res:
302
+ final_text = res["text"] # Keep updating with the latest aggregated text
303
+
304
+ # last_response and history are updated within process_stream
305
+ # Return the final aggregated response dict or raw text
306
+ return final_text if raw else self.last_response
307
+
250
308
 
251
309
  def chat(
252
310
  self,
@@ -280,28 +338,35 @@ class PiAI(Provider):
280
338
 
281
339
  if stream:
282
340
  def stream_generator():
283
- for response in self.ask(
341
+ # ask() yields dicts or raw JSON objects when streaming
342
+ gen = self.ask(
284
343
  prompt,
285
344
  stream=True,
345
+ raw=False, # Ensure ask yields dicts for get_message
286
346
  optimizer=optimizer,
287
347
  conversationally=conversationally,
288
348
  voice=voice,
289
349
  voice_name=voice_name,
290
350
  output_file=output_file
291
- ):
292
- yield self.get_message(response).encode('utf-8').decode('utf-8')
351
+ )
352
+ for response_dict in gen:
353
+ # get_message expects dict
354
+ yield self.get_message(response_dict)
293
355
  return stream_generator()
294
356
  else:
295
- response = self.ask(
357
+ # ask() returns dict or raw text when not streaming
358
+ response_data = self.ask(
296
359
  prompt,
297
360
  stream=False,
361
+ raw=False, # Ensure ask returns dict for get_message
298
362
  optimizer=optimizer,
299
363
  conversationally=conversationally,
300
364
  voice=voice,
301
365
  voice_name=voice_name,
302
366
  output_file=output_file
303
367
  )
304
- return self.get_message(response)
368
+ # get_message expects dict
369
+ return self.get_message(response_data)
305
370
 
306
371
  def get_message(self, response: dict) -> str:
307
372
  """Retrieves message only from response"""
@@ -317,28 +382,48 @@ class PiAI(Provider):
317
382
  }
318
383
 
319
384
  try:
320
- audio_response = self.scraper.get(
385
+ # Use curl_cffi session get with impersonate
386
+ audio_response = self.session.get(
321
387
  'https://pi.ai/api/chat/voice',
322
388
  params=params,
323
- cookies=self.cookies,
324
- headers=self.headers,
325
- timeout=self.timeout
389
+ # cookies are handled by the session
390
+ # headers are set on the session
391
+ timeout=self.timeout,
392
+ # proxies are set on the session
393
+ impersonate="chrome110" # Use a common impersonation profile
326
394
  )
327
-
328
- if not audio_response.ok:
329
- return
330
-
331
- audio_response.raise_for_status()
395
+ audio_response.raise_for_status() # Check for HTTP errors
332
396
 
333
397
  with open(output_file, "wb") as file:
334
398
  file.write(audio_response.content)
335
399
 
336
- except requests.exceptions.RequestException:
400
+ except CurlError: # Catch CurlError
401
+ # Optionally log the error
402
+ pass
403
+ except Exception: # Catch other potential exceptions
404
+ # Optionally log the error
337
405
  pass
338
406
 
339
407
  if __name__ == '__main__':
408
+ # Ensure curl_cffi is installed
340
409
  from rich import print
341
- ai = PiAI()
342
- response = ai.chat(input(">>> "), stream=True)
343
- for chunk in response:
344
- print(chunk, end="", flush=True)
410
+ try: # Add try-except block for testing
411
+ ai = PiAI(timeout=60)
412
+ print("[bold blue]Testing Chat (Stream):[/bold blue]")
413
+ response = ai.chat(input(">>> "), stream=True)
414
+ full_response = ""
415
+ for chunk in response:
416
+ print(chunk, end="", flush=True)
417
+ full_response += chunk
418
+ print("\n[bold green]Stream Test Complete.[/bold green]")
419
+
420
+ # Optional: Test non-stream
421
+ # print("\n[bold blue]Testing Chat (Non-Stream):[/bold blue]")
422
+ # response_non_stream = ai.chat("Hello again", stream=False)
423
+ # print(response_non_stream)
424
+ # print("[bold green]Non-Stream Test Complete.[/bold green]")
425
+
426
+ except exceptions.FailedToGenerateResponseError as e:
427
+ print(f"\n[bold red]API Error:[/bold red] {e}")
428
+ except Exception as e:
429
+ print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
@@ -1,4 +1,5 @@
1
- import requests
1
+ from curl_cffi.requests import Session
2
+ from curl_cffi import CurlError
2
3
  import json
3
4
  import re
4
5
  from typing import Any, Dict, Optional, Union, Generator
@@ -17,7 +18,7 @@ class PIZZAGPT(Provider):
17
18
  def __init__(
18
19
  self,
19
20
  is_conversation: bool = True,
20
- max_tokens: int = 600,
21
+ max_tokens: int = 600, # Note: max_tokens is not used by this API
21
22
  timeout: int = 30,
22
23
  intro: str = None,
23
24
  filepath: str = None,
@@ -31,7 +32,8 @@ class PIZZAGPT(Provider):
31
32
  if model not in self.AVAILABLE_MODELS:
32
33
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
33
34
 
34
- self.session = requests.Session()
35
+ # Initialize curl_cffi Session
36
+ self.session = Session()
35
37
  self.is_conversation = is_conversation
36
38
  self.max_tokens_to_sample = max_tokens
37
39
  self.api_endpoint = "https://www.pizzagpt.it/api/chatx-completion"
@@ -48,8 +50,6 @@ class PIZZAGPT(Provider):
48
50
  "referer": "https://www.pizzagpt.it/en",
49
51
  "user-agent": Lit().random(),
50
52
  "x-secret": "Marinara",
51
- "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24"',
52
- "sec-ch-ua-platform": '"Windows"'
53
53
  }
54
54
 
55
55
  self.__available_optimizers = (
@@ -57,7 +57,10 @@ class PIZZAGPT(Provider):
57
57
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
58
58
  )
59
59
 
60
+ # Update curl_cffi session headers and proxies
60
61
  self.session.headers.update(self.headers)
62
+ self.session.proxies = proxies # Assign proxies directly
63
+
61
64
  Conversation.intro = (
62
65
  AwesomePrompts().get_act(
63
66
  act, raise_not_found=True, default=None, case_insensitive=True
@@ -70,7 +73,6 @@ class PIZZAGPT(Provider):
70
73
  is_conversation, self.max_tokens_to_sample, filepath, update_file
71
74
  )
72
75
  self.conversation.history_offset = history_offset
73
- self.session.proxies = proxies
74
76
 
75
77
  def _extract_content(self, text: str) -> Dict[str, Any]:
76
78
  """
@@ -104,8 +106,8 @@ class PIZZAGPT(Provider):
104
106
  def ask(
105
107
  self,
106
108
  prompt: str,
107
- stream: bool = False,
108
- raw: bool = False,
109
+ stream: bool = False, # Note: API does not support streaming
110
+ raw: bool = False, # Keep raw param for interface consistency
109
111
  optimizer: str = None,
110
112
  conversationally: bool = False,
111
113
  web_search: bool = False,
@@ -116,9 +118,7 @@ class PIZZAGPT(Provider):
116
118
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
117
119
  if optimizer:
118
120
  if optimizer in self.__available_optimizers:
119
- conversation_prompt = getattr(Optimizers, optimizer)(
120
- conversation_prompt if conversationally else prompt
121
- )
121
+ conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
122
122
  else:
123
123
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
124
124
 
@@ -129,16 +129,17 @@ class PIZZAGPT(Provider):
129
129
  }
130
130
 
131
131
  try:
132
+ # Use curl_cffi session post with impersonate
132
133
  response = self.session.post(
133
134
  self.api_endpoint,
135
+ # headers are set on the session
134
136
  json=payload,
135
- timeout=self.timeout
137
+ timeout=self.timeout,
138
+ # proxies are set on the session
139
+ impersonate="chrome110" # Use a common impersonation profile
136
140
  )
137
141
 
138
- if not response.ok:
139
- raise exceptions.FailedToGenerateResponseError(
140
- f"Failed to generate response - ({response.status_code}, {response.reason})"
141
- )
142
+ response.raise_for_status() # Check for HTTP errors
142
143
 
143
144
  response_text = response.text
144
145
  if not response_text:
@@ -147,52 +148,81 @@ class PIZZAGPT(Provider):
147
148
  try:
148
149
  resp = self._extract_content(response_text)
149
150
 
150
- self.last_response.update(dict(text=resp['content']))
151
+ self.last_response = {"text": resp['content']} # Store only text in last_response
151
152
  self.conversation.update_chat_history(
152
153
  prompt, self.get_message(self.last_response)
153
154
  )
154
- return self.last_response
155
+ # Return the full extracted data (content + citations) or raw text
156
+ return response_text if raw else resp
155
157
 
156
158
  except Exception as e:
157
159
  raise exceptions.FailedToGenerateResponseError(f"Failed to parse response: {str(e)}")
158
160
 
159
- except requests.exceptions.RequestException as e:
160
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
161
+ except CurlError as e: # Catch CurlError
162
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
163
+ except Exception as e: # Catch other potential exceptions (like HTTPError)
164
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
165
+ raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
161
166
 
162
167
  def chat(
163
168
  self,
164
169
  prompt: str,
165
- stream: bool = False,
170
+ stream: bool = False, # Keep stream param for interface consistency
166
171
  optimizer: str = None,
167
172
  conversationally: bool = False,
168
173
  web_search: bool = False,
174
+ # Add raw parameter for consistency
175
+ raw: bool = False
169
176
  ) -> str:
170
177
  """
171
178
  Chat with PizzaGPT with optional web search capability.
172
179
  """
173
- try:
174
- response = self.ask(
175
- prompt,
176
- optimizer=optimizer,
177
- conversationally=conversationally,
178
- web_search=web_search
179
- )
180
- return self.get_message(response)
181
- except Exception as e:
182
- raise
180
+ # API doesn't stream, call ask directly
181
+ response_data = self.ask(
182
+ prompt,
183
+ stream=False, # Call ask in non-stream mode
184
+ raw=raw, # Pass raw flag to ask
185
+ optimizer=optimizer,
186
+ conversationally=conversationally,
187
+ web_search=web_search
188
+ )
189
+ # If raw=True, ask returns string, otherwise dict
190
+ return response_data if raw else self.get_message(response_data)
191
+
183
192
 
184
193
  def get_message(self, response: dict) -> str:
185
194
  """Extract message from response dictionary."""
186
- assert isinstance(response, dict), "Response should be of dict data-type only"
187
- return response.get("text", "")
195
+ # Handle case where raw response (string) might be passed mistakenly
196
+ if isinstance(response, str):
197
+ # Attempt to parse if it looks like the expected structure, otherwise return as is
198
+ try:
199
+ extracted = self._extract_content(response)
200
+ return extracted.get("content", "")
201
+ except:
202
+ return response # Return raw string if parsing fails
203
+ elif isinstance(response, dict):
204
+ # If it's already the extracted dict from ask(raw=False)
205
+ if "content" in response:
206
+ return response.get("content", "")
207
+ # If it's the last_response format
208
+ elif "text" in response:
209
+ return response.get("text", "")
210
+ return "" # Default empty string
188
211
 
189
212
  if __name__ == "__main__":
213
+ # Ensure curl_cffi is installed
190
214
  from rich import print
191
215
 
192
216
  # Example usage with web search enabled
193
- ai = PIZZAGPT()
217
+ ai = PIZZAGPT(timeout=60)
194
218
  try:
195
- response = ai.chat("hi")
219
+ print("[bold blue]Testing Chat (Web Search Disabled):[/bold blue]")
220
+ response = ai.chat("hi", web_search=False)
196
221
  print(response)
222
+
223
+ # print("\n[bold blue]Testing Chat (Web Search Enabled):[/bold blue]")
224
+ # response_web = ai.chat("What's the weather in Rome?", web_search=True)
225
+ # print(response_web)
226
+
197
227
  except Exception as e:
198
- print(f"Error: {str(e)}")
228
+ print(f"[bold red]Error:[/bold red] {str(e)}")