webscout-8.2.5-py3-none-any.whl → webscout-8.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.
Files changed (45)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Provider/AISEARCH/scira_search.py +2 -1
  5. webscout/Provider/GizAI.py +6 -4
  6. webscout/Provider/Nemotron.py +218 -0
  7. webscout/Provider/OPENAI/scirachat.py +2 -1
  8. webscout/Provider/TeachAnything.py +8 -5
  9. webscout/Provider/WiseCat.py +1 -1
  10. webscout/Provider/WrDoChat.py +370 -0
  11. webscout/Provider/__init__.py +4 -6
  12. webscout/Provider/ai4chat.py +5 -3
  13. webscout/Provider/akashgpt.py +59 -66
  14. webscout/Provider/freeaichat.py +57 -43
  15. webscout/Provider/scira_chat.py +2 -1
  16. webscout/Provider/scnet.py +4 -1
  17. webscout/__init__.py +0 -1
  18. webscout/conversation.py +305 -446
  19. webscout/swiftcli/__init__.py +80 -794
  20. webscout/swiftcli/core/__init__.py +7 -0
  21. webscout/swiftcli/core/cli.py +297 -0
  22. webscout/swiftcli/core/context.py +104 -0
  23. webscout/swiftcli/core/group.py +241 -0
  24. webscout/swiftcli/decorators/__init__.py +28 -0
  25. webscout/swiftcli/decorators/command.py +221 -0
  26. webscout/swiftcli/decorators/options.py +220 -0
  27. webscout/swiftcli/decorators/output.py +252 -0
  28. webscout/swiftcli/exceptions.py +21 -0
  29. webscout/swiftcli/plugins/__init__.py +9 -0
  30. webscout/swiftcli/plugins/base.py +135 -0
  31. webscout/swiftcli/plugins/manager.py +262 -0
  32. webscout/swiftcli/utils/__init__.py +59 -0
  33. webscout/swiftcli/utils/formatting.py +252 -0
  34. webscout/swiftcli/utils/parsing.py +267 -0
  35. webscout/version.py +1 -1
  36. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/METADATA +1 -1
  37. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/RECORD +41 -28
  38. webscout/LLM.py +0 -442
  39. webscout/Provider/PizzaGPT.py +0 -228
  40. webscout/Provider/promptrefine.py +0 -193
  41. webscout/Provider/tutorai.py +0 -270
  42. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/WHEEL +0 -0
  43. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/entry_points.txt +0 -0
  44. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
  45. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -0
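
Entries 38 through 41 in the list above are outright removals with no replacement in this diff: webscout/LLM.py, webscout/Provider/PizzaGPT.py, webscout/Provider/promptrefine.py, and webscout/Provider/tutorai.py. Any downstream code importing them will raise ImportError on 8.2.6. A minimal defensive sketch (the fallback strategy is illustrative, not part of the package):

    # Guard against a module removed in webscout 8.2.6.
    try:
        from webscout.LLM import LLM  # deleted in this release
    except ImportError:
        LLM = None  # e.g. pin webscout==8.2.5 or switch to another provider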
webscout/LLM.py DELETED
@@ -1,442 +0,0 @@
- """
- >>> from webscout.LLM import LLM, VLM
- >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
- >>> response = llm.chat([{"role": "user", "content": "What's good?"}])
- >>> print(response)
- 'Hey! I'm doing great, thanks for asking! How can I help you today? 😊'
-
- >>> # For vision tasks
- >>> vlm = VLM("cogvlm-grounding-generalist")
- >>> response = vlm.chat([{"role": "user", "content": [{"type": "image", "image_url": "path/to/image.jpg"}, {"type": "text", "text": "What's in this image?"}]}])
- """
-
- import requests
- import base64
- import json
- from typing import List, Dict, Union, Generator, Optional, Any
-
- class LLMError(Exception):
-     """Custom exception for LLM API errors 🚫
-
-     Examples:
-         >>> try:
-         ...     raise LLMError("API key not found!")
-         ... except LLMError as e:
-         ...     print(f"Error: {e}")
-         Error: API key not found!
-     """
-     pass
-
- class LLM:
-     """A class for chatting with DeepInfra's powerful language models! 🚀
-
-     This class lets you:
-     - Chat with state-of-the-art language models 💬
-     - Stream responses in real-time ⚡
-     - Control temperature and token limits 🎮
-     - Handle system messages and chat history 📝
-
-     Examples:
-         >>> from webscout.LLM import LLM
-         >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-         >>> response = llm.chat([
-         ...     {"role": "user", "content": "Write a short poem!"}
-         ... ])
-         >>> print(response)
-         'Through starlit skies and morning dew,
-         Nature's beauty, forever new.
-         In every moment, magic gleams,
-         Life's poetry flows like gentle streams.'
-     """
-
-     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-         """
-         Initialize the LLM client.
-
-         Args:
-             model: The model identifier (e.g., "meta-llama/Meta-Llama-3-70B-Instruct")
-             system_message: The system message to use for the conversation
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> print(llm.model)
-             'meta-llama/Meta-Llama-3-70B-Instruct'
-         """
-         self.model = model
-         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
-         self.conversation_history = [{"role": "system", "content": system_message}]
-         self.headers = {
-             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-             'Cache-Control': 'no-cache',
-             'Connection': 'keep-alive',
-             'Content-Type': 'application/json',
-             'Origin': 'https://deepinfra.com',
-             'Pragma': 'no-cache',
-             'Referer': 'https://deepinfra.com/',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-site',
-             'X-Deepinfra-Source': 'web-embed',
-             'accept': 'text/event-stream',
-             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"macOS"'
-         }
-
-     def _prepare_payload(
-         self,
-         messages: List[Dict[str, str]],
-         stream: bool = False,
-         temperature: float = 0.7,
-         max_tokens: int = 8028,
-         stop: Optional[List[str]] = None,
-     ) -> Dict[str, Any]:
-         """Prepare the chat payload with all the right settings! 🎯
-
-         Args:
-             messages: Your chat messages (role & content)
-             stream: Want real-time responses? Set True! ⚡
-             temperature: Creativity level (0-1) 🎨
-             max_tokens: Max words to generate 📝
-             stop: Words to stop at (optional) 🛑
-
-         Returns:
-             Dict with all the API settings ready to go! 🚀
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> payload = llm._prepare_payload([
-             ...     {"role": "user", "content": "Hi!"}
-             ... ])
-             >>> print(payload['model'])
-             'meta-llama/Meta-Llama-3-70B-Instruct'
-         """
-         return {
-             'model': self.model,
-             'messages': messages,
-             'temperature': temperature,
-             'max_tokens': max_tokens,
-             'stop': stop or [],
-             'stream': stream
-         }
-
-     def chat(
-         self,
-         messages: List[Dict[str, str]],
-         stream: bool = False,
-         temperature: float = 0.7,
-         max_tokens: int = 8028,
-         stop: Optional[List[str]] = None,
-     ) -> Union[str, Generator[str, None, None]]:
-         """Start chatting with the AI! 💬
-
-         This method is your gateway to:
-         - Having awesome conversations 🗣️
-         - Getting creative responses 🎨
-         - Streaming real-time replies ⚡
-         - Controlling the output style 🎮
-
-         Args:
-             messages: Your chat messages (role & content)
-             stream: Want real-time responses? Set True!
-             temperature: Creativity level (0-1)
-             max_tokens: Max words to generate
-             stop: Words to stop at (optional)
-
-         Returns:
-             Either a complete response or streaming generator
-
-         Raises:
-             LLMError: If something goes wrong 🚫
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> # Regular chat
-             >>> response = llm.chat([
-             ...     {"role": "user", "content": "Tell me a joke!"}
-             ... ])
-             >>> # Streaming chat
-             >>> for chunk in llm.chat([
-             ...     {"role": "user", "content": "Tell me a story!"}
-             ... ], stream=True):
-             ...     print(chunk, end='')
-         """
-         payload = self._prepare_payload(messages, stream, temperature, max_tokens, stop)
-
-         try:
-             if stream:
-                 return self._stream_response(payload)
-             else:
-                 return self._send_request(payload)
-         except Exception as e:
-             raise LLMError(f"API request failed: {str(e)}")
-
-     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
-         """Stream the chat response in real-time! ⚡
-
-         Args:
-             payload: The prepared chat payload
-
-         Yields:
-             Streaming chunks of the response
-
-         Raises:
-             LLMError: If the stream request fails 🚫
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> for chunk in llm._stream_response(llm._prepare_payload([
-             ...     {"role": "user", "content": "Tell me a story!"}
-             ... ])):
-             ...     print(chunk, end='')
-         """
-         try:
-             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
-                 response.raise_for_status()
-                 for line in response.iter_lines():
-                     if line:
-                         if line.strip() == b'data: [DONE]':
-                             break
-                         if line.startswith(b'data: '):
-                             try:
-                                 chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
-                                 if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
-                                     yield content
-                             except json.JSONDecodeError:
-                                 continue
-         except requests.RequestException as e:
-             raise LLMError(f"Stream request failed: {str(e)}")
-
-     def _send_request(self, payload: Dict[str, Any]) -> str:
-         """Send a non-streaming chat request.
-
-         Args:
-             payload: The prepared chat payload
-
-         Returns:
-             The complete response
-
-         Raises:
-             LLMError: If the request fails 🚫
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> response = llm._send_request(llm._prepare_payload([
-             ...     {"role": "user", "content": "Tell me a joke!"}
-             ... ]))
-             >>> print(response)
-         """
-         try:
-             response = requests.post(self.api_url, json=payload, headers=self.headers)
-             response.raise_for_status()
-             result = response.json()
-             return result['choices'][0]['message']['content']
-         except requests.RequestException as e:
-             raise LLMError(f"Request failed: {str(e)}")
-         except (KeyError, IndexError) as e:
-             raise LLMError(f"Invalid response format: {str(e)}")
-         except json.JSONDecodeError as e:
-             raise LLMError(f"Invalid JSON response: {str(e)}")
-
-
- class VLM:
-     """Your gateway to vision-language AI magic! 🖼️
-
-     This class lets you:
-     - Chat about images with AI 🎨
-     - Get detailed image descriptions 📝
-     - Answer questions about images 🤔
-     - Stream responses in real-time ⚡
-
-     Examples:
-         >>> from webscout.LLM import VLM
-         >>> vlm = VLM("cogvlm-grounding-generalist")
-         >>> # Chat about an image
-         >>> response = vlm.chat([{
-         ...     "role": "user",
-         ...     "content": [
-         ...         {"type": "image", "image_url": "path/to/image.jpg"},
-         ...         {"type": "text", "text": "What's in this image?"}
-         ...     ]
-         ... }])
-         >>> print(response)
-         'I see a beautiful sunset over mountains...'
-     """
-
-     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-         """Get ready for some vision-language magic! 🚀
-
-         Args:
-             model: Your chosen vision model
-             system_message: Set the AI's personality
-
-         Examples:
-             >>> vlm = VLM("cogvlm-grounding-generalist")
-             >>> print(vlm.model)
-             'cogvlm-grounding-generalist'
-         """
-         self.model = model
-         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
-         self.conversation_history = [{"role": "system", "content": system_message}]
-         self.headers = {
-             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-             'Cache-Control': 'no-cache',
-             'Connection': 'keep-alive',
-             'Content-Type': 'application/json',
-             'Origin': 'https://deepinfra.com',
-             'Pragma': 'no-cache',
-             'Referer': 'https://deepinfra.com/',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-site',
-             'X-Deepinfra-Source': 'web-embed',
-             'accept': 'text/event-stream',
-             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"macOS"'
-         }
-
-     def chat(
-         self,
-         messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
-         stream: bool = False,
-         temperature: float = 0.7,
-         max_tokens: int = 8028,
-     ) -> Union[str, Generator[str, None, None]]:
-         """Chat about images with AI! 🖼️
-
-         This method lets you:
-         - Ask questions about images 🤔
-         - Get detailed descriptions 📝
-         - Stream responses in real-time ⚡
-         - Control response creativity 🎨
-
-         Args:
-             messages: Your chat + image data
-             stream: Want real-time responses?
-             temperature: Creativity level (0-1)
-             max_tokens: Max words to generate
-
-         Returns:
-             Either a complete response or streaming generator
-
-         Raises:
-             LLMError: If something goes wrong 🚫
-
-         Examples:
-             >>> vlm = VLM("cogvlm-grounding-generalist")
-             >>> # Regular chat with image
-             >>> response = vlm.chat([{
-             ...     "role": "user",
-             ...     "content": [
-             ...         {"type": "image", "image_url": "sunset.jpg"},
-             ...         {"type": "text", "text": "Describe this scene"}
-             ...     ]
-             ... }])
-             >>> # Streaming chat
-             >>> for chunk in vlm.chat([...], stream=True):
-             ...     print(chunk, end='')
-         """
-         payload = {
-             "model": self.model,
-             "messages": messages,
-             "stream": stream,
-             "temperature": temperature,
-             "max_tokens": max_tokens
-         }
-
-         try:
-             if stream:
-                 return self._stream_response(payload)
-             else:
-                 return self._send_request(payload)
-         except Exception as e:
-             raise LLMError(f"VLM API request failed: {str(e)}")
-
-     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
-         """Stream the VLM chat response."""
-         try:
-             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
-                 response.raise_for_status()
-                 for line in response.iter_lines():
-                     if line:
-                         if line.strip() == b'data: [DONE]':
-                             break
-                         if line.startswith(b'data: '):
-                             try:
-                                 chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
-                                 if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
-                                     yield content
-                             except json.JSONDecodeError:
-                                 continue
-         except requests.RequestException as e:
-             raise LLMError(f"VLM stream request failed: {str(e)}")
-
-     def _send_request(self, payload: Dict[str, Any]) -> str:
-         """Send a non-streaming VLM chat request."""
-         try:
-             response = requests.post(self.api_url, json=payload, headers=self.headers)
-             response.raise_for_status()
-             result = response.json()
-             return result['choices'][0]['message']['content']
-         except requests.RequestException as e:
-             raise LLMError(f"VLM request failed: {str(e)}")
-         except (KeyError, IndexError) as e:
-             raise LLMError(f"Invalid VLM response format: {str(e)}")
-         except json.JSONDecodeError as e:
-             raise LLMError(f"Invalid VLM JSON response: {str(e)}")
-
-
- def encode_image_to_base64(image_path: str) -> str:
-     """Turn your image into base64 magic! 🎨
-
-     Args:
-         image_path: Where's your image at?
-
-     Returns:
-         Your image as a base64 string ✨
-
-     Raises:
-         IOError: If we can't read your image 🚫
-
-     Examples:
-         >>> from webscout.LLM import encode_image_to_base64
-         >>> image_data = encode_image_to_base64("cool_pic.jpg")
-         >>> print(len(image_data))  # Check the encoded length
-         12345
-     """
-     try:
-         with open(image_path, "rb") as image_file:
-             return base64.b64encode(image_file.read()).decode("utf-8")
-     except IOError as e:
-         raise LLMError(f"Failed to read image file: {str(e)}")
-
-
- if __name__ == "__main__":
-     # Example usage
-     try:
-         # Initialize LLM with Llama 3 model
-         llm = LLM(model="mistralai/Mistral-Small-24B-Instruct-2501")
-
-         # Example messages
-         messages = [
-             {"role": "user", "content": "Write a short poem about AI."}
-         ]
-
-         # Example 1: Non-streaming response
-         print("\nNon-streaming response:")
-         response = llm.chat(messages, stream=False)
-         print(response)
-
-         # Example 2: Streaming response
-         print("\nStreaming response:")
-         for chunk in llm.chat(messages, stream=True):
-             print(chunk, end='', flush=True)
-         print("\n")
-
-     except LLMError as e:
-         print(f"Error: {str(e)}")
-     except KeyboardInterrupt:
-         print("\nOperation cancelled by user")
webscout/Provider/PizzaGPT.py DELETED
@@ -1,228 +0,0 @@
- from curl_cffi.requests import Session
- from curl_cffi import CurlError
- import json
- import re
- from typing import Any, Dict, Optional, Union, Generator
- from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- from webscout.litagent import LitAgent as Lit
-
- class PIZZAGPT(Provider):
-     """
-     PIZZAGPT is a provider class for interacting with the PizzaGPT API.
-     Supports web search integration and handles responses using regex.
-     """
-     AVAILABLE_MODELS = ["gpt-4o-mini"]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,  # Note: max_tokens is not used by this API
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "gpt-4o-mini"
-     ) -> None:
-         """Initialize PizzaGPT with enhanced configuration options."""
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         # Initialize curl_cffi Session
-         self.session = Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://www.pizzagpt.it/api/chatx-completion"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-
-         self.headers = {
-             "accept": "application/json",
-             "accept-language": "en-US,en;q=0.9",
-             "content-type": "application/json",
-             "origin": "https://www.pizzagpt.it",
-             "referer": "https://www.pizzagpt.it/en",
-             "user-agent": Lit().random(),
-             "x-secret": "Marinara",
-         }
-
-         self.__available_optimizers = (
-             method for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-
-         # Update curl_cffi session headers and proxies
-         self.session.headers.update(self.headers)
-         self.session.proxies = proxies  # Assign proxies directly
-
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def _extract_content(self, text: str) -> Dict[str, Any]:
-         """
-         Extract content from response text using regex.
-         """
-         try:
-             # Look for content pattern
-             content_match = re.search(r'"content"\s*:\s*"(.*?)"(?=\s*[,}])', text, re.DOTALL)
-             if not content_match:
-                 raise exceptions.FailedToGenerateResponseError("Content not found in response")
-
-             content = content_match.group(1)
-             # Unescape special characters
-             content = content.encode().decode('unicode_escape')
-
-             # Look for citations if present
-             citations = []
-             citations_match = re.search(r'"citations"\s*:\s*\[(.*?)\]', text, re.DOTALL)
-             if citations_match:
-                 citations_text = citations_match.group(1)
-                 citations = re.findall(r'"(.*?)"', citations_text)
-
-             return {
-                 "content": content,
-                 "citations": citations
-             }
-
-         except Exception as e:
-             raise exceptions.FailedToGenerateResponseError(f"Failed to extract content: {str(e)}")
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,  # Note: API does not support streaming
-         raw: bool = False,  # Keep raw param for interface consistency
-         optimizer: str = None,
-         conversationally: bool = False,
-         web_search: bool = False,
-     ) -> Dict[str, Any]:
-         """
-         Send a prompt to PizzaGPT API with optional web search capability.
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         payload = {
-             "question": conversation_prompt,
-             "model": self.model,
-             "searchEnabled": web_search
-         }
-
-         try:
-             # Use curl_cffi session post with impersonate
-             response = self.session.post(
-                 self.api_endpoint,
-                 # headers are set on the session
-                 json=payload,
-                 timeout=self.timeout,
-                 # proxies are set on the session
-                 impersonate="chrome110"  # Use a common impersonation profile
-             )
-
-             response.raise_for_status()  # Check for HTTP errors
-
-             response_text = response.text
-             if not response_text:
-                 raise exceptions.FailedToGenerateResponseError("Empty response received from API")
-
-             try:
-                 resp = self._extract_content(response_text)
-
-                 self.last_response = {"text": resp['content']}  # Store only text in last_response
-                 self.conversation.update_chat_history(
-                     prompt, self.get_message(self.last_response)
-                 )
-                 # Return the full extracted data (content + citations) or raw text
-                 return response_text if raw else resp
-
-             except Exception as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Failed to parse response: {str(e)}")
-
-         except CurlError as e:  # Catch CurlError
-             raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-         except Exception as e:  # Catch other potential exceptions (like HTTPError)
-             err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-             raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,  # Keep stream param for interface consistency
-         optimizer: str = None,
-         conversationally: bool = False,
-         web_search: bool = False,
-         # Add raw parameter for consistency
-         raw: bool = False
-     ) -> str:
-         """
-         Chat with PizzaGPT with optional web search capability.
-         """
-         # API doesn't stream, call ask directly
-         response_data = self.ask(
-             prompt,
-             stream=False,  # Call ask in non-stream mode
-             raw=raw,  # Pass raw flag to ask
-             optimizer=optimizer,
-             conversationally=conversationally,
-             web_search=web_search
-         )
-         # If raw=True, ask returns string, otherwise dict
-         return response_data if raw else self.get_message(response_data)
-
-
-     def get_message(self, response: dict) -> str:
-         """Extract message from response dictionary."""
-         # Handle case where raw response (string) might be passed mistakenly
-         if isinstance(response, str):
-             # Attempt to parse if it looks like the expected structure, otherwise return as is
-             try:
-                 extracted = self._extract_content(response)
-                 return extracted.get("content", "")
-             except:
-                 return response  # Return raw string if parsing fails
-         elif isinstance(response, dict):
-             # If it's already the extracted dict from ask(raw=False)
-             if "content" in response:
-                 return response.get("content", "")
-             # If it's the last_response format
-             elif "text" in response:
-                 return response.get("text", "")
-         return ""  # Default empty string
-
- if __name__ == "__main__":
-     # Ensure curl_cffi is installed
-     from rich import print
-
-     # Example usage with web search enabled
-     ai = PIZZAGPT(timeout=60)
-     try:
-         print("[bold blue]Testing Chat (Web Search Disabled):[/bold blue]")
-         response = ai.chat("hi", web_search=False)
-         print(response)
-
-         # print("\n[bold blue]Testing Chat (Web Search Enabled):[/bold blue]")
-         # response_web = ai.chat("What's the weather in Rome?", web_search=True)
-         # print(response_web)
-
-     except Exception as e:
-         print(f"[bold red]Error:[/bold red] {str(e)}")