webscout 6.7__py3-none-any.whl → 6.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (43)
  1. webscout/Extra/YTToolkit/YTdownloader.py +7 -2
  2. webscout/Extra/YTToolkit/ytapi/channel.py +1 -1
  3. webscout/Extra/YTToolkit/ytapi/query.py +3 -0
  4. webscout/Extra/YTToolkit/ytapi/stream.py +3 -0
  5. webscout/Extra/YTToolkit/ytapi/video.py +3 -1
  6. webscout/Provider/Cloudflare.py +2 -1
  7. webscout/Provider/DARKAI.py +2 -2
  8. webscout/Provider/Free2GPT.py +5 -5
  9. webscout/Provider/Marcus.py +3 -3
  10. webscout/Provider/PI.py +113 -47
  11. webscout/Provider/Phind.py +6 -0
  12. webscout/Provider/PizzaGPT.py +62 -53
  13. webscout/Provider/RUBIKSAI.py +93 -38
  14. webscout/Provider/__init__.py +0 -8
  15. webscout/Provider/cerebras.py +3 -3
  16. webscout/Provider/cleeai.py +2 -2
  17. webscout/Provider/elmo.py +2 -2
  18. webscout/Provider/gaurish.py +2 -2
  19. webscout/Provider/geminiprorealtime.py +2 -2
  20. webscout/Provider/lepton.py +2 -2
  21. webscout/Provider/llama3mitril.py +3 -3
  22. webscout/Provider/llamatutor.py +2 -2
  23. webscout/Provider/llmchat.py +3 -2
  24. webscout/Provider/meta.py +2 -2
  25. webscout/Provider/tutorai.py +1 -1
  26. webscout/__init__.py +0 -1
  27. webscout/swiftcli/__init__.py +1 -0
  28. webscout/version.py +1 -1
  29. webscout/webscout_search.py +1140 -1104
  30. webscout/webscout_search_async.py +635 -361
  31. {webscout-6.7.dist-info → webscout-6.9.dist-info}/METADATA +4 -32
  32. {webscout-6.7.dist-info → webscout-6.9.dist-info}/RECORD +36 -43
  33. webscout/Extra/markdownlite/__init__.py +0 -862
  34. webscout/Provider/Deepseek.py +0 -227
  35. webscout/Provider/Farfalle.py +0 -227
  36. webscout/Provider/NinjaChat.py +0 -200
  37. webscout/Provider/mhystical.py +0 -176
  38. webscout/zerodir/__init__.py +0 -225
  39. webstoken/t.py +0 -75
  40. {webscout-6.7.dist-info → webscout-6.9.dist-info}/LICENSE.md +0 -0
  41. {webscout-6.7.dist-info → webscout-6.9.dist-info}/WHEEL +0 -0
  42. {webscout-6.7.dist-info → webscout-6.9.dist-info}/entry_points.txt +0 -0
  43. {webscout-6.7.dist-info → webscout-6.9.dist-info}/top_level.txt +0 -0
@@ -12,10 +12,15 @@ from sys import stdout
12
12
  import os
13
13
  import subprocess
14
14
  import sys
15
+ import tempfile
15
16
  from webscout.version import __prog__, __version__
16
- from webscout.zerodir import user_cache_dir
17
17
  from webscout.swiftcli import CLI, option, argument, group
18
18
 
19
+ # Define cache directory using tempfile
20
+ user_cache_dir = os.path.join(tempfile.gettempdir(), 'webscout')
21
+ if not os.path.exists(user_cache_dir):
22
+ os.makedirs(user_cache_dir)
23
+
19
24
  logging = LitLogger(name="YTDownloader")
20
25
 
21
26
  session = requests.session()
@@ -32,7 +37,7 @@ session.headers.update(headers)
32
37
 
33
38
  get_excep = lambda e: e.args[1] if len(e.args) > 1 else e
34
39
 
35
- appdir = user_cache_dir(__prog__, __prog__)
40
+ appdir = user_cache_dir
36
41
 
37
42
  if not path.isdir(appdir):
38
43
  try:
@@ -304,4 +304,4 @@ class Channel:
304
304
  List[str] | None
305
305
  The ids of all playlists or None
306
306
  """
307
- return dup_filter(Patterns.playlists.findall(channel_playlists(self._target_url)))
307
+ return dup_filter(Patterns.playlists.findall(channel_playlists(self._target_url)))
@@ -35,3 +35,6 @@ class Search:
35
35
  @staticmethod
36
36
  def playlists(keywords: str, limit: int = 20) -> Optional[List[str]]:
37
37
  return dup_filter(Patterns.playlist_id.findall(find_playlists(keywords)), limit)
38
+
39
+ if __name__ == "__main__":
40
+ print(Search.videos("java"))
@@ -58,3 +58,6 @@ class Video:
58
58
  'tags': data[7].split(',') if data[7] else None,
59
59
  'description': data[8].replace('\\n', '\n') if data[8] else None
60
60
  }
61
+
62
+ if __name__ == '__main__':
63
+ print(Video('https://www.youtube.com/watch?v=9bZkp7q19f0').metadata)
@@ -99,4 +99,6 @@ class Video:
99
99
  data['genre'] = genre_pattern.search(self._video_data).group(1)
100
100
  except AttributeError:
101
101
  data['genre'] = None
102
- return data
102
+ return data
103
+ if __name__ == '__main__':
104
+ print(Video('https://www.youtube.com/watch?v=9bZkp7q19f0').metadata)
@@ -1,5 +1,6 @@
1
1
  import json
2
2
  from uuid import uuid4
3
+ import webscout
3
4
  from webscout.AIutel import Optimizers
4
5
  from webscout.AIutel import Conversation
5
6
  from webscout.AIutel import AwesomePrompts, sanitize_stream
@@ -107,7 +108,7 @@ class Cloudflare(Provider):
107
108
  'Sec-Fetch-Dest': 'empty',
108
109
  'Sec-Fetch-Mode': 'cors',
109
110
  'Sec-Fetch-Site': 'same-origin',
110
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0',
111
+ 'User-Agent': webscout.LitAgent().random()
111
112
  }
112
113
 
113
114
  self.cookies = {
@@ -4,7 +4,7 @@ from webscout.AIutel import Optimizers
4
4
  from webscout.AIutel import Conversation
5
5
  from webscout.AIutel import AwesomePrompts, sanitize_stream
6
6
  from webscout.AIbase import Provider
7
- from webscout import exceptions
7
+ from webscout import exceptions, LitAgent
8
8
  import requests
9
9
 
10
10
  class DARKAI(Provider):
@@ -75,7 +75,7 @@ class DARKAI(Provider):
75
75
  "sec-fetch-dest": "empty",
76
76
  "sec-fetch-mode": "cors",
77
77
  "sec-fetch-site": "cross-site",
78
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
78
+ "user-agent": LitAgent().random(),
79
79
  }
80
80
 
81
81
  self.__available_optimizers = (
@@ -9,7 +9,7 @@ from webscout.AIutel import Conversation
9
9
  from webscout.AIutel import AwesomePrompts
10
10
  from webscout.AIbase import Provider
11
11
  from webscout import exceptions
12
-
12
+ from webscout import LitAgent
13
13
 
14
14
  class Free2GPT(Provider):
15
15
  """
@@ -48,7 +48,7 @@ class Free2GPT(Provider):
48
48
  self.session = requests.Session()
49
49
  self.is_conversation = is_conversation
50
50
  self.max_tokens_to_sample = max_tokens
51
- self.api_endpoint = "https://chat10.free2gpt.xyz/api/generate"
51
+ self.api_endpoint = "https://chat1.free2gpt.com/api/generate"
52
52
  self.stream_chunk_size = 64
53
53
  self.timeout = timeout
54
54
  self.last_response = {}
@@ -59,15 +59,15 @@ class Free2GPT(Provider):
59
59
  "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
60
60
  "content-type": "text/plain;charset=UTF-8",
61
61
  "dnt": "1",
62
- "origin": "https://chat10.free2gpt.xyz",
63
- "referer": "https://chat10.free2gpt.xyz/",
62
+ "origin": "https://chat1.free2gpt.co",
63
+ "referer": "https://chat1.free2gpt.co",
64
64
  "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
65
65
  "sec-ch-ua-mobile": "?0",
66
66
  "sec-ch-ua-platform": '"Windows"',
67
67
  "sec-fetch-dest": "empty",
68
68
  "sec-fetch-mode": "cors",
69
69
  "sec-fetch-site": "same-origin",
70
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0"
70
+ "user-agent": LitAgent().random(),
71
71
  }
72
72
 
73
73
  self.__available_optimizers = (
@@ -7,7 +7,7 @@ from webscout.AIutel import Conversation
7
7
  from webscout.AIutel import AwesomePrompts
8
8
  from webscout.AIbase import Provider
9
9
  from webscout import exceptions
10
-
10
+ from webscout import LitAgent as Lit
11
11
 
12
12
  class Marcus(Provider):
13
13
  """
@@ -39,7 +39,7 @@ class Marcus(Provider):
39
39
  'accept': '*/*',
40
40
  'origin': 'https://www.askmarcus.app',
41
41
  'referer': 'https://www.askmarcus.app/chat',
42
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
42
+ 'user-agent': Lit().random(),
43
43
  }
44
44
  self.__available_optimizers = (
45
45
  method
@@ -134,4 +134,4 @@ if __name__ == '__main__':
134
134
  ai = Marcus(timeout=30)
135
135
  response = ai.chat("Tell me about India", stream=True)
136
136
  for chunk in response:
137
- print(chunk)
137
+ print(chunk, end="", flush=True)
webscout/Provider/PI.py CHANGED
@@ -7,21 +7,19 @@ from webscout.AIutel import Optimizers
7
7
  from webscout.AIutel import Conversation
8
8
  from webscout.AIutel import AwesomePrompts
9
9
  from webscout.AIbase import Provider
10
- from typing import Dict
10
+ from typing import Dict, Union, Any
11
11
  from webscout import LitAgent
12
- class PiAI(Provider):
13
- """PiAI class for interacting with the Pi.ai chat API, extending the Provider class.
12
+ from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
14
13
 
15
- This class provides methods for sending messages to the Pi.ai chat API and receiving responses,
16
- enabling conversational interactions. It supports various configurations such as conversation mode,
17
- token limits, and history management.
14
+ class PiAI(Provider):
15
+ """
16
+ PiAI is a provider class for interacting with the Pi.ai chat API.
18
17
 
19
18
  Attributes:
20
- scraper (cloudscraper.CloudScraper): The scraper instance for handling HTTP requests.
21
- url (str): The API endpoint for the Pi.ai chat service.
22
- AVAILABLE_VOICES (Dict[str, int]): A dictionary mapping voice names to their corresponding IDs.
23
- headers (Dict[str, str]): The headers to be used in HTTP requests to the API.
19
+ knowledge_cutoff (str): The knowledge cutoff date for the model
20
+ AVAILABLE_VOICES (Dict[str, int]): Available voice options for audio responses
24
21
  """
22
+
25
23
  def __init__(
26
24
  self,
27
25
  is_conversation: bool = True,
@@ -33,19 +31,30 @@ class PiAI(Provider):
33
31
  proxies: dict = {},
34
32
  history_offset: int = 10250,
35
33
  act: str = None,
34
+ logging: bool = False,
36
35
  ):
37
- """Initializes the PiAI class for interacting with the Pi.ai chat API.
36
+ """
37
+ Initializes the PiAI provider with specified parameters.
38
38
 
39
39
  Args:
40
- is_conversation (bool, optional): Flag for enabling conversational mode. Defaults to True.
41
- max_tokens (int, optional): Maximum number of tokens to generate in the response. Defaults to 600.
42
- timeout (int, optional): Timeout duration for HTTP requests in seconds. Defaults to 30.
43
- intro (str, optional): Introductory prompt for the conversation. Defaults to None.
44
- filepath (str, optional): Path to a file for storing conversation history. Defaults to None.
45
- update_file (bool, optional): Indicates whether to update the file with new prompts and responses. Defaults to True.
46
- proxies (dict, optional): Dictionary of HTTP request proxies. Defaults to an empty dictionary.
47
- history_offset (int, optional): Number of last messages to retain in conversation history. Defaults to 10250.
48
- act (str|int, optional): Key or index for selecting an awesome prompt to use as an intro. Defaults to None.
40
+ is_conversation (bool): Whether to maintain conversation history
41
+ max_tokens (int): Maximum number of tokens in response
42
+ timeout (int): Request timeout in seconds
43
+ intro (str): Custom introduction message
44
+ filepath (str): Path to save conversation history
45
+ update_file (bool): Whether to update conversation history file
46
+ proxies (dict): Proxy configuration
47
+ history_offset (int): Conversation history limit
48
+ act (str): Custom personality/act for the AI
49
+ logging (bool): Enable debug logging
50
+
51
+ Examples:
52
+ >>> ai = PiAI(logging=True)
53
+ >>> ai.ask("What's the weather today?", "Alice")
54
+ Sends a prompt to Pi.ai and returns the response.
55
+
56
+ >>> ai.chat("Tell me a joke", voice_name="William")
57
+ Initiates a chat with Pi.ai using the provided prompt.
49
58
  """
50
59
  self.scraper = cloudscraper.create_scraper()
51
60
  self.url = 'https://pi.ai/api/chat'
@@ -84,7 +93,7 @@ class PiAI(Provider):
84
93
  self.max_tokens_to_sample = max_tokens
85
94
  self.stream_chunk_size = 64
86
95
  self.timeout = timeout
87
- self.last_response = {}
96
+ self.last_response = {} if self.is_conversation else {'text': ""}
88
97
  self.conversation_id = None
89
98
 
90
99
  self.__available_optimizers = (
@@ -105,11 +114,35 @@ class PiAI(Provider):
105
114
  )
106
115
  self.conversation.history_offset = history_offset
107
116
  self.session.proxies = proxies
117
+
118
+ # Initialize logger
119
+ self.logger = LitLogger(name="PiAI", format=LogFormat.MODERN_EMOJI, color_scheme=ColorScheme.CYBERPUNK) if logging else None
120
+
121
+ self.knowledge_cutoff = "December 2023"
122
+
108
123
  # Initialize conversation ID
109
124
  if self.is_conversation:
110
125
  self.start_conversation()
111
126
 
127
+ if self.logger:
128
+ self.logger.debug("PiAI instance initialized")
129
+
112
130
  def start_conversation(self) -> str:
131
+ """
132
+ Initializes a new conversation and returns the conversation ID.
133
+
134
+ Returns:
135
+ str: The conversation ID from Pi.ai
136
+
137
+ Examples:
138
+ >>> ai = PiAI()
139
+ >>> conversation_id = ai.start_conversation()
140
+ >>> print(conversation_id)
141
+ 'abc123xyz'
142
+ """
143
+ if self.logger:
144
+ self.logger.debug("Starting new conversation")
145
+
113
146
  response = self.scraper.post(
114
147
  "https://pi.ai/api/chat/start",
115
148
  headers=self.headers,
@@ -117,8 +150,16 @@ class PiAI(Provider):
117
150
  json={},
118
151
  timeout=self.timeout
119
152
  )
153
+
154
+ if not response.ok and self.logger:
155
+ self.logger.error(f"Failed to start conversation: {response.status_code}")
156
+
120
157
  data = response.json()
121
158
  self.conversation_id = data['conversations'][0]['sid']
159
+
160
+ if self.logger:
161
+ self.logger.debug(f"Conversation started with ID: {self.conversation_id}")
162
+
122
163
  return self.conversation_id
123
164
 
124
165
  def ask(
@@ -132,26 +173,31 @@ class PiAI(Provider):
132
173
  verbose:bool = None,
133
174
  output_file:str = None
134
175
  ) -> dict:
135
- """Interact with the AI by sending a prompt and receiving a response.
176
+ """
177
+ Interact with Pi.ai by sending a prompt and receiving a response.
136
178
 
137
179
  Args:
138
- prompt (str): The prompt to be sent to the AI.
139
- voice_name (str): The name of the voice to use for audio responses.
140
- stream (bool, optional): Flag for streaming response. Defaults to False.
141
- raw (bool, optional): If True, returns the raw response as received. Defaults to False.
142
- optimizer (str, optional): Name of the prompt optimizer to use - `[code, shell_command]`. Defaults to None.
143
- conversationally (bool, optional): If True, chat conversationally when using optimizer. Defaults to False.
144
- verbose (bool, optional): If True, provides detailed output. Defaults to None.
145
- output_file (str, optional): File path to save the output. Defaults to None.
180
+ prompt (str): The prompt to be sent to Pi.ai
181
+ voice_name (str): The name of the voice to use for audio responses
182
+ stream (bool): Flag for streaming response
183
+ raw (bool): If True, returns the raw response as received
184
+ optimizer (str): Name of the prompt optimizer to use
185
+ conversationally (bool): If True, chat conversationally when using optimizer
186
+ verbose (bool): If True, provides detailed output
187
+ output_file (str): File path to save the output
146
188
 
147
189
  Returns:
148
- dict: A dictionary containing the AI's response.
149
- ```json
150
- {
151
- "text": "How may I assist you today?"
152
- }
153
- ```
190
+ dict: A dictionary containing the AI's response
191
+
192
+ Examples:
193
+ >>> ai = PiAI(logging=True)
194
+ >>> response = ai.ask("Hello!", "Alice", verbose=True)
195
+ >>> print(response['text'])
196
+ 'Hi! How can I help you today?'
154
197
  """
198
+ if self.logger:
199
+ self.logger.debug(f"ask() called with prompt: {prompt}, voice: {voice_name}")
200
+
155
201
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
156
202
  if optimizer:
157
203
  if optimizer in self.__available_optimizers:
@@ -159,6 +205,8 @@ class PiAI(Provider):
159
205
  conversation_prompt if conversationally else prompt
160
206
  )
161
207
  else:
208
+ if self.logger:
209
+ self.logger.error(f"Invalid optimizer: {optimizer}")
162
210
  raise Exception(
163
211
  f"Optimizer is not one of {self.__available_optimizers}"
164
212
  )
@@ -187,8 +235,8 @@ class PiAI(Provider):
187
235
  resp = dict(text=streaming_text)
188
236
  self.last_response.update(resp)
189
237
  yield parsed_data if raw else resp
190
- except json.JSONDecodeError:
191
- continue
238
+ except:continue
239
+
192
240
  self.conversation.update_chat_history(
193
241
  prompt, self.get_message(self.last_response)
194
242
  )
@@ -210,20 +258,30 @@ class PiAI(Provider):
210
258
  verbose:bool = True,
211
259
  output_file:str = "PiAi.mp3"
212
260
  ) -> str:
213
- """Generates a response based on the provided prompt.
261
+ """
262
+ Generates a response based on the provided prompt.
214
263
 
215
264
  Args:
216
- prompt (str): The input prompt to be sent for generating a response.
217
- voice_name (str, optional): The name of the voice to use for the response. Defaults to "Alice".
218
- stream (bool, optional): Flag for streaming the response. Defaults to False.
219
- optimizer (str, optional): The name of the prompt optimizer to use - `[code, shell_command]`. Defaults to None.
220
- conversationally (bool, optional): Indicates whether to chat conversationally when using the optimizer. Defaults to False.
221
- verbose (bool, optional): Flag to indicate if verbose output is desired. Defaults to True.
222
- output_file (str, optional): The file path where the audio will be saved. Defaults to "PiAi.mp3".
265
+ prompt (str): Input prompt for generating response
266
+ voice_name (str): Voice to use for audio response
267
+ stream (bool): Enable response streaming
268
+ optimizer (str): Prompt optimizer to use
269
+ conversationally (bool): Enable conversational mode with optimizer
270
+ verbose (bool): Enable verbose output
271
+ output_file (str): Audio output file path
223
272
 
224
273
  Returns:
225
- str: The generated response.
274
+ str: The generated response
275
+
276
+ Examples:
277
+ >>> ai = PiAI(logging=True)
278
+ >>> response = ai.chat("Tell me a joke", voice_name="William")
279
+ >>> print(response)
280
+ 'Why did the scarecrow win an award? Because he was outstanding in his field!'
226
281
  """
282
+ if self.logger:
283
+ self.logger.debug(f"chat() called with prompt: {prompt}, voice: {voice_name}")
284
+
227
285
  assert (
228
286
  voice_name in self.AVAILABLE_VOICES
229
287
  ), f"Voice '{voice_name}' not one of [{', '.join(self.AVAILABLE_VOICES.keys())}]"
@@ -271,6 +329,9 @@ class PiAI(Provider):
271
329
  verbose (bool): Flag to indicate if verbose output is desired.
272
330
  output_file (str): The file path where the audio will be saved.
273
331
  """
332
+ if self.logger:
333
+ self.logger.debug(f"Downloading audio with voice: {voice_name}")
334
+
274
335
  params = {
275
336
  'mode': 'eager',
276
337
  'voice': f'voice{self.AVAILABLE_VOICES[voice_name]}',
@@ -278,11 +339,16 @@ class PiAI(Provider):
278
339
  }
279
340
  try:
280
341
  audio_response = self.scraper.get('https://pi.ai/api/chat/voice', params=params, cookies=self.cookies, headers=self.headers, timeout=self.timeout)
342
+ if not audio_response.ok and self.logger:
343
+ self.logger.error(f"Audio download failed: {audio_response.status_code}")
344
+
281
345
  audio_response.raise_for_status() # Raise an exception for bad status codes
282
346
  with open(output_file, "wb") as file:
283
347
  file.write(audio_response.content)
284
348
  if verbose:print("\nAudio file downloaded successfully.")
285
349
  except requests.exceptions.RequestException as e:
350
+ if self.logger:
351
+ self.logger.error(f"Audio download failed: {e}")
286
352
  if verbose:print(f"\nFailed to download audio file. Error: {e}")
287
353
 
288
354
  if __name__ == '__main__':
@@ -501,3 +501,9 @@ class Phindv2(Provider):
501
501
  else ""
502
502
  )
503
503
 
504
+ if __name__ == "__main__":
505
+ from rich import print
506
+
507
+ ai = Phindv2()
508
+ print(ai.chat("Tell me a joke"))
509
+ # Returns the chat response from the Phindv2 API.
@@ -1,17 +1,21 @@
1
1
  import requests
2
- from typing import Any, AsyncGenerator, Dict, Optional
2
+ from typing import Any, AsyncGenerator, Dict, Optional, Union, Generator
3
3
  import json
4
4
 
5
5
  from webscout.AIutel import Optimizers
6
6
  from webscout.AIutel import Conversation
7
7
  from webscout.AIutel import AwesomePrompts, sanitize_stream
8
- from webscout.AIbase import Provider, AsyncProvider
8
+ from webscout.AIbase import Provider, AsyncProvider
9
9
  from webscout import exceptions
10
10
  from webscout import LitAgent as Lit
11
+ from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
11
12
 
12
13
  class PIZZAGPT(Provider):
13
14
  """
14
- A class to interact with the PizzaGPT API.
15
+ PIZZAGPT is a provider class for interacting with the PizzaGPT API.
16
+
17
+ Attributes:
18
+ knowledge_cutoff (str): The knowledge cutoff date for the model
15
19
  """
16
20
 
17
21
  def __init__(
@@ -25,20 +29,18 @@ class PIZZAGPT(Provider):
25
29
  proxies: dict = {},
26
30
  history_offset: int = 10250,
27
31
  act: str = None,
32
+ logging: bool = False,
28
33
  ) -> None:
29
34
  """
30
- Initializes the PizzaGPT API with given parameters.
35
+ Initializes the PizzaGPT provider with the specified parameters.
31
36
 
32
- Args:
33
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
34
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
35
- timeout (int, optional): Http request timeout. Defaults to 30.
36
- intro (str, optional): Conversation introductory prompt. Defaults to None.
37
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
38
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
39
- proxies (dict, optional): Http request proxies. Defaults to {}.
40
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
41
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
37
+ Examples:
38
+ >>> ai = PIZZAGPT(logging=True)
39
+ >>> ai.ask("What's the weather today?")
40
+ Sends a prompt to the PizzaGPT API and returns the response.
41
+
42
+ >>> ai.chat("Tell me a joke")
43
+ Initiates a chat with the PizzaGPT API using the provided prompt.
42
44
  """
43
45
  self.session = requests.Session()
44
46
  self.is_conversation = is_conversation
@@ -85,6 +87,10 @@ class PIZZAGPT(Provider):
85
87
  )
86
88
  self.conversation.history_offset = history_offset
87
89
  self.session.proxies = proxies
90
+
91
+ # Initialize logger
92
+ self.logger = LitLogger(name="PIZZAGPT", format=LogFormat.MODERN_EMOJI, color_scheme=ColorScheme.CYBERPUNK) if logging else None
93
+
88
94
 
89
95
  def ask(
90
96
  self,
@@ -93,23 +99,16 @@ class PIZZAGPT(Provider):
93
99
  raw: bool = False,
94
100
  optimizer: str = None,
95
101
  conversationally: bool = False,
96
- ) -> dict:
97
- """Chat with AI
102
+ ) -> Dict[str, Any]:
103
+ """
104
+ Sends a prompt to the PizzaGPT API and returns the response.
98
105
 
99
- Args:
100
- prompt (str): Prompt to be send.
101
- stream (bool, optional): Flag for streaming response. Defaults to False.
102
- raw (bool, optional): Stream back raw response as received. Defaults to False.
103
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
104
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
105
- Returns:
106
- dict : {}
107
- ```json
108
- {
109
- "text" : "How may I assist you today?"
110
- }
111
- ```
106
+ Examples:
107
+ >>> ai = PIZZAGPT()
108
+ >>> ai.ask("What's the weather today?")
109
+ Returns the response from the PizzaGPT API.
112
110
  """
111
+
113
112
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
114
113
  if optimizer:
115
114
  if optimizer in self.__available_optimizers:
@@ -117,27 +116,37 @@ class PIZZAGPT(Provider):
117
116
  conversation_prompt if conversationally else prompt
118
117
  )
119
118
  else:
120
- raise Exception(
121
- f"Optimizer is not one of {self.__available_optimizers}"
122
- )
119
+ if self.logger:
120
+ self.logger.error(f"Invalid optimizer: {optimizer}")
121
+ raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
123
122
 
124
123
  self.session.headers.update(self.headers)
125
124
  payload = {"question": conversation_prompt}
126
125
 
127
- response = self.session.post(
128
- self.api_endpoint, json=payload, timeout=self.timeout
129
- )
130
- if not response.ok:
131
- raise exceptions.FailedToGenerateResponseError(
132
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
126
+ try:
127
+ response = self.session.post(
128
+ self.api_endpoint, json=payload, timeout=self.timeout
133
129
  )
130
+ if self.logger:
131
+ self.logger.debug(response)
132
+ if not response.ok:
133
+ if self.logger:
134
+ self.logger.error(f"Failed to generate response: {response.status_code} {response.reason}")
135
+ raise exceptions.FailedToGenerateResponseError(
136
+ f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
137
+ )
134
138
 
135
- resp = response.json()
136
- self.last_response.update(dict(text=resp['content']))
137
- self.conversation.update_chat_history(
138
- prompt, self.get_message(self.last_response)
139
- )
140
- return self.last_response # Return the updated last_response
139
+ resp = response.json()
140
+ if self.logger:
141
+ self.logger.debug(resp)
142
+ self.last_response.update(dict(text=resp['content']))
143
+ self.conversation.update_chat_history(
144
+ prompt, self.get_message(self.last_response)
145
+ )
146
+ return self.last_response
147
+
148
+ except Exception as e:
149
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
141
150
 
142
151
  def chat(
143
152
  self,
@@ -146,14 +155,13 @@ class PIZZAGPT(Provider):
146
155
  optimizer: str = None,
147
156
  conversationally: bool = False,
148
157
  ) -> str:
149
- """Generate response `str`
150
- Args:
151
- prompt (str): Prompt to be send.
152
- stream (bool, optional): Flag for streaming response. Defaults to False.
153
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
154
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
155
- Returns:
156
- str: Response generated
158
+ """
159
+ Initiates a chat with the PizzaGPT API using the provided prompt.
160
+
161
+ Examples:
162
+ >>> ai = PIZZAGPT()
163
+ >>> ai.chat("Tell me a joke")
164
+ Returns the chat response from the PizzaGPT API.
157
165
  """
158
166
 
159
167
  return self.get_message(
@@ -163,6 +171,7 @@ class PIZZAGPT(Provider):
163
171
  conversationally=conversationally,
164
172
  )
165
173
  )
174
+
166
175
  def get_message(self, response: dict) -> str:
167
176
  """Retrieves message only from response
168
177
 
@@ -177,7 +186,7 @@ class PIZZAGPT(Provider):
177
186
  if __name__ == "__main__":
178
187
  from rich import print
179
188
 
180
- ai = PIZZAGPT()
189
+ ai = PIZZAGPT(logging=True)
181
190
  # Stream the response
182
191
  response = ai.chat("hi")
183
192
  for chunk in response: