webscout 7.0-py3-none-any.whl → 7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of webscout might be problematic.

webscout/LLM.py CHANGED
@@ -418,7 +418,7 @@ if __name__ == "__main__":
     # Example usage
     try:
         # Initialize LLM with Llama 3 model
-        llm = LLM(model="Qwen/Qwen2.5-Coder-32B-Instruct")
+        llm = LLM(model="mistralai/Mistral-Small-24B-Instruct-2501")
 
         # Example messages
         messages = [
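
The only change in this hunk is the example's default model string. A minimal sketch of the updated example usage, assuming the rest of the __main__ block in webscout/LLM.py is unchanged from 7.0 (the chat call is illustrative, not confirmed by the diff):

    from webscout.LLM import LLM  # import path taken from the diffed file

    # 7.1 swaps the example default from Qwen2.5-Coder-32B to Mistral-Small-24B
    llm = LLM(model="mistralai/Mistral-Small-24B-Instruct-2501")
    messages = [
        {"role": "user", "content": "Hello!"},  # illustrative message
    ]
    # response = llm.chat(messages)  # assumed interface; see webscout/LLM.py for the real call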
@@ -1,16 +1,35 @@
 import requests
-import re
 import json
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
+from typing import Any, Dict, Optional, Union, Generator, List
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
-import httpx
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+
+class BLACKBOXAI(Provider):
+    """
+    BlackboxAI provider for interacting with the Blackbox API.
+    Supports synchronous operations with multiple models.
+    """
+    url = "https://api.blackbox.ai"
+    api_endpoint = "https://api.blackbox.ai/api/chat"
+
+
+
+    AVAILABLE_MODELS = {
+        "deepseek-v3": "deepseek-ai/DeepSeek-V3",
+        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
+        "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
+        "mixtral-small-28b": "mistralai/Mistral-Small-24B-Instruct-2501",
+        "dbrx-instruct": "databricks/dbrx-instruct",
+        "qwq-32b": "Qwen/QwQ-32B-Preview",
+        "hermes-2-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "claude-3.5-sonnet": "claude-sonnet-3.5",
+        "gemini-1.5-flash": "gemini-1.5-flash",
+        "gemini-1.5-pro": "gemini-pro",
+        "gemini-2.0-flash": "Gemini-Flash-2.0",
+    }
 
-#------------------------------------------------------BLACKBOXAI--------------------------------------------------------
-class BLACKBOXAI:
     def __init__(
         self,
         is_conversation: bool = True,
@@ -22,51 +41,38 @@ class BLACKBOXAI:
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = None,
+        model: str = "deepseek-ai/DeepSeek-V3",
+        logging: bool = False,
+        system_message: str = "You are a helpful AI assistant."
     ):
-        """Instantiates BLACKBOXAI
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): Model name. Defaults to "Phind Model".
-        """
+        """Initialize BlackboxAI with enhanced configuration options."""
+        self.logger = LitLogger(
+            name="BlackboxAI",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
         self.session = requests.Session()
         self.max_tokens_to_sample = max_tokens
         self.is_conversation = is_conversation
-        self.chat_endpoint = "https://api.blackbox.ai/api/chat"
-        self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.model = model
-        self.previewToken: str = None
-        self.userId: str = ""
-        self.codeModelMode: bool = True
-        self.id: str = ""
-        self.agentMode: dict = {}
-        self.trendingAgentMode: dict = {}
-        self.isMicMode: bool = False
+        self.model = self.get_model(model)
+        self.system_message = system_message
 
         self.headers = {
             "Content-Type": "application/json",
-            "User-Agent": "",
             "Accept": "*/*",
-            "Accept-Encoding": "Identity",
         }
 
+        if self.logger:
+            self.logger.info(f"Initializing BlackboxAI with model: {self.model}")
+
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
-        self.session.headers.update(self.headers)
+
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
@@ -74,12 +80,62 @@ class BLACKBOXAI:
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
 
+    @classmethod
+    def get_model(self, model: str) -> str:
+        """Resolve model name from alias"""
+        if model in self.AVAILABLE_MODELS:
+            return self.AVAILABLE_MODELS[model]
+        raise ValueError(f"Unknown model: {model}. Available models: {', '.join(self.AVAILABLE_MODELS)}")
+
+    def _make_request(
+        self,
+        messages: List[Dict[str, str]],
+        stream: bool = False
+    ) -> Generator[str, None, None]:
+        """Make synchronous request to BlackboxAI API."""
+        if self.logger:
+            self.logger.debug(f"Making request with {len(messages)} messages")
+
+        data = {
+            "messages": messages,
+            "model": self.model,
+            "max_tokens": self.max_tokens_to_sample
+        }
+
+        try:
+            response = self.session.post(
+                self.api_endpoint,
+                json=data,
+                headers=self.headers,
+                stream=stream,
+                timeout=self.timeout
+            )
+
+            if not response.ok:
+                error_msg = f"API request failed: {response.status_code} - {response.text}"
+                if self.logger:
+                    self.logger.error(error_msg)
+                raise exceptions.FailedToGenerateResponseError(error_msg)
+
+            if stream:
+                for line in response.iter_lines(decode_unicode=True):
+                    if line:
+                        yield line
+            else:
+                yield response.text
+
+        except requests.exceptions.RequestException as e:
+            if self.logger:
+                self.logger.error(f"Request failed: {str(e)}")
+            raise exceptions.ProviderConnectionError(f"Connection error: {str(e)}")
+
     def ask(
         self,
         prompt: str,
@@ -87,83 +143,36 @@ class BLACKBOXAI:
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "print('How may I help you today?')"
-        }
-        ```
-        """
+    ) -> Union[Dict[str, str], Generator[Dict[str, str], None, None]]:
+        """Send a prompt to BlackboxAI API and return the response."""
+        if self.logger:
+            self.logger.debug(f"Processing request [stream={stream}]")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer: {optimizer}")
+                raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")
 
-        self.session.headers.update(self.headers)
-        payload = {
-            "messages": [
-                # json.loads(prev_messages),
-                {"content": conversation_prompt, "role": "user"}
-            ],
-            "id": self.id,
-            "previewToken": self.previewToken,
-            "userId": self.userId,
-            "codeModelMode": self.codeModelMode,
-            "agentMode": self.agentMode,
-            "trendingAgentMode": self.trendingAgentMode,
-            "isMicMode": self.isMicMode,
-        }
+        messages = [
+            {"role": "system", "content": self.system_message},
+            {"role": "user", "content": conversation_prompt}
+        ]
 
         def for_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-            )
-            if (
-                not response.ok
-                or not response.headers.get("Content-Type")
-                == "text/plain; charset=utf-8"
-            ):
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-            streaming_text = ""
-            for value in response.iter_lines(
-                decode_unicode=True,
-                chunk_size=self.stream_chunk_size,
-
-            ):
-                try:
-                    if bool(value):
-                        streaming_text += value + ("\n" if stream else "")
-
-                        resp = dict(text=streaming_text)
-                        self.last_response.update(resp)
-                        yield value if raw else resp
-                except json.decoder.JSONDecodeError:
-                    pass
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            for text in self._make_request(messages, stream=True):
+                yield {"text": text}
 
         def for_non_stream():
-            for _ in for_stream():
-                pass
+            response_text = next(self._make_request(messages, stream=False))
+            self.last_response = {"text": response_text}
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
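
For reference, a short sketch of the return shapes implied by the rewritten ask() above; everything here follows the diffed code, with the prompt text being illustrative:

    ai = BLACKBOXAI(model="deepseek-v3")  # alias resolved to "deepseek-ai/DeepSeek-V3" by get_model()

    # stream=False: ask() returns a single {"text": ...} dict
    resp = ai.ask("Hello", stream=False)
    print(resp["text"])

    # stream=True: ask() returns a generator of {"text": chunk} dicts
    for part in ai.ask("Hello", stream=True):
        print(part["text"], end="", flush=True)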
@@ -174,20 +183,17 @@ class BLACKBOXAI:
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate response as string."""
+        if self.logger:
+            self.logger.debug(f"Chat request initiated [stream={stream}]")
 
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt,
+                stream=True,
+                optimizer=optimizer,
+                conversationally=conversationally
             ):
                 yield self.get_message(response)
 
@@ -195,7 +201,7 @@ class BLACKBOXAI:
             return self.get_message(
                 self.ask(
                     prompt,
-                    False,
+                    stream=False,
                     optimizer=optimizer,
                     conversationally=conversationally,
                 )
@@ -203,28 +209,21 @@ class BLACKBOXAI:
 
         return for_stream() if stream else for_non_stream()
 
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Extract message from response dictionary."""
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
 
-    # Function to clean the response text
-    def clean_response(response_text: str) -> str:
-        # Remove web search results
-        cleaned_response = re.sub(r'\$~~~\$.*?\$~~~\$', '', response_text, flags=re.DOTALL)
-        # Remove any remaining special characters or markers
-        cleaned_response = re.sub(r'\$~~~', '', cleaned_response)
-        return cleaned_response.strip()
-if __name__ == '__main__':
+if __name__ == "__main__":
     from rich import print
-    ai = BLACKBOXAI()
-    response = ai.chat("tell me about india")
-    for chunk in response:
-        print(chunk, end="", flush=True)
+
+    # Example usage
+    ai = BLACKBOXAI(model="deepseek-v3", logging=True)
+
+    try:
+        print("Non-streaming response:")
+        response = ai.chat("What is quantum computing?")
+        print(response)
+
+    except Exception as e:
+        print(f"Error: {str(e)}")
@@ -8,33 +8,42 @@ from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
 import cloudscraper
+from webscout import LitAgent
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
 
 class Cloudflare(Provider):
+    """
+    Cloudflare provider to interact with Cloudflare's text generation API.
+    Includes logging capabilities using LitLogger and uses LitAgent for user-agent.
+    """
 
+    # Updated AVAILABLE_MODELS from given JSON data
     AVAILABLE_MODELS = [
-        "@cf/llava-hf/llava-1.5-7b-hf",
-        "@cf/unum/uform-gen2-qwen-500m",
-        "@cf/facebook/detr-resnet-50",
-        "@cf/facebook/bart-large-cnn",
         "@hf/thebloke/deepseek-coder-6.7b-base-awq",
         "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
-        "@cf/deepseek-ai/deepseek-math-7b-base",
         "@cf/deepseek-ai/deepseek-math-7b-instruct",
+        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
         "@cf/thebloke/discolm-german-7b-v1-awq",
         "@cf/tiiuae/falcon-7b-instruct",
-        "@cf/google/gemma-2b-it-lora",
         "@hf/google/gemma-7b-it",
-        "@cf/google/gemma-7b-it-lora",
         "@hf/nousresearch/hermes-2-pro-mistral-7b",
         "@hf/thebloke/llama-2-13b-chat-awq",
-        "@cf/meta-llama/llama-2-7b-chat-hf-lora",
+        "@cf/meta/llama-2-7b-chat-fp16",
+        "@cf/meta/llama-2-7b-chat-int8",
         "@cf/meta/llama-3-8b-instruct",
         "@cf/meta/llama-3-8b-instruct-awq",
         "@cf/meta/llama-3.1-8b-instruct",
+        "@cf/meta/llama-3.1-8b-instruct-awq",
+        "@cf/meta/llama-3.1-8b-instruct-fp8",
+        "@cf/meta/llama-3.2-11b-vision-instruct",
+        "@cf/meta/llama-3.2-1b-instruct",
+        "@cf/meta/llama-3.2-3b-instruct",
+        "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
         "@hf/thebloke/llamaguard-7b-awq",
+        "@hf/meta-llama/meta-llama-3-8b-instruct",
+        "@cf/mistral/mistral-7b-instruct-v0.1",
         "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
         "@hf/mistral/mistral-7b-instruct-v0.2",
-        "@cf/mistral/mistral-7b-instruct-v0.2-lora",
         "@hf/thebloke/neural-chat-7b-v3-1-awq",
         "@cf/openchat/openchat-3.5-0106",
         "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
@@ -61,26 +70,25 @@ class Cloudflare(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "@cf/meta/llama-3.1-8b-instruct",
-        system_prompt: str = "You are a helpful assistant."
+        model: str = "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+        system_prompt: str = "You are a helpful assistant.",
+        logging: bool = False
     ):
-        """Instantiates Cloudflare
+        """Instantiates Cloudflare Provider
 
         Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): Model to use for generating text.
-                Defaults to "@cf/meta/llama-3.1-8b-instruct".
-                Choose from AVAILABLE_MODELS.
-            system_prompt (str, optional): System prompt for Cloudflare.
-                Defaults to "You are a helpful assistant.".
+            is_conversation (bool, optional): Flag for conversational mode. Defaults to True.
+            max_tokens (int, optional): Max tokens to generate. Defaults to 600.
+            timeout (int, optional): HTTP request timeout. Defaults to 30.
+            intro (str, optional): Introductory prompt. Defaults to None.
+            filepath (str, optional): File path for conversation history. Defaults to None.
+            update_file (bool, optional): Update history file flag. Defaults to True.
+            proxies (dict, optional): Request proxies. Defaults to {}.
+            history_offset (int, optional): Chat history limit. Defaults to 10250.
+            act (str, optional): Awesome prompt key/index. Defaults to None.
+            model (str, optional): Model to use. Defaults to "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b".
+            system_prompt (str, optional): System prompt for conversation. Defaults to "You are a helpful assistant.".
+            logging (bool, optional): Enable logging if True. Defaults to False.
         """
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
@@ -94,6 +102,7 @@ class Cloudflare(Provider):
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+
         self.headers = {
             'Accept': 'text/event-stream',
             'Accept-Encoding': 'gzip, deflate, br, zstd',
@@ -108,7 +117,7 @@ class Cloudflare(Provider):
             'Sec-Fetch-Dest': 'empty',
             'Sec-Fetch-Mode': 'cors',
             'Sec-Fetch-Site': 'same-origin',
-            'User-Agent': webscout.LitAgent().random()
+            'User-Agent': LitAgent().random()
         }
 
         self.cookies = {
@@ -122,21 +131,28 @@ class Cloudflare(Provider):
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        # FIX: Initialize the session here
-        self.session = cloudscraper.create_scraper()
+
+        # Initialize session and apply proxies
+        self.session = cloudscraper.create_scraper()
         self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
         Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
+            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+            if act else intro or Conversation.intro
         )
+        self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+
+        # Initialize logger if logging is enabled
+        self.logger = LitLogger(
+            name="Cloudflare",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("Cloudflare initialized successfully")
 
     def ask(
         self,
@@ -149,14 +165,13 @@ class Cloudflare(Provider):
         """Chat with AI
 
         Args:
-            prompt (str): Prompt to be send.
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Whether to stream the response. Defaults to False.
-            raw (bool, optional): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
+            raw (bool, optional): Return raw response. Defaults to False.
+            optimizer (str, optional): Optimizer to use. Defaults to None.
+            conversationally (bool, optional): Conversational mode flag. Defaults to False.
         Returns:
-            The response from the API.
+            dict: Response from the API.
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -164,10 +179,12 @@ class Cloudflare(Provider):
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
         payload = {
             "messages": [
@@ -181,11 +198,19 @@ class Cloudflare(Provider):
         }
 
         def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to Cloudflare API...")
             response = self.scraper.post(
-                self.chat_endpoint, headers=self.headers, cookies=self.cookies, data=json.dumps(payload), stream=True, timeout=self.timeout
+                self.chat_endpoint,
+                headers=self.headers,
+                cookies=self.cookies,
+                data=json.dumps(payload),
+                stream=True,
+                timeout=self.timeout
             )
-
             if not response.ok:
+                if self.logger:
+                    self.logger.error(f"Request failed: ({response.status_code}, {response.reason})")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason})"
                 )
@@ -197,9 +222,9 @@ class Cloudflare(Provider):
                     streaming_response += content
                     yield content if raw else dict(text=content)
             self.last_response.update(dict(text=streaming_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+            if self.logger:
+                self.logger.info("Streaming response completed successfully")
 
         def for_non_stream():
             for _ in for_stream():
@@ -215,48 +240,37 @@ class Cloudflare(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
+        """Generate response string from chat
+
         Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Stream response flag. Defaults to False.
+            optimizer (str, optional): Optimizer name. Defaults to None.
+            conversationally (bool, optional): Conversational mode flag. Defaults to False.
         Returns:
-            str: Response generated
+            str: Generated response.
         """
-
         def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
-
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
+            return self.get_message(self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally))
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
+        """Extracts the message text from the response
 
         Args:
-            response (dict): Response generated by `self.ask`
-
+            response (dict): API response.
         Returns:
-            str: Message extracted
+            str: Extracted text.
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
+
 if __name__ == '__main__':
     from rich import print
-    ai = Cloudflare(timeout=5000)
+    ai = Cloudflare(timeout=5000, logging=True)
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
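
As with the Blackbox provider, a minimal usage sketch against the updated Cloudflare class, based only on the diffed code; the import path is assumed. One caveat visible in the diff itself: for_stream() posts via self.scraper, while the diffed __init__ assigns only self.session from cloudscraper.create_scraper(), so the sketch assumes self.scraper is defined elsewhere in the file:

    from webscout import Cloudflare  # assumed re-export path

    ai = Cloudflare(
        model="@cf/meta/llama-3.3-70b-instruct-fp8-fast",  # any entry from AVAILABLE_MODELS
        logging=True,
    )
    for chunk in ai.chat("write a poem about AI", stream=True):
        print(chunk, end="", flush=True)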