webscout 7.3-py3-none-any.whl → 7.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

@@ -1,3 +1,4 @@
- from .felo_search import Felo
- from .DeepFind import DeepFind
- from .ISou import Isou
+ from .felo_search import *
+ from .DeepFind import *
+ from .ISou import *
+ from .genspark_search import *
@@ -0,0 +1,208 @@
+ import cloudscraper
+ from uuid import uuid4
+ import json
+ import re
+ from urllib.parse import quote  # used below to URL-encode the search query
+ from typing import Dict, Optional, Generator, Union, Any
+ 
+ from webscout.AIbase import AISearch
+ from webscout import exceptions
+ from webscout import LitAgent
+ 
+ 
+ class Response:
+     """A wrapper class for Genspark API responses.
+ 
+     This class automatically converts response objects to their text representation
+     when printed or converted to string.
+ 
+     Attributes:
+         text (str): The text content of the response
+ 
+     Example:
+         >>> response = Response("Hello, world!")
+         >>> print(response)
+         Hello, world!
+         >>> str(response)
+         'Hello, world!'
+     """
+     def __init__(self, text: str):
+         self.text = text
+ 
+     def __str__(self):
+         return self.text
+ 
+     def __repr__(self):
+         return self.text
+ 
+ 
+ class Genspark(AISearch):
+     """A class to interact with the Genspark AI search API.
+ 
+     Genspark provides a powerful search interface that returns AI-generated responses
+     based on web content. It supports both streaming and non-streaming responses.
+ 
+     Basic Usage:
+         >>> from webscout import Genspark
+         >>> ai = Genspark()
+         >>> # Non-streaming example
+         >>> response = ai.search("What is Python?")
+         >>> print(response)
+         Python is a high-level programming language...
+ 
+         >>> # Streaming example
+         >>> for chunk in ai.search("Tell me about AI", stream=True):
+         ...     print(chunk, end="", flush=True)
+         Artificial Intelligence is...
+ 
+         >>> # Raw response format
+         >>> for chunk in ai.search("Hello", stream=True, raw=True):
+         ...     print(chunk)
+         {'text': 'Hello'}
+         {'text': ' there!'}
+ 
+     Args:
+         timeout (int, optional): Request timeout in seconds. Defaults to 30.
+         proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+         max_tokens (int, optional): Maximum tokens to generate. Defaults to 600.
+     """
+ 
+     def __init__(
+         self,
+         timeout: int = 30,
+         proxies: Optional[dict] = None,
+         max_tokens: int = 600,
+     ):
+         """Initialize the Genspark API client.
+ 
+         Args:
+             timeout (int, optional): Request timeout in seconds. Defaults to 30.
+             proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+             max_tokens (int, optional): Maximum tokens to generate. Defaults to 600.
+         """
+         self.session = cloudscraper.create_scraper()
+         self.max_tokens = max_tokens
+         self.chat_endpoint = "https://www.genspark.ai/api/search/stream"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+ 
+         self.headers = {
+             "Accept": "*/*",
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "Content-Type": "application/json",
+             "DNT": "1",
+             "Origin": "https://www.genspark.ai",
+             "Priority": "u=1, i",
+             "Sec-CH-UA": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": '"Windows"',
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             "User-Agent": LitAgent().random(),
+         }
+ 
+         self.cookies = {
+             "i18n_redirected": "en-US",
+             "agree_terms": "0",
+             "session_id": uuid4().hex,
+         }
+ 
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies or {}
+ 
+     def search(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+     ) -> Union[Dict[str, Any], Generator[Union[Dict[str, Any], str], None, None]]:
+         """Search using the Genspark API and get AI-generated responses.
+ 
+         Args:
+             prompt (str): The search query or prompt to send to the API.
+             stream (bool, optional): If True, yields response chunks as they arrive.
+                 If False, returns complete response. Defaults to False.
+             raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
+                 If False, returns Response objects that convert to text automatically.
+                 Defaults to False.
+ 
+         Returns:
+             Union[Dict[str, Any], Generator[Union[Dict[str, Any], str], None, None]]:
+                 - If stream=False: Returns complete response
+                 - If stream=True: Yields response chunks as they arrive
+ 
+         Raises:
+             APIConnectionError: If the API request fails
+         """
+         # URL-encode the whole query; replacing only spaces breaks on '&', '#', '?', etc.
+         url = f"https://www.genspark.ai/api/search/stream?query={quote(prompt)}"
+ 
+         def for_stream():
+             try:
+                 with self.session.post(
+                     url,
+                     headers=self.headers,
+                     cookies=self.cookies,
+                     json={},  # Empty payload as query is in URL
+                     stream=True,
+                     timeout=self.timeout,
+                 ) as response:
+                     if not response.ok:
+                         raise exceptions.APIConnectionError(
+                             f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                         )
+ 
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line and line.startswith("data: "):
+                             try:
+                                 data = json.loads(line[6:])
+                                 if "field_name" in data and "delta" in data:
+                                     if data["field_name"].startswith("streaming_detail_answer"):
+                                         delta_text = data.get("delta", "")
+ 
+                                         # Clean up markdown links in text
+                                         delta_text = re.sub(r"\[.*?\]\(.*?\)", "", delta_text)
+ 
+                                         if raw:
+                                             yield {"text": delta_text}
+                                         else:
+                                             yield Response(delta_text)
+                             except json.JSONDecodeError:
+                                 continue
+ 
+         except Exception as e:  # cloudscraper.exceptions is a module, not a catchable exception class
+             raise exceptions.APIConnectionError(f"Request failed: {e}")
+ 
+         def for_non_stream():
+             # Accumulate the stream into one string; the original also yielded raw
+             # chunks here, which turned this helper into a generator and made the
+             # non-streaming call return a generator object instead of a response.
+             full_response = ""
+             for chunk in for_stream():
+                 full_response += chunk["text"] if raw else str(chunk)
+ 
+             if not raw:
+                 # Process the full response to clean up any JSON structures
+                 try:
+                     text_json = json.loads(full_response)
+                     if isinstance(text_json, dict) and "detailAnswer" in text_json:
+                         full_response = text_json.get("detailAnswer", full_response)
+                 except (json.JSONDecodeError, TypeError):
+                     # Not valid JSON or not a dictionary, keep as is
+                     pass
+ 
+             if raw:
+                 return {"text": full_response}
+             self.last_response = Response(full_response)
+             return self.last_response
+ 
+         return for_stream() if stream else for_non_stream()
+ 
+ 
+ if __name__ == "__main__":
+ 
+     from rich import print
+ 
+     ai = Genspark()
+     response = ai.search(input(">>> "), stream=True, raw=False)
+     for chunk in response:
+         print(chunk, end="", flush=True)
@@ -0,0 +1,282 @@
+ import requests
+ import json
+ import os
+ from uuid import uuid4
+ from typing import Any, Dict, Optional, Generator, Union
+ 
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout import LitAgent
+ 
+ class AllenAI(Provider):
+     """
+     A class to interact with the AllenAI (Ai2 Playground) API.
+     """
+ 
+     AVAILABLE_MODELS = [
+         'tulu3-405b',
+         'OLMo-2-1124-13B-Instruct',
+         'tulu-3-1-8b',
+         'Llama-3-1-Tulu-3-70B',
+         'olmoe-0125'
+     ]
+ 
+ 
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "tulu3-405b",
+         system_prompt: str = "You are a helpful AI assistant.",
+     ):
+         """Initializes the AllenAI API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+ 
+         self.url = "https://playground.allenai.org"
+         self.api_endpoint = "https://olmo-api.allen.ai/v4/message/stream"
+ 
+         # Use LitAgent for user-agent
+         self.headers = {
+             'User-Agent': LitAgent().random(),
+             'Accept': '*/*',
+             'Accept-Language': 'en-US,en;q=0.9',
+             'Origin': self.url,
+             'Referer': f"{self.url}/",
+             'Connection': 'keep-alive',
+             'Cache-Control': 'no-cache',
+             'Pragma': 'no-cache',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-site',
+             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+             'sec-ch-ua-mobile': '?0',
+             'sec-ch-ua-platform': '"Windows"'
+         }
+ 
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+         self.model = model
+         self.system_prompt = system_prompt
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         # Generate user ID if needed
+         self.x_anonymous_user_id = str(uuid4())
+         self.parent = None
+ 
+         # A tuple rather than a bare generator expression: a generator would be
+         # exhausted after the first membership test in ask()
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+ 
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+ 
+     def format_prompt(self, messages):
+         """Format messages into a prompt string"""
+         formatted = []
+         for msg in messages:
+             role = msg.get("role", "")
+             content = msg.get("content", "")
+             if role == "system":
+                 formatted.append(f"System: {content}")
+             elif role == "user":
+                 formatted.append(f"User: {content}")
+             elif role == "assistant":
+                 formatted.append(f"Assistant: {content}")
+         return "\n".join(formatted)
+ 
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         host: str = "inferd",
+         private: bool = True,
+         top_p: float = None,
+         temperature: float = None,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+ 
+         # Generate boundary for multipart form
+         boundary = f"----WebKitFormBoundary{uuid4().hex}"
+ 
+         # Set content-type header for this specific request
+         self.session.headers.update({
+             "content-type": f"multipart/form-data; boundary={boundary}",
+             "x-anonymous-user-id": self.x_anonymous_user_id
+         })
+ 
+         # Format messages for AllenAI
+         messages = [
+             {"role": "system", "content": self.system_prompt},
+             {"role": "user", "content": conversation_prompt}
+         ]
+ 
+         # Build multipart form data
+         form_data = [
+             f'--{boundary}\r\n'
+             f'Content-Disposition: form-data; name="model"\r\n\r\n{self.model}\r\n',
+ 
+             f'--{boundary}\r\n'
+             f'Content-Disposition: form-data; name="host"\r\n\r\n{host}\r\n',
+ 
+             f'--{boundary}\r\n'
+             f'Content-Disposition: form-data; name="content"\r\n\r\n{self.format_prompt(messages)}\r\n',
+ 
+             f'--{boundary}\r\n'
+             f'Content-Disposition: form-data; name="private"\r\n\r\n{str(private).lower()}\r\n'
+         ]
+ 
+         # Add parent if exists
+         if self.parent:
+             form_data.append(
+                 f'--{boundary}\r\n'
+                 f'Content-Disposition: form-data; name="parent"\r\n\r\n{self.parent}\r\n'
+             )
+ 
+         # Add optional parameters
+         if temperature is not None:
+             form_data.append(
+                 f'--{boundary}\r\n'
+                 f'Content-Disposition: form-data; name="temperature"\r\n\r\n{temperature}\r\n'
+             )
+ 
+         if top_p is not None:
+             form_data.append(
+                 f'--{boundary}\r\n'
+                 f'Content-Disposition: form-data; name="top_p"\r\n\r\n{top_p}\r\n'
+             )
+ 
+         form_data.append(f'--{boundary}--\r\n')
+         data = "".join(form_data).encode()
+ 
+         def for_stream():
+             try:
+                 response = self.session.post(
+                     self.api_endpoint,
+                     data=data,
+                     stream=True,
+                     timeout=self.timeout
+                 )
+ 
+                 if response.status_code != 200:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Request failed with status code {response.status_code}: {response.text}"
+                     )
+ 
+                 streaming_text = ""
+                 current_parent = None
+ 
+                 # iter_lines keeps each JSON object intact; raw 1024-byte chunks
+                 # could split a line across reads and silently drop events
+                 for line in response.iter_lines(decode_unicode=True):
+                     if not line:
+                         continue
+                     line = line.strip()
+ 
+                     # Parse into `event` so the multipart request body held in
+                     # `data` is not shadowed (the original rebound it via nonlocal)
+                     try:
+                         event = json.loads(line)
+                     except json.JSONDecodeError:
+                         continue
+ 
+                     if isinstance(event, dict):
+                         # Update the parent ID
+                         if event.get("children"):
+                             for child in event["children"]:
+                                 if child.get("role") == "assistant":
+                                     current_parent = child.get("id")
+                                     break
+ 
+                         # Process content
+                         if "message" in event and event.get("content"):
+                             content = event["content"]
+                             if content.strip():
+                                 streaming_text += content
+                                 resp = dict(text=content)
+                                 yield resp  # the original's raw/non-raw branches were identical
+ 
+                         # Handle completion
+                         if event.get("final") or event.get("finish_reason") == "stop":
+                             if current_parent:
+                                 self.parent = current_parent
+ 
+                             # Update conversation history
+                             self.conversation.update_chat_history(prompt, streaming_text)
+                             self.last_response = {"text": streaming_text}
+                             return
+ 
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+ 
+         def for_non_stream():
+             streaming_text = ""
+             for resp in for_stream():
+                 streaming_text += resp["text"]
+             self.last_response = {"text": streaming_text}
+             return self.last_response
+ 
+         return for_stream() if stream else for_non_stream()
+ 
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+             )
+         return for_stream() if stream else for_non_stream()
+ 
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+ 
+ if __name__ == "__main__":
+     from rich import print
+     ai = AllenAI(timeout=5000)
+     response = ai.chat("write a poem about AI", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
@@ -9,14 +9,48 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
  from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
  from webscout import LitAgent
- from webscout.Litlogger import Logger, LogFormat, ConsoleHandler
- from webscout.Litlogger.core.level import LogLevel
  
  class DeepInfra(Provider):
      """
-     A class to interact with the DeepInfra API with logging and LitAgent user-agent.
+     A class to interact with the DeepInfra API with LitAgent user-agent.
      """
  
+     AVAILABLE_MODELS = [
+         "deepseek-ai/DeepSeek-R1-Turbo",
+         "deepseek-ai/DeepSeek-R1",
+         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+         "deepseek-ai/DeepSeek-V3",
+         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+         "mistralai/Mistral-Small-24B-Instruct-2501",
+         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+         "microsoft/phi-4",
+         "meta-llama/Meta-Llama-3.1-70B-Instruct",
+         "meta-llama/Meta-Llama-3.1-8B-Instruct",
+         "meta-llama/Meta-Llama-3.1-405B-Instruct",
+         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+         "Qwen/Qwen2.5-Coder-32B-Instruct",
+         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+         "Qwen/Qwen2.5-72B-Instruct",
+         "meta-llama/Llama-3.2-90B-Vision-Instruct",
+         "meta-llama/Llama-3.2-11B-Vision-Instruct",
+         "Gryphe/MythoMax-L2-13b",
+         "NousResearch/Hermes-3-Llama-3.1-405B",
+         "NovaSky-AI/Sky-T1-32B-Preview",
+         "Qwen/Qwen2.5-7B-Instruct",
+         "Sao10K/L3.1-70B-Euryale-v2.2",
+         "Sao10K/L3.3-70B-Euryale-v2.3",
+         "google/gemma-2-27b-it",
+         "google/gemma-2-9b-it",
+         "meta-llama/Llama-3.2-1B-Instruct",
+         "meta-llama/Llama-3.2-3B-Instruct",
+         "meta-llama/Meta-Llama-3-70B-Instruct",
+         "meta-llama/Meta-Llama-3-8B-Instruct",
+         "mistralai/Mistral-Nemo-Instruct-2407",
+         "mistralai/Mistral-7B-Instruct-v0.3",
+         "mistralai/Mixtral-8x7B-Instruct-v0.1"
+     ]
+ 
      def __init__(
          self,
          is_conversation: bool = True,
@@ -28,10 +62,12 @@ class DeepInfra(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "Qwen/Qwen2.5-72B-Instruct",
-         logging: bool = False
+         model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo"  # Updated default model
      ):
-         """Initializes the DeepInfra API client with logging support."""
+         """Initializes the DeepInfra API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+ 
          self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
          # Use LitAgent for user-agent instead of hardcoded string.
          self.headers = {
@@ -80,21 +116,6 @@ class DeepInfra(Provider):
          )
          self.conversation.history_offset = history_offset
  
-         # Initialize logger with proper configuration
-         if logging:
-             console_handler = ConsoleHandler(
-                 level=LogLevel.DEBUG,
-             )
- 
-             self.logger = Logger(
-                 name="DeepInfra",
-                 level=LogLevel.DEBUG,
-                 handlers=[console_handler]
-             )
-             self.logger.info("DeepInfra initialized successfully ✨")
-         else:
-             self.logger = None
- 
      def ask(
          self,
          prompt: str,
@@ -103,20 +124,13 @@ class DeepInfra(Provider):
          optimizer: str = None,
          conversationally: bool = False,
      ) -> Union[Dict[str, Any], Generator]:
-         if self.logger:
-             self.logger.debug(f"Processing request - Stream: {stream}, Optimizer: {optimizer}")
- 
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
                  conversation_prompt = getattr(Optimizers, optimizer)(
                      conversation_prompt if conversationally else prompt
                  )
-                 if self.logger:
-                     self.logger.info(f"Applied optimizer: {optimizer} 🔧")
              else:
-                 if self.logger:
-                     self.logger.error(f"Invalid optimizer requested: {optimizer}")
                  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
  
          # Payload construction
@@ -129,17 +143,10 @@ class DeepInfra(Provider):
              "stream": stream
          }
  
-         if self.logger:
-             self.logger.debug(f"Sending request to model: {self.model} 🚀")
- 
          def for_stream():
-             if self.logger:
-                 self.logger.info("Starting stream processing ⚡")
              try:
                  with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
                      if response.status_code != 200:
-                         if self.logger:
-                             self.logger.error(f"Request failed with status {response.status_code} ❌")
                          raise exceptions.FailedToGenerateResponseError(
                              f"Request failed with status code {response.status_code}"
                          )
@@ -151,8 +158,6 @@ class DeepInfra(Provider):
                          if line.startswith("data: "):
                              json_str = line[6:]
                              if json_str == "[DONE]":
-                                 if self.logger:
-                                     self.logger.info("Stream completed successfully ✅")
                                  break
                              try:
                                  json_data = json.loads(json_str)
@@ -164,20 +169,14 @@ class DeepInfra(Provider):
                                          resp = dict(text=content)
                                          yield resp if raw else resp
                              except json.JSONDecodeError:
-                                 if self.logger:
-                                     self.logger.error("Failed to decode JSON response 🔥")
                                  continue
  
                      self.conversation.update_chat_history(prompt, streaming_text)
  
              except requests.RequestException as e:
-                 if self.logger:
-                     self.logger.error(f"Request failed: {str(e)} 🔥")
                  raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
  
          def for_non_stream():
-             if self.logger:
-                 self.logger.debug("Processing non-stream request")
              for _ in for_stream():
                  pass
              return self.last_response
@@ -206,7 +205,7 @@ class DeepInfra(Provider):
  
  if __name__ == "__main__":
      from rich import print
-     ai = DeepInfra(timeout=5000, logging=True)
+     ai = DeepInfra(timeout=5000)
      response = ai.chat("write a poem about AI", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)