webscout 7.3-py3-none-any.whl → 7.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (62)
  1. webscout/Provider/AISEARCH/__init__.py +4 -3
  2. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  3. webscout/Provider/AllenAI.py +282 -0
  4. webscout/Provider/C4ai.py +414 -0
  5. webscout/Provider/Cloudflare.py +18 -21
  6. webscout/Provider/DeepSeek.py +3 -32
  7. webscout/Provider/Deepinfra.py +52 -44
  8. webscout/Provider/ElectronHub.py +634 -0
  9. webscout/Provider/GithubChat.py +362 -0
  10. webscout/Provider/Glider.py +7 -41
  11. webscout/Provider/HeckAI.py +217 -0
  12. webscout/Provider/HuggingFaceChat.py +462 -0
  13. webscout/Provider/Jadve.py +49 -63
  14. webscout/Provider/Marcus.py +7 -50
  15. webscout/Provider/Netwrck.py +6 -53
  16. webscout/Provider/PI.py +106 -93
  17. webscout/Provider/Perplexitylabs.py +395 -0
  18. webscout/Provider/Phind.py +29 -3
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  22. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  23. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +3 -2
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/Youchat.py +1 -1
  32. webscout/Provider/__init__.py +25 -2
  33. webscout/Provider/akashgpt.py +315 -0
  34. webscout/Provider/chatglm.py +5 -5
  35. webscout/Provider/copilot.py +416 -0
  36. webscout/Provider/flowith.py +181 -0
  37. webscout/Provider/freeaichat.py +251 -221
  38. webscout/Provider/granite.py +17 -53
  39. webscout/Provider/koala.py +9 -1
  40. webscout/Provider/llamatutor.py +6 -46
  41. webscout/Provider/llmchat.py +7 -46
  42. webscout/Provider/multichat.py +29 -91
  43. webscout/Provider/yep.py +4 -24
  44. webscout/exceptions.py +19 -9
  45. webscout/update_checker.py +55 -93
  46. webscout/version.py +1 -1
  47. webscout-7.5.dist-info/LICENSE.md +146 -0
  48. {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
  49. {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
  50. webscout/Local/__init__.py +0 -10
  51. webscout/Local/_version.py +0 -3
  52. webscout/Local/formats.py +0 -747
  53. webscout/Local/model.py +0 -1368
  54. webscout/Local/samplers.py +0 -125
  55. webscout/Local/thread.py +0 -539
  56. webscout/Local/ui.py +0 -401
  57. webscout/Local/utils.py +0 -388
  58. webscout/Provider/dgaf.py +0 -214
  59. webscout-7.3.dist-info/LICENSE.md +0 -211
  60. {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  61. {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  62. {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
webscout/Provider/Jadve.py

@@ -1,4 +1,3 @@
-
 import requests
 import json
 import re
@@ -8,15 +7,13 @@ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
-from webscout.Litlogger import Logger, LogFormat

 class JadveOpenAI(Provider):
     """
     A class to interact with the OpenAI API through jadve.com using the streaming endpoint.
-    Includes optional logging capabilities.
     """

-    AVAILABLE_MODELS = ["gpt-4o", "gpt-4o-mini"]
+    AVAILABLE_MODELS = ["gpt-4o", "gpt-4o-mini", "claude-3-7-sonnet-20250219", "claude-3-5-sonnet-20240620", "o1-mini", "deepseek-chat", "o1-mini", "claude-3-5-haiku-20241022"]

     def __init__(
         self,
@@ -29,12 +26,11 @@ class JadveOpenAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "gpt-4o-mini",
-        system_prompt: str = "You are a helpful AI assistant.",
-        logging: bool = False
+        model: str = "claude-3-7-sonnet-20250219",
+        system_prompt: str = "You are a helpful AI assistant."
     ):
         """
-        Initializes the JadveOpenAI client with optional logging support.
+        Initializes the JadveOpenAI client.

         Args:
             is_conversation (bool, optional): Enable conversational mode. Defaults to True.
@@ -48,24 +44,13 @@ class JadveOpenAI(Provider):
             act (str|int, optional): Act key for AwesomePrompts. Defaults to None.
             model (str, optional): AI model to be used. Defaults to "gpt-4o-mini".
             system_prompt (str, optional): System prompt text. Defaults to "You are a helpful AI assistant."
-            logging (bool, optional): Enable logging functionality. Defaults to False.
         """
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

-        self.logger = Logger(
-            name="JadveOpenAI",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info(f"Initializing JadveOpenAI with model: {model}")
-
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        # Streaming endpoint for jadve.com
         self.api_endpoint = "https://openai.jadve.com/stream"
         self.stream_chunk_size = 64
         self.timeout = timeout
@@ -73,17 +58,17 @@ class JadveOpenAI(Provider):
         self.model = model
         self.system_prompt = system_prompt

-        # Updated headers with required x-authorization header.
+        # Headers for API requests
         self.headers = {
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": "en",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "content-type": "application/json",
             "dnt": "1",
             "origin": "https://jadve.com",
             "priority": "u=1, i",
             "referer": "https://jadve.com/",
-            "sec-ch-ua": '"Microsoft Edge";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
+            "sec-ch-ua": '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"',
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
@@ -113,9 +98,6 @@ class JadveOpenAI(Provider):
         )
         self.conversation.history_offset = history_offset

-        if self.logger:
-            self.logger.info("JadveOpenAI initialized successfully.")
-
     def ask(
         self,
         prompt: str,
@@ -136,21 +118,13 @@ class JadveOpenAI(Provider):
         Returns:
             dict or generator: A dictionary with the generated text or a generator yielding text chunks.
         """
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
-
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
-                if self.logger:
-                    self.logger.debug(f"Applied optimizer: {optimizer}")
            else:
-                if self.logger:
-                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                raise Exception(
                    f"Optimizer is not one of {list(self.__available_optimizers)}"
                )
@@ -169,43 +143,59 @@ class JadveOpenAI(Provider):
        }

        def for_stream():
-            if self.logger:
-                self.logger.debug("Initiating streaming request to API")
            response = self.session.post(
                self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
            )

            if not response.ok:
-                if self.logger:
-                    self.logger.error(f"API request failed. Status: {response.status_code}, Reason: {response.reason}")
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

-            if self.logger:
-                self.logger.info(f"API connection established successfully. Status: {response.status_code}")
-
-            # Read the entire response text.
-            response_text = response.text
+            # Pattern to match the streaming chunks format: 0:"text"
            pattern = r'0:"(.*?)"'
-            chunks = re.findall(pattern, response_text)
-            streaming_text = ""
-            for content in chunks:
-                streaming_text += content
-
-                yield content if raw else dict(text=content)
-
-            self.last_response.update(dict(text=streaming_text))
+            full_response_text = ""
+
+            # Process the response as it comes in
+            buffer = ""
+
+            for line in response.iter_lines(decode_unicode=True):
+                if not line:
+                    continue
+
+                buffer += line
+
+                # Try to match chunks in the current buffer
+                matches = re.findall(pattern, buffer)
+                if matches:
+                    for chunk in matches:
+                        full_response_text += chunk
+                        # Return the current chunk
+                        yield chunk if raw else dict(text=chunk)
+
+                    # Remove matched parts from the buffer
+                    matched_parts = [f'0:"{match}"' for match in matches]
+                    for part in matched_parts:
+                        buffer = buffer.replace(part, '', 1)
+
+                # Check if we've reached the end of the response
+                if 'e:' in line or 'd:' in line:
+                    # No need to process usage data without logging
+                    break
+
+            self.last_response.update(dict(text=full_response_text))
            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))

-            if self.logger:
-                self.logger.debug("Response processing completed.")
-
        def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
-            for _ in for_stream():
-                pass
+            # For non-streaming requests, we collect all chunks and return the complete response
+            collected_text = ""
+            for chunk in for_stream():
+                if raw:
+                    collected_text += chunk
+                else:
+                    collected_text += chunk.get("text", "")
+
+            self.last_response = {"text": collected_text}
            return self.last_response

        return for_stream() if stream else for_non_stream()
@@ -228,9 +218,6 @@ class JadveOpenAI(Provider):
        Returns:
            str or generator: Generated response string or generator yielding response chunks.
        """
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
-
        def for_stream():
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
@@ -258,8 +245,7 @@ class JadveOpenAI(Provider):

 if __name__ == "__main__":
     from rich import print
-    ai = JadveOpenAI(timeout=5000, logging=False)
-    # For streaming response demonstration.
-    response = ai.chat("yo what's up", stream=True)
+    ai = JadveOpenAI(timeout=5000)
+    response = ai.chat("Who made u?", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
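
The rewritten for_stream above switches Jadve parsing from a single re.findall over the fully buffered response body to incremental matching against a growing line buffer, so text chunks are yielded as they arrive instead of after the request completes. A minimal standalone sketch of that buffering technique follows; the sample wire data is illustrative, not captured from jadve.com:

import re

# Illustrative sample of the wire format the parser targets: each line can
# carry zero or more 0:"..." text chunks, and an e:/d: line ends the stream.
sample_lines = ['0:"Hel"0:"lo"', '0:" world"', 'd:{"finishReason":"stop"}']

CHUNK_PATTERN = r'0:"(.*?)"'

def parse_stream(lines):
    """Yield text chunks as they are matched, mirroring the buffered approach."""
    buffer = ""
    for line in lines:
        if not line:
            continue
        buffer += line
        matches = re.findall(CHUNK_PATTERN, buffer)
        for chunk in matches:
            yield chunk
        # Drop consumed chunks so already-yielded text is not matched again
        for match in matches:
            buffer = buffer.replace(f'0:"{match}"', '', 1)
        if 'e:' in line or 'd:' in line:  # end-of-stream markers
            break

print("".join(parse_stream(sample_lines)))  # -> "Hello world"

Consuming matched chunks out of the buffer with replace(..., 1) is what keeps each match from being re-emitted on the next iteration once more data is appended.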
webscout/Provider/Marcus.py

@@ -7,13 +7,11 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
-from webscout import LitAgent as Lit

 class Marcus(Provider):
     """
     This class provides methods for interacting with the AskMarcus API.
-    Improved to match webscout provider standards with comprehensive logging.
+    Improved to match webscout provider standards.
     """

     def __init__(
@@ -26,18 +24,9 @@ class Marcus(Provider):
         update_file: bool = True,
         proxies: dict = {},
         history_offset: int = 10250,
-        act: str = None,
-        logging: bool = False
+        act: str = None
     ):
-        """Initializes the Marcus API with logging capabilities."""
-        self.logger = Logger(
-            name="Marcus",
-            format=LogFormat.MODERN_EMOJI,
-        ) if logging else None
-
-        if self.logger:
-            self.logger.info("Initializing Marcus API")
-
+        """Initializes the Marcus API."""
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
@@ -50,7 +39,7 @@ class Marcus(Provider):
            'accept': '*/*',
            'origin': 'https://www.askmarcus.app',
            'referer': 'https://www.askmarcus.app/chat',
-            'user-agent': Lit().random(),
+            'user-agent': 'Mozilla/5.0',
        }

        self.__available_optimizers = (
@@ -73,9 +62,6 @@ class Marcus(Provider):
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

-        if self.logger:
-            self.logger.info("Marcus API initialized successfully")
-
    def ask(
        self,
        prompt: str,
@@ -84,22 +70,14 @@ class Marcus(Provider):
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator[str, None, None]:
-        """Sends a prompt to the AskMarcus API and returns the response with logging."""
-        if self.logger:
-            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
-
+        """Sends a prompt to the AskMarcus API and returns the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
-                if self.logger:
-                    self.logger.debug(f"Applied optimizer: {optimizer}")
            else:
-                if self.logger:
-                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )
@@ -108,9 +86,6 @@ class Marcus(Provider):

        def for_stream():
            try:
-                if self.logger:
-                    self.logger.debug("Initiating streaming request to API")
-
                with requests.post(
                    self.api_endpoint,
                    headers=self.headers,
@@ -119,35 +94,21 @@ class Marcus(Provider):
                    timeout=self.timeout
                ) as response:
                    response.raise_for_status()
-
-                    if self.logger:
-                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")
-
                    for line in response.iter_lines():
                        if line:
                            yield line.decode('utf-8')
-
                    self.conversation.update_chat_history(
                        prompt, self.get_message(self.last_response)
                    )

            except requests.exceptions.RequestException as e:
-                if self.logger:
-                    self.logger.error(f"API request failed: {str(e)}")
                raise exceptions.ProviderConnectionError(f"Error connecting to Marcus: {str(e)}")

        def for_non_stream():
-            if self.logger:
-                self.logger.debug("Processing non-streaming request")
-
            full_response = ""
            for line in for_stream():
                full_response += line
            self.last_response = {"text": full_response}
-
-            if self.logger:
-                self.logger.debug("Response processing completed")
-
            return self.last_response

        return for_stream() if stream else for_non_stream()
@@ -159,10 +120,7 @@ class Marcus(Provider):
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
-        """Generates a response from the AskMarcus API with logging."""
-        if self.logger:
-            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
-
+        """Generates a response from the AskMarcus API."""
        def for_stream():
            for response_chunk in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
@@ -184,8 +142,7 @@ class Marcus(Provider):

 if __name__ == "__main__":
     from rich import print
-    # Enable logging for testing
-    ai = Marcus(logging=True)
+    ai = Marcus()
     response = ai.chat(input(">>> "), stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
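
As in Jadve.py, the logging constructor flag and the Litlogger integration are removed from Marcus. A caller that relied on logging=True could approximate the old diagnostics externally with the standard library; a hedged sketch, with the import path assumed from the file list above:

import logging

# Hypothetical replacement for the removed `logging=True` flag: wrap calls in
# standard-library logging instead of the provider's old Litlogger integration.
logging.basicConfig(level=logging.DEBUG, format="%(name)s - %(levelname)s - %(message)s")
log = logging.getLogger("Marcus")

from webscout.Provider.Marcus import Marcus  # import path assumed from this diff

ai = Marcus()
prompt = "Hello"
log.debug("Sending prompt: %r", prompt)
reply = ai.chat(prompt)  # non-streaming chat() returns a str per the signature above
log.debug("Received %d characters", len(reply))
print(reply)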
webscout/Provider/Netwrck.py

@@ -8,7 +8,6 @@ from datetime import date
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
 from webscout.litagent import LitAgent

 class Netwrck(Provider):
@@ -20,7 +19,7 @@ class Netwrck(Provider):
     AVAILABLE_MODELS = {
         "lumimaid": "neversleep/llama-3-lumimaid-8b:extended",
         "grok": "x-ai/grok-2",
-        "claude": "anthropic/claude-3.5-sonnet:beta",
+        "claude": "anthropic/claude-3-7-sonnet-20250219",
         "euryale": "sao10k/l3-euryale-70b",
         "gpt4mini": "openai/gpt-4o-mini",
         "mythomax": "gryphe/mythomax-l2-13b",
@@ -44,22 +43,11 @@ class Netwrck(Provider):
         act: Optional[str] = None,
         system_prompt: str = "You are a helpful assistant.",
         temperature: float = 0.7,
-        top_p: float = 0.8,
-        logging: bool = False
+        top_p: float = 0.8
     ):
         """Initializes the Netwrck API client."""
-        # Initialize logger first for initialization logging
-        self.logger = Logger(
-            name="Netwrck",
-            format=LogFormat.MODERN_EMOJI,
-
-        ) if logging else None
-
        if model not in self.AVAILABLE_MODELS:
-            error_msg = f"Invalid model: {model}. Choose from: {list(self.AVAILABLE_MODELS.keys())}"
-            if self.logger:
-                self.logger.error(error_msg)
-            raise ValueError(error_msg)
+            raise ValueError(f"Invalid model: {model}. Choose from: {list(self.AVAILABLE_MODELS.keys())}")

        self.model = model
        self.model_name = self.AVAILABLE_MODELS[model]
@@ -99,9 +87,6 @@ class Netwrck(Provider):
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

-        if self.logger:
-            self.logger.info(f"Initialized Netwrck with model: {self.model_name}")
-
    def ask(
        self,
        prompt: str,
@@ -112,18 +97,13 @@ class Netwrck(Provider):
    ) -> Union[Dict[str, Any], Generator]:
        """Sends a prompt to the Netwrck API and returns the response."""
        if optimizer and optimizer not in self.__available_optimizers:
-            error_msg = f"Optimizer is not one of {self.__available_optimizers}"
-            if self.logger:
-                self.logger.error(f"Invalid optimizer requested: {optimizer}")
-            raise exceptions.FailedToGenerateResponseError(error_msg)
+            raise exceptions.FailedToGenerateResponseError(f"Optimizer is not one of {self.__available_optimizers}")

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            conversation_prompt = getattr(Optimizers, optimizer)(
                conversation_prompt if conversationally else prompt
            )
-            if self.logger:
-                self.logger.debug(f"Applied optimizer: {optimizer}")

        payload = {
            "query": prompt,
@@ -133,9 +113,6 @@ class Netwrck(Provider):
            "greeting": self.greeting
        }

-        if self.logger:
-            self.logger.debug(f"Sending request to Netwrck API [stream={stream}]")
-
        def for_stream():
            try:
                response = self.session.post(
@@ -158,12 +135,8 @@ class Netwrck(Provider):
                self.conversation.update_chat_history(payload["query"], streaming_text)

            except requests.exceptions.RequestException as e:
-                if self.logger:
-                    self.logger.error(f"Network error: {str(e)}")
                raise exceptions.ProviderConnectionError(f"Network error: {str(e)}") from e
            except Exception as e:
-                if self.logger:
-                    self.logger.error(f"Unexpected error: {str(e)}")
                raise exceptions.ProviderConnectionError(f"Unexpected error: {str(e)}") from e

        def for_non_stream():
@@ -177,9 +150,6 @@ class Netwrck(Provider):
                )
                response.raise_for_status()

-                if self.logger:
-                    self.logger.debug(f"Response status: {response.status_code}")
-
                text = response.text.strip('"')
                self.last_response = {"text": text}
                self.conversation.update_chat_history(prompt, text)
@@ -187,12 +157,8 @@ class Netwrck(Provider):
                return self.last_response

            except requests.exceptions.RequestException as e:
-                if self.logger:
-                    self.logger.error(f"Network error: {str(e)}")
                raise exceptions.FailedToGenerateResponseError(f"Network error: {str(e)}") from e
            except Exception as e:
-                if self.logger:
-                    self.logger.error(f"Unexpected error: {str(e)}")
                raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}") from e

        return for_stream() if stream else for_non_stream()
@@ -205,9 +171,6 @@ class Netwrck(Provider):
        conversationally: bool = False,
    ) -> str:
        """Generates a response from the Netwrck API."""
-        if self.logger:
-            self.logger.debug(f"Processing chat request [stream={stream}]")
-
        def for_stream():
            for response in self.ask(
                prompt,
@@ -237,15 +200,5 @@ class Netwrck(Provider):
 if __name__ == "__main__":
     from rich import print

-    # Example with logging enabled
-    netwrck = Netwrck(model="claude", logging=False)
-
-    print("Non-Streaming Response:")
-    response = netwrck.chat("Tell me about Russia")
-    print(response)
-
-    print("\nStreaming Response:")
-    response = netwrck.chat("Tell me about India", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
-    print()
+    netwrck = Netwrck(model="claude")
+    print(netwrck.chat("Hello! How are you?"))
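
The trimmed demo block above keeps only a non-streaming call. The streaming pattern shown in the removed lines still applies unchanged; a short sketch for reference, with the import path assumed from this diff and the model alias "claude" taken from AVAILABLE_MODELS:

from webscout.Provider.Netwrck import Netwrck  # import path assumed from this diff

# chat(..., stream=True) yields chunks, exactly as the removed example demonstrated
netwrck = Netwrck(model="claude")
for chunk in netwrck.chat("Tell me about India", stream=True):
    print(chunk, end="", flush=True)
print()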