webscout-7.3-py3-none-any.whl → webscout-7.5-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as published in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (62)
  1. webscout/Provider/AISEARCH/__init__.py +4 -3
  2. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  3. webscout/Provider/AllenAI.py +282 -0
  4. webscout/Provider/C4ai.py +414 -0
  5. webscout/Provider/Cloudflare.py +18 -21
  6. webscout/Provider/DeepSeek.py +3 -32
  7. webscout/Provider/Deepinfra.py +52 -44
  8. webscout/Provider/ElectronHub.py +634 -0
  9. webscout/Provider/GithubChat.py +362 -0
  10. webscout/Provider/Glider.py +7 -41
  11. webscout/Provider/HeckAI.py +217 -0
  12. webscout/Provider/HuggingFaceChat.py +462 -0
  13. webscout/Provider/Jadve.py +49 -63
  14. webscout/Provider/Marcus.py +7 -50
  15. webscout/Provider/Netwrck.py +6 -53
  16. webscout/Provider/PI.py +106 -93
  17. webscout/Provider/Perplexitylabs.py +395 -0
  18. webscout/Provider/Phind.py +29 -3
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  22. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  23. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +3 -2
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/Youchat.py +1 -1
  32. webscout/Provider/__init__.py +25 -2
  33. webscout/Provider/akashgpt.py +315 -0
  34. webscout/Provider/chatglm.py +5 -5
  35. webscout/Provider/copilot.py +416 -0
  36. webscout/Provider/flowith.py +181 -0
  37. webscout/Provider/freeaichat.py +251 -221
  38. webscout/Provider/granite.py +17 -53
  39. webscout/Provider/koala.py +9 -1
  40. webscout/Provider/llamatutor.py +6 -46
  41. webscout/Provider/llmchat.py +7 -46
  42. webscout/Provider/multichat.py +29 -91
  43. webscout/Provider/yep.py +4 -24
  44. webscout/exceptions.py +19 -9
  45. webscout/update_checker.py +55 -93
  46. webscout/version.py +1 -1
  47. webscout-7.5.dist-info/LICENSE.md +146 -0
  48. {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
  49. {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
  50. webscout/Local/__init__.py +0 -10
  51. webscout/Local/_version.py +0 -3
  52. webscout/Local/formats.py +0 -747
  53. webscout/Local/model.py +0 -1368
  54. webscout/Local/samplers.py +0 -125
  55. webscout/Local/thread.py +0 -539
  56. webscout/Local/ui.py +0 -401
  57. webscout/Local/utils.py +0 -388
  58. webscout/Provider/dgaf.py +0 -214
  59. webscout-7.3.dist-info/LICENSE.md +0 -211
  60. {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  61. {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  62. {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
webscout/Provider/freeaichat.py
@@ -1,221 +1,251 @@
- import requests
- import json
- import time
- from typing import Any, Dict, Optional, Generator, Union
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from webscout import LitAgent
- from webscout.Litlogger import Logger, LogFormat
-
- class FreeAIChat(Provider):
- """
- A class to interact with the FreeAIChat API with logging and LitAgent user-agent.
- """
-
- AVAILABLE_MODELS = [
- "mistral-nemo",
- "mistral-large",
- "llama3.1-70b-fast",
- "gemini-2.0-flash",
- "gemini-1.5-pro",
- "gemini-1.5-flash",
- "gemini-2.0-pro-exp-02-05",
- "deepseek-r1",
- "deepseek-v3",
- "Deepseek r1 14B",
- "Deepseek r1 32B",
- "o3-mini-high",
- "o3-mini-medium",
- "o3-mini-low",
- "o3-mini",
- "GPT-4o-mini",
- "o1",
- "o1-mini",
- "GPT-4o"
- ]
-
- def __init__(
- self,
- is_conversation: bool = True,
- max_tokens: int = 2049,
- timeout: int = 30,
- intro: str = None,
- filepath: str = None,
- update_file: bool = True,
- proxies: dict = {},
- history_offset: int = 10250,
- act: str = None,
- model: str = "GPT-4o",
- system_prompt: str = "You are a helpful AI assistant.",
- logging: bool = False
- ):
- """Initializes the FreeAIChat API client with logging support."""
- if model not in self.AVAILABLE_MODELS:
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
- self.url = "https://freeaichatplayground.com/api/v1/chat/completions"
- self.headers = {
- 'User-Agent': LitAgent().random(),
- 'Accept': '*/*',
- 'Content-Type': 'application/json',
- 'Origin': 'https://freeaichatplayground.com',
- 'Referer': 'https://freeaichatplayground.com/',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin'
- }
- self.session = requests.Session()
- self.session.headers.update(self.headers)
- self.session.proxies.update(proxies)
-
- self.is_conversation = is_conversation
- self.max_tokens_to_sample = max_tokens
- self.timeout = timeout
- self.last_response = {}
- self.model = model
- self.system_prompt = system_prompt
-
- self.__available_optimizers = (
- method
- for method in dir(Optimizers)
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
- )
- Conversation.intro = (
- AwesomePrompts().get_act(
- act, raise_not_found=True, default=None, case_insensitive=True
- )
- if act
- else intro or Conversation.intro
- )
-
- self.conversation = Conversation(
- is_conversation, self.max_tokens_to_sample, filepath, update_file
- )
- self.conversation.history_offset = history_offset
-
- self.logger = Logger(
- name="FreeAIChat",
- format=LogFormat.MODERN_EMOJI,
- ) if logging else None
-
- if self.logger:
- self.logger.info(f"FreeAIChat initialized successfully with model: {model}")
-
- def ask(
- self,
- prompt: str,
- stream: bool = False,
- raw: bool = False,
- optimizer: str = None,
- conversationally: bool = False,
- ) -> Union[Dict[str, Any], Generator]:
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
- if optimizer:
- if optimizer in self.__available_optimizers:
- conversation_prompt = getattr(Optimizers, optimizer)(
- conversation_prompt if conversationally else prompt
- )
- if self.logger:
- self.logger.debug(f"Applied optimizer: {optimizer}")
- else:
- if self.logger:
- self.logger.error(f"Invalid optimizer requested: {optimizer}")
- raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
- messages = [
- {
- "role": "system",
- "content": self.system_prompt
- },
- {
- "role": "user",
- "content": conversation_prompt
- }
- ]
-
- payload = {
- "model": self.model,
- "messages": messages
- }
-
- def for_stream():
- if self.logger:
- self.logger.debug("Sending streaming request to FreeAIChat API...")
- try:
- with requests.post(self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
- if response.status_code != 200:
- if self.logger:
- self.logger.error(f"Request failed with status code {response.status_code}")
- raise exceptions.FailedToGenerateResponseError(
- f"Request failed with status code {response.status_code}"
- )
-
- streaming_text = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- line = line.strip()
- if line.startswith("data: "):
- json_str = line[6:] # Remove "data: " prefix
- if json_str == "[DONE]":
- break
- try:
- json_data = json.loads(json_str)
- if 'choices' in json_data:
- choice = json_data['choices'][0]
- if 'delta' in choice and 'content' in choice['delta']:
- content = choice['delta']['content']
- streaming_text += content
- resp = dict(text=content)
- yield resp if raw else resp
- except json.JSONDecodeError:
- if self.logger:
- self.logger.error("JSON decode error in streaming data")
- pass
-
- self.conversation.update_chat_history(prompt, streaming_text)
- if self.logger:
- self.logger.info("Streaming response completed successfully")
-
- except requests.RequestException as e:
- if self.logger:
- self.logger.error(f"Request failed: {e}")
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
-
- def for_non_stream():
- for _ in for_stream():
- pass
- return self.last_response
-
- return for_stream() if stream else for_non_stream()
-
- def chat(
- self,
- prompt: str,
- stream: bool = False,
- optimizer: str = None,
- conversationally: bool = False,
- ) -> str:
- def for_stream():
- for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
- yield self.get_message(response)
-
- def for_non_stream():
- return self.get_message(
- self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
- )
-
- return for_stream() if stream else for_non_stream()
-
- def get_message(self, response: dict) -> str:
- assert isinstance(response, dict), "Response should be of dict data-type only"
- return response["text"]
-
- if __name__ == "__main__":
- from rich import print
- ai = FreeAIChat(model="GPT-4o", logging=True)
- response = ai.chat("Write a hello world program in Python", stream=True)
- for chunk in response:
- print(chunk, end="", flush=True)
+ import requests
+ import json
+ import time
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout import LitAgent
+ from webscout.Litlogger import Logger, LogFormat
+
+ class FreeAIChat(Provider):
+ """
+ A class to interact with the FreeAIChat API with logging and LitAgent user-agent.
+ """
+
+ AVAILABLE_MODELS = [
+ "mistral-nemo",
+ "mistral-large",
+ "gemini-2.0-flash",
+ "gemini-1.5-pro",
+ "gemini-1.5-flash",
+ "gemini-2.0-pro-exp-02-05",
+ "deepseek-r1",
+ "deepseek-v3",
+ "Deepseek r1 14B",
+ "Deepseek r1 32B",
+ "o3-mini-high",
+ "o3-mini-medium",
+ "o3-mini-low",
+ "o3-mini",
+ "GPT-4o-mini",
+ "o1",
+ "o1-mini",
+ "GPT-4o",
+ "Qwen coder",
+ "Qwen 2.5 72B",
+ "Llama 3.1 405B",
+ "llama3.1-70b-fast",
+ "Llama 3.3 70B",
+ "claude 3.5 haiku",
+ "claude 3.5 sonnet",
+ ]
+
+ def __init__(
+ self,
+ is_conversation: bool = True,
+ max_tokens: int = 2049,
+ timeout: int = 30,
+ intro: str = None,
+ filepath: str = None,
+ update_file: bool = True,
+ proxies: dict = {},
+ history_offset: int = 10250,
+ act: str = None,
+ model: str = "GPT-4o",
+ system_prompt: str = "You are a helpful AI assistant.",
+ logging: bool = False
+ ):
+ """Initializes the FreeAIChat API client with logging support."""
+ if model not in self.AVAILABLE_MODELS:
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+ self.url = "https://freeaichatplayground.com/api/v1/chat/completions"
+ self.headers = {
+ 'User-Agent': LitAgent().random(),
+ 'Accept': '*/*',
+ 'Content-Type': 'application/json',
+ 'Origin': 'https://freeaichatplayground.com',
+ 'Referer': 'https://freeaichatplayground.com/',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin'
+ }
+ self.session = requests.Session()
+ self.session.headers.update(self.headers)
+ self.session.proxies.update(proxies)
+
+ self.is_conversation = is_conversation
+ self.max_tokens_to_sample = max_tokens
+ self.timeout = timeout
+ self.last_response = {}
+ self.model = model
+ self.system_prompt = system_prompt
+
+ self.__available_optimizers = (
+ method
+ for method in dir(Optimizers)
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
+ )
+ Conversation.intro = (
+ AwesomePrompts().get_act(
+ act, raise_not_found=True, default=None, case_insensitive=True
+ )
+ if act
+ else intro or Conversation.intro
+ )
+
+ self.conversation = Conversation(
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
+ )
+ self.conversation.history_offset = history_offset
+
+ self.logger = Logger(
+ name="FreeAIChat",
+ format=LogFormat.MODERN_EMOJI,
+ ) if logging else None
+
+ if self.logger:
+ self.logger.info(f"FreeAIChat initialized successfully with model: {model}")
+
+ def ask(
+ self,
+ prompt: str,
+ stream: bool = False,
+ raw: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> Union[Dict[str, Any], Generator]:
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+ if optimizer:
+ if optimizer in self.__available_optimizers:
+ conversation_prompt = getattr(Optimizers, optimizer)(
+ conversation_prompt if conversationally else prompt
+ )
+ if self.logger:
+ self.logger.debug(f"Applied optimizer: {optimizer}")
+ else:
+ if self.logger:
+ self.logger.error(f"Invalid optimizer requested: {optimizer}")
+ raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+ messages = [
+ {
+ "role": "system",
+ "content": self.system_prompt
+ },
+ {
+ "role": "user",
+ "content": conversation_prompt
+ }
+ ]
+
+ payload = {
+ "model": self.model,
+ "messages": messages
+ }
+
+ def for_stream():
+ if self.logger:
+ self.logger.debug("Sending streaming request to FreeAIChat API...")
+ try:
+ with requests.post(self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+ if response.status_code != 200:
+ if self.logger:
+ self.logger.error(f"Request failed with status code {response.status_code}")
+ raise exceptions.FailedToGenerateResponseError(
+ f"Request failed with status code {response.status_code}"
+ )
+
+ streaming_text = ""
+ for line in response.iter_lines(decode_unicode=True):
+ if line:
+ line = line.strip()
+ if line.startswith("data: "):
+ json_str = line[6:] # Remove "data: " prefix
+ if json_str == "[DONE]":
+ break
+ try:
+ json_data = json.loads(json_str)
+ if 'choices' in json_data:
+ choice = json_data['choices'][0]
+ if 'delta' in choice and 'content' in choice['delta']:
+ content = choice['delta']['content']
+ streaming_text += content
+ resp = dict(text=content)
+ yield resp if raw else resp
+ except json.JSONDecodeError:
+ if self.logger:
+ self.logger.error("JSON decode error in streaming data")
+ pass
+
+ self.conversation.update_chat_history(prompt, streaming_text)
+ if self.logger:
+ self.logger.info("Streaming response completed successfully")
+
+ except requests.RequestException as e:
+ if self.logger:
+ self.logger.error(f"Request failed: {e}")
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+ def for_non_stream():
+ full_text = ""
+ for chunk in for_stream():
+ full_text += chunk["text"]
+ return {"text": full_text}
+
+ return for_stream() if stream else for_non_stream()
+
+ def chat(
+ self,
+ prompt: str,
+ stream: bool = False,
+ optimizer: str = None,
+ conversationally: bool = False,
+ ) -> str:
+ def for_stream():
+ for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+ yield self.get_message(response)
+
+ def for_non_stream():
+ return self.get_message(
+ self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+ )
+
+ return for_stream() if stream else for_non_stream()
+
+ def get_message(self, response: dict) -> str:
+ assert isinstance(response, dict), "Response should be of dict data-type only"
+ return response["text"]
+
+ @staticmethod
+ def fix_encoding(text):
+ if isinstance(text, dict) and "text" in text:
+ try:
+ text["text"] = text["text"].encode("latin1").decode("utf-8")
+ return text
+ except (UnicodeError, AttributeError) as e:
+ return text
+ elif isinstance(text, str):
+ try:
+ return text.encode("latin1").decode("utf-8")
+ except (UnicodeError, AttributeError) as e:
+ return text
+ return text
+
+
+ if __name__ == "__main__":
+ from rich import print
+ ai = FreeAIChat(model="GPT-4o", logging=True)
+ # response = ai.chat(input(">>>"), stream=True)
+ # full_text = ""
+
+ # for chunk in response:
+ # corrected_chunk = ai.fix_encoding(chunk)
+ # full_text += corrected_chunk
+
+ response = ai.chat(input(">>>"), stream=False)
+ response = ai.fix_encoding(response)
+ print(response)
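
In 7.5 the non-streaming path of FreeAIChat.ask() accumulates the streamed chunks into a {"text": ...} dict instead of returning self.last_response, new model entries (Qwen, Llama 3.x, Claude 3.5) are listed, and a new fix_encoding() static method repairs mojibake by round-tripping latin-1/UTF-8. A minimal usage sketch against the 7.5 wheel; the import path, class name, model name, and parameters are taken from the diff above, nothing else is verified here:

    # Sketch only: assumes `pip install webscout==7.5`.
    from webscout.Provider.freeaichat import FreeAIChat

    ai = FreeAIChat(model="GPT-4o", logging=True)

    # Streaming: chat(stream=True) yields text chunks as they arrive.
    for chunk in ai.chat("Write a hello world program in Python", stream=True):
        print(chunk, end="", flush=True)

    # Non-streaming: 7.5 joins the chunks into {"text": ...} inside ask() and chat()
    # returns that text; fix_encoding() then repairs latin-1/UTF-8 mojibake if present.
    reply = ai.chat("Summarize this change in one line", stream=False)
    print(FreeAIChat.fix_encoding(reply))
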
webscout/Provider/granite.py
@@ -5,15 +5,15 @@ from typing import Any, Dict, Generator
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
- from webscout.Litlogger import Logger, LogFormat
  from webscout import LitAgent as Lit
+
  class IBMGranite(Provider):
  """
  A class to interact with the IBM Granite API (accessed via d18n68ssusgr7r.cloudfront.net)
- with comprehensive logging and using Lit agent for the user agent.
+ using Lit agent for the user agent.
  """

- AVAILABLE_MODELS = ["granite-3-8b-instruct"]
+ AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct"]

  def __init__(
  self,
@@ -27,24 +27,14 @@ class IBMGranite(Provider):
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
- model: str = "granite-3-8b-instruct",
+ model: str = "granite-3-2-8b-instruct",
  system_prompt: str = "You are a helpful AI assistant.",
- logging: bool = False
+ thinking: bool = False,
  ):
- """Initializes the IBM Granite API client with logging and Lit agent for the user agent."""
+ """Initializes the IBMGranite API client using Lit agent for the user agent."""
  if model not in self.AVAILABLE_MODELS:
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

- # Setup logging if enabled
- self.logger = Logger(
- name="IBMGranite",
- format=LogFormat.MODERN_EMOJI,
-
- ) if logging else None
-
- if self.logger:
- self.logger.info(f"Initializing IBMGranite with model: {model}")
-
  self.session = requests.Session()
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
@@ -54,6 +44,7 @@ class IBMGranite(Provider):
  self.last_response = {}
  self.model = model
  self.system_prompt = system_prompt
+ self.thinking = thinking

  # Use Lit agent to generate a random User-Agent
  self.headers = {
@@ -101,20 +92,13 @@ class IBMGranite(Provider):
  Returns:
  Union[Dict, Generator[Dict, None, None]]: Response generated
  """
- if self.logger:
- self.logger.debug(f"Ask method initiated - Prompt (first 50 chars): {prompt[:50]}")
-
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
  if optimizer:
  if optimizer in self.__available_optimizers:
  conversation_prompt = getattr(Optimizers, optimizer)(
  conversation_prompt if conversationally else prompt
  )
- if self.logger:
- self.logger.debug(f"Applied optimizer: {optimizer}")
  else:
- if self.logger:
- self.logger.error(f"Invalid optimizer requested: {optimizer}")
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

  payload = {
@@ -123,20 +107,17 @@ class IBMGranite(Provider):
  {"role": "system", "content": self.system_prompt},
  {"role": "user", "content": conversation_prompt},
  ],
- "stream": stream
+ "stream": stream,
+ "thinking": self.thinking,
  }

  def for_stream():
  try:
- if self.logger:
- self.logger.debug(f"Sending POST request to {self.api_endpoint} with payload: {payload}")
  response = self.session.post(
  self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
  )
  if not response.ok:
  msg = f"Request failed with status code {response.status_code}: {response.text}"
- if self.logger:
- self.logger.error(msg)
  raise exceptions.FailedToGenerateResponseError(msg)

  streaming_text = ""
@@ -149,28 +130,17 @@ class IBMGranite(Provider):
  streaming_text += content
  yield content if raw else dict(text=content)
  else:
- if self.logger:
- self.logger.debug(f"Skipping unrecognized line: {line}")
- except json.JSONDecodeError as e:
- if self.logger:
- self.logger.error(f"JSON decode error: {e}")
+ # Skip unrecognized lines
+ pass
+ except json.JSONDecodeError:
  continue
  self.last_response.update(dict(text=streaming_text))
  self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
- if self.logger:
- self.logger.info("Stream processing completed.")
-
  except requests.exceptions.RequestException as e:
- if self.logger:
- self.logger.error(f"Request exception: {e}")
  raise exceptions.ProviderConnectionError(f"Request failed: {e}")
  except json.JSONDecodeError as e:
- if self.logger:
- self.logger.error(f"Invalid JSON received: {e}")
  raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}")
  except Exception as e:
- if self.logger:
- self.logger.error(f"Unexpected error: {e}")
  raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")

  def for_non_stream():
@@ -189,20 +159,14 @@ class IBMGranite(Provider):
  conversationally: bool = False,
  ) -> str | Generator[str, None, None]:
  """Generate response as a string using chat method"""
- if self.logger:
- self.logger.debug(f"Chat method initiated - Prompt (first 50 chars): {prompt[:50]}")
-
  def for_stream():
  for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
  yield self.get_message(response)

  def for_non_stream():
- result = self.get_message(
+ return self.get_message(
  self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
  )
- if self.logger:
- self.logger.info("Chat method completed.")
- return result

  return for_stream() if stream else for_non_stream()

@@ -213,11 +177,11 @@ class IBMGranite(Provider):

  if __name__ == "__main__":
  from rich import print
- # Example usage: Initialize with logging enabled.
+ # Example usage: Initialize without logging.
  ai = IBMGranite(
- api_key="", # press f12 to see the API key
- logging=True
+ api_key="", # press f12 to see the API key
+ thinking=True,
  )
  response = ai.chat("write a poem about AI", stream=True)
  for chunk in response:
- print(chunk, end="", flush=True)
+ print(chunk, end="", flush=True)
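
The granite.py changes drop the Litlogger hooks, add granite-3-2-8b-instruct (now the default model), and forward a new thinking flag as "thinking" in the request payload. A minimal sketch of the 7.5 call pattern, mirroring the __main__ block above; parameter names come from the diff and the empty api_key is a placeholder, as in the diff's own example:

    # Sketch only: assumes `pip install webscout==7.5`.
    from webscout.Provider.granite import IBMGranite

    ai = IBMGranite(
        api_key="",                        # placeholder; the diff's comment says to read it from the browser devtools (F12)
        model="granite-3-2-8b-instruct",   # new 7.5 default
        thinking=True,                     # new flag, sent as "thinking" in the JSON payload
    )

    for chunk in ai.chat("write a poem about AI", stream=True):
        print(chunk, end="", flush=True)
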