webscout-7.4-py3-none-any.whl → webscout-7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (137)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/autocoder_utiles.py +0 -4
  9. webscout/Extra/autocoder/rawdog.py +13 -41
  10. webscout/Extra/gguf.py +652 -428
  11. webscout/Extra/weather.py +178 -156
  12. webscout/Extra/weather_ascii.py +70 -17
  13. webscout/Litlogger/core/logger.py +1 -2
  14. webscout/Litlogger/handlers/file.py +1 -1
  15. webscout/Litlogger/styles/formats.py +0 -2
  16. webscout/Litlogger/utils/detectors.py +0 -1
  17. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  18. webscout/Provider/AISEARCH/ISou.py +1 -1
  19. webscout/Provider/AISEARCH/felo_search.py +0 -1
  20. webscout/Provider/AllenAI.py +24 -9
  21. webscout/Provider/C4ai.py +432 -0
  22. webscout/Provider/ChatGPTGratis.py +24 -56
  23. webscout/Provider/Cloudflare.py +18 -21
  24. webscout/Provider/DeepSeek.py +27 -48
  25. webscout/Provider/Deepinfra.py +129 -53
  26. webscout/Provider/Gemini.py +1 -1
  27. webscout/Provider/GithubChat.py +362 -0
  28. webscout/Provider/Glider.py +25 -8
  29. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  30. webscout/Provider/HeckAI.py +38 -5
  31. webscout/Provider/HuggingFaceChat.py +462 -0
  32. webscout/Provider/Jadve.py +20 -5
  33. webscout/Provider/Marcus.py +7 -50
  34. webscout/Provider/Netwrck.py +43 -67
  35. webscout/Provider/PI.py +4 -2
  36. webscout/Provider/Perplexitylabs.py +26 -6
  37. webscout/Provider/Phind.py +29 -3
  38. webscout/Provider/PizzaGPT.py +10 -51
  39. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  40. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  41. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  42. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
  43. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
  44. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  45. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  46. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  47. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  48. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  49. webscout/Provider/TTI/__init__.py +2 -3
  50. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  51. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  52. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  53. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  54. webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
  55. webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
  56. webscout/Provider/TTS/__init__.py +2 -2
  57. webscout/Provider/TTS/deepgram.py +12 -39
  58. webscout/Provider/TTS/elevenlabs.py +14 -40
  59. webscout/Provider/TTS/gesserit.py +11 -35
  60. webscout/Provider/TTS/murfai.py +13 -39
  61. webscout/Provider/TTS/parler.py +17 -40
  62. webscout/Provider/TTS/speechma.py +180 -0
  63. webscout/Provider/TTS/streamElements.py +17 -44
  64. webscout/Provider/TextPollinationsAI.py +39 -59
  65. webscout/Provider/Venice.py +217 -200
  66. webscout/Provider/WiseCat.py +27 -5
  67. webscout/Provider/Youchat.py +63 -36
  68. webscout/Provider/__init__.py +13 -8
  69. webscout/Provider/akashgpt.py +28 -10
  70. webscout/Provider/copilot.py +416 -0
  71. webscout/Provider/flowith.py +196 -0
  72. webscout/Provider/freeaichat.py +32 -45
  73. webscout/Provider/granite.py +17 -53
  74. webscout/Provider/koala.py +20 -5
  75. webscout/Provider/llamatutor.py +7 -47
  76. webscout/Provider/llmchat.py +36 -53
  77. webscout/Provider/multichat.py +92 -98
  78. webscout/Provider/talkai.py +1 -0
  79. webscout/Provider/turboseek.py +3 -0
  80. webscout/Provider/tutorai.py +2 -0
  81. webscout/Provider/typegpt.py +154 -64
  82. webscout/Provider/x0gpt.py +3 -1
  83. webscout/Provider/yep.py +102 -20
  84. webscout/__init__.py +3 -0
  85. webscout/cli.py +4 -40
  86. webscout/conversation.py +1 -10
  87. webscout/exceptions.py +19 -9
  88. webscout/litagent/__init__.py +2 -2
  89. webscout/litagent/agent.py +351 -20
  90. webscout/litagent/constants.py +34 -5
  91. webscout/litprinter/__init__.py +0 -3
  92. webscout/models.py +181 -0
  93. webscout/optimizers.py +1 -1
  94. webscout/prompt_manager.py +2 -8
  95. webscout/scout/core/scout.py +1 -4
  96. webscout/scout/core/search_result.py +1 -1
  97. webscout/scout/core/text_utils.py +1 -1
  98. webscout/scout/core.py +2 -5
  99. webscout/scout/element.py +1 -1
  100. webscout/scout/parsers/html_parser.py +1 -1
  101. webscout/scout/utils.py +0 -1
  102. webscout/swiftcli/__init__.py +1 -3
  103. webscout/tempid.py +1 -1
  104. webscout/update_checker.py +55 -95
  105. webscout/version.py +1 -1
  106. webscout/webscout_search_async.py +1 -2
  107. webscout/yep_search.py +297 -297
  108. webscout-7.6.dist-info/LICENSE.md +146 -0
  109. {webscout-7.4.dist-info → webscout-7.6.dist-info}/METADATA +104 -514
  110. {webscout-7.4.dist-info → webscout-7.6.dist-info}/RECORD +113 -120
  111. webscout/Extra/autollama.py +0 -231
  112. webscout/Local/__init__.py +0 -10
  113. webscout/Local/_version.py +0 -3
  114. webscout/Local/formats.py +0 -747
  115. webscout/Local/model.py +0 -1368
  116. webscout/Local/samplers.py +0 -125
  117. webscout/Local/thread.py +0 -539
  118. webscout/Local/ui.py +0 -401
  119. webscout/Local/utils.py +0 -388
  120. webscout/Provider/Amigo.py +0 -274
  121. webscout/Provider/Bing.py +0 -243
  122. webscout/Provider/DiscordRocks.py +0 -253
  123. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  124. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  125. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  126. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  127. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  128. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  129. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  130. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  131. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  132. webscout/Provider/TTS/voicepod.py +0 -117
  133. webscout/Provider/dgaf.py +0 -214
  134. webscout-7.4.dist-info/LICENSE.md +0 -211
  135. {webscout-7.4.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
  136. {webscout-7.4.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
  137. {webscout-7.4.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,196 @@
+ import uuid
+ import requests
+ import json
+ import os
+ import re
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class Flowith(Provider):
+     """
+     A class to interact with the Flowith AI chat API.
+     """
+
+     AVAILABLE_MODELS = [
+         "gpt-4o-mini",
+         "deepseek-chat",
+         "deepseek-reasoner",
+         "claude-3.5-haiku",
+         "llama-3.2-11b",
+         "llama-3.2-90b",
+         "gemini-2.0-flash",
+         "o1",
+         "o3-mini",
+         "gpt-4o",
+         "claude-3.5-sonnet",
+         "gemini-2.0-pro",
+         "claude-3.7-sonnet"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "claude-3.5-haiku"
+     ):
+         """Initializes the Flowith API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://edge.flowith.net/ai/chat?mode=general"
+
+         # Set up headers for the API request
+         self.headers = {
+             "authority": "edge.flowith.net",
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "origin": "https://flowith.io",
+             "referer": "https://edge.flowith.net/",
+             "responsetype": "stream",
+             "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "cross-site",
+             "user-agent": LitAgent().random()  # Use LitAgent for user-agent
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.node_id = str(uuid.uuid4())  # Generate a new UUID for node ID
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def clean_response(self, text):
+         """Remove text between <think> tags and other specific text patterns."""
+         # Remove text between <think> tags
+         text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
+
+         return text.strip()
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,  # This parameter is kept for compatibility
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Dict[str, str]]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Build the request payload; the endpoint is asked for a streamed
+         # response, but ask() reads the whole body at once for simpler handling
+         payload = {
+             "model": self.model,
+             "messages": [{"content": conversation_prompt, "role": "user"}],
+             "stream": True,
+             "nodeId": self.node_id
+         }
+
+         try:
+             # Simple non-streaming request
+             response = self.session.post(self.url, json=payload, timeout=self.timeout)
+
+             if response.status_code != 200:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Request failed with status code {response.status_code}"
+                 )
+
+             # Get the response text directly
+             response_text = response.text.strip()
+
+             # Clean the response
+             cleaned_text = self.clean_response(response_text)
+             self.last_response = {"text": cleaned_text}
+
+             # Update conversation history
+             self.conversation.update_chat_history(prompt, cleaned_text)
+
+             return {"text": cleaned_text}
+
+         except requests.RequestException as e:
+             raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,  # Parameter kept for compatibility
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         # Always use non-streaming mode
+         response = self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+         return self.get_message(response)
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in Flowith.AVAILABLE_MODELS:
+         try:
+             test_ai = Flowith(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word")
+             response_text = response
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")
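The new Flowith provider follows the same Provider interface as the rest of webscout, so it can be exercised on its own. A minimal usage sketch based only on the constructor and chat method added above (the import path follows the file-list entry webscout/Provider/flowith.py; whether Flowith is also re-exported at a higher level is not shown in this diff):

    from webscout.Provider.flowith import Flowith

    # claude-3.5-haiku is the constructor default per the code above
    ai = Flowith(model="claude-3.5-haiku", timeout=30)
    print(ai.chat("Say 'Hello' in one word"))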
@@ -9,11 +9,10 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
  from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
  from webscout import LitAgent
- from webscout.Litlogger import Logger, LogFormat
 
  class FreeAIChat(Provider):
      """
-     A class to interact with the FreeAIChat API with logging and LitAgent user-agent.
+     A class to interact with the FreeAIChat API with LitAgent user-agent.
      """
 
      AVAILABLE_MODELS = [
@@ -23,10 +22,10 @@ class FreeAIChat(Provider):
          "gemini-1.5-pro",
          "gemini-1.5-flash",
          "gemini-2.0-pro-exp-02-05",
-         "deepseek-r1",
+         # "deepseek-r1", >>>> NOT WORKING
          "deepseek-v3",
-         "Deepseek r1 14B",
-         "Deepseek r1 32B",
+         # "Deepseek r1 14B", >>>> NOT WORKING
+         # "Deepseek r1 32B", >>>> NOT WORKING
          "o3-mini-high",
          "o3-mini-medium",
          "o3-mini-low",
@@ -36,10 +35,10 @@ class FreeAIChat(Provider):
          "o1-mini",
          "GPT-4o",
          "Qwen coder",
-         "Qwen 2.5 72B",
+         # "Qwen 2.5 72B", >>>> NOT WORKING
          "Llama 3.1 405B",
-         "llama3.1-70b-fast",
-         "Llama 3.3 70B",
+         # "llama3.1-70b-fast", >>>> NOT WORKING
+         # "Llama 3.3 70B", >>>> NOT WORKING
          "claude 3.5 haiku",
          "claude 3.5 sonnet",
      ]
@@ -57,9 +56,8 @@ class FreeAIChat(Provider):
          act: str = None,
          model: str = "GPT-4o",
          system_prompt: str = "You are a helpful AI assistant.",
-         logging: bool = False
      ):
-         """Initializes the FreeAIChat API client with logging support."""
+         """Initializes the FreeAIChat API client."""
          if model not in self.AVAILABLE_MODELS:
              raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
@@ -102,14 +100,6 @@ class FreeAIChat(Provider):
          )
          self.conversation.history_offset = history_offset
 
-         self.logger = Logger(
-             name="FreeAIChat",
-             format=LogFormat.MODERN_EMOJI,
-         ) if logging else None
-
-         if self.logger:
-             self.logger.info(f"FreeAIChat initialized successfully with model: {model}")
-
      def ask(
          self,
          prompt: str,
@@ -124,11 +114,7 @@ class FreeAIChat(Provider):
                  conversation_prompt = getattr(Optimizers, optimizer)(
                      conversation_prompt if conversationally else prompt
                  )
-                 if self.logger:
-                     self.logger.debug(f"Applied optimizer: {optimizer}")
              else:
-                 if self.logger:
-                     self.logger.error(f"Invalid optimizer requested: {optimizer}")
                  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
          messages = [
@@ -148,13 +134,9 @@ class FreeAIChat(Provider):
          }
 
          def for_stream():
-             if self.logger:
-                 self.logger.debug("Sending streaming request to FreeAIChat API...")
              try:
                  with requests.post(self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
                      if response.status_code != 200:
-                         if self.logger:
-                             self.logger.error(f"Request failed with status code {response.status_code}")
                          raise exceptions.FailedToGenerateResponseError(
                              f"Request failed with status code {response.status_code}"
                          )
@@ -177,17 +159,11 @@ class FreeAIChat(Provider):
                                  resp = dict(text=content)
                                  yield resp if raw else resp
                              except json.JSONDecodeError:
-                                 if self.logger:
-                                     self.logger.error("JSON decode error in streaming data")
                                  pass
 
                      self.conversation.update_chat_history(prompt, streaming_text)
-                     if self.logger:
-                         self.logger.info("Streaming response completed successfully")
 
              except requests.RequestException as e:
-                 if self.logger:
-                     self.logger.error(f"Request failed: {e}")
                  raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
          def for_non_stream():
@@ -234,18 +210,29 @@ class FreeAIChat(Provider):
          except (UnicodeError, AttributeError) as e:
              return text
          return text
-
 
  if __name__ == "__main__":
-     from rich import print
-     ai = FreeAIChat(model="GPT-4o", logging=True)
-     # response = ai.chat(input(">>>"), stream=True)
-     # full_text = ""
-
-     # for chunk in response:
-     #     corrected_chunk = ai.fix_encoding(chunk)
-     #     full_text += corrected_chunk
-
-     response = ai.chat(input(">>>"), stream=False)
-     response = ai.fix_encoding(response)
-     print(response)
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in FreeAIChat.AVAILABLE_MODELS:
+         try:
+             test_ai = FreeAIChat(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
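For interactive use, chat(..., stream=True) yields text chunks that can be printed as they arrive. A minimal sketch assuming the FreeAIChat class and its fix_encoding helper shown above (applying fix_encoding per chunk mirrors the commented-out example this release removed; the import path follows the file-list entry webscout/Provider/freeaichat.py):

    from webscout.Provider.freeaichat import FreeAIChat

    ai = FreeAIChat(model="GPT-4o")
    for chunk in ai.chat("Explain HTTP streaming in one sentence", stream=True):
        # fix_encoding repairs mojibake in individual chunks
        print(ai.fix_encoding(chunk), end="", flush=True)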
@@ -5,15 +5,15 @@ from typing import Any, Dict, Generator
  from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
- from webscout.Litlogger import Logger, LogFormat
  from webscout import LitAgent as Lit
+
  class IBMGranite(Provider):
      """
      A class to interact with the IBM Granite API (accessed via d18n68ssusgr7r.cloudfront.net)
-     with comprehensive logging and using Lit agent for the user agent.
+     using Lit agent for the user agent.
      """
 
-     AVAILABLE_MODELS = ["granite-3-8b-instruct"]
+     AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct"]
 
      def __init__(
          self,
@@ -27,24 +27,14 @@ class IBMGranite(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "granite-3-8b-instruct",
+         model: str = "granite-3-2-8b-instruct",
          system_prompt: str = "You are a helpful AI assistant.",
-         logging: bool = False
+         thinking: bool = False,
      ):
-         """Initializes the IBM Granite API client with logging and Lit agent for the user agent."""
+         """Initializes the IBMGranite API client using Lit agent for the user agent."""
          if model not in self.AVAILABLE_MODELS:
              raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-         # Setup logging if enabled
-         self.logger = Logger(
-             name="IBMGranite",
-             format=LogFormat.MODERN_EMOJI,
-
-         ) if logging else None
-
-         if self.logger:
-             self.logger.info(f"Initializing IBMGranite with model: {model}")
-
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
@@ -54,6 +44,7 @@ class IBMGranite(Provider):
          self.last_response = {}
          self.model = model
          self.system_prompt = system_prompt
+         self.thinking = thinking
 
          # Use Lit agent to generate a random User-Agent
          self.headers = {
@@ -101,20 +92,13 @@ class IBMGranite(Provider):
          Returns:
              Union[Dict, Generator[Dict, None, None]]: Response generated
          """
-         if self.logger:
-             self.logger.debug(f"Ask method initiated - Prompt (first 50 chars): {prompt[:50]}")
-
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
                  conversation_prompt = getattr(Optimizers, optimizer)(
                      conversation_prompt if conversationally else prompt
                  )
-                 if self.logger:
-                     self.logger.debug(f"Applied optimizer: {optimizer}")
              else:
-                 if self.logger:
-                     self.logger.error(f"Invalid optimizer requested: {optimizer}")
                  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
          payload = {
@@ -123,20 +107,17 @@ class IBMGranite(Provider):
              {"role": "system", "content": self.system_prompt},
              {"role": "user", "content": conversation_prompt},
          ],
-         "stream": stream
+         "stream": stream,
+         "thinking": self.thinking,
      }
 
          def for_stream():
              try:
-                 if self.logger:
-                     self.logger.debug(f"Sending POST request to {self.api_endpoint} with payload: {payload}")
                  response = self.session.post(
                      self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
                  )
                  if not response.ok:
                      msg = f"Request failed with status code {response.status_code}: {response.text}"
-                     if self.logger:
-                         self.logger.error(msg)
                      raise exceptions.FailedToGenerateResponseError(msg)
 
                  streaming_text = ""
@@ -149,28 +130,17 @@ class IBMGranite(Provider):
                              streaming_text += content
                              yield content if raw else dict(text=content)
                          else:
-                             if self.logger:
-                                 self.logger.debug(f"Skipping unrecognized line: {line}")
-                     except json.JSONDecodeError as e:
-                         if self.logger:
-                             self.logger.error(f"JSON decode error: {e}")
+                             # Skip unrecognized lines
+                             pass
+                     except json.JSONDecodeError:
                          continue
                  self.last_response.update(dict(text=streaming_text))
                  self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-                 if self.logger:
-                     self.logger.info("Stream processing completed.")
-
              except requests.exceptions.RequestException as e:
-                 if self.logger:
-                     self.logger.error(f"Request exception: {e}")
                  raise exceptions.ProviderConnectionError(f"Request failed: {e}")
              except json.JSONDecodeError as e:
-                 if self.logger:
-                     self.logger.error(f"Invalid JSON received: {e}")
                  raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}")
              except Exception as e:
-                 if self.logger:
-                     self.logger.error(f"Unexpected error: {e}")
                  raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")
 
          def for_non_stream():
@@ -189,20 +159,14 @@ class IBMGranite(Provider):
          conversationally: bool = False,
      ) -> str | Generator[str, None, None]:
          """Generate response as a string using chat method"""
-         if self.logger:
-             self.logger.debug(f"Chat method initiated - Prompt (first 50 chars): {prompt[:50]}")
-
          def for_stream():
              for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                  yield self.get_message(response)
 
          def for_non_stream():
-             result = self.get_message(
+             return self.get_message(
                  self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
              )
-             if self.logger:
-                 self.logger.info("Chat method completed.")
-             return result
 
          return for_stream() if stream else for_non_stream()
 
@@ -213,11 +177,11 @@ class IBMGranite(Provider):
 
  if __name__ == "__main__":
      from rich import print
-     # Example usage: Initialize with logging enabled.
+     # Example usage: Initialize without logging.
      ai = IBMGranite(
-         api_key="",  # press f12 to see the API key
-         logging=True
+         api_key="",  # press f12 to see the API key
+         thinking=True,
      )
      response = ai.chat("write a poem about AI", stream=True)
      for chunk in response:
-         print(chunk, end="", flush=True)
+         print(chunk, end="", flush=True)
@@ -246,8 +246,23 @@ class KOALA(Provider):
      assert isinstance(response, dict), "Response should be of dict data-type only"
      return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
  if __name__ == '__main__':
-     from rich import print
-     ai = KOALA()
-     response = ai.chat("tell me about india")
-     for chunk in response:
-         print(chunk, end="", flush=True)
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in KOALA.AVAILABLE_MODELS:
+         try:
+             test_ai = KOALA(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word")
+             response_text = response
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"{model:<50} {'✗':<10} {str(e)}")