webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
--- a/webscout/Provider/granite.py
+++ b/webscout/Provider/granite.py
@@ -1,8 +1,9 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
-from typing import Union, Any, Dict, Generator
+from typing import Optional, Union, Any, Dict, Generator
 
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as Lit
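
The dominant pattern in this release, visible in the hunk above and repeated across most providers, is the migration from requests to curl_cffi. Below is a minimal sketch of that pattern, not part of the diff: the URL is a placeholder, and "chrome110" is the impersonation profile this release uses throughout.

# Sketch of the requests -> curl_cffi migration pattern (URL is a placeholder).
from curl_cffi.requests import Session
from curl_cffi import CurlError

session = Session()
session.headers.update({"content-type": "application/json"})
try:
    # impersonate= makes curl_cffi present a real browser's TLS/HTTP2
    # fingerprint, which plain `requests` cannot do.
    resp = session.get("https://example.com/api", impersonate="chrome110", timeout=30)
    resp.raise_for_status()
except CurlError as e:
    print(f"Transport-level failure: {e}")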
@@ -19,7 +20,7 @@ class IBMGranite(Provider):
         self,
         api_key: str,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -35,7 +36,8 @@ class IBMGranite(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://d18n68ssusgr7r.cloudfront.net/v1/chat/completions"
@@ -46,18 +48,19 @@ class IBMGranite(Provider):
         self.system_prompt = system_prompt
         self.thinking = thinking
 
-        # Use Lit agent to generate a random User-Agent
+        # Use Lit agent (keep if needed for other headers or logic)
         self.headers = {
-            "authority": "d18n68ssusgr7r.cloudfront.net",
-            "accept": "application/json,application/jsonl",
+            "authority": "d18n68ssusgr7r.cloudfront.net", # Keep authority
+            "accept": "application/json,application/jsonl", # Keep accept
             "content-type": "application/json",
-            "origin": "https://www.ibm.com",
-            "referer": "https://www.ibm.com/",
-            "user-agent": Lit().random(),
+            "origin": "https://www.ibm.com", # Keep origin
+            "referer": "https://www.ibm.com/", # Keep referer
         }
         self.headers["Authorization"] = f"Bearer {api_key}"
+
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies = proxies
+        self.session.proxies = proxies # Assign proxies directly
 
         self.__available_optimizers = (
             method for method in dir(Optimizers)
@@ -74,10 +77,17 @@ class IBMGranite(Provider):
         self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _granite_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from IBM Granite stream JSON lists [3, "text"]."""
+        if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
+            return chunk[1]
+        return None
+
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # API supports streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
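
The new _granite_extractor codifies the wire format the old inline parser handled: the endpoint streams JSON lines shaped like [3, "text"], where the leading 3 marks a content frame. A quick self-contained check of that logic; the non-content frames below are hypothetical examples of lines the extractor is meant to skip.

from typing import Any, Dict, Optional, Union

def _granite_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    # Content frames are two-element lists tagged with 3: [3, "text"].
    if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
        return chunk[1]
    return None

assert _granite_extractor([3, "Hello"]) == "Hello"  # content frame -> text
assert _granite_extractor([1, "meta"]) is None      # hypothetical non-content frame
assert _granite_extractor("not a list") is None     # anything else is skipped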
@@ -107,48 +117,77 @@ class IBMGranite(Provider):
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "stream": stream,
-            "thinking": self.thinking,
+            "stream": True # API seems to require stream=True based on response format
         }
 
         def for_stream():
+            streaming_text = "" # Initialize outside try block
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+                    self.api_endpoint,
+                    # headers are set on the session
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=True, # Stream sends JSON lines (which are lists)
+                    content_extractor=self._granite_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
                 )
-                if not response.ok:
-                    msg = f"Request failed with status code {response.status_code}: {response.text}"
-                    raise exceptions.FailedToGenerateResponseError(msg)
-
-                streaming_text = ""
-                for line in response.iter_lines(decode_unicode=True):
-                    if line:
-                        try:
-                            data = json.loads(line)
-                            if len(data) == 2 and data[0] == 3 and isinstance(data[1], str):
-                                content = data[1]
-                                streaming_text += content
-                                yield content if raw else dict(text=content)
-                            else:
-                                # Skip unrecognized lines
-                                pass
-                        except json.JSONDecodeError:
-                            continue
-                self.last_response.update(dict(text=streaming_text))
-                self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-            except requests.exceptions.RequestException as e:
-                raise exceptions.ProviderConnectionError(f"Request failed: {e}")
-            except json.JSONDecodeError as e:
-                raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}")
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _granite_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+                # Update history after stream finishes
+                self.last_response = dict(text=streaming_text)
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
+            except json.JSONDecodeError as e: # Keep specific JSON error handling
+                raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                # Use specific exception type if available, otherwise generic
+                ex_type = exceptions.FailedToGenerateResponseError if not isinstance(e, exceptions.ProviderConnectionError) else type(e)
+                raise ex_type(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
 
         def for_non_stream():
-            # Run the generator to completion
-            for _ in for_stream():
-                pass
-            return self.last_response
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
 
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_text if raw else self.last_response
+
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()
 
     def chat(
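
Both rewritten providers funnel their responses through sanitize_stream from webscout.AIutel, whose implementation is not part of this diff. Judging only from the call sites here, it decodes a byte iterator line by line, optionally strips a prefix (intro_value), parses each line as JSON, and yields whatever content_extractor returns. A simplified sketch of that inferred behavior, assuming the real version handles more edge cases (split chunks, encodings):

# Simplified sketch of what sanitize_stream appears to do, inferred from the
# call sites in this diff; not the actual webscout.AIutel implementation.
import json
from typing import Callable, Iterable, Iterator, Optional

def sanitize_stream_sketch(
    data: Iterable[bytes],
    intro_value: Optional[str] = None,      # prefix to strip, e.g. "data:"
    to_json: bool = True,                    # parse each line as JSON
    content_extractor: Optional[Callable] = None,
    yield_raw_on_error: bool = False,        # if False, drop unparseable lines
) -> Iterator:
    buffer = ""
    for chunk in data:
        buffer += chunk.decode("utf-8", errors="ignore")
        while "\n" in buffer:
            line, buffer = buffer.split("\n", 1)
            line = line.strip()
            if intro_value and line.startswith(intro_value):
                line = line[len(intro_value):].strip()
            if not line:
                continue
            try:
                obj = json.loads(line) if to_json else line
            except json.JSONDecodeError:
                if yield_raw_on_error:
                    yield line
                continue
            out = content_extractor(obj) if content_extractor else obj
            if out is not None:
                yield out

# Feeding it the Granite wire format seen above:
chunks = [b'[3, "Hel"]\n', b'[3, "lo"]\n[1, "done"]\n']
pieces = list(sanitize_stream_sketch(
    chunks,
    content_extractor=lambda c: c[1] if isinstance(c, list) and c[0] == 3 else None,
))
assert pieces == ["Hel", "lo"]  # non-content frames are dropped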
@@ -159,16 +198,24 @@ class IBMGranite(Provider):
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response as a string using chat method"""
-        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
             )
+            return self.get_message(response_data) # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response"""
@@ -176,6 +223,7 @@ class IBMGranite(Provider):
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     from rich import print
     # Example usage: Initialize without logging.
     ai = IBMGranite(
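
From the caller's side the rewrite is behavior-preserving: chat() still yields plain strings when streaming and returns the full string otherwise. A hypothetical usage sketch; the API key is a placeholder, and AVAILABLE_MODELS is not shown in this diff, so the default model is relied on:

# Hypothetical usage sketch (api_key is a placeholder).
from webscout.Provider.granite import IBMGranite

ai = IBMGranite(api_key="YOUR_API_KEY", timeout=30)

# Streaming: chat(stream=True) yields decoded text chunks.
for chunk in ai.chat("Summarize this diff in one sentence.", stream=True):
    print(chunk, end="", flush=True)

# Non-streaming: the same stream is aggregated internally.
print(ai.chat("Now in one word.", stream=False))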
--- a/webscout/Provider/hermes.py
+++ b/webscout/Provider/hermes.py
@@ -1,9 +1,10 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator, Optional
 
 from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
@@ -38,7 +39,7 @@ class NousHermes(Provider):
                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
             )
 
-        self.session = requests.Session()
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -49,15 +50,14 @@ class NousHermes(Provider):
         self.temperature = temperature
         self.top_p = top_p
         self.cookies_path = cookies_path
-        self.cookies = self._load_cookies()
+        self.cookies_dict = self._load_cookies()
+
         self.headers = {
             'accept': '*/*',
             'accept-language': 'en-US,en;q=0.9',
             'content-type': 'application/json',
             'origin': 'https://hermes.nousresearch.com',
             'referer': 'https://hermes.nousresearch.com/',
-            'user-agent': LitAgent().random(),
-            'cookie': self.cookies
         }
 
         self.__available_optimizers = (
@@ -77,20 +77,38 @@ class NousHermes(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
+        # Update curl_cffi session headers and proxies
         self.session.proxies = proxies
+
+        # Apply cookies to curl_cffi session
+        if self.cookies_dict:
+            for name, value in self.cookies_dict.items():
+                self.session.cookies.set(name, value, domain="hermes.nousresearch.com")
 
-    def _load_cookies(self) -> Optional[str]:
-        """Load cookies from a JSON file and convert them to a string."""
+    def _load_cookies(self) -> Optional[Dict[str, str]]:
+        """Load cookies from a JSON file and return them as a dictionary."""
         try:
             with open(self.cookies_path, 'r') as f:
                 cookies_data = json.load(f)
-            return '; '.join([f"{cookie['name']}={cookie['value']}" for cookie in cookies_data])
+            # Convert list of cookie objects to a dictionary
+            return {cookie['name']: cookie['value'] for cookie in cookies_data if 'name' in cookie and 'value' in cookie}
         except FileNotFoundError:
-            print("Error: cookies.json file not found!")
+            print(f"Warning: Cookies file not found at {self.cookies_path}")
             return None
         except json.JSONDecodeError:
-            print("Error: Invalid JSON format in cookies.json!")
+            print(f"Warning: Invalid JSON format in cookies file at {self.cookies_path}")
             return None
+        except Exception as e:
+            print(f"Warning: Error loading cookies: {e}")
+            return None
+
+    @staticmethod
+    def _hermes_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Hermes stream JSON objects."""
+        if isinstance(chunk, dict) and chunk.get('type') == 'llm_response':
+            return chunk.get('content')
+        return None
 
     def ask(
         self,
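
The cookie-handling change above implies a browser-export-style cookies.json: a list of objects carrying at least "name" and "value" keys, now loaded into a dict and applied cookie by cookie to the curl_cffi session instead of being pre-joined into a single Cookie header. A small illustration of that shape and conversion; the file contents are made up:

# Illustration of the cookies.json shape implied by the dict comprehension above.
import json

example = '''[
    {"name": "session-id", "value": "abc123", "domain": "hermes.nousresearch.com"},
    {"name": "csrf-token", "value": "xyz789"}
]'''

cookies_data = json.loads(example)
cookies_dict = {c["name"]: c["value"] for c in cookies_data if "name" in c and "value" in c}
assert cookies_dict == {"session-id": "abc123", "csrf-token": "xyz789"}
# Each entry is then set on the session individually:
#   session.cookies.set(name, value, domain="hermes.nousresearch.com")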
@@ -134,32 +152,59 @@ class NousHermes(Provider):
             "top_p": self.top_p,
         }
         def for_stream():
-            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            streaming_text = "" # Initialize outside try block
+            try:
+                response = self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Keep impersonate
+                )
+                response.raise_for_status()
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    content_extractor=self._hermes_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _hermes_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+                self.last_response = dict(text=streaming_text) # Use streaming_text
+                self.conversation.update_chat_history(
+                    prompt, streaming_text # Use streaming_text
                 )
-            full_response = ""
-            for line in response.iter_lines():
-                if line:
-                    decoded_line = line.decode('utf-8').replace('data: ', '')
-                    try:
-                        data = json.loads(decoded_line)
-                        if data['type'] == 'llm_response':
-                            content = data['content']
-                            full_response += content
-                            yield content if raw else dict(text=content)
-                    except json.JSONDecodeError:
-                        continue
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
 
         def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            collected_text = ""
+            try:
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        collected_text += chunk_data["text"]
+                    elif raw and isinstance(chunk_data, str):
+                        collected_text += chunk_data
+            except Exception as e:
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            return collected_text if raw else self.last_response
 
         return for_stream() if stream else for_non_stream()
 
@@ -180,23 +225,25 @@ class NousHermes(Provider):
             str: Response generated
         """
 
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
+        def for_stream_chat():
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
 
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+        def for_non_stream_chat():
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_data)
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
--- a/webscout/Provider/koala.py
+++ b/webscout/Provider/koala.py
@@ -28,7 +28,7 @@ class KOALA(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "gpt-4o",
+        model: str = "gpt-4.1",
         web_search: bool = True,
 
     ) -> None:
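
The only change to koala.py is the default model bump from "gpt-4o" to "gpt-4.1"; callers who pass model explicitly are unaffected. A hypothetical sketch, assuming "gpt-4o" remains an accepted value:

# Hypothetical sketch of the default-model change's impact.
from webscout.Provider.koala import KOALA

ai_new_default = KOALA()            # now defaults to model="gpt-4.1"
ai_pinned = KOALA(model="gpt-4o")   # explicit pin keeps the previous model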