webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/llmchat.py

@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Optional, Generator, List
 
@@ -44,7 +45,8 @@ class LLMChat(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://llmchat.in/inference/stream"
@@ -56,7 +58,6 @@ class LLMChat(Provider):
         self.headers = {
             "Content-Type": "application/json",
             "Accept": "*/*",
-            "User-Agent": Lit().random(),
             "Origin": "https://llmchat.in",
             "Referer": "https://llmchat.in/"
         }
@@ -79,7 +80,10 @@ class LLMChat(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
 
     def ask(
         self,
@@ -88,7 +92,7 @@
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]: # Corrected return type hint
         """Chat with LLMChat with logging capabilities"""
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
@@ -109,46 +113,79 @@
                 {"role": "user", "content": conversation_prompt}
             ],
             "max_tokens": self.max_tokens_to_sample,
-            "stream": stream
+            "stream": True # API seems to always stream based on endpoint name
         }
 
         def for_stream():
+            full_response = "" # Initialize outside try block
             try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    url,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
 
-                with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
-                    response.raise_for_status()
-
-                    full_response = ""
-                    for line in response.iter_lines():
-                        if line:
-                            line = line.decode('utf-8')
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8')
                             if line.startswith('data: '):
+                                data_str = line[6:]
+                                if data_str == '[DONE]':
+                                    break
                                 try:
-                                    data = json.loads(line[6:])
+                                    data = json.loads(data_str)
                                     if data.get('response'):
                                         response_text = data['response']
                                         full_response += response_text
-                                        yield response_text if raw else dict(text=response_text)
+                                        resp = dict(text=response_text)
+                                        # Yield dict or raw string chunk
+                                        yield resp if not raw else response_text
                                 except json.JSONDecodeError:
-                                    if line.strip() != 'data: [DONE]':
-                                        continue
-
-                    self.last_response.update(dict(text=full_response))
-                    self.conversation.update_chat_history(
-                        prompt, self.get_message(self.last_response)
-                    )
+                                    continue # Ignore invalid JSON data
+                        except UnicodeDecodeError:
+                            continue # Ignore decoding errors
+
+                # Update history after stream finishes
+                self.last_response = dict(text=full_response)
+                self.conversation.update_chat_history(
+                    prompt, full_response
+                )
 
-            except requests.exceptions.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
 
         def for_non_stream():
-
-            full_response = ""
-            for line in for_stream():
-                full_response += line['text'] if not raw else line
-
-            return dict(text=full_response)
-
+            # Aggregate the stream using the updated for_stream logic
+            full_response_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_response_text if raw else self.last_response
+
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -160,23 +197,27 @@ class LLMChat(Provider):
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response with logging capabilities"""
 
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_data) # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message from response with validation"""
@@ -184,6 +225,7 @@ class LLMChat(Provider):
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
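Note on the change above: llmchat.py (like most providers touched in this release) switches from requests to curl_cffi so that requests can impersonate a real browser's TLS/HTTP fingerprint. A minimal standalone sketch of that streaming pattern, using the endpoint, payload fields, and "chrome110" profile shown in the diff; the model id and max_tokens value are placeholders, and the provider's conversation bookkeeping and retry logic are omitted:

# Sketch only: streams SSE-style "data: {...}" lines from the llmchat.in endpoint.
import json
from curl_cffi import CurlError
from curl_cffi.requests import Session

session = Session()
payload = {
    "model": "example-model-id",  # placeholder; real ids live in LLMChat.AVAILABLE_MODELS
    "messages": [{"role": "user", "content": "Hello"}],
    "max_tokens": 600,
    "stream": True,
}

try:
    response = session.post(
        "https://llmchat.in/inference/stream",
        json=payload,
        stream=True,
        timeout=30,
        impersonate="chrome110",  # present a real browser fingerprint
    )
    response.raise_for_status()
    for line_bytes in response.iter_lines():
        if not line_bytes:
            continue
        line = line_bytes.decode("utf-8")
        if not line.startswith("data: "):
            continue
        data_str = line[6:]
        if data_str == "[DONE]":
            break
        try:
            chunk = json.loads(data_str)
        except json.JSONDecodeError:
            continue
        if chunk.get("response"):
            print(chunk["response"], end="", flush=True)
except CurlError as e:
    print(f"Request failed (CurlError): {e}")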
webscout/Provider/llmchatco.py

@@ -1,10 +1,11 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 import uuid
 import re
 from typing import Union, Any, Dict, Optional, Generator, List
 
-from webscout.AIutel import Optimizers
+from webscout.AIutel import Optimizers, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
@@ -37,7 +38,7 @@ class LLMChatCo(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 2048,
+        max_tokens: int = 2048, # Note: max_tokens is not used by this API
         timeout: int = 60,
         intro: str = None,
         filepath: str = None,
@@ -55,7 +56,8 @@ class LLMChatCo(Provider):
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
-        self.session = requests.Session()
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.api_endpoint = "https://llmchat.co/api/completion"
@@ -64,10 +66,10 @@ class LLMChatCo(Provider):
         self.model = model
         self.system_prompt = system_prompt
         self.thread_id = str(uuid.uuid4()) # Generate a unique thread ID for conversations
-
-        # Create LitAgent instance for user agent generation
+
+        # Create LitAgent instance (keep if needed for other headers)
         lit_agent = Lit()
-
+
         # Headers based on the provided request
         self.headers = {
             "Content-Type": "application/json",
@@ -79,7 +81,8 @@ class LLMChatCo(Provider):
             "DNT": "1",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin"
+            "Sec-Fetch-Site": "same-origin",
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }
 
         self.__available_optimizers = (
@@ -100,28 +103,22 @@ class LLMChatCo(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
         # Store message history for conversation context
         self.last_assistant_response = ""
 
-    def parse_sse(self, data):
-        """Parse Server-Sent Events data"""
-        if not data or not data.strip():
-            return None
-
-        # Check if it's an event line
-        if data.startswith('event:'):
-            return {'event': data[6:].strip()}
-
-        # Check if it's data
-        if data.startswith('data:'):
-            data_content = data[5:].strip()
-            if data_content:
-                try:
-                    return {'data': json.loads(data_content)}
-                except json.JSONDecodeError:
-                    return {'data': data_content}
-
+    @staticmethod
+    def _llmchatco_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts text content from LLMChat.co stream JSON objects."""
+        if isinstance(chunk, dict) and "answer" in chunk:
+            answer = chunk["answer"]
+            # Prefer fullText if available and status is COMPLETED
+            if answer.get("fullText") and answer.get("status") == "COMPLETED":
+                return answer["fullText"]
+            elif "text" in answer:
+                return answer["text"]
         return None
 
     def ask(
@@ -167,85 +164,78 @@ class LLMChatCo(Provider):
         }
 
         def for_stream():
+            full_response = "" # Initialize outside try block
             try:
-                # Set up the streaming request
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
-                    self.api_endpoint,
-                    json=payload,
-                    headers=self.headers,
-                    stream=True,
-                    timeout=self.timeout
+                    self.api_endpoint,
+                    json=payload,
+                    # headers are set on the session
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
-                response.raise_for_status()
-
-                # Process the SSE stream
-                full_response = ""
-                current_event = None
-                buffer = ""
-
-                # Use a raw read approach to handle SSE
-                for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
-                    if not chunk:
-                        continue
-
-                    # Decode the chunk and add to buffer
-                    buffer += chunk.decode('utf-8')
-
-                    # Process complete lines in the buffer
-                    while '\n' in buffer:
-                        line, buffer = buffer.split('\n', 1)
-                        line = line.strip()
-
-                        if not line:
-                            continue
-
-                        if line.startswith('event:'):
-                            current_event = line[6:].strip()
-                        elif line.startswith('data:'):
-                            data_content = line[5:].strip()
-                            if data_content and current_event == 'answer':
-                                try:
-                                    json_data = json.loads(data_content)
-                                    if "answer" in json_data and "text" in json_data["answer"]:
-                                        text_chunk = json_data["answer"]["text"]
-                                        # If there's a fullText, use it as it's more complete
-                                        if json_data["answer"].get("fullText") and json_data["answer"].get("status") == "COMPLETED":
-                                            text_chunk = json_data["answer"]["fullText"]
-
-                                        # Extract only new content since last chunk
-                                        new_text = text_chunk[len(full_response):]
-                                        if new_text:
-                                            full_response = text_chunk
-                                            yield new_text if raw else dict(text=new_text)
-                                except json.JSONDecodeError:
-                                    continue
-                            elif data_content and current_event == 'done':
-                                break
-
-                self.last_response.update(dict(text=full_response))
+                response.raise_for_status() # Check for HTTP errors
+
+                # Use sanitize_stream
+                # Note: This won't handle SSE 'event:' lines, only 'data:' lines.
+                # The original code checked for event == 'answer'. We assume relevant data is JSON after 'data:'.
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    content_extractor=self._llmchatco_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                last_yielded_text = ""
+                for current_full_text in processed_stream:
+                    # current_full_text is the full text extracted by _llmchatco_extractor
+                    if current_full_text and isinstance(current_full_text, str):
+                        # Calculate the new part of the text
+                        new_text = current_full_text[len(last_yielded_text):]
+                        if new_text:
+                            full_response = current_full_text # Keep track of the latest full text
+                            last_yielded_text = current_full_text # Update tracker
+                            resp = dict(text=new_text)
+                            # Yield dict or raw string chunk
+                            yield resp if not raw else new_text
+
+                # Update history after stream finishes
+                self.last_response = dict(text=full_response)
                 self.last_assistant_response = full_response
                 self.conversation.update_chat_history(
-                    prompt, self.get_message(self.last_response)
+                    prompt, full_response
                 )
 
-            except requests.exceptions.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}")
-
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
+
         def for_non_stream():
-            full_response = ""
+            # Aggregate the stream using the updated for_stream logic
+            full_response_text = ""
             try:
-                for chunk in for_stream():
-                    if not raw:
-                        full_response += chunk.get('text', '')
-                    else:
-                        full_response += chunk
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_response_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_response_text += chunk_data
+
             except Exception as e:
-                if not full_response:
-                    raise exceptions.FailedToGenerateResponseError(f"Failed to get response: {str(e)}")
-
-            return dict(text=full_response)
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_response_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_response_text if raw else self.last_response
+
 
         return for_stream() if stream else for_non_stream()
 
@@ -259,25 +249,29 @@ class LLMChatCo(Provider):
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response with streaming capabilities"""
 
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally,
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally,
                 web_search=web_search
-            ):
-                yield self.get_message(response)
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
 
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                    web_search=web_search
-                )
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict
+                optimizer=optimizer,
+                conversationally=conversationally,
+                web_search=web_search
             )
+            return self.get_message(response_data) # get_message expects dict
 
-        return for_stream() if stream else for_non_stream()
+        return for_stream_chat() if stream else for_non_stream_chat()
 
     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message from response with validation"""
@@ -285,20 +279,21 @@ class LLMChatCo(Provider):
         return response["text"]
 
 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
-
+
    # Test all available models
     working = 0
     total = len(LLMChatCo.AVAILABLE_MODELS)
-
+
     for model in LLMChatCo.AVAILABLE_MODELS:
         try:
             test_ai = LLMChatCo(model=model, timeout=60)
             response = test_ai.chat("Say 'Hello' in one word")
             response_text = response
-
+
             if response_text and len(response_text.strip()) > 0:
                 status = "✓"
                 # Truncate response if too long
@@ -308,4 +303,4 @@ if __name__ == "__main__":
             display_text = "Empty or invalid response"
         print(f"{model:<50} {status:<10} {display_text}")
     except Exception as e:
-        print(f"{model:<50} {'✗':<10} {str(e)}")
+        print(f"{model:<50} {'✗':<10} {str(e)}")