webscout 8.2.3__py3-none-any.whl → 8.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
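The common thread across the provider diffs below is a migration from requests to curl_cffi: sessions come from curl_cffi.requests.Session, headers and proxies are attached to the session once, each POST passes impersonate="chrome110", and curl_cffi.CurlError is caught separately from other failures. A minimal sketch of that pattern, with a placeholder URL and payload (not webscout code):

from curl_cffi import CurlError
from curl_cffi.requests import Session

def stream_post(url: str, payload: dict, timeout: int = 30):
    """Yield decoded lines from a streaming POST, using the curl_cffi pattern seen in the diffs."""
    session = Session()
    session.headers.update({"Content-Type": "application/json"})
    session.proxies = {}  # proxies are assigned directly on the session
    try:
        response = session.post(
            url,
            json=payload,
            stream=True,
            timeout=timeout,
            impersonate="chrome110",  # ask curl_cffi to mimic a Chrome TLS/HTTP fingerprint
        )
        response.raise_for_status()
        for line_bytes in response.iter_lines():
            if line_bytes:
                yield line_bytes.decode("utf-8", errors="replace")
    except CurlError as e:
        raise RuntimeError(f"Request failed (CurlError): {e}") from e

# Example (placeholder endpoint, illustrative only):
# for line in stream_post("https://example.invalid/api", {"prompt": "hello"}):
#     print(line)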
webscout/Provider/llama3mitril.py (+86 -51)

@@ -1,13 +1,13 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
- import re
- from typing import Union, Any, Dict, Optional, Generator
+ from typing import Union, Any, Dict, Generator
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
- from webscout.litagent import LitAgent as Lit
+

  class Llama3Mitril(Provider):
  """
@@ -29,7 +29,7 @@ class Llama3Mitril(Provider):
  temperature: float = 0.8,
  ):
  """Initializes the Llama3Mitril API."""
- self.session = requests.Session()
+ self.session = Session()
  self.is_conversation = is_conversation
  self.max_tokens = max_tokens
  self.temperature = temperature
@@ -40,7 +40,6 @@ class Llama3Mitril(Provider):
  self.headers = {
  "Content-Type": "application/json",
  "DNT": "1",
- "User-Agent": Lit().random(),
  }
  self.__available_optimizers = (
  method
@@ -58,6 +57,8 @@ class Llama3Mitril(Provider):
  is_conversation, self.max_tokens, filepath, update_file
  )
  self.conversation.history_offset = history_offset
+ # Update curl_cffi session headers and proxies
+ self.session.headers.update(self.headers)
  self.session.proxies = proxies

  def _format_prompt(self, prompt: str) -> str:
@@ -73,7 +74,7 @@ class Llama3Mitril(Provider):
  def ask(
  self,
  prompt: str,
- stream: bool = True,
+ stream: bool = True, # API supports streaming
  raw: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
@@ -100,66 +101,99 @@ class Llama3Mitril(Provider):
  }

  def for_stream():
- response = self.session.post(
- self.api_endpoint,
- headers=self.headers,
- json=data,
- stream=True,
- timeout=self.timeout
- )
- if not response.ok:
- raise exceptions.FailedToGenerateResponseError(
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ streaming_response = "" # Initialize outside try block
+ try:
+ # Use curl_cffi session post with impersonate
+ response = self.session.post(
+ self.api_endpoint,
+ # headers are set on the session
+ json=data,
+ stream=True,
+ timeout=self.timeout,
+ # proxies are set on the session
+ impersonate="chrome110" # Use a common impersonation profile
+ )
+ response.raise_for_status() # Check for HTTP errors
+
+ # Iterate over bytes and decode manually
+ for line_bytes in response.iter_lines():
+ if line_bytes:
+ try:
+ line = line_bytes.decode('utf-8')
+ if line.startswith('data: '):
+ chunk_str = line.split('data: ', 1)[1]
+ chunk = json.loads(chunk_str)
+ if token_text := chunk.get('token', {}).get('text'):
+ if '<|eot_id|>' not in token_text:
+ streaming_response += token_text
+ resp = {"text": token_text}
+ # Yield dict or raw string chunk
+ yield resp if not raw else token_text
+ except (json.JSONDecodeError, IndexError, UnicodeDecodeError) as e:
+ # Ignore errors in parsing specific lines
+ continue
+
+ # Update history after stream finishes
+ self.last_response = {"text": streaming_response}
+ self.conversation.update_chat_history(
+ prompt, streaming_response
  )

- streaming_response = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- try:
- chunk = json.loads(line.split('data: ')[1])
- if token_text := chunk.get('token', {}).get('text'):
- if '<|eot_id|>' not in token_text:
- streaming_response += token_text
- yield token_text if raw else {"text": token_text}
- except (json.JSONDecodeError, IndexError) as e:
- continue
-
- self.last_response.update({"text": streaming_response})
- self.conversation.update_chat_history(
- prompt, self.get_message(self.last_response)
- )
+ except CurlError as e: # Catch CurlError
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+ except Exception as e: # Catch other potential exceptions (like HTTPError)
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+ raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e

  def for_non_stream():
- full_response = ""
- for chunk in for_stream():
- full_response += chunk if raw else chunk['text']
- return {"text": full_response}
+ # Aggregate the stream using the updated for_stream logic
+ full_response_text = ""
+ try:
+ # Ensure raw=False so for_stream yields dicts
+ for chunk_data in for_stream():
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
+ full_response_text += chunk_data["text"]
+ # Handle raw string case if raw=True was passed
+ elif raw and isinstance(chunk_data, str):
+ full_response_text += chunk_data
+ except Exception as e:
+ # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+ if not full_response_text:
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+ # last_response and history are updated within for_stream
+ # Return the final aggregated response dict or raw string
+ return full_response_text if raw else self.last_response

  return for_stream() if stream else for_non_stream()

  def chat(
  self,
  prompt: str,
- stream: bool = True,
+ stream: bool = True, # Default to True as API supports it
  optimizer: str = None,
  conversationally: bool = False,
  ) -> Union[str, Generator[str, None, None]]:
  """Generates a response from the Llama3 Mitril API."""

- def for_stream():
- for response in self.ask(
- prompt, stream=True, optimizer=optimizer, conversationally=conversationally
- ):
- yield self.get_message(response)
-
- def for_non_stream():
- return self.get_message(
- self.ask(
- prompt, stream=False, optimizer=optimizer, conversationally=conversationally
- )
+ def for_stream_chat():
+ # ask() yields dicts or strings when streaming
+ gen = self.ask(
+ prompt, stream=True, raw=False, # Ensure ask yields dicts
+ optimizer=optimizer, conversationally=conversationally
  )
+ for response_dict in gen:
+ yield self.get_message(response_dict) # get_message expects dict
+
+ def for_non_stream_chat():
+ # ask() returns dict or str when not streaming
+ response_data = self.ask(
+ prompt, stream=False, raw=False, # Ensure ask returns dict
+ optimizer=optimizer, conversationally=conversationally
+ )
+ return self.get_message(response_data) # get_message expects dict

- return for_stream() if stream else for_non_stream()
+ return for_stream_chat() if stream else for_non_stream_chat()

  def get_message(self, response: Dict[str, Any]) -> str:
  """Extracts the message from the API response."""
@@ -168,8 +202,9 @@ class Llama3Mitril(Provider):


  if __name__ == "__main__":
+ # Ensure curl_cffi is installed
  from rich import print
-
+
  ai = Llama3Mitril(
  max_tokens=2048,
  temperature=0.8,
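The public chat() surface of Llama3Mitril is unchanged by this refactor; both paths now run through the same streaming generator in ask(). A usage sketch, assuming the class imports from its module path as listed above (webscout/Provider/llama3mitril.py) and that the remaining constructor arguments keep their defaults:

from webscout.Provider.llama3mitril import Llama3Mitril  # import path inferred from the file list

ai = Llama3Mitril(max_tokens=2048, temperature=0.8)

# stream=True: chat() wraps ask(stream=True, raw=False) and yields plain strings
for chunk in ai.chat("Hello there", stream=True):
    print(chunk, end="", flush=True)
print()

# stream=False: ask() aggregates the same stream internally; chat() returns one string
print(ai.chat("Summarize that in one line", stream=False))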
webscout/Provider/llmchat.py (+88 -46)

@@ -1,4 +1,5 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
  from typing import Union, Any, Dict, Optional, Generator, List

@@ -44,7 +45,8 @@ class LLMChat(Provider):
  if model not in self.AVAILABLE_MODELS:
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

- self.session = requests.Session()
+ # Initialize curl_cffi Session
+ self.session = Session()
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
  self.api_endpoint = "https://llmchat.in/inference/stream"
@@ -56,7 +58,6 @@ class LLMChat(Provider):
  self.headers = {
  "Content-Type": "application/json",
  "Accept": "*/*",
- "User-Agent": Lit().random(),
  "Origin": "https://llmchat.in",
  "Referer": "https://llmchat.in/"
  }
@@ -79,7 +80,10 @@ class LLMChat(Provider):
  is_conversation, self.max_tokens_to_sample, filepath, update_file
  )
  self.conversation.history_offset = history_offset
- self.session.proxies = proxies
+
+ # Update curl_cffi session headers and proxies
+ self.session.headers.update(self.headers)
+ self.session.proxies = proxies # Assign proxies directly

  def ask(
  self,
@@ -88,7 +92,7 @@ class LLMChat(Provider):
  raw: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
- ) -> Dict[str, Any]:
+ ) -> Union[Dict[str, Any], Generator[Any, None, None]]: # Corrected return type hint
  """Chat with LLMChat with logging capabilities"""

  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
@@ -109,46 +113,79 @@ class LLMChat(Provider):
  {"role": "user", "content": conversation_prompt}
  ],
  "max_tokens": self.max_tokens_to_sample,
- "stream": stream
+ "stream": True # API seems to always stream based on endpoint name
  }

  def for_stream():
+ full_response = "" # Initialize outside try block
  try:
+ # Use curl_cffi session post with impersonate
+ response = self.session.post(
+ url,
+ json=payload,
+ stream=True,
+ timeout=self.timeout,
+ impersonate="chrome110" # Use a common impersonation profile
+ )
+ response.raise_for_status() # Check for HTTP errors

- with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
- response.raise_for_status()
-
- full_response = ""
- for line in response.iter_lines():
- if line:
- line = line.decode('utf-8')
+ # Iterate over bytes and decode manually
+ for line_bytes in response.iter_lines():
+ if line_bytes:
+ try:
+ line = line_bytes.decode('utf-8')
  if line.startswith('data: '):
+ data_str = line[6:]
+ if data_str == '[DONE]':
+ break
  try:
- data = json.loads(line[6:])
+ data = json.loads(data_str)
  if data.get('response'):
  response_text = data['response']
  full_response += response_text
- yield response_text if raw else dict(text=response_text)
+ resp = dict(text=response_text)
+ # Yield dict or raw string chunk
+ yield resp if not raw else response_text
  except json.JSONDecodeError:
- if line.strip() != 'data: [DONE]':
- continue
-
- self.last_response.update(dict(text=full_response))
- self.conversation.update_chat_history(
- prompt, self.get_message(self.last_response)
- )
+ continue # Ignore invalid JSON data
+ except UnicodeDecodeError:
+ continue # Ignore decoding errors
+
+ # Update history after stream finishes
+ self.last_response = dict(text=full_response)
+ self.conversation.update_chat_history(
+ prompt, full_response
+ )

- except requests.exceptions.RequestException as e:
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+ except CurlError as e: # Catch CurlError
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+ except Exception as e: # Catch other potential exceptions (like HTTPError)
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+ raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e

  def for_non_stream():
-
- full_response = ""
- for line in for_stream():
- full_response += line['text'] if not raw else line
-
- return dict(text=full_response)
-
+ # Aggregate the stream using the updated for_stream logic
+ full_response_text = ""
+ try:
+ # Ensure raw=False so for_stream yields dicts
+ for chunk_data in for_stream():
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
+ full_response_text += chunk_data["text"]
+ # Handle raw string case if raw=True was passed
+ elif raw and isinstance(chunk_data, str):
+ full_response_text += chunk_data
+ except Exception as e:
+ # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+ if not full_response_text:
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+ # last_response and history are updated within for_stream
+ # Return the final aggregated response dict or raw string
+ return full_response_text if raw else self.last_response
+
+
+ # Since the API endpoint suggests streaming, always call the stream generator.
+ # The non-stream wrapper will handle aggregation if stream=False.
  return for_stream() if stream else for_non_stream()

  def chat(
@@ -160,23 +197,27 @@ class LLMChat(Provider):
  ) -> Union[str, Generator[str, None, None]]:
  """Generate response with logging capabilities"""

- def for_stream():
- for response in self.ask(
- prompt, True, optimizer=optimizer, conversationally=conversationally
- ):
- yield self.get_message(response)
-
- def for_non_stream():
- return self.get_message(
- self.ask(
- prompt,
- False,
- optimizer=optimizer,
- conversationally=conversationally,
- )
+ def for_stream_chat():
+ # ask() yields dicts or strings when streaming
+ gen = self.ask(
+ prompt, stream=True, raw=False, # Ensure ask yields dicts
+ optimizer=optimizer, conversationally=conversationally
+ )
+ for response_dict in gen:
+ yield self.get_message(response_dict) # get_message expects dict
+
+ def for_non_stream_chat():
+ # ask() returns dict or str when not streaming
+ response_data = self.ask(
+ prompt,
+ stream=False,
+ raw=False, # Ensure ask returns dict
+ optimizer=optimizer,
+ conversationally=conversationally,
  )
+ return self.get_message(response_data) # get_message expects dict

- return for_stream() if stream else for_non_stream()
+ return for_stream_chat() if stream else for_non_stream_chat()

  def get_message(self, response: Dict[str, Any]) -> str:
  """Retrieves message from response with validation"""
@@ -184,6 +225,7 @@ class LLMChat(Provider):
  return response["text"]

  if __name__ == "__main__":
+ # Ensure curl_cffi is installed
  print("-" * 80)
  print(f"{'Model':<50} {'Status':<10} {'Response'}")
  print("-" * 80)
webscout/Provider/llmchatco.py (+74 -49)

@@ -1,4 +1,5 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
  import uuid
  import re
@@ -37,7 +38,7 @@ class LLMChatCo(Provider):
  def __init__(
  self,
  is_conversation: bool = True,
- max_tokens: int = 2048,
+ max_tokens: int = 2048, # Note: max_tokens is not used by this API
  timeout: int = 60,
  intro: str = None,
  filepath: str = None,
@@ -55,7 +56,8 @@ class LLMChatCo(Provider):
  if model not in self.AVAILABLE_MODELS:
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

- self.session = requests.Session()
+ # Initialize curl_cffi Session
+ self.session = Session()
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
  self.api_endpoint = "https://llmchat.co/api/completion"
@@ -65,21 +67,22 @@ class LLMChatCo(Provider):
  self.system_prompt = system_prompt
  self.thread_id = str(uuid.uuid4()) # Generate a unique thread ID for conversations

- # Create LitAgent instance for user agent generation
+ # Create LitAgent instance (keep if needed for other headers)
  lit_agent = Lit()

  # Headers based on the provided request
  self.headers = {
  "Content-Type": "application/json",
  "Accept": "text/event-stream",
- "User-Agent": lit_agent.random(),
+ "User-Agent": lit_agent.random(),
  "Accept-Language": "en-US,en;q=0.9",
  "Origin": "https://llmchat.co",
  "Referer": f"https://llmchat.co/chat/{self.thread_id}",
  "DNT": "1",
  "Sec-Fetch-Dest": "empty",
  "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin"
+ "Sec-Fetch-Site": "same-origin",
+ # Add sec-ch-ua headers if needed for impersonation consistency
  }

  self.__available_optimizers = (
@@ -100,7 +103,9 @@ class LLMChatCo(Provider):
  is_conversation, self.max_tokens_to_sample, filepath, update_file
  )
  self.conversation.history_offset = history_offset
- self.session.proxies = proxies
+ # Update curl_cffi session headers and proxies
+ self.session.headers.update(self.headers)
+ self.session.proxies = proxies # Assign proxies directly
  # Store message history for conversation context
  self.last_assistant_response = ""

@@ -167,29 +172,31 @@ class LLMChatCo(Provider):
  }

  def for_stream():
+ full_response = "" # Initialize outside try block
  try:
- # Set up the streaming request
+ # Use curl_cffi session post with impersonate
  response = self.session.post(
  self.api_endpoint,
  json=payload,
- headers=self.headers,
+ # headers are set on the session
  stream=True,
- timeout=self.timeout
+ timeout=self.timeout,
+ # proxies are set on the session
+ impersonate="chrome110" # Use a common impersonation profile
  )
- response.raise_for_status()
+ response.raise_for_status() # Check for HTTP errors

  # Process the SSE stream
- full_response = ""
  current_event = None
  buffer = ""

- # Use a raw read approach to handle SSE
- for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
+ # Iterate over bytes and decode manually
+ for chunk in response.iter_content(chunk_size=None, decode_unicode=False): # Use chunk_size=None for better SSE handling
  if not chunk:
  continue

  # Decode the chunk and add to buffer
- buffer += chunk.decode('utf-8')
+ buffer += chunk.decode('utf-8', errors='replace') # Use replace for potential errors

  # Process complete lines in the buffer
  while '\n' in buffer:
@@ -215,37 +222,50 @@ class LLMChatCo(Provider):
  # Extract only new content since last chunk
  new_text = text_chunk[len(full_response):]
  if new_text:
- full_response = text_chunk
- yield new_text if raw else dict(text=new_text)
+ full_response = text_chunk # Update full response tracker
+ resp = dict(text=new_text)
+ # Yield dict or raw string chunk
+ yield resp if not raw else new_text
  except json.JSONDecodeError:
- continue
+ continue # Ignore invalid JSON data
  elif data_content and current_event == 'done':
- break
+ # Handle potential final data before done event if needed
+ break # Exit loop on 'done' event

- self.last_response.update(dict(text=full_response))
+ # Update history after stream finishes
+ self.last_response = dict(text=full_response)
  self.last_assistant_response = full_response
  self.conversation.update_chat_history(
- prompt, self.get_message(self.last_response)
+ prompt, full_response
  )

- except requests.exceptions.RequestException as e:
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
- except Exception as e:
- raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {str(e)}")
+ except CurlError as e: # Catch CurlError
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+ except Exception as e: # Catch other potential exceptions (like HTTPError)
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+ raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e

  def for_non_stream():
- full_response = ""
+ # Aggregate the stream using the updated for_stream logic
+ full_response_text = ""
  try:
- for chunk in for_stream():
- if not raw:
- full_response += chunk.get('text', '')
- else:
- full_response += chunk
+ # Ensure raw=False so for_stream yields dicts
+ for chunk_data in for_stream():
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
+ full_response_text += chunk_data["text"]
+ # Handle raw string case if raw=True was passed
+ elif raw and isinstance(chunk_data, str):
+ full_response_text += chunk_data
+
  except Exception as e:
- if not full_response:
- raise exceptions.FailedToGenerateResponseError(f"Failed to get response: {str(e)}")
+ # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+ if not full_response_text:
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

- return dict(text=full_response)
+ # last_response and history are updated within for_stream
+ # Return the final aggregated response dict or raw string
+ return full_response_text if raw else self.last_response
+

  return for_stream() if stream else for_non_stream()

@@ -259,25 +279,29 @@ class LLMChatCo(Provider):
  ) -> Union[str, Generator[str, None, None]]:
  """Generate response with streaming capabilities"""

- def for_stream():
- for response in self.ask(
- prompt, True, optimizer=optimizer, conversationally=conversationally,
+ def for_stream_chat():
+ # ask() yields dicts or strings when streaming
+ gen = self.ask(
+ prompt, stream=True, raw=False, # Ensure ask yields dicts
+ optimizer=optimizer, conversationally=conversationally,
  web_search=web_search
- ):
- yield self.get_message(response)
+ )
+ for response_dict in gen:
+ yield self.get_message(response_dict) # get_message expects dict

- def for_non_stream():
- return self.get_message(
- self.ask(
- prompt,
- False,
- optimizer=optimizer,
- conversationally=conversationally,
- web_search=web_search
- )
+ def for_non_stream_chat():
+ # ask() returns dict or str when not streaming
+ response_data = self.ask(
+ prompt,
+ stream=False,
+ raw=False, # Ensure ask returns dict
+ optimizer=optimizer,
+ conversationally=conversationally,
+ web_search=web_search
  )
+ return self.get_message(response_data) # get_message expects dict

- return for_stream() if stream else for_non_stream()
+ return for_stream_chat() if stream else for_non_stream_chat()

  def get_message(self, response: Dict[str, Any]) -> str:
  """Retrieves message from response with validation"""
@@ -285,6 +309,7 @@ class LLMChatCo(Provider):
  return response["text"]

  if __name__ == "__main__":
+ # Ensure curl_cffi is installed
  print("-" * 80)
  print(f"{'Model':<50} {'Status':<10} {'Response'}")
  print("-" * 80)