webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
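
The dominant change in this release, visible in the Hunyuan.py and Jadve.py diffs reproduced below, is a migration of the provider HTTP clients from requests to curl_cffi with browser impersonation. A minimal sketch of that pattern follows; the endpoint URL and payload are placeholders, not webscout code:

# Sketch only: placeholder endpoint and payload, not part of webscout.
from curl_cffi.requests import Session
from curl_cffi import CurlError

session = Session()
session.headers.update({"Content-Type": "application/json"})
session.proxies = {}  # curl_cffi accepts a plain dict assignment

try:
    # impersonate gives the request a Chrome-like TLS/HTTP fingerprint,
    # which is why the diffs drop hand-rolled User-Agent/Sec-CH-UA headers.
    response = session.post(
        "https://example.com/api/chat",  # placeholder URL
        json={"stream": True, "messages": []},
        stream=True,
        timeout=30,
        impersonate="chrome110",
    )
    response.raise_for_status()
    for line_bytes in response.iter_lines():  # curl_cffi yields bytes
        line = line_bytes.decode("utf-8", errors="ignore").strip()
        if line.startswith("data: "):
            print(line[6:])
except CurlError as e:
    raise RuntimeError(f"Request failed (CurlError): {e}") from e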
--- a/webscout/Provider/Hunyuan.py
+++ b/webscout/Provider/Hunyuan.py
@@ -1,4 +1,5 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
  import os
  from typing import Any, Dict, Optional, Generator, Union
@@ -26,7 +27,7 @@ class Hunyuan(Provider):
  def __init__(
  self,
  is_conversation: bool = True,
- max_tokens: int = 2048,
+ max_tokens: int = 2048, # Note: max_tokens is not used by this API
  timeout: int = 30,
  intro: str = None,
  filepath: str = None,
@@ -35,7 +36,7 @@ class Hunyuan(Provider):
  history_offset: int = 10250,
  act: str = None,
  model: str = "hunyuan-t1-latest",
- browser: str = "chrome",
+ browser: str = "chrome", # Note: browser fingerprinting might be less effective with impersonate
  api_key: str = None,
  system_prompt: str = "You are a helpful assistant.",
  ):
@@ -46,28 +47,23 @@ class Hunyuan(Provider):

  self.url = "https://llm.hunyuan.tencent.com/aide/api/v2/triton_image/demo_text_chat/"

- # Initialize LitAgent for user agent generation
+ # Initialize LitAgent (keep if needed for other headers or logic)
  self.agent = LitAgent()
- # Use fingerprinting to create a consistent browser identity
- self.fingerprint = self.agent.generate_fingerprint(browser)
+ # Fingerprint generation might be less relevant with impersonate
+ self.fingerprint = self.agent.generate_fingerprint(browser)

- # Use the fingerprint for headers
+ # Use the fingerprint for headers (keep relevant ones)
  self.headers = {
  "Accept": "*/*",
- "Accept-Encoding": "gzip, deflate, br, zstd",
- "Accept-Language": self.fingerprint["accept_language"],
+ "Accept-Language": self.fingerprint["accept_language"], # Keep Accept-Language
  "Content-Type": "application/json",
- "DNT": "1",
- "Origin": "https://llm.hunyuan.tencent.com",
- "Referer": "https://llm.hunyuan.tencent.com/",
- "Sec-CH-UA": f'"{self.fingerprint["sec_ch_ua"]}"' or '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
- "Sec-CH-UA-Mobile": "?0",
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
- "Sec-Fetch-Dest": "empty",
+ "DNT": "1", # Keep DNT
+ "Origin": "https://llm.hunyuan.tencent.com", # Keep Origin
+ "Referer": "https://llm.hunyuan.tencent.com/", # Keep Referer
+ "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-*
  "Sec-Fetch-Mode": "cors",
  "Sec-Fetch-Site": "same-origin",
- "Sec-GPC": "1",
- "User-Agent": self.fingerprint["user_agent"],
+ "Sec-GPC": "1", # Keep Sec-GPC
  }

  # Add authorization if API key is provided
@@ -77,9 +73,11 @@ class Hunyuan(Provider):
  # Default test key (may not work long-term)
  self.headers["Authorization"] = "Bearer 7auGXNATFSKl7dF"

- self.session = requests.Session()
+ # Initialize curl_cffi Session
+ self.session = Session()
+ # Update curl_cffi session headers and proxies
  self.session.headers.update(self.headers)
- self.session.proxies.update(proxies)
+ self.session.proxies = proxies # Assign proxies directly
  self.system_message = system_prompt
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
@@ -115,24 +113,20 @@ class Hunyuan(Provider):
  browser = browser or self.fingerprint.get("browser_type", "chrome")
  self.fingerprint = self.agent.generate_fingerprint(browser)

- # Update headers with new fingerprint
+ # Update headers with new fingerprint (only relevant ones)
  self.headers.update({
  "Accept-Language": self.fingerprint["accept_language"],
- "Sec-CH-UA": f'"{self.fingerprint["sec_ch_ua"]}"' or self.headers["Sec-CH-UA"],
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
- "User-Agent": self.fingerprint["user_agent"],
  })

  # Update session headers
- for header, value in self.headers.items():
- self.session.headers[header] = value
+ self.session.headers.update(self.headers) # Update only relevant headers

  return self.fingerprint

  def ask(
  self,
  prompt: str,
- stream: bool = False,
+ stream: bool = False, # API supports streaming
  raw: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
@@ -140,9 +134,7 @@ class Hunyuan(Provider):
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
  if optimizer:
  if optimizer in self.__available_optimizers:
- conversation_prompt = getattr(Optimizers, optimizer)(
- conversation_prompt if conversationally else prompt
- )
+ conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
  else:
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

@@ -152,7 +144,7 @@ class Hunyuan(Provider):

  # Payload construction
  payload = {
- "stream": stream,
+ "stream": True, # API seems to require stream=True based on response format
  "model": self.model,
  "query_id": query_id,
  "messages": [
@@ -164,66 +156,74 @@ class Hunyuan(Provider):
  }

  def for_stream():
+ streaming_text = "" # Initialize outside try block
  try:
- with self.session.post(self.url, data=json.dumps(payload), stream=True, timeout=self.timeout, verify=False) as response:
- if response.status_code != 200:
- raise exceptions.FailedToGenerateResponseError(
- f"Request failed with status code {response.status_code}"
- )
+ # Use curl_cffi session post with impersonate
+ response = self.session.post(
+ self.url,
+ data=json.dumps(payload),
+ stream=True,
+ timeout=self.timeout,
+ impersonate="chrome110" # Use a common impersonation profile
+ )
+ response.raise_for_status() # Check for HTTP errors

- streaming_text = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- line = line.strip()
+ # Iterate over bytes and decode manually
+ for line_bytes in response.iter_lines():
+ if line_bytes:
+ try:
+ line = line_bytes.decode('utf-8').strip()
  if line.startswith("data: "):
  json_str = line[6:]
  if json_str == "[DONE]":
  break
- try:
- json_data = json.loads(json_str)
- if 'choices' in json_data:
- choice = json_data['choices'][0]
- if 'delta' in choice and 'content' in choice['delta']:
- content = choice['delta']['content']
+ json_data = json.loads(json_str)
+ if 'choices' in json_data:
+ choice = json_data['choices'][0]
+ if 'delta' in choice and 'content' in choice['delta']:
+ content = choice['delta']['content']
+ if content: # Ensure content is not None or empty
  streaming_text += content
  resp = dict(text=content)
- yield resp if raw else resp
- except json.JSONDecodeError:
- continue
+ # Yield dict or raw string chunk
+ yield resp if not raw else content
+ except (json.JSONDecodeError, UnicodeDecodeError):
+ continue # Ignore lines that are not valid JSON or cannot be decoded

- self.last_response = {"text": streaming_text}
- self.conversation.update_chat_history(prompt, streaming_text)
+ # Update history after stream finishes
+ self.last_response = {"text": streaming_text}
+ self.conversation.update_chat_history(prompt, streaming_text)

- except requests.RequestException as e:
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+ except CurlError as e: # Catch CurlError
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+ except Exception as e: # Catch other potential exceptions (like HTTPError)
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+ raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
+

  def for_non_stream():
+ # Aggregate the stream using the updated for_stream logic
+ full_text = ""
  try:
- response = self.session.post(self.url, data=json.dumps(payload), timeout=self.timeout, verify=False)
- if response.status_code != 200:
- raise exceptions.FailedToGenerateResponseError(
- f"Request failed with status code {response.status_code}"
- )
-
- # Process non-streaming response (need to parse all lines)
- full_text = ""
- for line in response.text.split('\n'):
- if line.startswith("data: ") and line[6:] != "[DONE]":
- try:
- json_data = json.loads(line[6:])
- if 'choices' in json_data:
- choice = json_data['choices'][0]
- if 'delta' in choice and 'content' in choice['delta']:
- full_text += choice['delta']['content']
- except json.JSONDecodeError:
- continue
-
- self.last_response = {"text": full_text}
- self.conversation.update_chat_history(prompt, full_text)
- return {"text": full_text}
+ # Ensure raw=False so for_stream yields dicts
+ for chunk_data in for_stream():
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
+ full_text += chunk_data["text"]
+ # Handle raw string case if raw=True was passed
+ elif raw and isinstance(chunk_data, str):
+ full_text += chunk_data
  except Exception as e:
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+ # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+ if not full_text:
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

+ # last_response and history are updated within for_stream
+ # Return the final aggregated response dict or raw string
+ return full_text if raw else self.last_response
+
+
+ # Since the API endpoint suggests streaming, always call the stream generator.
+ # The non-stream wrapper will handle aggregation if stream=False.
  return for_stream() if stream else for_non_stream()

  def chat(
@@ -233,20 +233,31 @@ class Hunyuan(Provider):
  optimizer: str = None,
  conversationally: bool = False,
  ) -> Union[str, Generator[str, None, None]]:
- def for_stream():
- for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
- yield self.get_message(response)
- def for_non_stream():
- return self.get_message(
- self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+ def for_stream_chat():
+ # ask() yields dicts or strings when streaming
+ gen = self.ask(
+ prompt, stream=True, raw=False, # Ensure ask yields dicts
+ optimizer=optimizer, conversationally=conversationally
  )
- return for_stream() if stream else for_non_stream()
+ for response_dict in gen:
+ yield self.get_message(response_dict) # get_message expects dict
+
+ def for_non_stream_chat():
+ # ask() returns dict or str when not streaming
+ response_data = self.ask(
+ prompt, stream=False, raw=False, # Ensure ask returns dict
+ optimizer=optimizer, conversationally=conversationally
+ )
+ return self.get_message(response_data) # get_message expects dict
+
+ return for_stream_chat() if stream else for_non_stream_chat()

  def get_message(self, response: dict) -> str:
  assert isinstance(response, dict), "Response should be of dict data-type only"
  return response["text"]

  if __name__ == "__main__":
+ # Ensure curl_cffi is installed
  print("-" * 80)
  print(f"{'Model':<50} {'Status':<10} {'Response'}")
  print("-" * 80)
--- a/webscout/Provider/Jadve.py
+++ b/webscout/Provider/Jadve.py
@@ -1,4 +1,5 @@
- import requests
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlError
  import json
  import re
  from typing import Union, Any, Dict, Optional, Generator
@@ -27,7 +28,7 @@ class JadveOpenAI(Provider):
  history_offset: int = 10250,
  act: str = None,
  model: str = "gpt-4o-mini",
- system_prompt: str = "You are a helpful AI assistant."
+ system_prompt: str = "You are a helpful AI assistant." # Note: system_prompt is not used by this API
  ):
  """
  Initializes the JadveOpenAI client.
@@ -48,7 +49,8 @@ class JadveOpenAI(Provider):
  if model not in self.AVAILABLE_MODELS:
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

- self.session = requests.Session()
+ # Initialize curl_cffi Session
+ self.session = Session()
  self.is_conversation = is_conversation
  self.max_tokens_to_sample = max_tokens
  self.api_endpoint = "https://openai.jadve.com/stream"
@@ -61,24 +63,21 @@ class JadveOpenAI(Provider):
  # Headers for API requests
  self.headers = {
  "accept": "*/*",
- "accept-encoding": "gzip, deflate, br, zstd",
  "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
  "content-type": "application/json",
  "dnt": "1",
  "origin": "https://jadve.com",
- "priority": "u=1, i",
+ "priority": "u=1, i", # Keep priority header if needed
  "referer": "https://jadve.com/",
- "sec-ch-ua": '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Windows"',
  "sec-fetch-dest": "empty",
  "sec-fetch-mode": "cors",
  "sec-fetch-site": "same-site",
- "user-agent": LitAgent().random(),
- "x-authorization": "Bearer"
+ "x-authorization": "Bearer" # Keep custom headers
  }
+
+ # Update curl_cffi session headers and proxies
  self.session.headers.update(self.headers)
- self.session.proxies = proxies
+ self.session.proxies = proxies # Assign proxies directly

  self.__available_optimizers = (
  method for method in dir(Optimizers)
@@ -101,7 +100,7 @@ class JadveOpenAI(Provider):
  def ask(
  self,
  prompt: str,
- stream: bool = False,
+ stream: bool = False, # API supports streaming
  raw: bool = False,
  optimizer: str = None,
  conversationally: bool = False,
@@ -121,83 +120,109 @@ class JadveOpenAI(Provider):
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
  if optimizer:
  if optimizer in self.__available_optimizers:
- conversation_prompt = getattr(Optimizers, optimizer)(
- conversation_prompt if conversationally else prompt
- )
+ conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
  else:
- raise Exception(
- f"Optimizer is not one of {list(self.__available_optimizers)}"
- )
+ raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")

  payload = {
  "messages": [
+ {"role": "system", "content": self.system_prompt},
  {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
  ],
  "model": self.model,
  "botId": "",
  "chatId": "",
- "stream": stream,
+ "stream": True, # API endpoint suggests streaming is default/required
  "temperature": 0.7,
  "returnTokensUsage": True,
  "useTools": False
  }

  def for_stream():
- response = self.session.post(
- self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
- )
-
- if not response.ok:
- raise exceptions.FailedToGenerateResponseError(
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+ full_response_text = "" # Initialize outside try block
+ try:
+ # Use curl_cffi session post with impersonate
+ response = self.session.post(
+ self.api_endpoint,
+ # headers are set on the session
+ json=payload,
+ stream=True,
+ timeout=self.timeout,
+ # proxies are set on the session
+ impersonate="chrome110" # Use a common impersonation profile
  )
+ response.raise_for_status() # Check for HTTP errors

- # Pattern to match the streaming chunks format: 0:"text"
- pattern = r'0:"(.*?)"'
- full_response_text = ""
-
- # Process the response as it comes in
- buffer = ""
-
- for line in response.iter_lines(decode_unicode=True):
- if not line:
- continue
+ # Pattern to match the streaming chunks format: 0:"text"
+ pattern = r'0:"(.*?)"'
+ buffer = ""

- buffer += line
-
- # Try to match chunks in the current buffer
- matches = re.findall(pattern, buffer)
- if matches:
- for chunk in matches:
- full_response_text += chunk
- # Return the current chunk
- yield chunk if raw else dict(text=chunk)
+ # Iterate over bytes and decode manually
+ for line_bytes in response.iter_lines():
+ if not line_bytes:
+ continue

- # Remove matched parts from the buffer
- matched_parts = [f'0:"{match}"' for match in matches]
- for part in matched_parts:
- buffer = buffer.replace(part, '', 1)
-
- # Check if we've reached the end of the response
- if 'e:' in line or 'd:' in line:
- # No need to process usage data without logging
- break
+ try:
+ line = line_bytes.decode('utf-8')
+ buffer += line
+
+ # Try to match chunks in the current buffer
+ matches = re.findall(pattern, buffer)
+ if matches:
+ for chunk in matches:
+ # Handle potential escape sequences like \\n
+ decoded_chunk = chunk.encode().decode('unicode_escape')
+ full_response_text += decoded_chunk
+ resp = {"text": decoded_chunk}
+ # Yield dict or raw string chunk
+ yield resp if not raw else decoded_chunk
+
+ # Remove matched parts from the buffer
+ # Be careful with buffer modification during iteration if issues arise
+ matched_parts = [f'0:"{match}"' for match in matches]
+ for part in matched_parts:
+ buffer = buffer.replace(part, '', 1)
+
+ # Check if we've reached the end of the response
+ if 'e:' in line or 'd:' in line:
+ break
+ except UnicodeDecodeError:
+ continue # Ignore decoding errors for specific lines
+
+ # Update history after stream finishes
+ self.last_response = {"text": full_response_text}
+ self.conversation.update_chat_history(prompt, full_response_text)
+
+ except CurlError as e: # Catch CurlError
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+ except Exception as e: # Catch other potential exceptions (like HTTPError)
+ err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+ raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e

- self.last_response.update(dict(text=full_response_text))
- self.conversation.update_chat_history(prompt, self.get_message(self.last_response))

  def for_non_stream():
- # For non-streaming requests, we collect all chunks and return the complete response
+ # Aggregate the stream using the updated for_stream logic
  collected_text = ""
- for chunk in for_stream():
- if raw:
- collected_text += chunk
- else:
- collected_text += chunk.get("text", "")
-
- self.last_response = {"text": collected_text}
- return self.last_response
+ try:
+ # Ensure raw=False so for_stream yields dicts
+ for chunk_data in for_stream():
+ if isinstance(chunk_data, dict) and "text" in chunk_data:
+ collected_text += chunk_data["text"]
+ # Handle raw string case if raw=True was passed
+ elif raw and isinstance(chunk_data, str):
+ collected_text += chunk_data
+ except Exception as e:
+ # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+ if not collected_text:
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+ # last_response and history are updated within for_stream
+ # Return the final aggregated response dict or raw string
+ return collected_text if raw else self.last_response
+

+ # Since the API endpoint suggests streaming, always call the stream generator.
+ # The non-stream wrapper will handle aggregation if stream=False.
  return for_stream() if stream else for_non_stream()

  def chat(
@@ -208,7 +233,7 @@ class JadveOpenAI(Provider):
  conversationally: bool = False,
  ) -> Union[str, Generator[str, None, None]]:
  """
- Generate a chat response (string).
+ Generate a chat response (string).

  Args:
  prompt (str): Prompt to be sent.
@@ -218,18 +243,24 @@ class JadveOpenAI(Provider):
  Returns:
  str or generator: Generated response string or generator yielding response chunks.
  """
- def for_stream():
- for response in self.ask(
- prompt, stream=True, optimizer=optimizer, conversationally=conversationally
- ):
- yield self.get_message(response)
+ def for_stream_chat():
+ # ask() yields dicts or strings when streaming
+ gen = self.ask(
+ prompt, stream=True, raw=False, # Ensure ask yields dicts
+ optimizer=optimizer, conversationally=conversationally
+ )
+ for response_dict in gen:
+ yield self.get_message(response_dict) # get_message expects dict

- def for_non_stream():
- return self.get_message(
- self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally)
+ def for_non_stream_chat():
+ # ask() returns dict or str when not streaming
+ response_data = self.ask(
+ prompt, stream=False, raw=False, # Ensure ask returns dict
+ optimizer=optimizer, conversationally=conversationally
  )
+ return self.get_message(response_data) # get_message expects dict

- return for_stream() if stream else for_non_stream()
+ return for_stream_chat() if stream else for_non_stream_chat()

  def get_message(self, response: dict) -> str:
  """
@@ -244,6 +275,7 @@ class JadveOpenAI(Provider):
  return response["text"]

  if __name__ == "__main__":
+ # Ensure curl_cffi is installed
  print("-" * 80)
  print(f"{'Model':<50} {'Status':<10} {'Response'}")
  print("-" * 80)