webscout-8.3.2-py3-none-any.whl → webscout-8.3.4-py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

Files changed (117):
  1. webscout/AIutel.py +367 -41
  2. webscout/Bard.py +2 -22
  3. webscout/Bing_search.py +1 -2
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/scira_search.py +24 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/Deepinfra.py +75 -57
  8. webscout/Provider/ExaChat.py +93 -63
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/HeckAI.py +85 -80
  14. webscout/Provider/Jadve.py +56 -50
  15. webscout/Provider/LambdaChat.py +39 -31
  16. webscout/Provider/MiniMax.py +207 -0
  17. webscout/Provider/Nemotron.py +41 -13
  18. webscout/Provider/Netwrck.py +39 -59
  19. webscout/Provider/OLLAMA.py +8 -9
  20. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  21. webscout/Provider/OPENAI/MiniMax.py +298 -0
  22. webscout/Provider/OPENAI/README.md +31 -30
  23. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  24. webscout/Provider/OPENAI/__init__.py +4 -2
  25. webscout/Provider/OPENAI/autoproxy.py +753 -18
  26. webscout/Provider/OPENAI/base.py +7 -76
  27. webscout/Provider/OPENAI/copilot.py +73 -26
  28. webscout/Provider/OPENAI/deepinfra.py +96 -132
  29. webscout/Provider/OPENAI/exachat.py +9 -5
  30. webscout/Provider/OPENAI/flowith.py +179 -166
  31. webscout/Provider/OPENAI/friendli.py +233 -0
  32. webscout/Provider/OPENAI/monochat.py +329 -0
  33. webscout/Provider/OPENAI/netwrck.py +4 -7
  34. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  35. webscout/Provider/OPENAI/qodo.py +630 -0
  36. webscout/Provider/OPENAI/scirachat.py +82 -49
  37. webscout/Provider/OPENAI/textpollinations.py +13 -12
  38. webscout/Provider/OPENAI/toolbaz.py +1 -0
  39. webscout/Provider/OPENAI/typegpt.py +4 -4
  40. webscout/Provider/OPENAI/utils.py +19 -42
  41. webscout/Provider/OPENAI/x0gpt.py +14 -2
  42. webscout/Provider/OpenGPT.py +54 -32
  43. webscout/Provider/PI.py +58 -84
  44. webscout/Provider/Qodo.py +454 -0
  45. webscout/Provider/StandardInput.py +32 -13
  46. webscout/Provider/TTI/README.md +9 -9
  47. webscout/Provider/TTI/__init__.py +2 -1
  48. webscout/Provider/TTI/aiarta.py +92 -78
  49. webscout/Provider/TTI/infip.py +212 -0
  50. webscout/Provider/TTI/monochat.py +220 -0
  51. webscout/Provider/TeachAnything.py +11 -3
  52. webscout/Provider/TextPollinationsAI.py +91 -82
  53. webscout/Provider/TogetherAI.py +32 -48
  54. webscout/Provider/Venice.py +37 -46
  55. webscout/Provider/VercelAI.py +27 -24
  56. webscout/Provider/WiseCat.py +35 -35
  57. webscout/Provider/WrDoChat.py +22 -26
  58. webscout/Provider/WritingMate.py +26 -22
  59. webscout/Provider/__init__.py +6 -6
  60. webscout/Provider/copilot.py +58 -61
  61. webscout/Provider/freeaichat.py +64 -55
  62. webscout/Provider/granite.py +48 -57
  63. webscout/Provider/koala.py +51 -39
  64. webscout/Provider/learnfastai.py +49 -64
  65. webscout/Provider/llmchat.py +79 -93
  66. webscout/Provider/llmchatco.py +63 -78
  67. webscout/Provider/monochat.py +275 -0
  68. webscout/Provider/multichat.py +51 -40
  69. webscout/Provider/oivscode.py +1 -1
  70. webscout/Provider/scira_chat.py +257 -104
  71. webscout/Provider/scnet.py +13 -13
  72. webscout/Provider/searchchat.py +13 -13
  73. webscout/Provider/sonus.py +12 -11
  74. webscout/Provider/toolbaz.py +25 -8
  75. webscout/Provider/turboseek.py +41 -42
  76. webscout/Provider/typefully.py +27 -12
  77. webscout/Provider/typegpt.py +43 -48
  78. webscout/Provider/uncovr.py +55 -90
  79. webscout/Provider/x0gpt.py +325 -299
  80. webscout/Provider/yep.py +79 -96
  81. webscout/__init__.py +7 -2
  82. webscout/auth/__init__.py +12 -1
  83. webscout/auth/providers.py +27 -5
  84. webscout/auth/routes.py +146 -105
  85. webscout/auth/server.py +367 -312
  86. webscout/client.py +121 -116
  87. webscout/litagent/Readme.md +68 -55
  88. webscout/litagent/agent.py +99 -9
  89. webscout/version.py +1 -1
  90. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
  91. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
  92. webscout/Provider/AI21.py +0 -177
  93. webscout/Provider/HuggingFaceChat.py +0 -469
  94. webscout/Provider/OPENAI/freeaichat.py +0 -363
  95. webscout/Provider/TTI/fastflux.py +0 -233
  96. webscout/Provider/Writecream.py +0 -246
  97. webscout/auth/static/favicon.svg +0 -11
  98. webscout/auth/swagger_ui.py +0 -203
  99. webscout/auth/templates/components/authentication.html +0 -237
  100. webscout/auth/templates/components/base.html +0 -103
  101. webscout/auth/templates/components/endpoints.html +0 -750
  102. webscout/auth/templates/components/examples.html +0 -491
  103. webscout/auth/templates/components/footer.html +0 -75
  104. webscout/auth/templates/components/header.html +0 -27
  105. webscout/auth/templates/components/models.html +0 -286
  106. webscout/auth/templates/components/navigation.html +0 -70
  107. webscout/auth/templates/static/api.js +0 -455
  108. webscout/auth/templates/static/icons.js +0 -168
  109. webscout/auth/templates/static/main.js +0 -784
  110. webscout/auth/templates/static/particles.js +0 -201
  111. webscout/auth/templates/static/styles.css +0 -3353
  112. webscout/auth/templates/static/ui.js +0 -374
  113. webscout/auth/templates/swagger_ui.html +0 -170
  114. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  115. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  116. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  117. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/GeminiProxy.py

@@ -14,10 +14,11 @@ class GeminiProxy(Provider):
     AVAILABLE_MODELS = [
         "gemini-2.0-flash-lite",
         "gemini-2.0-flash",
-        "gemini-2.5-pro-preview-06-05",
-        "gemini-2.5-pro-preview-05-06",
         "gemini-2.5-flash-preview-04-17",
         "gemini-2.5-flash-preview-05-20",
+        "gemini-2.5-flash-lite-preview-06-17",
+        "gemini-2.5-pro",
+        "gemini-2.5-flash",
 
     ]
 
@@ -135,6 +136,31 @@ class GeminiProxy(Provider):
         return str(response)
 
 if __name__ == "__main__":
-    ai = GeminiProxy(timeout=30, model="gemini-2.5-flash-preview-05-20")
-    response = ai.chat("write a poem about AI")
-    print(response)
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(GeminiProxy.AVAILABLE_MODELS)
+
+    for model in GeminiProxy.AVAILABLE_MODELS:
+        try:
+            test_ai = GeminiProxy(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
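For reference, a minimal usage sketch against the updated model list, assuming webscout 8.3.4 is installed and the constructor/chat() signatures shown in the test harness above (the import path is an assumption based on the file list):

# Minimal sketch (not from the diff): exercise one of the newly added models.
from webscout.Provider.GeminiProxy import GeminiProxy  # path assumed from the file list

ai = GeminiProxy(model="gemini-2.5-flash", timeout=30)  # "gemini-2.5-flash" is new in 8.3.4
print(ai.chat("write a poem about AI"))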
webscout/Provider/HeckAI.py

@@ -57,7 +57,7 @@ class HeckAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "google/gemini-2.0-flash-001",
+        model: str = "google/gemini-2.5-flash-preview",
         language: str = "English"
     ):
         """
@@ -177,79 +177,73 @@ class HeckAI(Provider):
         def for_stream():
             streaming_text = "" # Initialize outside try block
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.url,
-                    # headers are set on the session
                     data=json.dumps(payload),
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome110" # Use a common impersonation profile
+                    impersonate="chrome110"
                 )
-                response.raise_for_status() # Check for HTTP errors
+                response.raise_for_status()
 
-                # Use sanitize_stream to process the stream
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=1024), # Pass byte iterator
-                    intro_value="data: ", # Prefix to remove (note the space)
-                    to_json=False, # Content is text
-                    start_marker="data: [ANSWER_START]",
-                    end_marker="data: [ANSWER_DONE]",
-                    skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
-                    yield_raw_on_error=True,
-                    strip_chars=" \n\r\t" # Strip whitespace characters from chunks
+                    data=response.iter_content(chunk_size=1024),
+                    intro_value="data: ",
+                    to_json=False,
+                    start_marker="data: [ANSWER_START]",
+                    end_marker="data: [ANSWER_DONE]",
+                    skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
+                    yield_raw_on_error=True,
+                    strip_chars=" \n\r\t",
+                    raw=raw
                 )
 
                 for content_chunk in processed_stream:
-                    # content_chunk is the text between ANSWER_START and ANSWER_DONE
                     if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        yield dict(text=content_chunk) if not raw else content_chunk
+                        content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                        if raw:
+                            if content_chunk and isinstance(content_chunk, str):
+                                streaming_text += content_chunk
+                                yield content_chunk
+                        else:
+                            if content_chunk and isinstance(content_chunk, str):
+                                streaming_text += content_chunk
+                                yield dict(text=content_chunk)
 
                 # Only update history if we received a valid response
                 if streaming_text:
-                    # Update history and previous answer after stream finishes
                     self.previous_answer = streaming_text
-                    # Convert to simple text before updating conversation
                     try:
-                        # Ensure content is valid before updating conversation
                         if streaming_text and isinstance(streaming_text, str):
-                            # Sanitize the content to ensure it's valid
                             sanitized_text = streaming_text.strip()
-                            if sanitized_text: # Only update if we have non-empty content
+                            if sanitized_text:
                                 self.conversation.update_chat_history(prompt, sanitized_text)
                     except Exception as e:
-                        # If conversation update fails, log but don't crash
                         print(f"Warning: Failed to update conversation history: {str(e)}")
-
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
 
-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             full_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
-                    if isinstance(chunk_data, dict) and "text" in chunk_data:
-                        full_text += chunk_data["text"]
-                    # Handle raw string case if raw=True was passed
-                    elif raw and isinstance(chunk_data, str):
-                        full_text += chunk_data
+                    if raw:
+                        if isinstance(chunk_data, str):
+                            chunk_data = chunk_data.replace('\\\\', '\\').replace('\\"', '"')
+                            full_text += chunk_data
+                    else:
+                        if isinstance(chunk_data, dict) and "text" in chunk_data:
+                            text = chunk_data["text"].replace('\\\\', '\\').replace('\\"', '"')
+                            full_text += text
             except Exception as e:
-                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                if not full_text:
-                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-            # Return the final aggregated response dict or raw string
-            self.last_response = {"text": full_text} # Update last_response here
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+            self.last_response = {"text": full_text}
             return full_text if raw else self.last_response
 
-
         return for_stream() if stream else for_non_stream()
 
     @staticmethod
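The net effect of threading raw through ask() is a stream of plain strings instead of {"text": ...} dicts. A consumer-side sketch, assuming HeckAI behaves as the diff shows (the import path is an assumption based on the file list):

# Sketch only: both loops print the same text; only the chunk shape differs.
from webscout.Provider.HeckAI import HeckAI  # path assumed from the file list

ai = HeckAI()
for chunk in ai.ask("Hi", stream=True, raw=False):  # default: dict chunks
    print(chunk["text"], end="", flush=True)
for chunk in ai.ask("Hi", stream=True, raw=True):   # new in 8.3.4: unescaped string chunks
    print(chunk, end="", flush=True)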
@@ -266,15 +260,15 @@ class HeckAI(Provider):
         if isinstance(text, dict) and "text" in text:
             try:
                 text["text"] = text["text"].encode("latin1").decode("utf-8")
-                return text
+                return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
             except (UnicodeError, AttributeError) as e:
-                return text
+                return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
         elif isinstance(text, str):
             try:
                 return text.encode("latin1").decode("utf-8")
             except (UnicodeError, AttributeError) as e:
-                return text
-        return text
+                return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
+        return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
 
     def chat(
         self,
@@ -282,6 +276,7 @@ class HeckAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """
         Sends a prompt to the HeckAI API and returns only the message text.
@@ -298,18 +293,23 @@ class HeckAI(Provider):
         def for_stream_chat():
             # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                prompt, stream=True, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            for response_dict in gen:
-                yield self.get_message(response_dict) # get_message expects dict
+            for response in gen:
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
 
         def for_non_stream_chat():
             # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=False, # Ensure ask returns dict
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
+            if raw:
+                return response_data if isinstance(response_data, str) else str(response_data)
             return self.get_message(response_data) # get_message expects dict
 
         return for_stream_chat() if stream else for_non_stream_chat()
@@ -338,38 +338,43 @@ class HeckAI(Provider):
         # Ensure text is a string
         text = response["text"]
         if not isinstance(text, str):
-            return str(text)
+            text = str(text)
 
-        return text
+        return text.replace('\\\\', '\\').replace('\\"', '"')
 
 if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in HeckAI.AVAILABLE_MODELS:
-        try:
-            test_ai = HeckAI(model=model, timeout=60)
-            # Use non-streaming mode first to avoid potential streaming issues
-            try:
-                response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
-                print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
-            except Exception as e1:
-                # Fall back to streaming if non-streaming fails
-                print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
-                response = test_ai.chat("Say 'Hello' in one word", stream=True)
-                response_text = ""
-                for chunk in response:
-                    if chunk and isinstance(chunk, str):
-                        response_text += chunk
+    # # Ensure curl_cffi is installed
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
+
+    # for model in HeckAI.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = HeckAI(model=model, timeout=60)
+    #         # Use non-streaming mode first to avoid potential streaming issues
+    #         try:
+    #             response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
+    #             print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
+    #         except Exception as e1:
+    #             # Fall back to streaming if non-streaming fails
+    #             print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
+    #             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+    #             response_text = ""
+    #             for chunk in response:
+    #                 if chunk and isinstance(chunk, str):
+    #                     response_text += chunk
 
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-                print(f"\r{model:<50} {status:<10} {display_text}")
-            else:
-                raise ValueError("Empty or invalid response")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #             print(f"\r{model:<50} {status:<10} {display_text}")
+    #         else:
+    #             raise ValueError("Empty or invalid response")
+    #     except Exception as e:
+    #         print(f"\r{model:<50} {'✗':<10} {str(e)}")
+    from rich import print
+    ai = HeckAI()
+    response = ai.chat("tell me about humans", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end='', flush=True)
webscout/Provider/Jadve.py

@@ -170,16 +170,20 @@ class JadveOpenAI(Provider):
                     intro_value=None, # No simple prefix
                     to_json=False, # Content is text after extraction
                     content_extractor=self._jadve_extractor, # Use the specific extractor
-                    # end_marker="e:", # Add if 'e:' reliably marks the end
-                    yield_raw_on_error=True
+                    yield_raw_on_error=True,
+                    raw=raw
                 )
 
                 for content_chunk in processed_stream:
-                    # content_chunk is the string extracted by _jadve_extractor
-                    if content_chunk and isinstance(content_chunk, str):
-                        full_response_text += content_chunk
-                        resp = {"text": content_chunk}
-                        yield resp if not raw else content_chunk
+                    if raw:
+                        if content_chunk and isinstance(content_chunk, str):
+                            full_response_text += content_chunk
+                            yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            full_response_text += content_chunk
+                            resp = {"text": content_chunk}
+                            yield resp
 
                 # Update history after stream finishes
                 self.last_response = {"text": full_response_text}
@@ -191,30 +195,22 @@ class JadveOpenAI(Provider):
             err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
             raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
 
-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             collected_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
-                    if isinstance(chunk_data, dict) and "text" in chunk_data:
-                        collected_text += chunk_data["text"]
-                    # Handle raw string case if raw=True was passed
-                    elif raw and isinstance(chunk_data, str):
-                        collected_text += chunk_data
+                    if raw:
+                        if isinstance(chunk_data, str):
+                            collected_text += chunk_data
+                    else:
+                        if isinstance(chunk_data, dict) and "text" in chunk_data:
+                            collected_text += chunk_data["text"]
             except Exception as e:
-                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                if not collected_text:
-                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
             # last_response and history are updated within for_stream
-            # Return the final aggregated response dict or raw string
             return collected_text if raw else self.last_response
 
-
-        # Since the API endpoint suggests streaming, always call the stream generator.
-        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()
 
     def chat(
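The non-stream branch above is a plain fold over the stream branch; an illustrative reduction of that aggregation pattern (the helper name is hypothetical, not part of webscout):

# Hypothetical helper distilling the for_non_stream() logic above.
def aggregate(chunks, raw: bool) -> str:
    collected = ""
    for chunk in chunks:
        if raw and isinstance(chunk, str):
            collected += chunk               # raw mode: chunks are plain strings
        elif not raw and isinstance(chunk, dict) and "text" in chunk:
            collected += chunk["text"]       # dict mode: unwrap the "text" key
    return collected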
@@ -223,6 +219,7 @@ class JadveOpenAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """
         Generate a chat response (string).
@@ -232,25 +229,29 @@ class JadveOpenAI(Provider):
             stream (bool, optional): Flag for streaming response. Defaults to False.
             optimizer (str, optional): Prompt optimizer name. Defaults to None.
             conversationally (bool, optional): Flag for conversational optimization. Defaults to False.
+            raw (bool, optional): Return raw response. Defaults to False.
         Returns:
             str or generator: Generated response string or generator yielding response chunks.
         """
         def for_stream_chat():
-            # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                prompt, stream=True, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            for response_dict in gen:
-                yield self.get_message(response_dict) # get_message expects dict
+            for response in gen:
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
 
         def for_non_stream_chat():
-            # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=False, # Ensure ask returns dict
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            return self.get_message(response_data) # get_message expects dict
+            if raw:
+                return response_data if isinstance(response_data, str) else str(response_data)
+            return self.get_message(response_data)
 
         return for_stream_chat() if stream else for_non_stream_chat()
 
@@ -268,24 +269,29 @@ class JadveOpenAI(Provider):
         return response.get("text", "")
 
 if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in JadveOpenAI.AVAILABLE_MODELS:
-        try:
-            test_ai = JadveOpenAI(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
+    # # Ensure curl_cffi is installed
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
+
+    # for model in JadveOpenAI.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = JadveOpenAI(model=model, timeout=60)
+    #         response = test_ai.chat("Say 'Hello' in one word")
+    #         response_text = response
 
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #         else:
+    #             status = "✗"
+    #             display_text = "Empty or invalid response"
+    #         print(f"{model:<50} {status:<10} {display_text}")
+    #     except Exception as e:
+    #         print(f"{model:<50} {'✗':<10} {str(e)}")
+    from rich import print
+    ai = JadveOpenAI()
+    response = ai.chat("tell me about humans", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end='', flush=True)
webscout/Provider/LambdaChat.py

@@ -93,49 +93,49 @@ class LambdaChat(Provider):
         self.session.proxies = proxies # Assign proxies directly
 
     def create_conversation(self, model: str):
-        """Create a new conversation with the specified model."""
+        """Create a new conversation with the specified model, using updated headers and cookies."""
         url = f"{self.url}/conversation"
         payload = {
             "model": model,
-            "preprompt": self.system_prompt,
-
+            "preprompt": self.system_prompt
         }
-
-        # Update referer for this specific request
+
+        # Update headers for this specific request
         headers = self.headers.copy()
-        headers["Referer"] = f"{self.url}/models/{model}"
-
+        headers["Referer"] = f"{self.url}/"
+        # Add browser-like headers for best compatibility
+        headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
+        headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
+        headers["Sec-GPC"] = "1"
+        headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
+        headers["Sec-Ch-Ua-Mobile"] = "?0"
+        headers["Sec-Ch-Ua-Platform"] = '"Windows"'
+        headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
+        headers["Origin"] = self.url
+        # cookies are handled by curl_cffi session automatically
+
         try:
-            # Use curl_cffi session post with impersonate
             response = self.session.post(
-                url,
-                json=payload,
-                headers=headers, # Use updated headers with specific Referer
-                impersonate="chrome110" # Use a common impersonation profile
+                url,
+                json=payload,
+                headers=headers,
+                impersonate="chrome110"
             )
-
             if response.status_code == 401:
                 raise exceptions.AuthenticationError("Authentication failed.")
-
-            # Handle other error codes
             if response.status_code != 200:
                 return None
-
             data = response.json()
             conversation_id = data.get("conversationId")
-
-            # Store conversation data
            if model not in self._conversation_data:
                 self._conversation_data[model] = {
                     "conversationId": conversation_id,
-                    "messageId": str(uuid.uuid4()) # Initial message ID
+                    "messageId": str(uuid.uuid4())
                 }
-
             return conversation_id
-        except CurlError as e: # Catch CurlError
-            # Log or handle CurlError specifically if needed
+        except CurlError:
             return None
-        except Exception: # Catch other potential exceptions (like JSONDecodeError, HTTPError)
+        except Exception:
             return None
 
     def fetch_message_id(self, conversation_id: str) -> str:
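Both request paths in LambdaChat now attach the same Edge-on-Windows header fingerprint. A sketch of that block factored into a helper (the function is hypothetical; the header values and LitAgent().random() call are taken from the diff, and the webscout.litagent import path is an assumption based on the file list):

# Hypothetical refactor of the repeated header block above.
from webscout.litagent import LitAgent  # import path assumed from the file list

def browser_headers(base: dict, origin: str, referer: str) -> dict:
    headers = base.copy()
    headers["Referer"] = referer
    headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
    headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
    headers["Sec-GPC"] = "1"
    headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
    headers["Sec-Ch-Ua-Mobile"] = "?0"
    headers["Sec-Ch-Ua-Platform"] = '"Windows"'
    headers["User-Agent"] = LitAgent().random()  # random desktop UA from webscout's LitAgent
    headers["Origin"] = origin
    return headers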
@@ -230,35 +230,43 @@ class LambdaChat(Provider):
         url = f"{self.url}/conversation/{conversation_id}"
         message_id = self._conversation_data[model]["messageId"]
 
-        # Data to send
+        # Data to send (tools should be empty list by default)
         request_data = {
             "inputs": prompt,
             "id": message_id,
             "is_retry": False,
             "is_continue": False,
             "web_search": web_search,
-            "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
+            "tools": []
         }
-
+
         # Update headers for this specific request
         headers = self.headers.copy()
         headers["Referer"] = f"{self.url}/conversation/{conversation_id}"
-
+        headers["Accept-Encoding"] = "gzip, deflate, br, zstd"
+        headers["Accept-Language"] = "en-US,en;q=0.9,en-IN;q=0.8"
+        headers["Sec-GPC"] = "1"
+        headers["Sec-Ch-Ua"] = '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'
+        headers["Sec-Ch-Ua-Mobile"] = "?0"
+        headers["Sec-Ch-Ua-Platform"] = '"Windows"'
+        headers["User-Agent"] = LitAgent().random() # Use LitAgent for User-Agent
+        headers["Origin"] = self.url
+
         # Create multipart form data
         boundary = self.generate_boundary()
         multipart_headers = headers.copy()
         multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
-
+
         # Serialize the data to JSON
         data_json = json.dumps(request_data, separators=(',', ':'))
-
+
         # Create the multipart form data body
         body = f"--{boundary}\r\n"
         body += f'Content-Disposition: form-data; name="data"\r\n'
-        body += f"Content-Type: application/json\r\n\r\n"
+        body += f"\r\n"
         body += f"{data_json}\r\n"
         body += f"--{boundary}--\r\n"
-
+
         multipart_headers["Content-Length"] = str(len(body))
 
         def for_stream():
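Note that the "data" part no longer declares a part-level Content-Type: application/json header; the JSON payload is now sent as a bare form field. A standalone sketch of the resulting wire format (boundary and payload values are illustrative, not from the library):

# Standalone illustration of the body built above; all values are made up.
import json

request_data = {"inputs": "Hello", "id": "00000000-0000-0000-0000-000000000000",
                "is_retry": False, "is_continue": False,
                "web_search": False, "tools": []}
boundary = "WebScoutBoundary1234"  # illustrative; generate_boundary() supplies the real one
data_json = json.dumps(request_data, separators=(',', ':'))

body = f"--{boundary}\r\n"
body += 'Content-Disposition: form-data; name="data"\r\n'
body += "\r\n"                      # no part-level Content-Type header in 8.3.4
body += f"{data_json}\r\n"
body += f"--{boundary}--\r\n"

headers = {"Content-Type": f"multipart/form-data; boundary={boundary}",
           "Content-Length": str(len(body))}
print(body)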