webscout 8.3.2__py3-none-any.whl → 8.3.4__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

Files changed (117)
  1. webscout/AIutel.py +367 -41
  2. webscout/Bard.py +2 -22
  3. webscout/Bing_search.py +1 -2
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/scira_search.py +24 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/Deepinfra.py +75 -57
  8. webscout/Provider/ExaChat.py +93 -63
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/HeckAI.py +85 -80
  14. webscout/Provider/Jadve.py +56 -50
  15. webscout/Provider/LambdaChat.py +39 -31
  16. webscout/Provider/MiniMax.py +207 -0
  17. webscout/Provider/Nemotron.py +41 -13
  18. webscout/Provider/Netwrck.py +39 -59
  19. webscout/Provider/OLLAMA.py +8 -9
  20. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  21. webscout/Provider/OPENAI/MiniMax.py +298 -0
  22. webscout/Provider/OPENAI/README.md +31 -30
  23. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  24. webscout/Provider/OPENAI/__init__.py +4 -2
  25. webscout/Provider/OPENAI/autoproxy.py +753 -18
  26. webscout/Provider/OPENAI/base.py +7 -76
  27. webscout/Provider/OPENAI/copilot.py +73 -26
  28. webscout/Provider/OPENAI/deepinfra.py +96 -132
  29. webscout/Provider/OPENAI/exachat.py +9 -5
  30. webscout/Provider/OPENAI/flowith.py +179 -166
  31. webscout/Provider/OPENAI/friendli.py +233 -0
  32. webscout/Provider/OPENAI/monochat.py +329 -0
  33. webscout/Provider/OPENAI/netwrck.py +4 -7
  34. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  35. webscout/Provider/OPENAI/qodo.py +630 -0
  36. webscout/Provider/OPENAI/scirachat.py +82 -49
  37. webscout/Provider/OPENAI/textpollinations.py +13 -12
  38. webscout/Provider/OPENAI/toolbaz.py +1 -0
  39. webscout/Provider/OPENAI/typegpt.py +4 -4
  40. webscout/Provider/OPENAI/utils.py +19 -42
  41. webscout/Provider/OPENAI/x0gpt.py +14 -2
  42. webscout/Provider/OpenGPT.py +54 -32
  43. webscout/Provider/PI.py +58 -84
  44. webscout/Provider/Qodo.py +454 -0
  45. webscout/Provider/StandardInput.py +32 -13
  46. webscout/Provider/TTI/README.md +9 -9
  47. webscout/Provider/TTI/__init__.py +2 -1
  48. webscout/Provider/TTI/aiarta.py +92 -78
  49. webscout/Provider/TTI/infip.py +212 -0
  50. webscout/Provider/TTI/monochat.py +220 -0
  51. webscout/Provider/TeachAnything.py +11 -3
  52. webscout/Provider/TextPollinationsAI.py +91 -82
  53. webscout/Provider/TogetherAI.py +32 -48
  54. webscout/Provider/Venice.py +37 -46
  55. webscout/Provider/VercelAI.py +27 -24
  56. webscout/Provider/WiseCat.py +35 -35
  57. webscout/Provider/WrDoChat.py +22 -26
  58. webscout/Provider/WritingMate.py +26 -22
  59. webscout/Provider/__init__.py +6 -6
  60. webscout/Provider/copilot.py +58 -61
  61. webscout/Provider/freeaichat.py +64 -55
  62. webscout/Provider/granite.py +48 -57
  63. webscout/Provider/koala.py +51 -39
  64. webscout/Provider/learnfastai.py +49 -64
  65. webscout/Provider/llmchat.py +79 -93
  66. webscout/Provider/llmchatco.py +63 -78
  67. webscout/Provider/monochat.py +275 -0
  68. webscout/Provider/multichat.py +51 -40
  69. webscout/Provider/oivscode.py +1 -1
  70. webscout/Provider/scira_chat.py +257 -104
  71. webscout/Provider/scnet.py +13 -13
  72. webscout/Provider/searchchat.py +13 -13
  73. webscout/Provider/sonus.py +12 -11
  74. webscout/Provider/toolbaz.py +25 -8
  75. webscout/Provider/turboseek.py +41 -42
  76. webscout/Provider/typefully.py +27 -12
  77. webscout/Provider/typegpt.py +43 -48
  78. webscout/Provider/uncovr.py +55 -90
  79. webscout/Provider/x0gpt.py +325 -299
  80. webscout/Provider/yep.py +79 -96
  81. webscout/__init__.py +7 -2
  82. webscout/auth/__init__.py +12 -1
  83. webscout/auth/providers.py +27 -5
  84. webscout/auth/routes.py +146 -105
  85. webscout/auth/server.py +367 -312
  86. webscout/client.py +121 -116
  87. webscout/litagent/Readme.md +68 -55
  88. webscout/litagent/agent.py +99 -9
  89. webscout/version.py +1 -1
  90. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
  91. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
  92. webscout/Provider/AI21.py +0 -177
  93. webscout/Provider/HuggingFaceChat.py +0 -469
  94. webscout/Provider/OPENAI/freeaichat.py +0 -363
  95. webscout/Provider/TTI/fastflux.py +0 -233
  96. webscout/Provider/Writecream.py +0 -246
  97. webscout/auth/static/favicon.svg +0 -11
  98. webscout/auth/swagger_ui.py +0 -203
  99. webscout/auth/templates/components/authentication.html +0 -237
  100. webscout/auth/templates/components/base.html +0 -103
  101. webscout/auth/templates/components/endpoints.html +0 -750
  102. webscout/auth/templates/components/examples.html +0 -491
  103. webscout/auth/templates/components/footer.html +0 -75
  104. webscout/auth/templates/components/header.html +0 -27
  105. webscout/auth/templates/components/models.html +0 -286
  106. webscout/auth/templates/components/navigation.html +0 -70
  107. webscout/auth/templates/static/api.js +0 -455
  108. webscout/auth/templates/static/icons.js +0 -168
  109. webscout/auth/templates/static/main.js +0 -784
  110. webscout/auth/templates/static/particles.js +0 -201
  111. webscout/auth/templates/static/styles.css +0 -3353
  112. webscout/auth/templates/static/ui.js +0 -374
  113. webscout/auth/templates/swagger_ui.html +0 -170
  114. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  115. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  116. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  117. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/scirachat.py
@@ -321,36 +321,88 @@ class SciraChat(OpenAICompatibleProvider):
         messages=[{"role": "user", "content": "Hello!"}]
     )
     """
-    # List of model display names for registration (aliases)
-    AVAILABLE_MODELS = [
-        "Grok3-mini (thinking)",
-        "Grok3",
-        "Claude 4 Sonnet",
-        "Claude 4 Sonnet Thinking",
-        "Grok2-Vision (vision)",
-        "GPT4o",
-        "QWQ-32B",
-        "o4-mini",
-        "Gemini 2.5 Flash Thinking",
-        "Gemini 2.5 Pro",
-        "Llama 4 Maverick",
-    ]
-    # Mapping from display name to internal model key
-    MODEL_NAME_MAP = {
-        "Grok3-mini (thinking)": "scira-default",
-        "Grok3": "scira-grok-3",
-        "Claude 4 Sonnet": "scira-anthropic",
-        "Claude 4 Sonnet Thinking": "scira-anthropic-thinking",
-        "Grok2-Vision (vision)": "scira-vision",
-        "GPT4o": "scira-4o",
-        "QWQ-32B": "scira-qwq",
-        "o4-mini": "scira-o4-mini",
-        "Gemini 2.5 Flash Thinking": "scira-google",
-        "Gemini 2.5 Pro": "scira-google-pro",
-        "Llama 4 Maverick": "scira-llama-4",
+    # Model mapping: actual model names to Scira API format
+    MODEL_MAPPING = {
+        "grok-3-mini": "scira-default",
+        "grok-3-mini-fast": "scira-x-fast-mini",
+        "grok-3-fast": "scira-x-fast",
+        "gpt-4.1-nano": "scira-nano",
+        "grok-3": "scira-grok-3",
+        "grok-4": "scira-grok-4",
+        "grok-2-vision-1212": "scira-vision",
+        "grok-2-latest": "scira-g2",
+        "gpt-4o-mini": "scira-4o-mini",
+        "o4-mini-2025-04-16": "scira-o4-mini",
+        "o3": "scira-o3",
+        "qwen/qwen3-32b": "scira-qwen-32b",
+        "qwen3-30b-a3b": "scira-qwen-30b",
+        "deepseek-v3-0324": "scira-deepseek-v3",
+        "claude-3-5-haiku-20241022": "scira-haiku",
+        "mistral-small-latest": "scira-mistral",
+        "gemini-2.5-flash-lite-preview-06-17": "scira-google-lite",
+        "gemini-2.5-flash": "scira-google",
+        "gemini-2.5-pro": "scira-google-pro",
+        "claude-sonnet-4-20250514": "scira-anthropic",
+        "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
+        "claude-4-opus-20250514": "scira-opus",
+        "claude-4-opus-20250514-pro": "scira-opus-pro",
+        "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
     }
+    # Reverse mapping: Scira format to actual model names
+    SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
+    # Add special cases for aliases and duplicate mappings
+    SCIRA_TO_MODEL["scira-anthropic-thinking"] = "claude-sonnet-4-20250514"
+    SCIRA_TO_MODEL["scira-opus-pro"] = "claude-4-opus-20250514"
+    SCIRA_TO_MODEL["scira-x-fast"] = "grok-3-fast"
+    SCIRA_TO_MODEL["scira-x-fast-mini"] = "grok-3-mini-fast"
+    SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
+    SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
+    SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
+    SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
+    SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
+    MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
+    # Available models list (actual model names + scira aliases)
+    AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
     # Optional: pretty display names for UI (reverse mapping)
-    MODEL_DISPLAY_NAMES = {v: k for k, v in MODEL_NAME_MAP.items()}
+    MODEL_DISPLAY_NAMES = {v: k for k, v in MODEL_MAPPING.items()}
+
+    @classmethod
+    def _resolve_model(cls, model: str) -> str:
+        """
+        Resolve a model name to its Scira API format.
+
+        Args:
+            model: Either an actual model name or a Scira alias
+
+        Returns:
+            The Scira API format model name
+
+        Raises:
+            ValueError: If the model is not supported
+        """
+        # If it's already a Scira format, return as-is
+        if model in cls.SCIRA_TO_MODEL:
+            return model
+        # If it's an actual model name, convert to Scira format
+        if model in cls.MODEL_MAPPING:
+            return cls.MODEL_MAPPING[model]
+        # Model not found
+        raise ValueError(f"Invalid model: {model}. Choose from: {cls.AVAILABLE_MODELS}")
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Convert model display names or internal keys to ones supported by SciraChat.
+        Args:
+            model: Model name or alias to convert
+        Returns:
+            SciraChat model name
+        """
+        # Use the new _resolve_model logic
+        try:
+            return self._resolve_model(model)
+        except Exception as e:
+            print(f"Warning: {e} Using 'scira-default' instead.")
+            return "scira-default"
 
     def __init__(
         self,
@@ -447,25 +499,6 @@ class SciraChat(OpenAICompatibleProvider):
             print(f"Warning: Error formatting text: {e}")
             return text
 
-    def convert_model_name(self, model: str) -> str:
-        """
-        Convert model display names or internal keys to ones supported by SciraChat.
-
-        Args:
-            model: Model name or alias to convert
-
-        Returns:
-            SciraChat model name
-        """
-        # If model is a display name (alias), map to internal key
-        if model in self.MODEL_NAME_MAP:
-            return self.MODEL_NAME_MAP[model]
-        # If model is already an internal key, return it if valid
-        if model in self.MODEL_DISPLAY_NAMES:
-            return model
-        # Default to scira-default if model not found
-        print(f"Warning: Unknown model '{model}'. Using 'scira-default' instead.")
-        return "scira-default"
 
     @property
     def models(self):
@@ -478,9 +511,9 @@ class SciraChat(OpenAICompatibleProvider):
 if __name__ == "__main__":
     ai = SciraChat()
     response = ai.chat.completions.create(
-        model="Gemini 2.5 Pro",
+        model="grok-3-mini-fast-latest",
         messages=[
-            {"role": "user", "content": "who is pm of india?"}
+            {"role": "user", "content": "who are u?"}
         ],
         stream=True
    )
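
The three hunks above replace SciraChat's display-name aliases with a two-way mapping between real model names and Scira's internal identifiers. A minimal sketch of how the new resolution behaves (assuming webscout 8.3.4 and the module path webscout.Provider.OPENAI.scirachat from the file list; all names are taken from the diff):

```python
# Sketch only: exercises the 8.3.4 model-name resolution shown above.
from webscout.Provider.OPENAI.scirachat import SciraChat

# A real model name resolves to its Scira alias...
assert SciraChat._resolve_model("grok-3-mini") == "scira-default"
assert SciraChat._resolve_model("gemini-2.5-pro") == "scira-google-pro"

# ...and a Scira alias passes through unchanged.
assert SciraChat._resolve_model("scira-anthropic") == "scira-anthropic"

# Anything else raises ValueError; convert_model_name catches it and
# falls back to "scira-default" with a printed warning.
```

Note that the updated `__main__` block passes "grok-3-mini-fast-latest", which appears in neither MODEL_MAPPING nor SCIRA_TO_MODEL, so it would fall back to "scira-default" through convert_model_name.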

webscout/Provider/OPENAI/textpollinations.py
@@ -279,25 +279,26 @@ class TextPollinations(OpenAICompatibleProvider):
         "openai",
         "openai-fast",
         "openai-large",
+        "openai-reasoning",
         "openai-roblox",
-        "qwen-coder",
-        "llama",
+        "openai-audio",
+        "deepseek",
+        "deepseek-reasoning",
+        "grok",
         "llamascout",
         "mistral",
-        "unity",
-        "mirexa",
-        "midijourney",
-        "rtist",
+        "phi",
+        "qwen-coder",
         "searchgpt",
+        "bidara",
+        "elixposearch",
         "evil",
-        "deepseek-reasoning",
-        "phi",
-        "hormoz",
         "hypnosis-tracy",
-        "deepseek",
+        "midijourney",
+        "mirexa",
+        "rtist",
         "sur",
-        "bidara",
-        "openai-audio",
+        "unity",
     ]
 
     def __init__(

webscout/Provider/OPENAI/toolbaz.py
@@ -292,6 +292,7 @@ class Toolbaz(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
         "gemini-2.5-flash",
         "gemini-2.0-flash-thinking",
+        "sonar",
         "gemini-2.0-flash",
         "gemini-1.5-flash",
         "o3-mini",

webscout/Provider/OPENAI/typegpt.py
@@ -286,12 +286,12 @@ class TypeGPT(OpenAICompatibleProvider):
 
     AVAILABLE_MODELS = [
         # Working Models (based on testing)
-        "gpt-4o-mini-2024-07-18",
+        "gpt-4o-mini",
         "chatgpt-4o-latest",
-        "deepseek-r1",
+        # "deepseek-r1",
         "deepseek-v3",
-        "uncensored-r1",
-        "Image-Generator",
+        # "uncensored-r1",
+        # "Image-Generator",
     ]
 
     def __init__(

webscout/Provider/OPENAI/utils.py
@@ -1,10 +1,9 @@
-from typing import List, Dict, Optional, Any, Union, Literal
+from typing import List, Dict, Optional, Any
 from enum import Enum
 import time
 import uuid
 from webscout.Provider.OPENAI.pydantic_imports import (
-    BaseModel, Field, field_validator, model_validator, field_serializer, model_serializer,
-    StrictStr, StrictInt, StrictFloat, StrictBool
+    BaseModel, Field, StrictStr, StrictInt
 )
 
 # --- OpenAI Response Structure Mimics ---
@@ -270,49 +269,27 @@ def get_last_user_message(messages: List[Dict[str, Any]]) -> str:
 
 def count_tokens(text_or_messages: Any) -> int:
     """
-    Count tokens in a string or a list of messages using tiktoken if available, else fallback to webstoken's WordTokenizer.
+    Count tokens in a string or a list of messages using tiktoken.
 
     Args:
         text_or_messages: A string or a list of messages (string or any type).
-        model: Optional model name for tiktoken encoding.
 
     Returns:
         int: Number of tokens.
     """
-    try:
-        import tiktoken
-        # Use tiktoken if available
-        if isinstance(text_or_messages, str):
-            enc = tiktoken.encoding_for_model("gpt-4o")
-            return len(enc.encode(text_or_messages))
-        elif isinstance(text_or_messages, list):
-            enc = tiktoken.encoding_for_model("gpt-4o")
-            total = 0
-            for m in text_or_messages:
-                # Remove .get('content', '') and treat m as string or convert to string
-                if isinstance(m, str):
-                    total += len(enc.encode(m))
-                else:
-                    total += len(enc.encode(str(m)))
-            return total
-        else:
-            return 0
-    except ImportError:
-        # Fallback to webstoken's WordTokenizer
-        try:
-            from webstoken import WordTokenizer
-        except ImportError:
-            return 0
-        tokenizer = WordTokenizer()
-        if isinstance(text_or_messages, str):
-            return len(tokenizer.tokenize(text_or_messages))
-        elif isinstance(text_or_messages, list):
-            total = 0
-            for m in text_or_messages:
-                if isinstance(m, str):
-                    total += len(tokenizer.tokenize(m))
-                else:
-                    total += len(tokenizer.tokenize(str(m)))
-            return total
-        else:
-            return 0
+    import tiktoken
+    if isinstance(text_or_messages, str):
+        enc = tiktoken.encoding_for_model("gpt-4o")
+        return len(enc.encode(text_or_messages))
+    elif isinstance(text_or_messages, list):
+        enc = tiktoken.encoding_for_model("gpt-4o")
+        total = 0
+        for m in text_or_messages:
+            if isinstance(m, str):
+                total += len(enc.encode(m))
+            else:
+                total += len(enc.encode(str(m)))
+        return total
+    else:
+        return 0
+
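
With the webstoken fallback removed, tiktoken becomes a hard requirement of count_tokens: if it is not installed, the function now raises ImportError at call time instead of degrading to a word count. A minimal sketch of the resulting behavior (assuming tiktoken is installed):

```python
# Sketch only: mirrors the simplified count_tokens from webscout/Provider/OPENAI/utils.py.
from webscout.Provider.OPENAI.utils import count_tokens

print(count_tokens("Hello, world!"))           # tokens in a plain string (gpt-4o encoding)
print(count_tokens(["Hi", {"role": "user"}]))  # list items: non-strings are str()-ified first
print(count_tokens(42))                        # any other input type -> 0
```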

webscout/Provider/OPENAI/x0gpt.py
@@ -6,8 +6,8 @@ import json
 from typing import List, Dict, Optional, Union, Generator, Any
 
 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage, count_tokens
 )
@@ -365,3 +365,15 @@ class X0GPT(OpenAICompatibleProvider):
         """
         # X0GPT doesn't actually use model names, but we'll keep this for compatibility
         return model
+
+if __name__ == "__main__":
+    from rich import print
+    client = X0GPT()
+    response = client.chat.completions.create(
+        model="X0GPT",
+        messages=[{"role": "user", "content": "Hello!"}],
+        stream=True
+    )
+
+    for chunk in response:
+        print(chunk, end='', flush=True)

webscout/Provider/OpenGPT.py
@@ -87,7 +87,7 @@ class OpenGPT(Provider):
     def ask(
         self,
         prompt: str,
-        stream: bool = False,  # Note: API does not support streaming
+        stream: bool = False,  # Note: API does not support streaming natively
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -121,38 +121,54 @@ class OpenGPT(Provider):
             "id": self.app_id,
             "userKey": ""  # Assuming userKey is meant to be empty as in the original code
         }
-
-        # API does not stream, implement non-stream logic directly
+
+        def for_stream():
+            try:
+                response = self.session.post(
+                    "https://open-gpt.app/api/generate",
+                    data=json.dumps(payload),
+                    timeout=self.timeout,
+                    impersonate="chrome110"
+                )
+                response.raise_for_status()
+                response_text = response.text
+                buffer = ""
+                chunk_size = 32
+                for i in range(0, len(response_text), chunk_size):
+                    out = response_text[i:i+chunk_size]
+                    if out.strip():
+                        if raw:
+                            yield out
+                        else:
+                            yield {"text": out}
+                self.last_response = {"text": response_text}
+                self.conversation.update_chat_history(prompt, response_text)
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
+
         def for_non_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://open-gpt.app/api/generate",
-                    # headers are set on the session
-                    data=json.dumps(payload),  # Keep data as JSON string
+                    data=json.dumps(payload),
                     timeout=self.timeout,
-                    # proxies are set on the session
-                    impersonate="chrome110"  # Use a common impersonation profile
+                    impersonate="chrome110"
                 )
-
-                response.raise_for_status()  # Check for HTTP errors
-
-                # Use response.text which is already decoded
+                response.raise_for_status()
                 response_text = response.text
                 self.last_response = {"text": response_text}
                 self.conversation.update_chat_history(prompt, response_text)
-
-                # Return dict or raw string based on raw flag
                 return {"raw": response_text} if raw else {"text": response_text}
-
-            except CurlError as e:  # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-            except Exception as e:  # Catch other potential exceptions (like HTTPError, JSONDecodeError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
-
-        # This provider doesn't support streaming, so just return non-stream
-        return for_non_stream()
+
+        return for_stream() if stream else for_non_stream()
 
     def chat(
         self,
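
Note that the new for_stream() still performs a single blocking POST, consistent with the "API does not support streaming natively" comment above: streaming is simulated by re-slicing the complete response into 32-character chunks (the buffer variable it introduces is never used). The idiom in isolation, as a sketch:

```python
# Sketch only: the simulated-streaming idiom used by OpenGPT.ask in 8.3.4.
def pseudo_stream(response_text: str, chunk_size: int = 32):
    # Re-emit an already-complete response in fixed-size slices.
    for i in range(0, len(response_text), chunk_size):
        out = response_text[i:i + chunk_size]
        if out.strip():  # skip whitespace-only slices
            yield {"text": out}

for part in pseudo_stream("The whole body arrives at once; only the delivery is chunked."):
    print(part["text"], end="", flush=True)
```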
@@ -173,21 +189,25 @@ class OpenGPT(Provider):
         Returns:
             A string with the response text.
         """
-        # Since ask() now handles both stream=True/False by returning the full response dict:
-        response_data = self.ask(
-            prompt,
-            stream=False,  # Call ask in non-stream mode internally
-            raw=False,  # Ensure ask returns dict with 'text' key
-            optimizer=optimizer,
-            conversationally=conversationally
-        )
-        # If stream=True was requested, simulate streaming by yielding the full message at once
         if stream:
             def stream_wrapper():
-                yield self.get_message(response_data)  # yield only the text string
+                for part in self.ask(
+                    prompt,
+                    stream=True,
+                    raw=False,
+                    optimizer=optimizer,
+                    conversationally=conversationally
+                ):
+                    yield self.get_message(part) if isinstance(part, dict) else part
             return stream_wrapper()
         else:
-            # If stream=False, return the full message directly
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False,
+                optimizer=optimizer,
+                conversationally=conversationally
+            )
             return self.get_message(response_data)
 
     def get_message(self, response: dict) -> str:
@@ -206,4 +226,6 @@ class OpenGPT(Provider):
 
 if __name__ == "__main__":
     ai = OpenGPT()
-    print(ai.chat("Hello, how are you?"))
+    response = ai.chat("write me about humans in points", stream=True)
+    for part in response:
+        print(part, end="", flush=True)