webscout 8.2.4__py3-none-any.whl → 8.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (110)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Provider/AISEARCH/scira_search.py +3 -5
  6. webscout/Provider/Aitopia.py +75 -51
  7. webscout/Provider/AllenAI.py +64 -67
  8. webscout/Provider/ChatGPTClone.py +33 -34
  9. webscout/Provider/ChatSandbox.py +342 -0
  10. webscout/Provider/Cloudflare.py +79 -32
  11. webscout/Provider/Deepinfra.py +69 -56
  12. webscout/Provider/ElectronHub.py +48 -39
  13. webscout/Provider/ExaChat.py +36 -20
  14. webscout/Provider/GPTWeb.py +24 -18
  15. webscout/Provider/GithubChat.py +52 -49
  16. webscout/Provider/GizAI.py +285 -0
  17. webscout/Provider/Glider.py +39 -28
  18. webscout/Provider/Groq.py +48 -20
  19. webscout/Provider/HeckAI.py +18 -36
  20. webscout/Provider/Jadve.py +30 -37
  21. webscout/Provider/LambdaChat.py +36 -59
  22. webscout/Provider/MCPCore.py +18 -21
  23. webscout/Provider/Marcus.py +23 -14
  24. webscout/Provider/Nemotron.py +218 -0
  25. webscout/Provider/Netwrck.py +35 -26
  26. webscout/Provider/OPENAI/__init__.py +1 -1
  27. webscout/Provider/OPENAI/exachat.py +4 -0
  28. webscout/Provider/OPENAI/scirachat.py +3 -4
  29. webscout/Provider/OPENAI/textpollinations.py +20 -22
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/PI.py +22 -13
  32. webscout/Provider/StandardInput.py +42 -30
  33. webscout/Provider/TeachAnything.py +24 -12
  34. webscout/Provider/TextPollinationsAI.py +78 -76
  35. webscout/Provider/TwoAI.py +120 -88
  36. webscout/Provider/TypliAI.py +305 -0
  37. webscout/Provider/Venice.py +24 -22
  38. webscout/Provider/VercelAI.py +31 -12
  39. webscout/Provider/WiseCat.py +1 -1
  40. webscout/Provider/WrDoChat.py +370 -0
  41. webscout/Provider/__init__.py +11 -13
  42. webscout/Provider/ai4chat.py +5 -3
  43. webscout/Provider/akashgpt.py +59 -66
  44. webscout/Provider/asksteve.py +53 -44
  45. webscout/Provider/cerebras.py +77 -31
  46. webscout/Provider/chatglm.py +47 -37
  47. webscout/Provider/elmo.py +38 -32
  48. webscout/Provider/freeaichat.py +57 -43
  49. webscout/Provider/granite.py +24 -21
  50. webscout/Provider/hermes.py +27 -20
  51. webscout/Provider/learnfastai.py +25 -20
  52. webscout/Provider/llmchatco.py +48 -78
  53. webscout/Provider/multichat.py +13 -3
  54. webscout/Provider/scira_chat.py +50 -30
  55. webscout/Provider/scnet.py +27 -21
  56. webscout/Provider/searchchat.py +16 -24
  57. webscout/Provider/sonus.py +37 -39
  58. webscout/Provider/toolbaz.py +24 -46
  59. webscout/Provider/turboseek.py +37 -41
  60. webscout/Provider/typefully.py +30 -22
  61. webscout/Provider/typegpt.py +47 -51
  62. webscout/Provider/uncovr.py +46 -40
  63. webscout/__init__.py +0 -1
  64. webscout/cli.py +256 -0
  65. webscout/conversation.py +305 -448
  66. webscout/exceptions.py +3 -0
  67. webscout/swiftcli/__init__.py +80 -794
  68. webscout/swiftcli/core/__init__.py +7 -0
  69. webscout/swiftcli/core/cli.py +297 -0
  70. webscout/swiftcli/core/context.py +104 -0
  71. webscout/swiftcli/core/group.py +241 -0
  72. webscout/swiftcli/decorators/__init__.py +28 -0
  73. webscout/swiftcli/decorators/command.py +221 -0
  74. webscout/swiftcli/decorators/options.py +220 -0
  75. webscout/swiftcli/decorators/output.py +252 -0
  76. webscout/swiftcli/exceptions.py +21 -0
  77. webscout/swiftcli/plugins/__init__.py +9 -0
  78. webscout/swiftcli/plugins/base.py +135 -0
  79. webscout/swiftcli/plugins/manager.py +262 -0
  80. webscout/swiftcli/utils/__init__.py +59 -0
  81. webscout/swiftcli/utils/formatting.py +252 -0
  82. webscout/swiftcli/utils/parsing.py +267 -0
  83. webscout/version.py +1 -1
  84. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/METADATA +166 -45
  85. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/RECORD +89 -89
  86. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/WHEEL +1 -1
  87. webscout-8.2.6.dist-info/entry_points.txt +3 -0
  88. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -1
  89. inferno/__init__.py +0 -6
  90. inferno/__main__.py +0 -9
  91. inferno/cli.py +0 -6
  92. inferno/lol.py +0 -589
  93. webscout/LLM.py +0 -442
  94. webscout/Local/__init__.py +0 -12
  95. webscout/Local/__main__.py +0 -9
  96. webscout/Local/api.py +0 -576
  97. webscout/Local/cli.py +0 -516
  98. webscout/Local/config.py +0 -75
  99. webscout/Local/llm.py +0 -287
  100. webscout/Local/model_manager.py +0 -253
  101. webscout/Local/server.py +0 -721
  102. webscout/Local/utils.py +0 -93
  103. webscout/Provider/Chatify.py +0 -175
  104. webscout/Provider/PizzaGPT.py +0 -228
  105. webscout/Provider/askmyai.py +0 -158
  106. webscout/Provider/gaurish.py +0 -244
  107. webscout/Provider/promptrefine.py +0 -193
  108. webscout/Provider/tutorai.py +0 -270
  109. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  110. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/TextPollinationsAI.py

@@ -2,7 +2,9 @@ from curl_cffi.requests import Session
 from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator, Optional, List
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+
+import requests
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent as Lit

@@ -13,32 +15,30 @@ class TextPollinationsAI(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "openai",                    # OpenAI GPT-4.1-nano (Azure) - vision capable
-        "openai-large",              # OpenAI GPT-4.1 mini (Azure) - vision capable
-        "openai-reasoning",          # OpenAI o4-mini (Azure) - vision capable, reasoning
-        "qwen-coder",                # Qwen 2.5 Coder 32B (Scaleway)
-        "llama",                     # Llama 3.3 70B (Cloudflare)
-        "llamascout",                # Llama 4 Scout 17B (Cloudflare)
-        "mistral",                   # Mistral Small 3 (Scaleway) - vision capable
-        "unity",                     # Unity Mistral Large (Scaleway) - vision capable, uncensored
-        "midijourney",               # Midijourney (Azure)
-        "rtist",                     # Rtist (Azure)
-        "searchgpt",                 # SearchGPT (Azure) - vision capable
-        "evil",                      # Evil (Scaleway) - vision capable, uncensored
-        "deepseek-reasoning",        # DeepSeek-R1 Distill Qwen 32B (Cloudflare) - reasoning
-        "deepseek-reasoning-large",  # DeepSeek R1 - Llama 70B (Scaleway) - reasoning
-        "phi",                       # Phi-4 Instruct (Cloudflare) - vision and audio capable
-        "llama-vision",              # Llama 3.2 11B Vision (Cloudflare) - vision capable
-        "gemini",                    # gemini-2.5-flash-preview-04-17 (Azure) - vision and audio capable
-        "hormoz",                    # Hormoz 8b (Modal)
-        "hypnosis-tracy",            # Hypnosis Tracy 7B (Azure) - audio capable
-        "deepseek",                  # DeepSeek-V3 (DeepSeek)
-        "sur",                       # Sur AI Assistant (Mistral) (Scaleway) - vision capable
-        "openai-audio",              # OpenAI GPT-4o-audio-preview (Azure) - vision and audio capable
+        "openai",
+        "openai-large",
+        "qwen-coder",
+        "llama",
+        "llamascout",
+        "mistral",
+        "unity",
+        "midijourney",
+        "rtist",
+        "searchgpt",
+        "evil",
+        "deepseek-reasoning",
+        "deepseek-reasoning-large",
+        "phi",
+        "llama-vision",
+        "hormoz",
+        "hypnosis-tracy",
+        "deepseek",
+        "sur",
+        "openai-audio",
     ]
+    _models_url = "https://text.pollinations.ai/models"
 
-    def __init__(
-        self,
+    def __init__(self,
         is_conversation: bool = True,
         max_tokens: int = 8096, # Note: max_tokens is not directly used by this API endpoint
         timeout: int = 30,
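The model list above drops `openai-reasoning` and `gemini`, and the new `_models_url` class attribute (paired with the `import requests` added in the first hunk) points at the live Pollinations model registry. A minimal sketch of how a caller might query it — the import path is taken from the file list, and the JSON response shape is an assumption, since neither appears in this diff:

    import requests

    from webscout.Provider.TextPollinationsAI import TextPollinationsAI

    # Fetch the live model list from the endpoint exposed by the new class attribute.
    resp = requests.get(TextPollinationsAI._models_url, timeout=30)
    resp.raise_for_status()
    print(resp.json())  # assumed to be a JSON document describing the available models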
@@ -52,10 +52,6 @@ class TextPollinationsAI(Provider):
         system_prompt: str = "You are a helpful AI assistant.",
     ):
         """Initializes the TextPollinationsAI API client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        # Initialize curl_cffi Session
         self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens

@@ -66,6 +62,10 @@ class TextPollinationsAI(Provider):
         self.model = model
         self.system_prompt = system_prompt
 
+        # Validate against the hardcoded list
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
         self.headers = {
             'Accept': '*/*',
             'Accept-Language': 'en-US,en;q=0.9',

@@ -96,6 +96,7 @@ class TextPollinationsAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+
     def ask(
         self,
         prompt: str,

@@ -148,40 +149,34 @@ class TextPollinationsAI(Provider):
                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     )
 
-                full_response = ""
-                # Iterate over bytes and decode manually
-                for line_bytes in response.iter_lines():
-                    if line_bytes:
-                        line = line_bytes.decode('utf-8').strip()
-                        if line == "data: [DONE]":
-                            break
-                        if line.startswith('data: '):
-                            try:
-                                json_data = json.loads(line[6:])
-                                if 'choices' in json_data and len(json_data['choices']) > 0:
-                                    choice = json_data['choices'][0]
-                                    if 'delta' in choice:
-                                        if 'content' in choice['delta'] and choice['delta']['content'] is not None:
-                                            content = choice['delta']['content']
-                                            full_response += content
-                                            # Yield dict or raw string
-                                            yield content if raw else dict(text=content)
-                                        elif 'tool_calls' in choice['delta']:
-                                            # Handle tool calls in streaming response
-                                            tool_calls = choice['delta']['tool_calls']
-                                            # Yield dict or raw list
-                                            yield tool_calls if raw else dict(tool_calls=tool_calls)
-                            except json.JSONDecodeError:
-                                continue
-                            except UnicodeDecodeError:
-                                continue
+                streaming_text = ""
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    # Extractor handles both content and tool_calls
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta') if isinstance(chunk, dict) else None,
+                    yield_raw_on_error=False # Skip non-JSON or lines where extractor fails
+                )
+
+                for delta in processed_stream:
+                    # delta is the extracted 'delta' object or None
+                    if delta and isinstance(delta, dict):
+                        if 'content' in delta and delta['content'] is not None:
+                            content = delta['content']
+                            streaming_text += content
+                            yield content if raw else dict(text=content)
+                        elif 'tool_calls' in delta:
+                            tool_calls = delta['tool_calls']
+                            yield tool_calls if raw else dict(tool_calls=tool_calls)
 
                 # Update history and last response after stream finishes
-                # Note: last_response might only contain text, not tool calls if they occurred
-                self.last_response.update(dict(text=full_response))
-                if full_response: # Only update history if text was received
+                self.last_response.update(dict(text=streaming_text)) # Store aggregated text
+                if streaming_text: # Only update history if text was received
                     self.conversation.update_chat_history(
-                        prompt, full_response # Use the fully aggregated text
+                        prompt, streaming_text # Use the fully aggregated text
                     )
             except CurlError as e: # Catch CurlError
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
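Judging by the arguments in this hunk, `sanitize_stream` strips a `data:` SSE prefix, JSON-decodes each event, skips `[DONE]` markers, and maps each parsed object through `content_extractor`, silently dropping lines that fail (`yield_raw_on_error=False`). A toy stand-in illustrating that pipeline under those assumptions — not webscout's actual implementation:

    import json

    def sse_delta_stream(byte_chunks, intro="data:", skip=("[DONE]",)):
        """Toy equivalent of sanitize_stream as configured above."""
        for raw in byte_chunks:
            for line in raw.decode("utf-8", errors="ignore").splitlines():
                line = line.strip()
                if not line.startswith(intro):
                    continue
                payload = line[len(intro):].strip()
                if payload in skip:
                    continue  # mirrors skip_markers=["[DONE]"]
                try:
                    event = json.loads(payload)
                except json.JSONDecodeError:
                    continue  # mirrors yield_raw_on_error=False
                if isinstance(event, dict):
                    # Same extraction as the lambda passed to content_extractor.
                    delta = (event.get("choices") or [{}])[0].get("delta")
                    if delta is not None:
                        yield delta

    chunks = [b'data: {"choices": [{"delta": {"content": "Hi"}}]}\n\ndata: [DONE]\n']
    assert list(sse_delta_stream(chunks)) == [{"content": "Hi"}]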
@@ -193,22 +188,27 @@ class TextPollinationsAI(Provider):
             # Aggregate the stream using the updated for_stream logic
             final_content = ""
             tool_calls_aggregated = None # To store potential tool calls
-            for chunk_data in for_stream():
-                if isinstance(chunk_data, dict):
-                    if "text" in chunk_data:
-                        final_content += chunk_data["text"]
-                    elif "tool_calls" in chunk_data:
-                        # Aggregate tool calls (simple aggregation, might need refinement)
-                        if tool_calls_aggregated is None:
-                            tool_calls_aggregated = []
-                        tool_calls_aggregated.extend(chunk_data["tool_calls"])
-                elif isinstance(chunk_data, str): # Handle raw stream case
-                    final_content += chunk_data
-                # Handle raw tool calls list if raw=True
-                elif isinstance(chunk_data, list) and raw:
-                    if tool_calls_aggregated is None:
-                        tool_calls_aggregated = []
-                    tool_calls_aggregated.extend(chunk_data)
+            try: # Add try block for potential errors during aggregation
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict):
+                        if "text" in chunk_data:
+                            final_content += chunk_data["text"]
+                        elif "tool_calls" in chunk_data:
+                            # Aggregate tool calls (simple aggregation, might need refinement)
+                            if tool_calls_aggregated is None:
+                                tool_calls_aggregated = []
+                            tool_calls_aggregated.extend(chunk_data["tool_calls"])
+                    elif isinstance(chunk_data, str): # Handle raw stream case
+                        final_content += chunk_data
+                    # Handle raw tool calls list if raw=True
+                    elif isinstance(chunk_data, list) and raw:
+                        if tool_calls_aggregated is None:
+                            tool_calls_aggregated = []
+                        tool_calls_aggregated.extend(chunk_data)
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not final_content and not tool_calls_aggregated:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
 
 
             # last_response and history are updated within for_stream (for text)

@@ -263,6 +263,7 @@ class TextPollinationsAI(Provider):
         elif "tool_calls" in response:
             # For tool calls, return a string representation
             return json.dumps(response["tool_calls"])
+        return "" # Return empty string if neither text nor tool_calls found
 
 if __name__ == "__main__":
     # Ensure curl_cffi is installed
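With the added fallback, `get_message` now returns a value on all three paths. A quick sketch of the resulting behavior — the `"text"` branch is implied rather than shown in this hunk, and the assumption that constructing the provider makes no network request rests on the unchanged parts of `__init__`:

    from webscout.Provider.TextPollinationsAI import TextPollinationsAI  # import path assumed from the file list

    ai = TextPollinationsAI()
    assert ai.get_message({"text": "hello"}) == "hello"                      # text branch (implied)
    assert ai.get_message({"tool_calls": [{"id": "1"}]}) == '[{"id": "1"}]'  # JSON string of tool calls
    assert ai.get_message({}) == ""                                          # new fallback branch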
@@ -274,6 +275,7 @@ if __name__ == "__main__":
     working = 0
     total = len(TextPollinationsAI.AVAILABLE_MODELS)
 
+
     for model in TextPollinationsAI.AVAILABLE_MODELS:
         try:
             test_ai = TextPollinationsAI(model=model, timeout=60)
webscout/Provider/TwoAI.py

@@ -2,20 +2,25 @@ from curl_cffi.requests import Session
 from curl_cffi import CurlError
 import json
 from typing import Any, Dict, Optional, Generator, Union
+import re # Import re for parsing SSE
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
+
+
 class TwoAI(Provider):
     """
-    A class to interact with the Two AI API with LitAgent user-agent.
+    A class to interact with the Two AI API (v2) with LitAgent user-agent.
     """
 
     AVAILABLE_MODELS = [
-        "sutra-light",
+        "sutra-v2",
+        "sutra-r0"
+
     ]
 
     def __init__(

@@ -30,26 +35,25 @@ class TwoAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "sutra-light",
+        model: str = "sutra-v2", # Update default model
         temperature: float = 0.6,
         system_message: str = "You are a helpful assistant."
     ):
         """Initializes the TwoAI API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-        self.url = "https://api.two.app/v1/sutra-light/completion"
+        self.url = "https://api.two.app/v2/chat/completions" # Update API endpoint
         self.headers = {
             'User-Agent': LitAgent().random(),
-            'Accept': 'application/json',
+            'Accept': 'application/json', # Keep application/json for request, response is text/event-stream
             'Content-Type': 'application/json',
             'X-Session-Token': api_key,
             'Origin': 'https://chat.two.ai',
             'Referer': 'https://api.two.app/'
         }
-
+
         # Initialize curl_cffi Session
         self.session = Session()
-        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
 
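With the move from `sutra-light` to the v2 model family, existing callers must pass `sutra-v2` or `sutra-r0`; anything else now trips the `ValueError` above. A minimal sketch — the import path is assumed from the file layout, and the token is a placeholder:

    from webscout.Provider.TwoAI import TwoAI

    ai = TwoAI(api_key="YOUR_SESSION_TOKEN", model="sutra-v2")   # accepted
    # TwoAI(api_key="YOUR_SESSION_TOKEN", model="sutra-light")   # would now raise ValueError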
@@ -79,6 +83,19 @@ class TwoAI(Provider):
         )
         self.conversation.history_offset = history_offset
 
+    @staticmethod
+    def _twoai_extractor(chunk_json: Dict[str, Any]) -> Optional[str]:
+        """Extracts content from TwoAI v2 stream JSON objects."""
+        if not isinstance(chunk_json, dict) or "choices" not in chunk_json or not chunk_json["choices"]:
+            return None
+
+        delta = chunk_json["choices"][0].get("delta")
+        if not isinstance(delta, dict):
+            return None
+
+        content = delta.get("content")
+        return content if isinstance(content, str) else None
+
     def ask(
         self,
         prompt: str,
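Because `_twoai_extractor` is a pure static method, its contract is easy to pin down with sample events; the shapes below are inferred from the extractor's own checks, not from any documented v2 wire format:

    from webscout.Provider.TwoAI import TwoAI  # import path assumed from the file layout

    # A well-formed v2 chunk yields its delta content.
    assert TwoAI._twoai_extractor({"choices": [{"delta": {"content": "Hello"}}]}) == "Hello"

    # Everything else degrades to None, which sanitize_stream then drops.
    assert TwoAI._twoai_extractor({"choices": []}) is None
    assert TwoAI._twoai_extractor({"choices": [{"delta": {}}]}) is None
    assert TwoAI._twoai_extractor("not a dict") is None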
@@ -87,7 +104,6 @@ class TwoAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         online_search: bool = True,
-        reasoning_on: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:

@@ -96,78 +112,92 @@ class TwoAI(Provider):
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
-        # Payload construction
         payload = {
             "messages": [
-                {"role": "system", "content": self.system_message},
+                *([{"role": "system", "content": self.system_message}] if self.system_message else []),
                 {"role": "user", "content": conversation_prompt},
             ],
             "model": self.model,
             "temperature": self.temperature,
             "max_tokens": self.max_tokens_to_sample,
-            "reasoningOn": reasoning_on,
-            "onlineSearch": online_search
+            "extra_body": {
+                "online_search": online_search,
+            }
         }
 
         def for_stream():
+            streaming_text = "" # Initialize outside try block
            try:
-                # Use curl_cffi session post with impersonate
                response = self.session.post(
-                    self.url,
-                    json=payload,
-                    stream=True,
+                    self.url,
+                    json=payload,
+                    stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome110" # Add impersonate
+                    impersonate="chrome110"
                 )
-
+
                 if response.status_code != 200:
+                    error_detail = response.text
+                    try:
+                        error_json = response.json()
+                        error_detail = error_json.get("error", {}).get("message", error_detail)
+                    except json.JSONDecodeError:
+                        pass
                     raise exceptions.FailedToGenerateResponseError(
-                        f"Request failed with status code {response.status_code} - {response.text}"
+                        f"Request failed with status code {response.status_code} - {error_detail}"
                     )
-
-                streaming_text = ""
-                # Iterate over bytes and decode manually
-                for line_bytes in response.iter_lines():
-                    if line_bytes:
-                        try:
-                            line = line_bytes.decode('utf-8') # Decode bytes
-                            chunk = json.loads(line)
-                            if chunk.get("typeName") == "LLMChunk": # Use .get for safety
-                                content = chunk.get("content", "") # Use .get for safety
-                                streaming_text += content
-                                resp = dict(text=content)
-                                # Yield dict or raw string
-                                yield resp if raw else resp
-                        except json.JSONDecodeError:
-                            continue
-                        except UnicodeDecodeError:
-                            continue
-
-                # Update history and last response after stream finishes
+
+                # Use sanitize_stream for SSE processing
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value="data:",
+                    to_json=True, # Stream sends JSON
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._twoai_extractor, # Use the specific extractor
+                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _twoai_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+                # If stream completes successfully, update history
                 self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, streaming_text)
-
-            except CurlError as e: # Catch CurlError
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
-                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except exceptions.FailedToGenerateResponseError:
+                raise # Re-raise specific exception
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred during streaming ({type(e).__name__}): {e}") from e
+            finally:
+                # Ensure history is updated even if stream ends abruptly but text was received
+                if streaming_text and not self.last_response: # Check if last_response wasn't set in the try block
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
 
         def for_non_stream():
-            # Non-stream requests might not work the same way if the API expects streaming.
-            # This implementation aggregates the stream.
+            # Non-stream still uses the stream internally and aggregates
             streaming_text = ""
-            # Iterate through the generator provided by for_stream
-            for chunk_data in for_stream():
-                # Check if chunk_data is a dict (not raw) and has 'text'
-                if isinstance(chunk_data, dict) and "text" in chunk_data:
-                    streaming_text += chunk_data["text"]
-                # If raw=True, chunk_data is the string content itself
-                elif isinstance(chunk_data, str):
-                    streaming_text += chunk_data
-            # last_response and history are updated within for_stream
-            return self.last_response # Return the final aggregated response
-
-        # Ensure stream defaults to True if not provided, matching original behavior
+            # We need to consume the generator from for_stream()
+            gen = for_stream()
+            try:
+                for chunk_data in gen:
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        streaming_text += chunk_data["text"]
+                    elif isinstance(chunk_data, str): # Handle raw=True case
+                        streaming_text += chunk_data
+            except exceptions.FailedToGenerateResponseError:
+                # If the underlying stream fails, re-raise the error
+                raise
+            # self.last_response and history are updated within for_stream's try/finally
+            return self.last_response # Return the final aggregated dict
+
         effective_stream = stream if stream is not None else True
         return for_stream() if effective_stream else for_non_stream()
 
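Putting this hunk together: the system message is now optional, and the top-level `reasoningOn`/`onlineSearch` flags are replaced by an `extra_body` object. The request body sent to the v2 endpoint therefore looks roughly like this (field names taken from the code above; values illustrative):

    payload = {
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},  # omitted when system_message is empty
            {"role": "user", "content": "write me a poem about AI"},
        ],
        "model": "sutra-v2",
        "temperature": 0.6,
        "max_tokens": 8096,  # illustrative; the default is not shown in this diff
        "extra_body": {"online_search": True},
    }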
@@ -178,56 +208,56 @@ class TwoAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
         online_search: bool = True,
-        reasoning_on: bool = False,
     ) -> str:
-        # Ensure stream defaults to True if not provided
         effective_stream = stream if stream is not None else True
 
         def for_stream_chat():
-            # ask() yields dicts when raw=False
-            for response_dict in self.ask(
-                prompt,
-                stream=True,
+            # ask() yields dicts when raw=False (default for chat)
+            gen = self.ask(
+                prompt,
+                stream=True,
                 raw=False, # Ensure ask yields dicts
-                optimizer=optimizer,
+                optimizer=optimizer,
                 conversationally=conversationally,
                 online_search=online_search,
-                reasoning_on=reasoning_on
-            ):
-                yield self.get_message(response_dict)
-
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
         def for_non_stream_chat():
-            # ask() returns a dict when stream=False
+            # ask() returns a dict when stream=False
             response_dict = self.ask(
-                prompt,
-                stream=False,
-                optimizer=optimizer,
+                prompt,
+                stream=False, # Ensure ask returns dict
+                raw=False,
+                optimizer=optimizer,
                 conversationally=conversationally,
                 online_search=online_search,
-                reasoning_on=reasoning_on
             )
-            return self.get_message(response_dict)
-
+            return self.get_message(response_dict) # get_message expects dict
+
         return for_stream_chat() if effective_stream else for_non_stream_chat()
 
     def get_message(self, response: dict) -> str:
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response.get("text", "") # Use .get for safety
+
 
 if __name__ == "__main__":
     from rich import print
+    import os
+
+    api_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJzanl2OHJtZGxDZDFnQ2hQdGxzZHdxUlVteXkyIiwic291cmNlIjoiRmlyZWJhc2UiLCJpYXQiOjE3NDYxMDY0NjksImV4cCI6MTc0NjEwNzM2OX0.o3fprDgsUJwvwCsWr0HfqmVpSBUthHsxqnopfWhtiYc"
 
-    api_key = "" # Add your API key here or load from env
-
-    try: # Add try-except block for testing
+    try:
         ai = TwoAI(
             api_key=api_key,
             timeout=60,
+            model="sutra-r0",
             system_message="You are an intelligent AI assistant. Be concise and helpful."
         )
-
-        print("[bold blue]Testing Stream:[/bold blue]")
-        response_stream = ai.chat("666+444=?", stream=True, reasoning_on=True)
+
+        response_stream = ai.chat("write me a poem about AI", stream=True, online_search=True)
         full_stream_response = ""
         for chunk in response_stream:
             print(chunk, end="", flush=True)

@@ -236,13 +266,15 @@ if __name__ == "__main__":
 
         # Optional: Test non-stream
         # print("[bold blue]Testing Non-Stream:[/bold blue]")
-        # response_non_stream = ai.chat("What is the capital of France?", stream=False)
-        # print(response_non_stream)
-        # print("[bold green]Non-Stream Test Complete.[/bold green]")
+        # non_stream_response = ai.chat("What is the capital of France?", stream=False, online_search=False)
+        # print(non_stream_response)
+        # print("[bold green]Non-Stream Test Complete.[/bold green]\n")
+
 
     except exceptions.FailedToGenerateResponseError as e:
         print(f"\n[bold red]API Error:[/bold red] {e}")
     except ValueError as e:
-        print(f"\n[bold red]Configuration Error:[/bold red] {e}")
+        print(f"\n[bold red]Configuration Error:[/bold red] {e}")
     except Exception as e:
         print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
+