webscout 7.4__py3-none-any.whl → 7.5__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (42)
  1. webscout/Provider/C4ai.py +414 -0
  2. webscout/Provider/Cloudflare.py +18 -21
  3. webscout/Provider/DeepSeek.py +3 -32
  4. webscout/Provider/Deepinfra.py +30 -21
  5. webscout/Provider/GithubChat.py +362 -0
  6. webscout/Provider/HeckAI.py +20 -3
  7. webscout/Provider/HuggingFaceChat.py +462 -0
  8. webscout/Provider/Marcus.py +7 -50
  9. webscout/Provider/Netwrck.py +6 -53
  10. webscout/Provider/Phind.py +29 -3
  11. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  12. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  13. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  14. webscout/Provider/Venice.py +200 -200
  15. webscout/Provider/Youchat.py +1 -1
  16. webscout/Provider/__init__.py +13 -2
  17. webscout/Provider/akashgpt.py +8 -5
  18. webscout/Provider/copilot.py +416 -0
  19. webscout/Provider/flowith.py +181 -0
  20. webscout/Provider/granite.py +17 -53
  21. webscout/Provider/llamatutor.py +6 -46
  22. webscout/Provider/llmchat.py +7 -46
  23. webscout/Provider/multichat.py +29 -91
  24. webscout/exceptions.py +19 -9
  25. webscout/update_checker.py +55 -93
  26. webscout/version.py +1 -1
  27. webscout-7.5.dist-info/LICENSE.md +146 -0
  28. {webscout-7.4.dist-info → webscout-7.5.dist-info}/METADATA +5 -126
  29. {webscout-7.4.dist-info → webscout-7.5.dist-info}/RECORD +32 -33
  30. webscout/Local/__init__.py +0 -10
  31. webscout/Local/_version.py +0 -3
  32. webscout/Local/formats.py +0 -747
  33. webscout/Local/model.py +0 -1368
  34. webscout/Local/samplers.py +0 -125
  35. webscout/Local/thread.py +0 -539
  36. webscout/Local/ui.py +0 -401
  37. webscout/Local/utils.py +0 -388
  38. webscout/Provider/dgaf.py +0 -214
  39. webscout-7.4.dist-info/LICENSE.md +0 -211
  40. {webscout-7.4.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  41. {webscout-7.4.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  42. {webscout-7.4.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0

webscout/Provider/copilot.py
@@ -0,0 +1,416 @@
+ import os
+ import json
+ import base64
+ import asyncio
+ import requests
+ from urllib.parse import quote
+ from typing import Optional, Dict, Any, List, Union, Generator
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ # curl_cffi and nodriver are optional; record their availability instead of failing at import time
+ try:
+     from curl_cffi.requests import Session, CurlWsFlag
+     has_curl_cffi = True
+ except ImportError:
+     has_curl_cffi = False
+
+ try:
+     import nodriver
+     has_nodriver = True
+ except ImportError:
+     has_nodriver = False
+
+
+ class NoValidHarFileError(Exception):
+     pass
+
+
+ class CopilotConversation:
+     conversation_id: str
+
+     def __init__(self, conversation_id: str):
+         self.conversation_id = conversation_id
+
+
+ class Copilot(Provider):
+     """
+     A class to interact with the Microsoft Copilot API.
+     """
+
+     label = "Copilot"  # used in the login event yielded when browser-based auth is required
+     AVAILABLE_MODELS = ["Copilot"]
+     url = "https://copilot.microsoft.com"
+     websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
+     conversation_url = f"{url}/c/api/conversations"
+
+     _access_token: str = None
+     _cookies: dict = None
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2000,
+         timeout: int = 900,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Copilot"
+     ):
+         """Initializes the Copilot API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         # Use LitAgent for user-agent
+         self.headers = {
+             'User-Agent': LitAgent().random(),
+             'Accept-Language': 'en-US,en;q=0.9',
+             'Connection': 'keep-alive',
+             'Content-Type': 'application/json',
+             'Origin': self.url,
+             'Referer': f'{self.url}/',
+             'Sec-Fetch-Dest': 'empty',
+             'Sec-Fetch-Mode': 'cors',
+             'Sec-Fetch-Site': 'same-origin',
+         }
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.proxies = proxies
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         images = None,
+         api_key: str = None,
+         **kwargs
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Main logic for calling Copilot API
+         def for_stream():
+             try:
+                 if not has_curl_cffi:
+                     raise Exception('Install or update "curl_cffi" package | pip install -U curl_cffi')
+
+                 websocket_url = self.websocket_url
+                 headers = None
+
+                 if images is not None:
+                     if api_key is not None:
+                         self._access_token = api_key
+                     if self._access_token is None:
+                         try:
+                             self._access_token, self._cookies = readHAR(self.url)
+                         except NoValidHarFileError as h:
+                             # print(f"Copilot: {h}")
+                             if has_nodriver:
+                                 yield {"type": "login", "provider": self.label, "url": os.environ.get("webscout_login", "")}
+                                 self._access_token, self._cookies = asyncio.run(get_access_token_and_cookies(self.url, self.proxies.get("https")))
+                             else:
+                                 raise h
+                     websocket_url = f"{websocket_url}&accessToken={quote(self._access_token)}"
+                     headers = {"authorization": f"Bearer {self._access_token}"}
+
+                 with Session(
+                     timeout=self.timeout,
+                     proxy=self.proxies.get("https"),
+                     impersonate="chrome",
+                     headers=headers,
+                     cookies=self._cookies,
+                 ) as session:
+                     if self._access_token is not None:
+                         self._cookies = session.cookies.jar if hasattr(session.cookies, "jar") else session.cookies
+
+                     response = session.get(f"{self.url}/c/api/user")
+                     if response.status_code == 401:
+                         raise exceptions.AuthenticationError("Status 401: Invalid access token")
+                     if response.status_code != 200:
+                         raise exceptions.APIConnectionError(f"Status {response.status_code}: {response.text}")
+                     user = response.json().get('firstName')
+                     if user is None:
+                         self._access_token = None
+                     # print(f"Copilot: User: {user or 'null'}")
+
+                     # Create or use existing conversation
+                     conversation = kwargs.get("conversation", None)
+                     if conversation is None:
+                         response = session.post(self.conversation_url)
+                         if response.status_code != 200:
+                             raise exceptions.APIConnectionError(f"Status {response.status_code}: {response.text}")
+                         conversation_id = response.json().get("id")
+                         conversation = CopilotConversation(conversation_id)
+                         if kwargs.get("return_conversation", False):
+                             yield conversation
+                         # print(f"Copilot: Created conversation: {conversation_id}")
+                     else:
+                         conversation_id = conversation.conversation_id
+                         # print(f"Copilot: Use conversation: {conversation_id}")
+
+                     # Handle image uploads if any
+                     uploaded_images = []
+                     if images is not None:
+                         for image, _ in images:
+                             # Convert image to bytes if needed
+                             if isinstance(image, str):
+                                 if image.startswith("data:"):
+                                     # Data URL
+                                     header, encoded = image.split(",", 1)
+                                     data = base64.b64decode(encoded)
+                                 else:
+                                     # File path or URL
+                                     with open(image, "rb") as f:
+                                         data = f.read()
+                             else:
+                                 data = image
+
+                             # Get content type
+                             content_type = "image/jpeg"  # Default
+                             if data[:2] == b'\xff\xd8':
+                                 content_type = "image/jpeg"
+                             elif data[:8] == b'\x89PNG\r\n\x1a\n':
+                                 content_type = "image/png"
+                             elif data[:6] in (b'GIF87a', b'GIF89a'):
+                                 content_type = "image/gif"
+                             elif data[:2] in (b'BM', b'BA'):
+                                 content_type = "image/bmp"
+
+                             response = session.post(
+                                 f"{self.url}/c/api/attachments",
+                                 headers={"content-type": content_type},
+                                 data=data
+                             )
+                             if response.status_code != 200:
+                                 raise exceptions.APIConnectionError(f"Status {response.status_code}: {response.text}")
+                             uploaded_images.append({"type": "image", "url": response.json().get("url")})
+                             break
+
+                     # Connect to WebSocket
+                     wss = session.ws_connect(websocket_url)
+                     wss.send(json.dumps({
+                         "event": "send",
+                         "conversationId": conversation_id,
+                         "content": [*uploaded_images, {
+                             "type": "text",
+                             "text": conversation_prompt,
+                         }],
+                         "mode": "chat"
+                     }).encode(), CurlWsFlag.TEXT)
+
+                     # Process response
+                     is_started = False
+                     msg = None
+                     image_prompt: str = None
+                     last_msg = None
+                     streaming_text = ""
+
+                     try:
+                         while True:
+                             try:
+                                 msg = wss.recv()[0]
+                                 msg = json.loads(msg)
+                             except Exception:
+                                 # A closed socket or an undecodable frame ends the stream
+                                 break
+                             last_msg = msg
+                             if msg.get("event") == "appendText":
+                                 is_started = True
+                                 content = msg.get("text")
+                                 streaming_text += content
+                                 resp = {"text": content}
+                                 yield content if raw else resp
+                             elif msg.get("event") == "generatingImage":
+                                 image_prompt = msg.get("prompt")
+                             elif msg.get("event") == "imageGenerated":
+                                 yield {"type": "image", "url": msg.get("url"), "prompt": image_prompt, "preview": msg.get("thumbnailUrl")}
+                             elif msg.get("event") == "done":
+                                 break
+                             elif msg.get("event") == "replaceText":
+                                 content = msg.get("text")
+                                 streaming_text += content
+                                 resp = {"text": content}
+                                 yield content if raw else resp
+                             elif msg.get("event") == "error":
+                                 raise exceptions.FailedToGenerateResponseError(f"Error: {msg}")
+
+                         if not is_started:
+                             raise exceptions.FailedToGenerateResponseError(f"Invalid response: {last_msg}")
+
+                         # Update conversation history
+                         self.conversation.update_chat_history(prompt, streaming_text)
+                         self.last_response = {"text": streaming_text}
+
+                     finally:
+                         wss.close()
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
+
+         def for_non_stream():
+             streaming_text = ""
+             for response in for_stream():
+                 if isinstance(response, dict) and "text" in response:
+                     streaming_text += response["text"]
+             self.last_response = {"text": streaming_text}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = True,
+         optimizer: str = None,
+         conversationally: bool = False,
+         images = None,
+         api_key: str = None,
+         **kwargs
+     ) -> Union[str, Generator]:
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer,
+                                      conversationally=conversationally,
+                                      images=images, api_key=api_key, **kwargs):
+                 if isinstance(response, dict) and "text" in response:
+                     yield response["text"]
+                 elif isinstance(response, dict) and "type" in response and response["type"] == "image":
+                     yield f"\n![Image]({response['url']})\n"
+
+         def for_non_stream():
+             response = self.ask(prompt, False, optimizer=optimizer,
+                                 conversationally=conversationally,
+                                 images=images, api_key=api_key, **kwargs)
+             return self.get_message(response)
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+
+ async def get_access_token_and_cookies(url: str, proxy: str = None, target: str = "ChatAI"):
+     browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="copilot")
+     try:
+         page = await browser.get(url)
+         access_token = None
+         while access_token is None:
+             access_token = await page.evaluate("""
+                 (() => {
+                     for (var i = 0; i < localStorage.length; i++) {
+                         try {
+                             item = JSON.parse(localStorage.getItem(localStorage.key(i)));
+                             if (item.credentialType == "AccessToken"
+                                 && item.expiresOn > Math.floor(Date.now() / 1000)
+                                 && item.target.includes("target")) {
+                                 return item.secret;
+                             }
+                         } catch(e) {}
+                     }
+                 })()
+             """.replace('"target"', json.dumps(target)))
+             if access_token is None:
+                 await asyncio.sleep(1)
+         cookies = {}
+         for c in await page.send(nodriver.cdp.network.get_cookies([url])):
+             cookies[c.name] = c.value
+         await page.close()
+         return access_token, cookies
+     finally:
+         stop_browser()
+
+
+ def readHAR(url: str):
+     api_key = None
+     cookies = None
+     har_files = []
+     # Look for HAR files in common locations
+     har_paths = [
+         os.path.join(os.path.expanduser("~"), "Downloads"),
+         os.path.join(os.path.expanduser("~"), "Desktop")
+     ]
+     for path in har_paths:
+         if os.path.exists(path):
+             for file in os.listdir(path):
+                 if file.endswith(".har"):
+                     har_files.append(os.path.join(path, file))
+
+     for path in har_files:
+         with open(path, 'rb') as file:
+             try:
+                 harFile = json.loads(file.read())
+             except json.JSONDecodeError:
+                 # Error: not a HAR file!
+                 continue
+             for v in harFile['log']['entries']:
+                 if v['request']['url'].startswith(url):
+                     v_headers = {h['name'].lower(): h['value'] for h in v['request']['headers']}
+                     if "authorization" in v_headers:
+                         api_key = v_headers["authorization"].split(maxsplit=1).pop()
+                     if v['request']['cookies']:
+                         cookies = {c['name']: c['value'] for c in v['request']['cookies']}
+     if api_key is None:
+         raise NoValidHarFileError("No access token found in .har files")
+
+     return api_key, cookies
+
+
+ # def get_clarity() -> bytes:
+ # body = base64.b64decode("H4sIAAAAAAAAA23RwU7DMAwG4HfJ2aqS2E5ibjxH1cMOnQYqYZvUTQPx7vyJRGGAemj01XWcP+9udg+j80MetDhSyrEISc5GrqrtZnmaTydHbrdUnSsWYT2u+8Obo0Ce/IQvaDBmjkwhUlKKIRNHmQgosqEArWPRDQMx90rxeUMPzB1j+UJvwNIxhTvsPcXyX1T+rizE4juK3mEEhpAUg/JvzW1/+U/tB1LATmhqotoiweMea50PLy2vui4LOY3XfD1dwnkor5fn/e18XBFgm6fHjSzZmCyV7d3aRByAEYextaTHEH3i5pgKGVP/s+DScE5PuLKIpW6FnCi1gY3Rbpqmj0/DI/+L7QEAAA==")
+ # return body
+
+
+ async def get_nodriver(proxy=None, user_data_dir=None):
+     browser = await nodriver.Browser(
+         headless=True,
+         proxy=proxy,
+         user_data_dir=user_data_dir
+     )
+     return browser, lambda: browser.close()
+
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = Copilot(timeout=900)
+     response = ai.chat(input("> "), stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
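
For orientation, a minimal usage sketch of the new Copilot provider (not part of the diff); it assumes webscout 7.5 with curl_cffi installed and relies only on the methods defined above. An access token is only looked up when images are passed, so plain text chat needs no credentials.

    from webscout.Provider.copilot import Copilot

    ai = Copilot(timeout=900)

    # Streaming: chat() yields text chunks (and markdown image links for generated images)
    for chunk in ai.chat("Summarize what a HAR file is.", stream=True):
        print(chunk, end="", flush=True)

    # Non-streaming: ask(stream=False) aggregates the reply into {"text": ...}
    reply = ai.ask("Why would a client read one?", stream=False)
    print(ai.get_message(reply))
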

webscout/Provider/flowith.py
@@ -0,0 +1,181 @@
+ import uuid
+ import requests
+ import json
+ import os
+ import re
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class Flowith(Provider):
+     """
+     A class to interact with the Flowith AI chat API.
+     """
+
+     AVAILABLE_MODELS = [
+         "gpt-4o-mini",
+         "deepseek-chat",
+         "deepseek-reasoner",
+         "claude-3.5-haiku",
+         "llama-3.2-11b",
+         "llama-3.2-90b",
+         "gemini-2.0-flash",
+         "o1",
+         "o3-mini",
+         "gpt-4o",
+         "claude-3.5-sonnet",
+         "gemini-2.0-pro",
+         "claude-3.7-sonnet"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2048,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "claude-3.5-haiku"
+     ):
+         """Initializes the Flowith API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://edge.flowith.net/ai/chat?mode=general"
+
+         # Set up headers for the API request
+         self.headers = {
+             "authority": "edge.flowith.net",
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "origin": "https://flowith.io",
+             "referer": "https://edge.flowith.net/",
+             "responsetype": "stream",
+             "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "cross-site",
+             "user-agent": LitAgent().random()  # Use LitAgent for user-agent
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.node_id = str(uuid.uuid4())  # Generate a new UUID for node ID
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def clean_response(self, text):
+         """Remove <think> reasoning blocks from the response text."""
+         text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
+
+         return text.strip()
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,  # This parameter is kept for compatibility
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Dict[str, str]]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction - the API is asked to stream, but the client reads the body in one go
+         payload = {
+             "model": self.model,
+             "messages": [{"content": conversation_prompt, "role": "user"}],
+             "stream": True,
+             "nodeId": self.node_id
+         }
+
+         try:
+             # Single request; the full response body is buffered before processing
+             response = self.session.post(self.url, json=payload, timeout=self.timeout)
+
+             if response.status_code != 200:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Request failed with status code {response.status_code}"
+                 )
+
+             # Get the response text directly
+             response_text = response.text.strip()
+
+             # Clean the response
+             cleaned_text = self.clean_response(response_text)
+             self.last_response = {"text": cleaned_text}
+
+             # Update conversation history
+             self.conversation.update_chat_history(prompt, cleaned_text)
+
+             return {"text": cleaned_text}
+
+         except requests.RequestException as e:
+             raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,  # Parameter kept for compatibility
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         # Always use non-streaming mode
+         response = self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+         return self.get_message(response)
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = Flowith(timeout=60)
+     prompt = input("> ")
+     response = ai.chat(prompt)
+     print(response)
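
Similarly, a short sketch of driving the new Flowith provider with one of its reasoning models (not part of the diff; the model name is taken from AVAILABLE_MODELS above, and webscout 7.5 is assumed to be installed):

    from webscout.Provider.flowith import Flowith

    # deepseek-reasoner emits <think>...</think> blocks, which clean_response() strips
    ai = Flowith(model="deepseek-reasoner", timeout=60)
    print(ai.chat("In two sentences, how does TCP differ from UDP?"))
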