webscout-7.6-py3-none-any.whl → webscout-7.7-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of webscout might be problematic.

Files changed (36)
  1. webscout/Extra/autocoder/__init__.py +9 -9
  2. webscout/Extra/autocoder/autocoder_utiles.py +193 -195
  3. webscout/Extra/autocoder/rawdog.py +789 -649
  4. webscout/Extra/gguf.py +54 -24
  5. webscout/Provider/AISEARCH/ISou.py +0 -21
  6. webscout/Provider/AllenAI.py +4 -21
  7. webscout/Provider/ChatGPTClone.py +226 -0
  8. webscout/Provider/Glider.py +8 -4
  9. webscout/Provider/Hunyuan.py +272 -0
  10. webscout/Provider/LambdaChat.py +391 -0
  11. webscout/Provider/OLLAMA.py +256 -32
  12. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +18 -45
  13. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +34 -46
  14. webscout/Provider/TTI/artbit/async_artbit.py +3 -32
  15. webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
  16. webscout/Provider/TTI/fastflux/async_fastflux.py +6 -2
  17. webscout/Provider/TTI/fastflux/sync_fastflux.py +7 -2
  18. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  19. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  20. webscout/Provider/WebSim.py +227 -0
  21. webscout/Provider/__init__.py +12 -1
  22. webscout/Provider/flowith.py +13 -2
  23. webscout/Provider/labyrinth.py +239 -0
  24. webscout/Provider/learnfastai.py +28 -15
  25. webscout/Provider/sonus.py +208 -0
  26. webscout/Provider/typegpt.py +1 -1
  27. webscout/Provider/uncovr.py +297 -0
  28. webscout/cli.py +49 -0
  29. webscout/litagent/agent.py +14 -9
  30. webscout/version.py +1 -1
  31. {webscout-7.6.dist-info → webscout-7.7.dist-info}/METADATA +33 -22
  32. {webscout-7.6.dist-info → webscout-7.7.dist-info}/RECORD +36 -29
  33. {webscout-7.6.dist-info → webscout-7.7.dist-info}/LICENSE.md +0 -0
  34. {webscout-7.6.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
  35. {webscout-7.6.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
  36. {webscout-7.6.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
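
The per-file line counts above can be verified independently: a wheel is a zip archive, so the two releases can be compared with nothing but the Python standard library. The sketch below is illustrative and not part of the package; it assumes both wheels were fetched first (for example with "pip download webscout==7.6 --no-deps" and "pip download webscout==7.7 --no-deps") and decodes members permissively so every file is diffable.

import difflib
import zipfile

# Hypothetical local paths to the two downloaded wheels.
OLD_WHEEL = "webscout-7.6-py3-none-any.whl"
NEW_WHEEL = "webscout-7.7-py3-none-any.whl"

def wheel_contents(path):
    """Map each archive member name to its (permissively decoded) text."""
    with zipfile.ZipFile(path) as zf:
        return {name: zf.read(name).decode("utf-8", errors="replace")
                for name in zf.namelist()}

old, new = wheel_contents(OLD_WHEEL), wheel_contents(NEW_WHEEL)
for name in sorted(old.keys() | new.keys()):
    a = old.get(name, "").splitlines(keepends=True)
    b = new.get(name, "").splitlines(keepends=True)
    diff = list(difflib.unified_diff(a, b, fromfile=name, tofile=name))
    if diff:
        print("".join(diff), end="")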
webscout/Provider/WebSim.py (new file)
@@ -0,0 +1,227 @@
+ import requests
+ import json
+ import string
+ import random
+ from typing import Any, Dict, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class WebSim(Provider):
+     """
+     A class to interact with the WebSim API.
+     """
+
+     url = "https://websim.ai"
+     chat_api_endpoint = "https://websim.ai/api/v1/inference/run_chat_completion"
+     image_api_endpoint = "https://websim.ai/api/v1/inference/run_image_generation"
+
+     image_models = ['flux']
+     AVAILABLE_MODELS = ['gemini-1.5-flash', 'gemini-1.5-pro', 'gemini-flash', 'gemini-pro', 'gemini-flash-thinking'] + image_models
+
+     @staticmethod
+     def generate_project_id(for_image=False):
+         """
+         Generate a project ID in the appropriate format
+
+         For chat: format like 'ke3_xh5gai3gjkmruomu'
+         For image: format like 'kx0m131_rzz66qb2xoy7'
+         """
+         chars = string.ascii_lowercase + string.digits
+
+         if for_image:
+             first_part = ''.join(random.choices(chars, k=7))
+             second_part = ''.join(random.choices(chars, k=12))
+             return f"{first_part}_{second_part}"
+         else:
+             prefix = ''.join(random.choices(chars, k=3))
+             suffix = ''.join(random.choices(chars, k=15))
+             return f"{prefix}_{suffix}"
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = 'gemini-1.5-pro',
+         aspect_ratio: str = "1:1"
+     ):
+         """Initializes the WebSim API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.headers = {
+             'accept': '*/*',
+             'accept-language': 'en-US,en;q=0.9',
+             'content-type': 'text/plain;charset=UTF-8',
+             'origin': 'https://websim.ai',
+             'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
+             'websim-flags;': ''
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.aspect_ratio = aspect_ratio
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         is_image_request = self.model in self.image_models
+         project_id = self.generate_project_id(for_image=is_image_request)
+
+         if is_image_request:
+             self.headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/ai-image-prompt-generator'
+             return self._handle_image_request(project_id, conversation_prompt)
+         else:
+             self.headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/zelos-ai-assistant'
+             return self._handle_chat_request(project_id, conversation_prompt)
+
+     def _handle_image_request(self, project_id: str, prompt: str) -> Dict[str, Any]:
+         try:
+             data = {
+                 "project_id": project_id,
+                 "prompt": prompt,
+                 "aspect_ratio": self.aspect_ratio
+             }
+             response = self.session.post(
+                 self.image_api_endpoint,
+                 json=data,
+                 timeout=self.timeout
+             )
+             response.raise_for_status()
+             response_json = response.json()
+             image_url = response_json.get("url")
+             if image_url:
+                 self.last_response = {"text": image_url}
+                 self.conversation.update_chat_history(prompt, image_url)
+                 return {"text": image_url}
+             raise exceptions.FailedToGenerateResponseError("No image URL found in response")
+         except requests.RequestException as e:
+             raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+     def _handle_chat_request(self, project_id: str, prompt: str) -> Dict[str, Any]:
+         max_retries = 3
+         retry_count = 0
+         last_error = None
+
+         while retry_count < max_retries:
+             try:
+                 data = {
+                     "project_id": project_id,
+                     "messages": [{"role": "user", "content": prompt}]
+                 }
+                 response = self.session.post(
+                     self.chat_api_endpoint,
+                     json=data,
+                     timeout=self.timeout
+                 )
+
+                 if response.status_code == 429:
+                     last_error = exceptions.FailedToGenerateResponseError(
+                         f"Rate limit exceeded: {response.text}"
+                     )
+                     retry_count += 1
+                     if retry_count < max_retries:
+                         continue
+                     raise last_error
+
+                 response.raise_for_status()
+                 response_json = response.json()
+                 content = response_json.get("content", "")
+
+                 self.last_response = {"text": content}
+                 self.conversation.update_chat_history(prompt, content)
+                 return {"text": content.strip()}
+
+             except requests.RequestException as e:
+                 if "Rate limit exceeded" in str(e) and retry_count < max_retries:
+                     retry_count += 1
+                 else:
+                     raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         raise last_error or exceptions.FailedToGenerateResponseError("Max retries exceeded")
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         return self.get_message(
+             self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+         )
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     for model in WebSim.AVAILABLE_MODELS:
+         try:
+             test_ai = WebSim(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word")
+
+             if response and len(response.strip()) > 0:
+                 status = "✓"
+                 # Clean and truncate response
+                 clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                 display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/__init__.py
@@ -78,8 +78,15 @@ from .GithubChat import *
  from .copilot import *
  from .C4ai import *
  from .flowith import *
+ from .sonus import *
+ from .uncovr import *
+ from .labyrinth import *
+ from .WebSim import *
+ from .LambdaChat import *
+ from .ChatGPTClone import *
  __all__ = [
      'LLAMA',
+     'LabyrinthAI',
      'Flowith',
      'C4ai',
      'Venice',
@@ -95,7 +102,7 @@ __all__ = [
      'IBMGranite',
      'QwenLM',
      'ChatGPTGratis',
-
+     'LambdaChat',
      'TextPollinationsAI',
      'GliderAI',
      'Cohere',
@@ -134,8 +141,10 @@ __all__ = [
      'Cerebras',
      'Lepton',
      'GEMINIAPI',
+     'SonusAI',
      'Cleeai',
      'Elmo',
+     'ChatGPTClone',
      'Free2GPT',
      'GPTWeb',
      'Netwrck',
@@ -160,4 +169,6 @@ __all__ = [
      'FreeAIChat',
      'ElectronHub',
      'GithubChat',
+     'UncovrAI',
+     'WebSim',
  ]
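
Because the new modules are star-imported and their class names appear in __all__, the six additions should be importable straight from the package namespace, roughly as in this sketch (names taken from the hunks above; exact import behavior is an assumption):

# Sketch: the providers registered above, imported from webscout.Provider.
from webscout.Provider import (
    ChatGPTClone, LabyrinthAI, LambdaChat, SonusAI, UncovrAI, WebSim,
)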
webscout/Provider/flowith.py
@@ -109,6 +109,17 @@ class Flowith(Provider):
 
          return text.strip()
 
+     def decode_response(self, content):
+         """Try to decode the response content using multiple encodings."""
+         encodings = ['utf-8', 'latin1', 'iso-8859-1', 'cp1252']
+         for encoding in encodings:
+             try:
+                 return content.decode(encoding)
+             except UnicodeDecodeError:
+                 continue
+         # If all encodings fail, try to decode with 'latin1' as it can decode any byte
+         return content.decode('latin1')
+
      def ask(
          self,
          prompt: str,
@@ -143,8 +154,8 @@ class Flowith(Provider):
                  f"Request failed with status code {response.status_code}"
              )
 
-         # Get the response text directly
-         response_text = response.text.strip()
+         # Get the response text using our multi-encoding decoder
+         response_text = self.decode_response(response.content).strip()
 
          # Clean the response
          cleaned_text = self.clean_response(response_text)
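
The new decode_response helper simply walks a fixed codec list and keeps the first decoding that succeeds. A self-contained illustration of the same strategy:

# Standalone illustration of the multi-encoding fallback added to Flowith.
def decode_response(content: bytes) -> str:
    for encoding in ['utf-8', 'latin1', 'iso-8859-1', 'cp1252']:
        try:
            return content.decode(encoding)
        except UnicodeDecodeError:
            continue
    return content.decode('latin1')  # latin1 maps every byte, so this never fails

print(decode_response("héllo".encode("utf-8")))  # valid UTF-8 decodes on the first try
print(decode_response(b"caf\xe9"))               # invalid UTF-8 falls back to latin1

Note that because 'latin1' is second in the list and can decode any byte sequence, the loop never reaches 'iso-8859-1', 'cp1252', or the final fallback; in practice the helper means "UTF-8 if possible, latin1 otherwise".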
webscout/Provider/labyrinth.py (new file)
@@ -0,0 +1,239 @@
+ import requests
+ import json
+ import uuid
+ from typing import Any, Dict, Optional, Generator, Union
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class LabyrinthAI(Provider):
+     """
+     A class to interact with the Labyrinth AI chat API.
+     """
+
+     # AVAILABLE_MODELS = [
+     #     "gemini-2.0-flash"
+     # ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         # model: str = "gemini-2.0-flash",
+         browser: str = "chrome"
+     ):
+         """Initializes the Labyrinth AI API client."""
+         # if model not in self.AVAILABLE_MODELS:
+         #     raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.url = "https://labyrinth-ebon.vercel.app/api/chat"
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+         # Use fingerprinting to create a consistent browser identity
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": self.fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "Origin": "https://labyrinth-ebon.vercel.app",
+             "Cookie": "stock-mode=false; __Host-next-auth.csrf-token=68aa6224f2ff7bbf2c4480a90c49b7b95aaac01a63ed90f3d20a69292c16a366%7C1f6672653c6e304ea971373fecdc3fe491568d014c68cdf3b26ead42f1c6ac62; __Secure-next-auth.callback-url=https%3A%2F%2Flabyrinth-ebon.vercel.app%2F; selectedModel={\"id\":\"gemini-2.0-flash\",\"name\":\"Gemini 2.0 Flash\",\"provider\":\"Google Generative AI\",\"providerId\":\"google\",\"enabled\":true,\"toolCallType\":\"native\",\"searchMode\":true}; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..Z5-1j_rsCWRHY17B.s0lMkhWr0S7a3-4h2p-ce0NJHeNyh8nDyOcsrzFU8AZtBbygGcHKbJ8PzLLQBNL7NwrUwET3fKGbtnAphaVjuSJQfXA0tu69zKJELPw-A3x0Ev6aHJMTG3l9_SweByHyfCSCnGB7tvjwEFsW4c5xs_HzMdPmoRTYyYzlZPuDGhHtQX7WyeUiARc36NfwV-KJYpzXV5-g0VkpsxFEawcfdk6D_S7JtOMmjMTTYuw2BbNYvtlvM-n_XivIctQmQ5Fp65JEE73nr5hWVReyYrkyfUGt4Q.TP8Woa-7Ao05yVCjbbGDug",
+             "Referer": "https://labyrinth-ebon.vercel.app/",
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             "Sec-GPC": "1"
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         # self.model = model
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def refresh_identity(self, browser: str = None):
+         """
+         Refreshes the browser identity fingerprint.
+
+         Args:
+             browser: Specific browser to use for the new fingerprint
+         """
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+         return self.fingerprint
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Prepare the request payload
+         payload = {
+             "id": str(uuid.uuid4()),
+             "messages": [
+                 {
+                     "role": "user",
+                     "content": conversation_prompt,
+                     "parts": [{"type": "text", "text": conversation_prompt}]
+                 }
+             ],
+             "stockMode": False
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
+                     if response.status_code != 200:
+                         # If we get a non-200 response, try refreshing our identity once
+                         if response.status_code in [403, 429]:
+                             self.refresh_identity()
+                             # Retry with new identity
+                             with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as retry_response:
+                                 if not retry_response.ok:
+                                     raise exceptions.FailedToGenerateResponseError(
+                                         f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
+                                     )
+                                 response = retry_response
+                         else:
+                             raise exceptions.FailedToGenerateResponseError(
+                                 f"Request failed with status code {response.status_code}"
+                             )
+
+                     streaming_text = ""
+                     for line in response.iter_lines():
+                         if line:
+                             try:
+                                 line = line.decode('utf-8')
+                                 if line.startswith('0:'):
+                                     content = line[2:].strip('"')
+                                     streaming_text += content
+                                     resp = dict(text=content)
+                                     yield resp if raw else resp
+                             except UnicodeDecodeError:
+                                 continue
+
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             try:
+                 response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                 if response.status_code != 200:
+                     if response.status_code in [403, 429]:
+                         self.refresh_identity()
+                         response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                         if not response.ok:
+                             raise exceptions.FailedToGenerateResponseError(
+                                 f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
+                             )
+                     else:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                 full_response = ""
+                 for line in response.iter_lines():
+                     if line:
+                         try:
+                             line = line.decode('utf-8')
+                             if line.startswith('0:'):
+                                 content = line[2:].strip('"')
+                                 full_response += content
+                         except UnicodeDecodeError:
+                             continue
+
+                 self.last_response = {"text": full_response}
+                 self.conversation.update_chat_history(prompt, full_response)
+                 return {"text": full_response}
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
webscout/Provider/learnfastai.py
@@ -1,6 +1,6 @@
  import os
  import json
- from typing import Optional
+ from typing import Optional, Union, Generator
  import uuid
  import requests
  import cloudscraper
@@ -118,7 +118,9 @@ class LearnFast(Provider):
          """
          payload = {
              "prompt": conversation_prompt,
+             "firstQuestionFlag": True,
              "sessionId": session_id,
+             "attachments": []
          }
          if image_url:
              payload["attachments"] = [
@@ -138,7 +140,7 @@
          optimizer: str = None,
          conversationally: bool = False,
          image_path: Optional[str] = None,
-     ) -> dict:
+     ) -> Union[dict, Generator[dict, None, None]]:
          """Chat with LearnFast
 
          Args:
@@ -151,7 +153,7 @@
                  Defaults to None.
 
          Returns:
-             dict : {}
+             Union[dict, Generator[dict, None, None]]: Response generated
          """
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
@@ -194,20 +196,24 @@
              full_response = ""
              for line in response.iter_lines(decode_unicode=True):
                  if line:
-                     if line.strip() == "[DONE]":
+                     line = line.strip()
+                     if line == "[DONE]":
                          break
                      try:
                          json_response = json.loads(line)
-                         message = json_response.get('data', {}).get('message', '')
-                         if message:
-                             full_response += message
-                             # print(message, end='', flush=True)
+                         if json_response.get('code') == 200 and json_response.get('data'):
+                             message = json_response['data'].get('message', '')
+                             if message:
+                                 full_response += message
+                                 if stream:
+                                     yield {"text": message}
                      except json.JSONDecodeError:
-                         print(f"\nFailed to parse JSON: {line}")
+                         pass
              self.last_response.update({"text": full_response})
              self.conversation.update_chat_history(prompt, full_response)
 
-             return self.last_response
+             if not stream:
+                 return self.last_response
          except requests.exceptions.RequestException as e:
              raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
 
@@ -218,7 +224,7 @@
          optimizer: str = None,
          conversationally: bool = False,
          image_path: Optional[str] = None,
-     ) -> str:
+     ) -> Union[str, Generator[str, None, None]]:
          """Generate response `str`
          Args:
              prompt (str): Prompt to be send.
@@ -228,10 +234,17 @@
              image_path (Optional[str], optional): Path to the image to be uploaded.
                  Defaults to None.
          Returns:
-             str: Response generated
+             Union[str, Generator[str, None, None]]: Response generated
          """
-         response = self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally, image_path=image_path)
-         return self.get_message(response)
+         try:
+             response = self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally, image_path=image_path)
+             if stream:
+                 for chunk in response:
+                     yield chunk["text"]
+             else:
+                 return str(response)
+         except Exception as e:
+             return f"Error: {str(e)}"
 
      def get_message(self, response: dict) -> str:
          """Retrieves message only from response
@@ -248,6 +261,6 @@
  if __name__ == "__main__":
      from rich import print
      ai = LearnFast()
-     response = ai.chat(input(">>> "), image_path=None)
+     response = ai.chat(input(">>> "), stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)