webscout-7.9-py3-none-any.whl → webscout-8.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.
Files changed (69)
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  8. webscout/Provider/AISEARCH/ISou.py +1 -1
  9. webscout/Provider/AISEARCH/__init__.py +6 -1
  10. webscout/Provider/AISEARCH/felo_search.py +1 -1
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/AISEARCH/hika_search.py +194 -0
  13. webscout/Provider/AISEARCH/iask_search.py +436 -0
  14. webscout/Provider/AISEARCH/monica_search.py +246 -0
  15. webscout/Provider/AISEARCH/scira_search.py +320 -0
  16. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  17. webscout/Provider/AllenAI.py +255 -122
  18. webscout/Provider/DeepSeek.py +1 -2
  19. webscout/Provider/Deepinfra.py +17 -9
  20. webscout/Provider/ExaAI.py +261 -0
  21. webscout/Provider/ExaChat.py +8 -1
  22. webscout/Provider/GithubChat.py +2 -1
  23. webscout/Provider/Jadve.py +2 -2
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OPENAI/__init__.py +17 -0
  26. webscout/Provider/OPENAI/base.py +46 -0
  27. webscout/Provider/OPENAI/c4ai.py +347 -0
  28. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  29. webscout/Provider/OPENAI/deepinfra.py +284 -0
  30. webscout/Provider/OPENAI/exaai.py +419 -0
  31. webscout/Provider/OPENAI/exachat.py +421 -0
  32. webscout/Provider/OPENAI/freeaichat.py +355 -0
  33. webscout/Provider/OPENAI/glider.py +314 -0
  34. webscout/Provider/OPENAI/heckai.py +337 -0
  35. webscout/Provider/OPENAI/llmchatco.py +325 -0
  36. webscout/Provider/OPENAI/netwrck.py +348 -0
  37. webscout/Provider/OPENAI/scirachat.py +459 -0
  38. webscout/Provider/OPENAI/sonus.py +294 -0
  39. webscout/Provider/OPENAI/typegpt.py +361 -0
  40. webscout/Provider/OPENAI/utils.py +211 -0
  41. webscout/Provider/OPENAI/venice.py +428 -0
  42. webscout/Provider/OPENAI/wisecat.py +381 -0
  43. webscout/Provider/OPENAI/x0gpt.py +389 -0
  44. webscout/Provider/OPENAI/yep.py +329 -0
  45. webscout/Provider/OpenGPT.py +199 -0
  46. webscout/Provider/PI.py +39 -24
  47. webscout/Provider/Venice.py +1 -1
  48. webscout/Provider/Youchat.py +326 -296
  49. webscout/Provider/__init__.py +16 -6
  50. webscout/Provider/ai4chat.py +58 -56
  51. webscout/Provider/akashgpt.py +34 -22
  52. webscout/Provider/freeaichat.py +1 -1
  53. webscout/Provider/labyrinth.py +121 -20
  54. webscout/Provider/llmchatco.py +306 -0
  55. webscout/Provider/scira_chat.py +274 -0
  56. webscout/Provider/typefully.py +280 -0
  57. webscout/Provider/typegpt.py +3 -184
  58. webscout/prompt_manager.py +2 -1
  59. webscout/version.py +1 -1
  60. webscout/webscout_search.py +118 -54
  61. webscout/webscout_search_async.py +109 -45
  62. webscout-8.1.dist-info/METADATA +683 -0
  63. {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
  64. webscout/Provider/flowith.py +0 -207
  65. webscout-7.9.dist-info/METADATA +0 -995
  66. {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  67. {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  68. {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  69. {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
webscout/Provider/Youchat.py
@@ -1,296 +1,326 @@
- from uuid import uuid4
- from re import findall
- import json
-
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
- from webscout.AIbase import Provider, AsyncProvider
- from webscout import exceptions
- from typing import Union, Any, AsyncGenerator, Dict
-
- import cloudscraper
-
-
- class YouChat(Provider):
-     """
-     This class provides methods for interacting with the You.com chat API in a consistent provider structure.
-     """
-
-     # Updated available models based on provided "aiModels" list
-     AVAILABLE_MODELS = [
-         # "gpt_4_5_preview", #isProOnly": true,
-         # "openai_o3_mini_high", #isProOnly": true,
-         # "openai_o3_mini_medium", #isProOnly": true,
-         # "openai_o1", #isProOnly": true,
-         # "openai_o1_preview", #isProOnly": true,
-         # "openai_o1_mini", #isProOnly": true,
-         "gpt_4o_mini",
-         "gpt_4o",
-         "gpt_4_turbo",
-         # "gpt_4", #isProOnly": true,
-         # "claude_3_7_sonnet_thinking", #isProOnly": true,
-         # "claude_3_7_sonnet", #isProOnly": true,
-         # "claude_3_5_sonnet", #isProOnly": true,
-         # "claude_3_opus", #isProOnly": true,
-         "claude_3_sonnet",
-         "claude_3_5_haiku",
-         # "qwq_32b", #isProOnly": true,
-         "qwen2p5_72b",
-         "qwen2p5_coder_32b",
-         # "deepseek_r1", #isProOnly": true,
-         # "deepseek_v3", #isProOnly": true,
-         "grok_2",
-         # "llama3_3_70b", #isProOnly": false, "isAllowedForUserChatModes": false,
-         # "llama3_2_90b", #isProOnly": false, "isAllowedForUserChatModes": false,
-         "llama3_1_405b",
-         "mistral_large_2",
-         "gemini_2_flash",
-         "gemini_1_5_flash",
-         "gemini_1_5_pro",
-         "databricks_dbrx_instruct",
-         "command_r_plus",
-         "solar_1_mini",
-         "dolphin_2_5"
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "gemini_2_flash",
-     ):
-         """Instantiates YouChat
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): Model to use. Defaults to "claude_3_5_haiku".
-         """
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.session = cloudscraper.create_scraper() # Create a Cloudscraper session
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.chat_endpoint = "https://you.com/api/streamingSearch"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.headers = {
-             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
-             "Accept": "text/event-stream",
-             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-             "Referer": "https://you.com/search?q=hi&fromSearchBar=true&tbm=youchat",
-             "Connection": "keep-alive",
-             "DNT": "1",
-         }
-         self.cookies = {
-             "uuid_guest_backup": uuid4().hex,
-             "youchat_personalization": "true",
-             "youchat_smart_learn": "true",
-             "youpro_subscription": "false",
-             "ydc_stytch_session": uuid4().hex,
-             "ydc_stytch_session_jwt": uuid4().hex,
-             "__cf_bm": uuid4().hex,
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         trace_id = str(uuid4())
-         conversation_turn_id = str(uuid4())
-
-         # Updated query parameters to match the new API format
-         params = {
-             "page": 1,
-             "count": 10,
-             "safeSearch": "Moderate",
-             "mkt": "en-IN",
-             "enable_worklow_generation_ux": "true",
-             "domain": "youchat",
-             "use_personalization_extraction": "true",
-             "queryTraceId": trace_id,
-             "chatId": trace_id,
-             "conversationTurnId": conversation_turn_id,
-             "pastChatLength": 0,
-             "selectedChatMode": "custom",
-             "selectedAiModel": self.model,
-             "enable_agent_clarification_questions": "true",
-             "traceId": f"{trace_id}|{conversation_turn_id}|{uuid4()}",
-             "use_nested_youchat_updates": "true"
-         }
-
-         # New payload format is JSON
-         payload = {
-             "query": conversation_prompt,
-             "chat": "[]"
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint,
-                 headers=self.headers,
-                 cookies=self.cookies,
-                 params=params,
-                 data=json.dumps(payload),
-                 stream=True,
-                 timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             streaming_text = ""
-             found_marker = False # Flag to track if we've passed the '####' marker
-
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 chunk_size=self.stream_chunk_size,
-                 delimiter="\n",
-             ):
-                 try:
-                     if bool(value) and value.startswith('data: ') and 'youChatToken' in value:
-                         data = json.loads(value[6:])
-                         token = data.get('youChatToken', '')
-
-                         # Check if this is the marker with '####'
-                         if token == '####':
-                             found_marker = True
-                             continue # Skip the marker itself
-
-                         # Only process tokens after the marker has been found
-                         if found_marker and token:
-                             streaming_text += token
-                             yield token if raw else dict(text=token)
-                 except json.decoder.JSONDecodeError:
-                     pass
-
-             self.last_response.update(dict(text=streaming_text))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == '__main__':
-     from rich import print
-     ai = YouChat(timeout=5000)
-     response = ai.chat("hi", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
+ from uuid import uuid4
+ import json
+ import datetime
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ import cloudscraper
+
+
+ class YouChat(Provider):
+     """
+     This class provides methods for interacting with the You.com chat API in a consistent provider structure.
+     """
+
+     # Updated available models based on latest aiModels list
+     # All models with isProOnly: false are included
+     AVAILABLE_MODELS = [
+         # ProOnly models (not available without subscription)
+         # "gpt_4_5_preview", # isProOnly: true
+         # "openai_o3_mini_high", # isProOnly: true
+         # "openai_o3_mini_medium", # isProOnly: true
+         # "openai_o1", # isProOnly: true
+         # "openai_o1_preview", # isProOnly: true
+         # "openai_o1_mini", # isProOnly: true
+         # "gpt_4", # isProOnly: true
+         # "claude_3_7_sonnet_thinking", # isProOnly: true
+         # "claude_3_7_sonnet", # isProOnly: true
+         # "claude_3_5_sonnet", # isProOnly: true
+         # "claude_3_opus", # isProOnly: true
+         # "qwq_32b", # isProOnly: true
+         # "deepseek_r1", # isProOnly: true
+         # "deepseek_v3", # isProOnly: true
+         # "gemini_2_5_pro_experimental", # isProOnly: true
+
+         # Free models (isProOnly: false)
+         "gpt_4o_mini",
+         "gpt_4o",
+         "gpt_4_turbo",
+         "claude_3_sonnet",
+         "claude_3_5_haiku",
+         "qwen2p5_72b",
+         "qwen2p5_coder_32b",
+         "gemini_2_flash",
+         "gemini_1_5_flash",
+         "gemini_1_5_pro",
+         "grok_2",
+         "llama4_maverick",
+         "llama4_scout",
+         "llama3_1_405b",
+         "mistral_large_2",
+         "command_r_plus",
+
+         # Free models not enabled for user chat modes
+         # "llama3_3_70b", # isAllowedForUserChatModes: false
+         # "llama3_2_90b", # isAllowedForUserChatModes: false
+         # "databricks_dbrx_instruct", # isAllowedForUserChatModes: false
+         # "solar_1_mini", # isAllowedForUserChatModes: false
+         # "dolphin_2_5", # isAllowedForUserChatModes: false, isUncensoredModel: true
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gemini_2_flash",
+     ):
+         """Instantiates YouChat
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): Model to use. Defaults to "gemini_2_flash".
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = cloudscraper.create_scraper() # Create a Cloudscraper session
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://you.com/api/streamingSearch"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.headers = {
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
+             "Accept": "text/event-stream",
+             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "Referer": "https://you.com/search?q=hi&fromSearchBar=true&tbm=youchat",
+             "Connection": "keep-alive",
+             "DNT": "1",
+             "Content-Type": "text/plain;charset=UTF-8",
+         }
+         self.cookies = {
+             "uuid_guest_backup": uuid4().hex,
+             "youchat_personalization": "true",
+             "youchat_smart_learn": "true",
+             "youpro_subscription": "false",
+             "you_subscription": "freemium",
+             "safesearch_guest": "Moderate",
+             "__cf_bm": uuid4().hex,
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         trace_id = str(uuid4())
+         conversation_turn_id = str(uuid4())
+
+         # Current timestamp in ISO format for traceId
+         current_time = datetime.datetime.now().isoformat()
+
+         # Updated query parameters to match the new API format
+         params = {
+             "page": 1,
+             "count": 10,
+             "safeSearch": "Moderate",
+             "mkt": "en-IN",
+             "enable_worklow_generation_ux": "true",
+             "domain": "youchat",
+             "use_personalization_extraction": "true",
+             "queryTraceId": trace_id,
+             "chatId": trace_id,
+             "conversationTurnId": conversation_turn_id,
+             "pastChatLength": 0,
+             "selectedChatMode": "smart_routing", # Updated from custom to smart_routing
+             "enable_agent_clarification_questions": "true",
+             "traceId": f"{trace_id}|{conversation_turn_id}|{current_time}",
+             "use_nested_youchat_updates": "true"
+         }
+
+         # New payload format is JSON
+         payload = {
+             "query": conversation_prompt,
+             "chat": "[]"
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint,
+                 headers=self.headers,
+                 cookies=self.cookies,
+                 params=params,
+                 data=json.dumps(payload),
+                 stream=True,
+                 timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             streaming_text = ""
+             found_marker = False # Flag to track if we've passed the '####' marker
+
+             for value in response.iter_lines(
+                 decode_unicode=True,
+                 chunk_size=self.stream_chunk_size,
+                 delimiter="\n",
+             ):
+                 try:
+                     if bool(value) and value.startswith('data: ') and 'youChatToken' in value:
+                         data = json.loads(value[6:])
+                         token = data.get('youChatToken', '')
+
+                         # Check if this is the marker with '####'
+                         if token == '####':
+                             found_marker = True
+                             continue # Skip the marker itself
+
+                         # Only process tokens after the marker has been found
+                         if found_marker and token:
+                             streaming_text += token
+                             yield token if raw else dict(text=token)
+                 except json.decoder.JSONDecodeError:
+                     pass
+
+             self.last_response.update(dict(text=streaming_text))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == '__main__':
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test all available models
+     working = 0
+     total = len(YouChat.AVAILABLE_MODELS)
+
+     for model in YouChat.AVAILABLE_MODELS:
+         try:
+             test_ai = YouChat(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")