webscout 8.2.8-py3-none-any.whl → 8.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/UNFINISHED/Youchat.py
@@ -1,330 +1,330 @@
- from uuid import uuid4
- import json
- import datetime
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
- import cloudscraper
-
-
- class YouChat(Provider):
-     """
-     This class provides methods for interacting with the You.com chat API in a consistent provider structure.
-     """
-
-     # Updated available models based on latest aiModels list
-     # All models with isProOnly: false are included
-     AVAILABLE_MODELS = [
-         # ProOnly models (not available without subscription)
-         # "gpt_4_5_preview", # isProOnly: true
-         # "openai_o3_mini_high", # isProOnly: true
-         # "openai_o3_mini_medium", # isProOnly: true
-         # "openai_o1", # isProOnly: true
-         # "openai_o1_preview", # isProOnly: true
-         # "openai_o1_mini", # isProOnly: true
-         # "gpt_4", # isProOnly: true
-         # "claude_3_7_sonnet_thinking", # isProOnly: true
-         # "claude_3_7_sonnet", # isProOnly: true
-         # "claude_3_5_sonnet", # isProOnly: true
-         # "claude_3_opus", # isProOnly: true
-         # "qwq_32b", # isProOnly: true
-         # "deepseek_r1", # isProOnly: true
-         # "deepseek_v3", # isProOnly: true
-         # "gemini_2_5_pro_experimental", # isProOnly: true
-
-         # Free models (isProOnly: false)
-         "gpt_4o_mini",
-         "gpt_4o",
-         "gpt_4_turbo",
-         "claude_3_sonnet",
-         "claude_3_5_haiku",
-         "qwen2p5_72b",
-         "qwen2p5_coder_32b",
-         "gemini_2_flash",
-         "gemini_1_5_flash",
-         "gemini_1_5_pro",
-         "grok_2",
-         "llama4_maverick",
-         "llama4_scout",
-         "llama3_1_405b",
-         "mistral_large_2",
-         "command_r_plus",
-
-         # Free models not enabled for user chat modes
-         "llama3_3_70b", # isAllowedForUserChatModes: false
-         "llama3_2_90b", # isAllowedForUserChatModes: false
-         "databricks_dbrx_instruct", # isAllowedForUserChatModes: false
-         "solar_1_mini", # isAllowedForUserChatModes: false
-         "dolphin_2_5", # isAllowedForUserChatModes: false, isUncensoredModel: true
-     ]
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "gemini_2_flash",
-     ):
-         """Instantiates YouChat
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): Model to use. Defaults to "gemini_2_flash".
-         """
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-         self.session = cloudscraper.create_scraper() # Create a Cloudscraper session
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.chat_endpoint = "https://you.com/api/streamingSearch"
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.headers = {
-             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
-             "Accept": "text/event-stream",
-             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-             "Referer": "https://you.com/search?q=hi&fromSearchBar=true&tbm=youchat",
-             "Connection": "keep-alive",
-             "DNT": "1",
-             "Content-Type": "text/plain;charset=UTF-8",
-         }
-         self.cookies = {
-             "uuid_guest": uuid4().hex,
-             "uuid_guest_backup": uuid4().hex,
-             "youchat_personalization": "true",
-             "youchat_smart_learn": "true",
-             "youpro_subscription": "false",
-             "you_subscription": "freemium",
-             "safesearch_guest": "Moderate",
-             "__cf_bm": uuid4().hex,
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         trace_id = str(uuid4())
-         conversation_turn_id = str(uuid4())
-
-         # Current timestamp in ISO format for traceId
-         current_time = datetime.datetime.now().isoformat()
-
-         # Updated query parameters to match the new API format
-         params = {
-             "page": 1,
-             "count": 10,
-             "safeSearch": "Moderate",
-             "mkt": "en-IN",
-             "enable_worklow_generation_ux": "true",
-             "domain": "youchat",
-             "use_personalization_extraction": "true",
-             "queryTraceId": trace_id,
-             "chatId": trace_id,
-             "conversationTurnId": conversation_turn_id,
-             "pastChatLength": len(self.conversation.history) if hasattr(self.conversation, "history") else 0,
-             "selectedChatMode": "custom",
-             "selectedAiModel": self.model,
-             # "enable_agent_clarification_questions": "true",
-             "traceId": f"{trace_id}|{conversation_turn_id}|{current_time}",
-             "use_nested_youchat_updates": "true"
-         }
-
-         # New payload format is JSON
-         payload = {
-             "query": conversation_prompt,
-             "chat": "[]"
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint,
-                 headers=self.headers,
-                 cookies=self.cookies,
-                 params=params,
-                 data=json.dumps(payload),
-                 stream=True,
-                 timeout=self.timeout
-             )
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             streaming_text = ""
-             # New SSE event-based parsing
-             event_type = None
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 chunk_size=self.stream_chunk_size,
-                 delimiter="\n",
-             ):
-                 if not value:
-                     continue
-                 if value.startswith("event: "):
-                     event_type = value[7:].strip()
-                     continue
-                 if value.startswith("data: "):
-                     data_str = value[6:]
-                     if event_type == "youChatToken":
-                         try:
-                             data = json.loads(data_str)
-                             token = data.get("youChatToken", "")
-                             if token:
-                                 streaming_text += token
-                                 yield token if raw else dict(text=token)
-                         except Exception:
-                             pass
-                     # Reset event_type after processing
-                     event_type = None
-
-             self.last_response.update(dict(text=streaming_text))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
- if __name__ == '__main__':
-     print("-" * 80)
-     print(f"{'Model':<50} {'Status':<10} {'Response'}")
-     print("-" * 80)
-
-     # Test all available models
-     working = 0
-     total = len(YouChat.AVAILABLE_MODELS)
-
-     for model in YouChat.AVAILABLE_MODELS:
-         try:
-             test_ai = YouChat(model=model, timeout=60)
-             response = test_ai.chat("Say 'Hello' in one word", stream=True)
-             response_text = ""
-             for chunk in response:
-                 response_text += chunk
-                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
-
-             if response_text and len(response_text.strip()) > 0:
-                 status = "✓"
-                 # Truncate response if too long
-                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-             else:
-                 status = "✗"
-                 display_text = "Empty or invalid response"
-             print(f"\r{model:<50} {status:<10} {display_text}")
-         except Exception as e:
-             print(f"\r{model:<50} {'✗':<10} {str(e)}")
+ from uuid import uuid4
+ import json
+ import datetime
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ import cloudscraper
+
+
+ class YouChat(Provider):
+     """
+     This class provides methods for interacting with the You.com chat API in a consistent provider structure.
+     """
+
+     # Updated available models based on latest aiModels list
+     # All models with isProOnly: false are included
+     AVAILABLE_MODELS = [
+         # ProOnly models (not available without subscription)
+         # "gpt_4_5_preview", # isProOnly: true
+         # "openai_o3_mini_high", # isProOnly: true
+         # "openai_o3_mini_medium", # isProOnly: true
+         # "openai_o1", # isProOnly: true
+         # "openai_o1_preview", # isProOnly: true
+         # "openai_o1_mini", # isProOnly: true
+         # "gpt_4", # isProOnly: true
+         # "claude_3_7_sonnet_thinking", # isProOnly: true
+         # "claude_3_7_sonnet", # isProOnly: true
+         # "claude_3_5_sonnet", # isProOnly: true
+         # "claude_3_opus", # isProOnly: true
+         # "qwq_32b", # isProOnly: true
+         # "deepseek_r1", # isProOnly: true
+         # "deepseek_v3", # isProOnly: true
+         # "gemini_2_5_pro_experimental", # isProOnly: true
+
+         # Free models (isProOnly: false)
+         "gpt_4o_mini",
+         "gpt_4o",
+         "gpt_4_turbo",
+         "claude_3_sonnet",
+         "claude_3_5_haiku",
+         "qwen2p5_72b",
+         "qwen2p5_coder_32b",
+         "gemini_2_flash",
+         "gemini_1_5_flash",
+         "gemini_1_5_pro",
+         "grok_2",
+         "llama4_maverick",
+         "llama4_scout",
+         "llama3_1_405b",
+         "mistral_large_2",
+         "command_r_plus",
+
+         # Free models not enabled for user chat modes
+         "llama3_3_70b", # isAllowedForUserChatModes: false
+         "llama3_2_90b", # isAllowedForUserChatModes: false
+         "databricks_dbrx_instruct", # isAllowedForUserChatModes: false
+         "solar_1_mini", # isAllowedForUserChatModes: false
+         "dolphin_2_5", # isAllowedForUserChatModes: false, isUncensoredModel: true
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gemini_2_flash",
+     ):
+         """Instantiates YouChat
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): Model to use. Defaults to "gemini_2_flash".
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = cloudscraper.create_scraper() # Create a Cloudscraper session
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://you.com/api/streamingSearch"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.headers = {
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
+             "Accept": "text/event-stream",
+             "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "Referer": "https://you.com/search?q=hi&fromSearchBar=true&tbm=youchat",
+             "Connection": "keep-alive",
+             "DNT": "1",
+             "Content-Type": "text/plain;charset=UTF-8",
+         }
+         self.cookies = {
+             "uuid_guest": uuid4().hex,
+             "uuid_guest_backup": uuid4().hex,
+             "youchat_personalization": "true",
+             "youchat_smart_learn": "true",
+             "youpro_subscription": "false",
+             "you_subscription": "freemium",
+             "safesearch_guest": "Moderate",
+             "__cf_bm": uuid4().hex,
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         trace_id = str(uuid4())
+         conversation_turn_id = str(uuid4())
+
+         # Current timestamp in ISO format for traceId
+         current_time = datetime.datetime.now().isoformat()
+
+         # Updated query parameters to match the new API format
+         params = {
+             "page": 1,
+             "count": 10,
+             "safeSearch": "Moderate",
+             "mkt": "en-IN",
+             "enable_worklow_generation_ux": "true",
+             "domain": "youchat",
+             "use_personalization_extraction": "true",
+             "queryTraceId": trace_id,
+             "chatId": trace_id,
+             "conversationTurnId": conversation_turn_id,
+             "pastChatLength": len(self.conversation.history) if hasattr(self.conversation, "history") else 0,
+             "selectedChatMode": "custom",
+             "selectedAiModel": self.model,
+             # "enable_agent_clarification_questions": "true",
+             "traceId": f"{trace_id}|{conversation_turn_id}|{current_time}",
+             "use_nested_youchat_updates": "true"
+         }
+
+         # New payload format is JSON
+         payload = {
+             "query": conversation_prompt,
+             "chat": "[]"
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint,
+                 headers=self.headers,
+                 cookies=self.cookies,
+                 params=params,
+                 data=json.dumps(payload),
+                 stream=True,
+                 timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             streaming_text = ""
+             # New SSE event-based parsing
+             event_type = None
+             for value in response.iter_lines(
+                 decode_unicode=True,
+                 chunk_size=self.stream_chunk_size,
+                 delimiter="\n",
+             ):
+                 if not value:
+                     continue
+                 if value.startswith("event: "):
+                     event_type = value[7:].strip()
+                     continue
+                 if value.startswith("data: "):
+                     data_str = value[6:]
+                     if event_type == "youChatToken":
+                         try:
+                             data = json.loads(data_str)
+                             token = data.get("youChatToken", "")
+                             if token:
+                                 streaming_text += token
+                                 yield token if raw else dict(text=token)
+                         except Exception:
+                             pass
+                     # Reset event_type after processing
+                     event_type = None
+
+             self.last_response.update(dict(text=streaming_text))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == '__main__':
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test all available models
+     working = 0
+     total = len(YouChat.AVAILABLE_MODELS)
+
+     for model in YouChat.AVAILABLE_MODELS:
+         try:
+             test_ai = YouChat(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")