webscout 7.4-py3-none-any.whl → 7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (137)
  1. webscout/AIauto.py +5 -53
  2. webscout/AIutel.py +8 -318
  3. webscout/DWEBS.py +460 -489
  4. webscout/Extra/YTToolkit/YTdownloader.py +14 -53
  5. webscout/Extra/YTToolkit/transcriber.py +12 -13
  6. webscout/Extra/YTToolkit/ytapi/video.py +0 -1
  7. webscout/Extra/__init__.py +0 -1
  8. webscout/Extra/autocoder/autocoder_utiles.py +0 -4
  9. webscout/Extra/autocoder/rawdog.py +13 -41
  10. webscout/Extra/gguf.py +652 -428
  11. webscout/Extra/weather.py +178 -156
  12. webscout/Extra/weather_ascii.py +70 -17
  13. webscout/Litlogger/core/logger.py +1 -2
  14. webscout/Litlogger/handlers/file.py +1 -1
  15. webscout/Litlogger/styles/formats.py +0 -2
  16. webscout/Litlogger/utils/detectors.py +0 -1
  17. webscout/Provider/AISEARCH/DeepFind.py +0 -1
  18. webscout/Provider/AISEARCH/ISou.py +1 -1
  19. webscout/Provider/AISEARCH/felo_search.py +0 -1
  20. webscout/Provider/AllenAI.py +24 -9
  21. webscout/Provider/C4ai.py +432 -0
  22. webscout/Provider/ChatGPTGratis.py +24 -56
  23. webscout/Provider/Cloudflare.py +18 -21
  24. webscout/Provider/DeepSeek.py +27 -48
  25. webscout/Provider/Deepinfra.py +129 -53
  26. webscout/Provider/Gemini.py +1 -1
  27. webscout/Provider/GithubChat.py +362 -0
  28. webscout/Provider/Glider.py +25 -8
  29. webscout/Provider/HF_space/qwen_qwen2.py +2 -2
  30. webscout/Provider/HeckAI.py +38 -5
  31. webscout/Provider/HuggingFaceChat.py +462 -0
  32. webscout/Provider/Jadve.py +20 -5
  33. webscout/Provider/Marcus.py +7 -50
  34. webscout/Provider/Netwrck.py +43 -67
  35. webscout/Provider/PI.py +4 -2
  36. webscout/Provider/Perplexitylabs.py +26 -6
  37. webscout/Provider/Phind.py +29 -3
  38. webscout/Provider/PizzaGPT.py +10 -51
  39. webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
  40. webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
  41. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
  42. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
  43. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
  44. webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
  45. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
  46. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
  47. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
  48. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
  49. webscout/Provider/TTI/__init__.py +2 -3
  50. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  51. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  52. webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
  53. webscout/Provider/TTI/fastflux/__init__.py +22 -0
  54. webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
  55. webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
  56. webscout/Provider/TTS/__init__.py +2 -2
  57. webscout/Provider/TTS/deepgram.py +12 -39
  58. webscout/Provider/TTS/elevenlabs.py +14 -40
  59. webscout/Provider/TTS/gesserit.py +11 -35
  60. webscout/Provider/TTS/murfai.py +13 -39
  61. webscout/Provider/TTS/parler.py +17 -40
  62. webscout/Provider/TTS/speechma.py +180 -0
  63. webscout/Provider/TTS/streamElements.py +17 -44
  64. webscout/Provider/TextPollinationsAI.py +39 -59
  65. webscout/Provider/Venice.py +217 -200
  66. webscout/Provider/WiseCat.py +27 -5
  67. webscout/Provider/Youchat.py +63 -36
  68. webscout/Provider/__init__.py +13 -8
  69. webscout/Provider/akashgpt.py +28 -10
  70. webscout/Provider/copilot.py +416 -0
  71. webscout/Provider/flowith.py +196 -0
  72. webscout/Provider/freeaichat.py +32 -45
  73. webscout/Provider/granite.py +17 -53
  74. webscout/Provider/koala.py +20 -5
  75. webscout/Provider/llamatutor.py +7 -47
  76. webscout/Provider/llmchat.py +36 -53
  77. webscout/Provider/multichat.py +92 -98
  78. webscout/Provider/talkai.py +1 -0
  79. webscout/Provider/turboseek.py +3 -0
  80. webscout/Provider/tutorai.py +2 -0
  81. webscout/Provider/typegpt.py +154 -64
  82. webscout/Provider/x0gpt.py +3 -1
  83. webscout/Provider/yep.py +102 -20
  84. webscout/__init__.py +3 -0
  85. webscout/cli.py +4 -40
  86. webscout/conversation.py +1 -10
  87. webscout/exceptions.py +19 -9
  88. webscout/litagent/__init__.py +2 -2
  89. webscout/litagent/agent.py +351 -20
  90. webscout/litagent/constants.py +34 -5
  91. webscout/litprinter/__init__.py +0 -3
  92. webscout/models.py +181 -0
  93. webscout/optimizers.py +1 -1
  94. webscout/prompt_manager.py +2 -8
  95. webscout/scout/core/scout.py +1 -4
  96. webscout/scout/core/search_result.py +1 -1
  97. webscout/scout/core/text_utils.py +1 -1
  98. webscout/scout/core.py +2 -5
  99. webscout/scout/element.py +1 -1
  100. webscout/scout/parsers/html_parser.py +1 -1
  101. webscout/scout/utils.py +0 -1
  102. webscout/swiftcli/__init__.py +1 -3
  103. webscout/tempid.py +1 -1
  104. webscout/update_checker.py +55 -95
  105. webscout/version.py +1 -1
  106. webscout/webscout_search_async.py +1 -2
  107. webscout/yep_search.py +297 -297
  108. webscout-7.6.dist-info/LICENSE.md +146 -0
  109. {webscout-7.4.dist-info → webscout-7.6.dist-info}/METADATA +104 -514
  110. {webscout-7.4.dist-info → webscout-7.6.dist-info}/RECORD +113 -120
  111. webscout/Extra/autollama.py +0 -231
  112. webscout/Local/__init__.py +0 -10
  113. webscout/Local/_version.py +0 -3
  114. webscout/Local/formats.py +0 -747
  115. webscout/Local/model.py +0 -1368
  116. webscout/Local/samplers.py +0 -125
  117. webscout/Local/thread.py +0 -539
  118. webscout/Local/ui.py +0 -401
  119. webscout/Local/utils.py +0 -388
  120. webscout/Provider/Amigo.py +0 -274
  121. webscout/Provider/Bing.py +0 -243
  122. webscout/Provider/DiscordRocks.py +0 -253
  123. webscout/Provider/TTI/blackbox/__init__.py +0 -4
  124. webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
  125. webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
  126. webscout/Provider/TTI/deepinfra/__init__.py +0 -4
  127. webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
  128. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
  129. webscout/Provider/TTI/imgninza/__init__.py +0 -4
  130. webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
  131. webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
  132. webscout/Provider/TTS/voicepod.py +0 -117
  133. webscout/Provider/dgaf.py +0 -214
  134. webscout-7.4.dist-info/LICENSE.md +0 -211
  135. {webscout-7.4.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
  136. {webscout-7.4.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
  137. {webscout-7.4.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
webscout/Provider/Venice.py
@@ -1,200 +1,217 @@
-import requests
-import json
-from typing import Generator, Dict, Any, List, Union
-from uuid import uuid4
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout import LitAgent
-
-class Venice(Provider):
-    """
-    A class to interact with the Venice AI API.
-    """
-
-    AVAILABLE_MODELS = [
-        "llama-3.3-70b",
-        "llama-3.2-3b-akash",
-        "qwen2dot5-coder-32b"
-
-
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2000,
-        timeout: int = 30,
-        temperature: float = 0.8,
-        top_p: float = 0.9,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "llama-3.3-70b",
-        system_prompt: str = "You are a helpful AI assistant."
-    ):
-        """Initialize Venice AI client"""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.api_endpoint = "https://venice.ai/api/inference/chat"
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.temperature = temperature
-        self.top_p = top_p
-        self.timeout = timeout
-        self.model = model
-        self.system_prompt = system_prompt
-        self.last_response = {}
-
-        # Headers for the request
-        self.headers = {
-            "User-Agent": LitAgent().random(),
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "origin": "https://venice.ai",
-            "referer": "https://venice.ai/chat/",
-            "sec-ch-ua": '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin"
-        }
-
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator]:
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Payload construction
-        payload = {
-            "requestId": str(uuid4())[:7],
-            "modelId": self.model,
-            "prompt": [{"content": conversation_prompt, "role": "user"}],
-            "systemPrompt": self.system_prompt,
-            "conversationType": "text",
-            "temperature": self.temperature,
-            "webEnabled": True,
-            "topP": self.top_p,
-            "includeVeniceSystemPrompt": False,
-            "isCharacter": False,
-            "clientProcessingTime": 2000
-        }
-
-        def for_stream():
-            try:
-                with self.session.post(
-                    self.api_endpoint,
-                    json=payload,
-                    stream=True,
-                    timeout=self.timeout
-                ) as response:
-                    if response.status_code != 200:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
-                        )
-
-                    streaming_text = ""
-                    for line in response.iter_lines():
-                        if not line:
-                            continue
-
-                        try:
-                            # Decode bytes to string
-                            line_data = line.decode('utf-8').strip()
-                            if '"kind":"content"' in line_data:
-                                data = json.loads(line_data)
-                                if 'content' in data:
-                                    content = data['content']
-                                    streaming_text += content
-                                    resp = dict(text=content)
-                                    yield resp if raw else resp
-                        except json.JSONDecodeError:
-                            continue
-                        except UnicodeDecodeError:
-                            continue
-
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator]:
-        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield self.get_message(response)
-        def for_non_stream():
-            return self.get_message(
-                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
-            )
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == "__main__":
-    from rich import print
-
-    # Initialize Venice AI
-    ai = Venice(model="qwen2dot5-coder-32b", timeout=50)
-
-    # Test chat with streaming
-    response = ai.chat("Write a short story about an AI assistant", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
+import requests
+import json
+from typing import Generator, Dict, Any, List, Union
+from uuid import uuid4
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout import LitAgent
+
+class Venice(Provider):
+    """
+    A class to interact with the Venice AI API.
+    """
+
+    AVAILABLE_MODELS = [
+        "llama-3.3-70b",
+        "llama-3.2-3b-akash",
+        "qwen2dot5-coder-32b"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2000,
+        timeout: int = 30,
+        temperature: float = 0.8,
+        top_p: float = 0.9,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "llama-3.3-70b",
+        system_prompt: str = "You are a helpful AI assistant."
+    ):
+        """Initialize Venice AI client"""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.api_endpoint = "https://venice.ai/api/inference/chat"
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.temperature = temperature
+        self.top_p = top_p
+        self.timeout = timeout
+        self.model = model
+        self.system_prompt = system_prompt
+        self.last_response = {}
+
+        # Headers for the request
+        self.headers = {
+            "User-Agent": LitAgent().random(),
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://venice.ai",
+            "referer": "https://venice.ai/chat/",
+            "sec-ch-ua": '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin"
+        }
+
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Payload construction
+        payload = {
+            "requestId": str(uuid4())[:7],
+            "modelId": self.model,
+            "prompt": [{"content": conversation_prompt, "role": "user"}],
+            "systemPrompt": self.system_prompt,
+            "conversationType": "text",
+            "temperature": self.temperature,
+            "webEnabled": True,
+            "topP": self.top_p,
+            "includeVeniceSystemPrompt": False,
+            "isCharacter": False,
+            "clientProcessingTime": 2000
+        }
+
+        def for_stream():
+            try:
+                with self.session.post(
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout
+                ) as response:
+                    if response.status_code != 200:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    streaming_text = ""
+                    for line in response.iter_lines():
+                        if not line:
+                            continue
+
+                        try:
+                            # Decode bytes to string
+                            line_data = line.decode('utf-8').strip()
+                            if '"kind":"content"' in line_data:
+                                data = json.loads(line_data)
+                                if 'content' in data:
+                                    content = data['content']
+                                    streaming_text += content
+                                    resp = dict(text=content)
+                                    yield resp if raw else resp
+                        except json.JSONDecodeError:
+                            continue
+                        except UnicodeDecodeError:
+                            continue
+
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+            except requests.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+        def for_non_stream():
+            full_text = ""
+            for chunk in for_stream():
+                full_text += chunk["text"]
+            return {"text": full_text}
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(Venice.AVAILABLE_MODELS)
+
+    for model in Venice.AVAILABLE_MODELS:
+        try:
+            test_ai = Venice(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
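The Venice.py rewrite keeps the provider's public interface (ask, chat, get_message) intact; the functional changes are the trimmed AVAILABLE_MODELS list, a fixed for_non_stream() that actually aggregates the streamed chunks (in 7.4 it returned self.last_response, which ask() never populated), and a model-sweep smoke test in __main__. For orientation, a minimal usage sketch based on the diffed code; the import path and live, unauthenticated access to the venice.ai endpoint are assumptions:

    from webscout.Provider.Venice import Venice

    # Model names come from Venice.AVAILABLE_MODELS in the diff above.
    ai = Venice(model="llama-3.3-70b", timeout=30)

    # Streaming: chat(stream=True) yields plain-text chunks.
    for chunk in ai.chat("Explain HTTP streaming in one sentence", stream=True):
        print(chunk, end="", flush=True)

    # Non-streaming: in 7.6, ask() aggregates the stream into {"text": ...},
    # so chat() returns the full reply as a single string.
    print(ai.chat("Say hello"))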
webscout/Provider/WiseCat.py
@@ -169,8 +169,30 @@ class WiseCat(Provider):
         return response["text"]
 
 if __name__ == "__main__":
-    from rich import print
-    ai = WiseCat()
-    response = ai.chat(input(">>> "))
-    for chunk in response:
-        print(chunk, end="", flush=True)
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(WiseCat.AVAILABLE_MODELS)
+
+    for model in WiseCat.AVAILABLE_MODELS:
+        try:
+            test_ai = WiseCat(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/Youchat.py
@@ -2,6 +2,7 @@ from uuid import uuid4
 from re import findall
 import json
 
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
@@ -11,6 +12,7 @@ from typing import Any, AsyncGenerator, Dict
 
 import cloudscraper
 
+
 class YouChat(Provider):
     """
     This class provides methods for interacting with the You.com chat API in a consistent provider structure.
@@ -18,31 +20,36 @@ class YouChat(Provider):
 
     # Updated available models based on provided "aiModels" list
     AVAILABLE_MODELS = [
-        "openai_o3_mini_high",
-        "openai_o3_mini_medium",
-        "openai_o1",
-        "openai_o1_preview",
-        "openai_o1_mini",
+        # "gpt_4_5_preview", #isProOnly": true,
+        # "openai_o3_mini_high", #isProOnly": true,
+        # "openai_o3_mini_medium", #isProOnly": true,
+        # "openai_o1", #isProOnly": true,
+        # "openai_o1_preview", #isProOnly": true,
+        # "openai_o1_mini", #isProOnly": true,
         "gpt_4o_mini",
         "gpt_4o",
         "gpt_4_turbo",
-        "gpt_4",
-        "grok_2",
-        "claude_3_5_sonnet",
-        "claude_3_opus",
+        # "gpt_4", #isProOnly": true,
+        # "claude_3_7_sonnet_thinking", #isProOnly": true,
+        # "claude_3_7_sonnet", #isProOnly": true,
+        # "claude_3_5_sonnet", #isProOnly": true,
+        # "claude_3_opus", #isProOnly": true,
         "claude_3_sonnet",
         "claude_3_5_haiku",
-        "deepseek_r1",
-        "deepseek_v3",
-        "llama3_3_70b",
-        "llama3_2_90b",
+        # "qwq_32b", #isProOnly": true,
+        "qwen2p5_72b",
+        "qwen2p5_coder_32b",
+        # "deepseek_r1", #isProOnly": true,
+        # "deepseek_v3", #isProOnly": true,
+        "grok_2",
+        # "llama3_3_70b", #isProOnly": false, "isAllowedForUserChatModes": false,
+        # "llama3_2_90b", #isProOnly": false, "isAllowedForUserChatModes": false,
         "llama3_1_405b",
         "mistral_large_2",
+        "gemini_2_flash",
         "gemini_1_5_flash",
         "gemini_1_5_pro",
         "databricks_dbrx_instruct",
-        "qwen2p5_72b",
-        "qwen2p5_coder_32b",
         "command_r_plus",
         "solar_1_mini",
         "dolphin_2_5"
@@ -59,7 +66,7 @@ class YouChat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "claude_3_5_haiku", # Default model set to claude_3_5_haiku
+        model: str = "gemini_2_flash",
     ):
         """Instantiates YouChat
 
@@ -157,30 +164,44 @@ class YouChat(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
 
-        payload = {
-            "q": conversation_prompt,
+        trace_id = str(uuid4())
+        conversation_turn_id = str(uuid4())
+
+        # Updated query parameters to match the new API format
+        params = {
             "page": 1,
             "count": 10,
             "safeSearch": "Moderate",
             "mkt": "en-IN",
-            "enable_workflow_generation_ux": "true",
+            "enable_worklow_generation_ux": "true",
             "domain": "youchat",
-            "use_personalization_extraction": "false",
-            "enable_agent_clarification_questions": "true",
-            "queryTraceId": str(uuid4()),
-            "chatId": str(uuid4()),
-            "conversationTurnId": str(uuid4()),
+            "use_personalization_extraction": "true",
+            "queryTraceId": trace_id,
+            "chatId": trace_id,
+            "conversationTurnId": conversation_turn_id,
             "pastChatLength": 0,
-            "isSmallMediumDevice": "true",
-            "selectedChatMode": self.model,
-            "use_nested_youchat_updates": "true",
-            "traceId": str(uuid4()),
+            "selectedChatMode": "custom",
+            "selectedAiModel": self.model,
+            "enable_agent_clarification_questions": "true",
+            "traceId": f"{trace_id}|{conversation_turn_id}|{uuid4()}",
+            "use_nested_youchat_updates": "true"
+        }
+
+        # New payload format is JSON
+        payload = {
+            "query": conversation_prompt,
             "chat": "[]"
         }
 
         def for_stream():
-            response = self.session.get(
-                self.chat_endpoint, headers=self.headers, cookies=self.cookies, params=payload, stream=True, timeout=self.timeout
+            response = self.session.post(
+                self.chat_endpoint,
+                headers=self.headers,
+                cookies=self.cookies,
+                params=params,
+                data=json.dumps(payload),
+                stream=True,
+                timeout=self.timeout
             )
             if not response.ok:
                 raise exceptions.FailedToGenerateResponseError(
@@ -188,6 +209,8 @@ class YouChat(Provider):
                 )
 
             streaming_text = ""
+            found_marker = False  # Flag to track if we've passed the '####' marker
+
             for value in response.iter_lines(
                 decode_unicode=True,
                 chunk_size=self.stream_chunk_size,
@@ -197,11 +220,19 @@ class YouChat(Provider):
                     if bool(value) and value.startswith('data: ') and 'youChatToken' in value:
                         data = json.loads(value[6:])
                         token = data.get('youChatToken', '')
-                        if token:
+
+                        # Check if this is the marker with '####'
+                        if token == '####':
+                            found_marker = True
+                            continue  # Skip the marker itself
+
+                        # Only process tokens after the marker has been found
+                        if found_marker and token:
                             streaming_text += token
                             yield token if raw else dict(text=token)
                 except json.decoder.JSONDecodeError:
                     pass
+
             self.last_response.update(dict(text=streaming_text))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
@@ -252,10 +283,6 @@ class YouChat(Provider):
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response
 
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
@@ -266,4 +293,4 @@ if __name__ == '__main__':
     ai = YouChat(timeout=5000)
     response = ai.chat("hi", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
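Taken together, the Youchat.py hunks make three changes: the request becomes a POST with the prompt in a JSON body ("query") instead of a GET "q" parameter, model selection moves to selectedChatMode: "custom" plus a selectedAiModel query parameter, and the stream parser now discards every youChatToken that arrives before a literal '####' marker. A standalone sketch of that marker-gating step, with hypothetical SSE lines standing in for response.iter_lines() output:

    import json

    # Hypothetical server-sent lines; in the provider these come from
    # response.iter_lines(decode_unicode=True, ...).
    lines = [
        'data: {"youChatToken": "search preamble"}',
        'data: {"youChatToken": "####"}',
        'data: {"youChatToken": "Hello"}',
        'data: {"youChatToken": "!"}',
    ]

    found_marker = False
    streaming_text = ""
    for value in lines:
        if value and value.startswith('data: ') and 'youChatToken' in value:
            # value[6:] strips the 'data: ' prefix before JSON decoding.
            token = json.loads(value[6:]).get('youChatToken', '')
            if token == '####':
                found_marker = True  # everything before the marker is dropped
                continue
            if found_marker and token:
                streaming_text += token

    print(streaming_text)  # -> Hello!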