webscout 8.2.3-py3-none-any.whl → 8.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/conversation.py
CHANGED
@@ -86,8 +86,10 @@ class Conversation:
         self.status = status
         self.max_tokens_to_sample = max_tokens
         self.chat_history = "" # Initialize as empty string
-
-        self.
+        # Updated history formats
+        self.history_format = "\nUser: %(user)s\nAssistant: %(llm)s"
+        # Tool format: Assistant outputs the tool call, then Tool provides the result
+        self.tool_history_format = "\nUser: %(user)s\nAssistant: <tool_call>%(tool_json)s</tool_call>\nTool: %(result)s"
         self.file = filepath
         self.update_file = update_file
         self.history_offset = 10250
@@ -245,10 +247,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
         3. Avoid *all* prohibited explanations/text.
         ''')

-        incomplete_chat_history = self.chat_history +
-            "user": prompt,
-            "llm": ""
-        }
+        incomplete_chat_history = self.chat_history + "\nUser: " + prompt + "\nAssistant:" # Ensure it ends correctly
         complete_prompt = intro + self.__trim_chat_history(incomplete_chat_history, intro)
         return complete_prompt

@@ -274,6 +273,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
         if not self.status and not force:
             return

+        # Use the updated history_format
         new_history = self.history_format % {"user": prompt, "llm": response}

         if self.file and self.update_file:
@@ -290,31 +290,33 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
         # logger.info(f"Chat history updated with prompt: {prompt}")

     def update_chat_history_with_tool(
-        self, prompt: str,
+        self, prompt: str, tool_call_json: str, tool_result: str, force: bool = False # Changed tool_name to tool_call_json
     ) -> None:
         """Update chat history with a tool call and its result.

         This method:
-        - Adds tool call interaction to the history
+        - Adds tool call interaction to the history using the new format
         - Updates the file if needed
         - Maintains the conversation flow with tools

         Args:
             prompt (str): The user's message that triggered the tool call
-
+            tool_call_json (str): The JSON string representing the tool call made by the assistant
             tool_result (str): Result returned by the tool
             force (bool): Force update even if history is off. Default: False

         Examples:
             >>> chat = Conversation()
-            >>>
+            >>> tool_json = '{"name": "weather_tool", "arguments": {"location": "London"}}'
+            >>> chat.update_chat_history_with_tool("What's the weather?", tool_json, "It's sunny, 75°F")
         """
         if not self.status and not force:
             return

+        # Use the updated tool_history_format
         new_history = self.tool_history_format % {
             "user": prompt,
-            "
+            "tool_json": tool_call_json, # Use the JSON string
             "result": tool_result
         }

@@ -350,25 +352,35 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
         if not self.validate_message(role, content):
             raise ValueError("Invalid message role or content")

+        # Updated role formats to match User/Assistant
         role_formats = {
             "user": "User",
-            "
+            "assistant": "Assistant", # Changed from 'llm'
+            "llm": "Assistant", # Keep llm for backward compatibility? Or remove? Let's keep for now.
             "tool": "Tool",
-            "reasoning": "Reasoning"
+            "reasoning": "Reasoning" # Keep reasoning if used internally
         }

         if role in role_formats:
-
+            # Special handling for assistant's tool call output
+            if role == "assistant" and "<tool_call>" in content:
+                # History format already includes the tags, just add the content
+                self.chat_history += f"\n{role_formats[role]}: {content}"
+            elif role == "tool":
+                # Tool results follow the Assistant's tool call
+                self.chat_history += f"\n{role_formats[role]}: {content}"
+            else:
+                # Standard user/assistant message
+                self.chat_history += f"\n{role_formats[role]}: {content}"
         else:
             raise ValueError(f"Invalid role: {role}. Must be one of {list(role_formats.keys())}")

-
-        # logger.info(f"Added message from {role}: {content}")
-        # logging.info(f"Message added: {role}: {content}")
+        # ... (logging remains the same) ...

     def validate_message(self, role: str, content: str) -> bool:
         """Validate the message role and content."""
-
+        # Updated valid roles
+        valid_roles = {'user', 'assistant', 'llm', 'tool', 'reasoning'} # Changed 'llm' to 'assistant', kept 'llm' maybe?
         if role not in valid_roles:
             logging.error(f"Invalid role: {role}")
             return False
@@ -546,14 +558,16 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
         result = self.execute_function(function_call_data)

         # Add the result to chat history as a tool message
-
+        # The assistant's response (the tool call itself) should have been added before calling this
+        # Now we add the tool's result
+        self.add_message("tool", result) # This will now correctly add "\nTool: <result>"

         return {
             "is_tool_call": True,
             "success": True,
-            "result": result,
+            "result": result, # This is the tool's execution result
             "tool_calls": function_call_data.get("tool_calls", []),
-            "original_response": response
+            "original_response": response # This is the LLM's response containing the <tool_call>
         }

         return {
webscout/exceptions.py
CHANGED
@@ -11,6 +11,26 @@ class WebscoutE(Exception):
     pass


+class ModelNotFoundError(WebscoutE):
+    """
+    Exception raised when a requested model is not found or available.
+
+    This exception is raised when the specified model cannot be located or accessed by the provider.
+    It indicates that the model name might be incorrect or the provider does not support it.
+    """
+    pass
+
+
+class MissingRequirementsError(WebscoutE):
+    """
+    Exception raised when required dependencies are missing.
+
+    This exception is raised when a feature requires certain libraries or packages that are not installed.
+    It indicates that the user needs to install the missing dependencies to use the feature.
+    """
+    pass
+
+
 class APIConnectionError(WebscoutE):
     """
     Exception raised when there are issues connecting to an API.
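Both new classes subclass WebscoutE, so existing catch-all handlers keep working while callers can now distinguish the two failure modes. A short illustrative sketch (the load_model helper is hypothetical; only the exception names come from the diff):

from webscout.exceptions import ModelNotFoundError, MissingRequirementsError

def load_model(name: str) -> str:
    # Hypothetical helper, used only to show the intended error semantics.
    available = {"gpt-4o-mini", "llama-3-70b"}
    if name not in available:
        raise ModelNotFoundError(f"Model '{name}' is not available from this provider")
    return name

try:
    load_model("nonexistent-model")
except ModelNotFoundError as err:
    print(f"Model lookup failed: {err}")
except MissingRequirementsError as err:
    print(f"Install the missing dependency first: {err}")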
webscout/prompt_manager.py
CHANGED
@@ -2,7 +2,7 @@

 import os
 import json
-import
+from curl_cffi.requests import Session
 from typing import Optional, Dict, Union
 from rich.console import Console
 from rich.table import Table
@@ -12,12 +12,14 @@ console = Console()

 class AwesomePrompts:
     """The most awesome prompts manager you'll ever see fr fr! 🔥"""
-
+
     def __init__(
         self,
         repo_url: str = "https://raw.githubusercontent.com/OE-LUCIFER/prompts/main/prompt.json",
         local_path: Optional[str] = None,
-        auto_update: bool = True
+        auto_update: bool = True,
+        timeout: int = 10,
+        impersonate: str = "chrome110"
     ):
         """Initialize them Awesome Prompts with style! 💫

@@ -25,6 +27,8 @@ class AwesomePrompts:
            repo_url (str): URL to fetch prompts from
            local_path (str, optional): Where to save them prompts locally
            auto_update (bool): Auto update prompts on init. Defaults to True
+           timeout (int): Timeout for HTTP requests. Defaults to 10.
+           impersonate (str): Browser profile for curl_cffi. Defaults to "chrome110".
         """
         self.repo_url = repo_url
         self.local_path = local_path or os.path.join(
@@ -34,14 +38,20 @@ class AwesomePrompts:
         )
         self._cache: Dict[Union[str, int], str] = {}
         self._last_update: Optional[datetime] = None
-
+        self.timeout = timeout
+        # Initialize curl_cffi session
+        self.session = Session(
+            timeout=self.timeout,
+            impersonate=impersonate
+        )
+
         # Create directory if it doesn't exist
         os.makedirs(os.path.dirname(self.local_path), exist_ok=True)
-
+
         # Load those prompts on init if auto_update is True
         if auto_update:
             self.update_prompts_from_online()
-
+
     def _load_prompts(self) -> Dict[Union[str, int], str]:
         """Load prompts from the local file fr fr! 📂"""
         try:
@@ -52,7 +62,7 @@ class AwesomePrompts:
         except Exception as e:
             console.print(f"[red]❌ Error loading prompts: {str(e)}[/red]")
             return {}
-
+
     def _save_prompts(self, prompts: Dict[Union[str, int], str]) -> None:
         """Save them prompts with style! 💾"""
         try:
@@ -62,7 +72,7 @@ class AwesomePrompts:
             console.print("[green]✨ Prompts saved successfully![/green]")
         except Exception as e:
             console.print(f"[red]❌ Error saving prompts: {str(e)}[/red]")
-
+
     def update_prompts_from_online(self, force: bool = False) -> bool:
         """Update prompts from the repo! 🚀

@@ -78,34 +88,39 @@ class AwesomePrompts:
                (datetime.now() - self._last_update).total_seconds() < 3600:
                 console.print("[yellow]⚡ Prompts are already up to date![/yellow]")
                 return True
-
+
             console.print("[cyan]🔄 Updating prompts...[/cyan]")
-
+            # Use the curl_cffi session
+            response = self.session.get(self.repo_url)
             response.raise_for_status()
-
+
             # Merge new prompts with existing ones
             new_prompts = response.json()
             existing_prompts = self._load_prompts()
             merged_prompts = {**existing_prompts, **new_prompts}
-
+
             # Create a new dictionary for numeric indices
             indexed_prompts = merged_prompts.copy()
-
+
             # Add indices for numeric access
             for i, (key, value) in enumerate(list(merged_prompts.items())):
                 if isinstance(key, str):
                     indexed_prompts[i] = value
-
+
             self._save_prompts(indexed_prompts)
             self._last_update = datetime.now()
-
+
             console.print("[green]✨ Prompts updated successfully![/green]")
             return True
-
+
         except Exception as e:
-
+            # Provide more specific error context if possible
+            if hasattr(e, 'response') and e.response is not None:
+                console.print(f"[red]❌ Error updating prompts with status {e.response.status_code}: {str(e)}[/red]")
+            else:
+                console.print(f"[red]❌ Error updating prompts: {str(e)}[/red]")
             return False
-
+
     def get_act(
         self,
         key: Union[str, int],
@@ -124,20 +139,20 @@ class AwesomePrompts:
            str: The prompt or default value
         """
         prompts = self._cache or self._load_prompts()
-
+
         # Try direct access first
         if key in prompts:
             return prompts[key]
-
+
         # Try case-insensitive search for string keys
         if isinstance(key, str) and case_insensitive:
             key_lower = key.lower()
             for k, v in prompts.items():
                 if isinstance(k, str) and k.lower() == key_lower:
                     return v
-
+
         return default
-
+
     def add_prompt(self, name: str, prompt: str) -> bool:
         """Add a new prompt to the collection! ✨

@@ -151,12 +166,12 @@ class AwesomePrompts:
         if not name or not prompt:
             console.print("[red]❌ Name and prompt cannot be empty![/red]")
             return False
-
+
         prompts = self._cache or self._load_prompts()
         prompts[name] = prompt
         self._save_prompts(prompts)
         return True
-
+
     def delete_prompt(
         self,
         name: Union[str, int],
@@ -174,13 +189,13 @@ class AwesomePrompts:
            bool: Success status
         """
         prompts = self._cache or self._load_prompts()
-
+
         # Handle direct key match
         if name in prompts:
             del prompts[name]
             self._save_prompts(prompts)
             return True
-
+
         # Handle case-insensitive match
         if isinstance(name, str) and case_insensitive:
             name_lower = name.lower()
@@ -189,12 +204,12 @@ class AwesomePrompts:
                 del prompts[k]
                 self._save_prompts(prompts)
                 return True
-
+
         if raise_not_found:
             raise KeyError(f"Prompt '{name}' not found!")
         console.print(f"[yellow]⚠️ Prompt '{name}' not found![/yellow]")
         return False
-
+
     @property
     def all_acts(self) -> Dict[Union[str, int], str]:
         """All them awesome prompts mapped with style! 📚
@@ -206,17 +221,17 @@ class AwesomePrompts:
         if not prompts:
             self.update_prompts_from_online()
             prompts = self._load_prompts()
-
+
         # Create a new dictionary for the result
         result = prompts.copy()
-
+
         # Add numeric indices to the copy
         for i, (key, value) in enumerate(list(prompts.items())):
             if isinstance(key, str):
                 result[i] = value
-
+
         return result
-
+
     def show_acts(self, search: Optional[str] = None) -> None:
         """Show all them awesome prompts with style! 📋

@@ -224,7 +239,7 @@ class AwesomePrompts:
            search: Optional search term to filter prompts
         """
         prompts = self.all_acts
-
+
         # Create a fire table! 🔥
         table = Table(
             title="🚀 Awesome Prompts Collection",
@@ -234,21 +249,21 @@ class AwesomePrompts:
         table.add_column("Index", style="cyan", justify="right")
         table.add_column("Name", style="green")
         table.add_column("Preview", style="yellow")
-
+
         for i, (key, value) in enumerate(prompts.items()):
             if isinstance(key, int):
                 continue # Skip numeric keys as they're duplicates
-
+
             # Filter by search term if provided
             if search and search.lower() not in key.lower() and \
                search.lower() not in value.lower():
                 continue
-
+
             preview = value[:50] + "..." if len(value) > 50 else value
             table.add_row(str(i), str(key), preview)
-
+
         console.print(table)
-
+
     def get_random_act(self) -> Optional[str]:
         """Get a random prompt for that surprise factor! 🎲"""
         import random
@@ -261,14 +276,13 @@ class AwesomePrompts:
 if __name__ == "__main__":
     # Quick demo of the features! 🚀
     prompts = AwesomePrompts()
-    prompts.update_prompts_from_online()
     prompts.show_acts()
-
+
     # Add a test prompt
     prompts.add_prompt("test_prompt", "This is a test prompt! 🔥")
-
+
     # Show the new prompt
     print("\nTest Prompt:", prompts.get_act("test_prompt"))
-
+
     # Clean up
     prompts.delete_prompt("test_prompt")
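A minimal usage sketch for the updated constructor, assuming the new timeout and impersonate keyword arguments shown in the diff and a working curl_cffi install; the prompt key is illustrative, and get_act is assumed to return None when the key is missing:

from webscout.prompt_manager import AwesomePrompts

# auto_update=False skips the network fetch at construction time.
prompts = AwesomePrompts(auto_update=False, timeout=15, impersonate="chrome110")

# Fetch the collection explicitly through the curl_cffi session, then look one up.
if prompts.update_prompts_from_online(force=True):
    act = prompts.get_act("Linux Terminal")  # illustrative key
    print(act or "<no such prompt>")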
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "8.2.3"
+__version__ = "8.2.4"
 __prog__ = "webscout"
webscout/webscout_search.py
CHANGED
@@ -17,7 +17,7 @@ from typing import Any, cast
 import os
 from typing import Literal, Iterator

-import
+import curl_cffi.requests # type: ignore

 try:
     from lxml.etree import _Element
@@ -46,18 +46,14 @@ class WEBS:
     """webscout class to get search results from duckduckgo.com."""

     _executor: ThreadPoolExecutor = ThreadPoolExecutor()
+    # curl_cffi supports different browser versions than primp
     _impersonates = (
-        "
-        "
-        "
-        "
-        "
-        "
-        "safari_17.0", "safari_17.2.1", "safari_17.4.1", "safari_17.5",
-        "safari_18", "safari_18.2",
-        "safari_ipad_18",
-        "edge_101", "edge_122", "edge_127", "edge_131",
-        "firefox_109", "firefox_117", "firefox_128", "firefox_133", "firefox_135",
+        "chrome99", "chrome100", "chrome101", "chrome104", "chrome107", "chrome110",
+        "chrome116", "chrome119", "chrome120", "chrome123", "chrome124", "chrome131", "chrome133a",
+        "chrome99_android", "chrome131_android",
+        "safari15_3", "safari15_5", "safari17_0", "safari17_2_ios", "safari18_0", "safari18_0_ios",
+        "edge99", "edge101",
+        "firefox133", "firefox135",
     ) # fmt: skip
     _impersonates_os = ("android", "ios", "linux", "macos", "windows")
     _chat_models = {
@@ -109,15 +105,14 @@ class WEBS:
         self.headers = headers if headers else {}
         self.headers.update(default_headers)

-
+        # curl_cffi has different parameters than primp
+        impersonate_browser = choice(self._impersonates)
+        self.client = curl_cffi.requests.Session(
             headers=self.headers,
-
+            proxies={'http': self.proxy, 'https': self.proxy} if self.proxy else None,
             timeout=timeout,
-
-
-            impersonate=choice(self._impersonates),
-            impersonate_os=choice(self._impersonates_os),
-            follow_redirects=False,
+            # curl_cffi doesn't accept cookies=True, it needs a dict or None
+            impersonate=impersonate_browser,
             verify=verify,
         )
         self.timeout = timeout
@@ -166,17 +161,33 @@
     ) -> Any:
         self._sleep()
         try:
-
-
-
-
-
-
-
-
-
-
-
+            # curl_cffi doesn't accept cookies=True in request methods
+            request_kwargs = {
+                "params": params,
+                "headers": headers,
+                "json": json,
+                "timeout": timeout or self.timeout,
+            }
+
+            # Add cookies if they're a dict, not a bool
+            if isinstance(cookies, dict):
+                request_kwargs["cookies"] = cookies
+
+            if method == "GET":
+                # curl_cffi uses data instead of content
+                if content:
+                    request_kwargs["data"] = content
+                resp = self.client.get(url, **request_kwargs)
+            elif method == "POST":
+                # handle both data and content
+                if data or content:
+                    request_kwargs["data"] = data or content
+                resp = self.client.post(url, **request_kwargs)
+            else:
+                # handle both data and content
+                if data or content:
+                    request_kwargs["data"] = data or content
+                resp = self.client.request(method, url, **request_kwargs)
         except Exception as ex:
             if "time" in str(ex).lower():
                 raise TimeoutE(f"{url} {type(ex).__name__}: {ex}") from ex
@@ -296,7 +307,8 @@
         self._chat_vqd_hash = resp.headers.get("x-vqd-hash-1", "")
         chunks = []

-
+        # curl_cffi uses iter_content instead of stream
+        for chunk in resp.iter_content(chunk_size=1024):
             lines = chunk.split(b"data:")
             for line in lines:
                 if line := line.strip():
@@ -304,20 +316,24 @@
                     break
                 if line == b"[DONE][LIMIT_CONVERSATION]":
                     raise ConversationLimitException("ERR_CONVERSATION_LIMIT")
-
-
-                if x
-
-
-
-
-
-
-
-
-
-
-
+                try:
+                    x = json_loads(line)
+                    if isinstance(x, dict):
+                        if x.get("action") == "error":
+                            err_message = x.get("type", "")
+                            if x.get("status") == 429:
+                                raise (
+                                    ConversationLimitException(err_message)
+                                    if err_message == "ERR_CONVERSATION_LIMIT"
+                                    else RatelimitE(err_message)
+                                )
+                            raise WebscoutE(err_message)
+                        elif message := x.get("message"):
+                            chunks.append(message)
+                            yield message
+                except Exception as e:
+                    # Skip invalid JSON data
+                    continue

         # If we get here, the request was successful
         result = "".join(chunks)
@@ -541,7 +557,8 @@
             return []

         page_results = []
-
+        # curl_cffi returns bytes, not a file-like object
+        tree = document_fromstring(resp_content)
         elements = tree.xpath("//div[h2]")
         if not isinstance(elements, list):
             return []
@@ -628,7 +645,8 @@
             return []

         page_results = []
-
+        # curl_cffi returns bytes, not a file-like object
+        tree = document_fromstring(resp_content)
         elements = tree.xpath("//table[last()]//tr")
         if not isinstance(elements, list):
             return []