webscout 7.6__py3-none-any.whl → 7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the security advisory for details.

Files changed (36) hide show
  1. webscout/Extra/autocoder/__init__.py +9 -9
  2. webscout/Extra/autocoder/autocoder_utiles.py +193 -195
  3. webscout/Extra/autocoder/rawdog.py +789 -649
  4. webscout/Extra/gguf.py +54 -24
  5. webscout/Provider/AISEARCH/ISou.py +0 -21
  6. webscout/Provider/AllenAI.py +4 -21
  7. webscout/Provider/ChatGPTClone.py +226 -0
  8. webscout/Provider/Glider.py +8 -4
  9. webscout/Provider/Hunyuan.py +272 -0
  10. webscout/Provider/LambdaChat.py +391 -0
  11. webscout/Provider/OLLAMA.py +256 -32
  12. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +18 -45
  13. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +34 -46
  14. webscout/Provider/TTI/artbit/async_artbit.py +3 -32
  15. webscout/Provider/TTI/artbit/sync_artbit.py +3 -31
  16. webscout/Provider/TTI/fastflux/async_fastflux.py +6 -2
  17. webscout/Provider/TTI/fastflux/sync_fastflux.py +7 -2
  18. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  19. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  20. webscout/Provider/WebSim.py +227 -0
  21. webscout/Provider/__init__.py +12 -1
  22. webscout/Provider/flowith.py +13 -2
  23. webscout/Provider/labyrinth.py +239 -0
  24. webscout/Provider/learnfastai.py +28 -15
  25. webscout/Provider/sonus.py +208 -0
  26. webscout/Provider/typegpt.py +1 -1
  27. webscout/Provider/uncovr.py +297 -0
  28. webscout/cli.py +49 -0
  29. webscout/litagent/agent.py +14 -9
  30. webscout/version.py +1 -1
  31. {webscout-7.6.dist-info → webscout-7.7.dist-info}/METADATA +33 -22
  32. {webscout-7.6.dist-info → webscout-7.7.dist-info}/RECORD +36 -29
  33. {webscout-7.6.dist-info → webscout-7.7.dist-info}/LICENSE.md +0 -0
  34. {webscout-7.6.dist-info → webscout-7.7.dist-info}/WHEEL +0 -0
  35. {webscout-7.6.dist-info → webscout-7.7.dist-info}/entry_points.txt +0 -0
  36. {webscout-7.6.dist-info → webscout-7.7.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,208 @@
1
+ import requests
2
+ import json
3
+ from typing import Any, Dict, Optional, Generator, Union
4
+ from webscout.AIutel import Optimizers
5
+ from webscout.AIutel import Conversation
6
+ from webscout.AIutel import AwesomePrompts
7
+ from webscout.AIbase import Provider
8
+ from webscout import exceptions
9
+
10
class SonusAI(Provider):
    """Provider for the Sonus AI chat API (https://chat.sonus.ai).

    Sends the prompt as multipart form data and parses the SSE-style
    ``data: {...}`` JSON lines streamed back by the endpoint.
    """

    # Model identifiers accepted by the chat endpoint.
    AVAILABLE_MODELS = [
        "pro",
        "air",
        "mini",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "pro"
    ):
        """Initializes the Sonus AI API client.

        Args:
            is_conversation: Keep a rolling conversation history.
            max_tokens: Token budget handed to the Conversation helper.
            timeout: Per-request timeout in seconds.
            intro: Optional intro/system prompt.
            filepath: Optional path for persisting the conversation.
            update_file: Whether history updates are written to ``filepath``.
            proxies: Optional requests-style proxy mapping.
            history_offset: Maximum history length kept in the prompt.
            act: Optional AwesomePrompts persona key.
            model: One of ``AVAILABLE_MODELS``.

        Raises:
            ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://chat.sonus.ai/chat.php"

        # Static browser-like headers sent with every request.
        self.headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Origin': 'https://chat.sonus.ai',
            'Referer': 'https://chat.sonus.ai/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        # `proxies=None` default avoids the shared mutable-default pitfall.
        self.session.proxies.update(proxies or {})

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model

        # Materialized as a tuple: the original generator expression was
        # exhausted by the first `in` membership test in ask(), silently
        # rejecting any optimizer on subsequent calls.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        reasoning: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Send ``prompt`` to the API.

        Args:
            prompt: User message.
            stream: Yield chunks as they arrive instead of one dict.
            raw: When streaming, yield plain text chunks instead of dicts.
            optimizer: Optional ``Optimizers`` method name applied to the prompt.
            conversationally: Apply the optimizer to the full history prompt.
            reasoning: Ask the backend to include reasoning output.

        Returns:
            ``{"text": ...}`` or, when ``stream`` is True, a generator of chunks.

        Raises:
            exceptions.FailedToGenerateResponseError: On transport or HTTP errors.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Multipart form fields; each value is a (filename, content) tuple.
        # Fix: the original sent 'history': (None) — i.e. a bare None, not a
        # tuple — which is not a valid multipart field value. An empty string
        # is sent instead (presumably the API accepts empty history — TODO
        # confirm against the backend).
        files = {
            'message': (None, conversation_prompt),
            'history': (None, ''),
            'reasoning': (None, str(reasoning).lower()),
            'model': (None, self.model)
        }

        def for_stream():
            try:
                # Use the session so configured proxies apply.
                with self.session.post(self.url, files=files, stream=True, timeout=self.timeout) as response:
                    if response.status_code != 200:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}"
                        )

                    streaming_text = ""
                    for line in response.iter_lines():
                        if line:
                            try:
                                # Decode the line and remove 'data: ' prefix if present
                                line = line.decode('utf-8')
                                if line.startswith('data: '):
                                    line = line[6:]

                                data = json.loads(line)
                                if "content" in data:
                                    content = data["content"]
                                    streaming_text += content
                                    # Fix: honor `raw` — the original yielded the
                                    # same dict in both branches.
                                    yield content if raw else dict(text=content)
                            except (json.JSONDecodeError, UnicodeDecodeError):
                                continue

                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

            except requests.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}") from e

        def for_non_stream():
            try:
                response = self.session.post(self.url, files=files, timeout=self.timeout)
                if response.status_code != 200:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code}"
                    )

                full_response = ""
                for line in response.iter_lines():
                    if line:
                        try:
                            line = line.decode('utf-8')
                            if line.startswith('data: '):
                                line = line[6:]
                            data = json.loads(line)
                            if "content" in data:
                                full_response += data["content"]
                        except (json.JSONDecodeError, UnicodeDecodeError):
                            continue

                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
                return {"text": full_response}
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}") from e

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        reasoning: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Like :meth:`ask`, but returns/yields plain text."""
        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally, reasoning=reasoning)
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the text payload from a response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
184
+
185
if __name__ == "__main__":
    # Smoke-test every model with a tiny streamed prompt, printing one table
    # row per model: name, pass/fail marker, and a short response preview.
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in SonusAI.AVAILABLE_MODELS:
        try:
            client = SonusAI(model=model, timeout=60)
            chunks = client.chat("Say 'Hello' in one word", stream=True)
            collected = "".join(chunks)

            stripped = collected.strip()
            if stripped:
                status = "✓"
                # Drop any non-UTF-8 residue, then truncate long previews.
                clean_text = stripped.encode('utf-8', errors='ignore').decode('utf-8')
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
@@ -267,7 +267,7 @@ class TypeGPT(Provider):
267
267
  self.session = requests.Session()
268
268
  self.is_conversation = is_conversation
269
269
  self.max_tokens_to_sample = max_tokens
270
- self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
270
+ self.api_endpoint = "https://chat.typegpt.net/api/openai/typegpt/v1/chat/completions"
271
271
  self.timeout = timeout
272
272
  self.last_response = {}
273
273
  self.model = model
@@ -0,0 +1,297 @@
1
+ import requests
2
+ import json
3
+ import uuid
4
+ from typing import Any, Dict, Optional, Generator, Union
5
+ from webscout.AIutel import Optimizers
6
+ from webscout.AIutel import Conversation
7
+ from webscout.AIutel import AwesomePrompts
8
+ from webscout.AIbase import Provider
9
+ from webscout import exceptions
10
+ from webscout.litagent import LitAgent
11
+
12
class UncovrAI(Provider):
    """Provider for the Uncovr AI chat API (https://uncovr.app).

    Builds browser-like headers from a LitAgent fingerprint and parses the
    ``0:``-prefixed content lines of the streamed workflow response.
    """

    # Model identifiers accepted by the workflow endpoint.
    AVAILABLE_MODELS = [
        "default",
        "gpt-4o-mini",
        "gemini-2-flash",
        "o3-mini",
        "claude-3-7-sonnet",
        "gpt-4o",
        "claude-3-5-sonnet-v2",
        "groq-llama-3-1-8b",
        "deepseek-r1-distill-llama-70b",
        "deepseek-r1-distill-qwen-32b",
        "gemini-2-flash-lite-preview",
        "qwen-qwq-32b"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "default",
        chat_id: str = None,
        user_id: str = None,
        browser: str = "chrome"
    ):
        """Initializes the Uncovr AI API client.

        Args:
            is_conversation: Keep a rolling conversation history.
            max_tokens: Token budget handed to the Conversation helper.
            timeout: Per-request timeout in seconds.
            intro: Optional intro/system prompt.
            filepath: Optional path for persisting the conversation.
            update_file: Whether history updates are written to ``filepath``.
            proxies: Optional requests-style proxy mapping.
            history_offset: Maximum history length kept in the prompt.
            act: Optional AwesomePrompts persona key.
            model: One of ``AVAILABLE_MODELS``.
            chat_id: Existing chat UUID; a fresh one is generated if omitted.
            user_id: Existing user id; a fresh one is generated if omitted.
            browser: Browser name used for fingerprint generation.

        Raises:
            ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://uncovr.app/api/workflows/chat"

        # LitAgent supplies a consistent browser identity for the headers.
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Headers derived from the fingerprint; Sec-CH-UA falls back to a
        # static value when the fingerprint has none.
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json",
            "Origin": "https://uncovr.app",
            "Referer": "https://uncovr.app/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin"
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        # `proxies=None` default avoids the shared mutable-default pitfall.
        self.session.proxies.update(proxies or {})

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.chat_id = chat_id or str(uuid.uuid4())
        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"

        # Materialized as a tuple: the original generator expression was
        # exhausted by the first `in` membership test in ask(), silently
        # rejecting any optimizer on subsequent calls.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def refresh_identity(self, browser: str = None):
        """Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint.

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with the new fingerprint, keeping the previous
        # Sec-CH-UA when the new fingerprint has none.
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Push the refreshed headers into the live session in one call
        # (replaces the original per-key assignment loop).
        self.session.headers.update(self.headers)

        return self.fingerprint

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        temperature: int = 32,
        creativity: str = "medium",
        selected_focus: list = None,
        selected_tools: list = None
    ) -> Union[Dict[str, Any], Generator]:
        """Send ``prompt`` to the API.

        Args:
            prompt: User message.
            stream: Yield chunks as they arrive instead of one dict.
            raw: When streaming, yield plain text chunks instead of dicts.
            optimizer: Optional ``Optimizers`` method name applied to the prompt.
            conversationally: Apply the optimizer to the full history prompt.
            temperature: Backend sampling temperature (API-specific scale).
            creativity: Backend creativity preset.
            selected_focus: Focus areas; defaults to ``["web"]``.
            selected_tools: Tool ids; defaults to ``["quick-cards"]``.

        Returns:
            ``{"text": ...}`` or, when ``stream`` is True, a generator of chunks.

        Raises:
            exceptions.FailedToGenerateResponseError: On transport or HTTP errors.
        """
        # Resolve list defaults per call — the original used mutable default
        # arguments, which are shared across invocations.
        if selected_focus is None:
            selected_focus = ["web"]
        if selected_tools is None:
            selected_tools = ["quick-cards"]

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the request payload
        payload = {
            "content": conversation_prompt,
            "chatId": self.chat_id,
            "userMessageId": str(uuid.uuid4()),
            "ai_config": {
                "selectedFocus": selected_focus,
                "selectedTools": selected_tools,
                "agentId": "chat",
                "modelId": self.model,
                "temperature": temperature,
                "creativity": creativity
            }
        }

        def for_stream():
            try:
                with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
                    if response.status_code != 200:
                        # On 403/429, refresh the fingerprint once and retry.
                        if response.status_code in [403, 429]:
                            self.refresh_identity()
                            with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as retry_response:
                                if not retry_response.ok:
                                    raise exceptions.FailedToGenerateResponseError(
                                        f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
                                    )
                                response = retry_response
                        else:
                            raise exceptions.FailedToGenerateResponseError(
                                f"Request failed with status code {response.status_code}"
                            )

                    streaming_text = ""
                    for line in response.iter_lines():
                        if line:
                            try:
                                line = line.decode('utf-8')
                                # '0:'-prefixed lines carry quoted content text.
                                if line.startswith('0:'):
                                    content = line[2:].strip('"')
                                    streaming_text += content
                                    # Fix: honor `raw` — the original yielded the
                                    # same dict in both branches.
                                    yield content if raw else dict(text=content)
                            except (json.JSONDecodeError, UnicodeDecodeError):
                                continue

                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

            except requests.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}") from e

        def for_non_stream():
            try:
                response = self.session.post(self.url, json=payload, timeout=self.timeout)
                if response.status_code != 200:
                    if response.status_code in [403, 429]:
                        self.refresh_identity()
                        response = self.session.post(self.url, json=payload, timeout=self.timeout)
                        if not response.ok:
                            raise exceptions.FailedToGenerateResponseError(
                                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
                            )
                    else:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}"
                        )

                full_response = ""
                for line in response.iter_lines():
                    if line:
                        try:
                            line = line.decode('utf-8')
                            if line.startswith('0:'):  # Content message
                                full_response += line[2:].strip('"')
                        except (json.JSONDecodeError, UnicodeDecodeError):
                            continue

                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
                return {"text": full_response}
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}") from e

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        temperature: int = 32,
        creativity: str = "medium",
        selected_focus: list = None,
        selected_tools: list = None
    ) -> Union[str, Generator[str, None, None]]:
        """Like :meth:`ask`, but returns/yields plain text.

        Note: ``selected_tools`` defaults to an empty list here (matching the
        original signature), unlike :meth:`ask` which defaults to
        ``["quick-cards"]``.
        """
        # Mutable-default fix; preserve the original per-method defaults.
        if selected_focus is None:
            selected_focus = ["web"]
        if selected_tools is None:
            selected_tools = []

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally,
                temperature=temperature, creativity=creativity,
                selected_focus=selected_focus, selected_tools=selected_tools
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, False, optimizer=optimizer, conversationally=conversationally,
                    temperature=temperature, creativity=creativity,
                    selected_focus=selected_focus, selected_tools=selected_tools
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the text payload, turning literal ``\\n`` escapes into newlines."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # The original chained a second .replace('\\n\\n', '\n\n') which could
        # never match once the first pass removed every literal '\n'; dropped.
        return response["text"].replace('\\n', '\n')
272
+
273
if __name__ == "__main__":
    # Smoke-test every model with a tiny streamed prompt, printing one table
    # row per model: name, pass/fail marker, and a short response preview.
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in UncovrAI.AVAILABLE_MODELS:
        try:
            client = UncovrAI(model=model, timeout=60)
            chunks = client.chat("Say 'Hello' in one word", stream=True)
            collected = "".join(chunks)

            stripped = collected.strip()
            if stripped:
                status = "✓"
                # Drop any non-UTF-8 residue, then truncate long previews.
                clean_text = stripped.encode('utf-8', errors='ignore').decode('utf-8')
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
297
+
webscout/cli.py CHANGED
@@ -49,6 +49,42 @@ def _print_data(data):
49
49
  console.print(Panel(table, title=f"Result {i}", expand=False, style="green on black"))
50
50
  console.print("\n")
51
51
 
52
def _print_weather(data):
    """Prints weather data in a clean, focused format.

    Renders two panels: current conditions for ``data['location']`` and a
    five-day forecast taken from ``data['daily_forecast']``.
    """
    console = Console()

    # Current conditions panel: label/value pairs, no header row.
    current = data["current"]
    now_table = Table(show_header=False, show_lines=True, expand=True, box=None)
    now_table.add_column("Metric", style="cyan", no_wrap=True, width=15)
    now_table.add_column("Value", style="white")
    for label, value in (
        ("Temperature", f"{current['temperature_c']}°C"),
        ("Feels Like", f"{current['feels_like_c']}°C"),
        ("Humidity", f"{current['humidity']}%"),
        ("Wind", f"{current['wind_speed_ms']} m/s"),
        ("Direction", f"{current['wind_direction']}°"),
    ):
        now_table.add_row(label, value)

    console.print(Panel(now_table, title=f"Current Weather in {data['location']}", expand=False, style="green on black"))
    console.print("\n")

    # Forecast panel: one row per day, limited to the next five days.
    forecast_table = Table(show_header=True, show_lines=True, expand=True, box=None)
    forecast_table.add_column("Date", style="cyan")
    forecast_table.add_column("Condition", style="white")
    forecast_table.add_column("High", style="red")
    forecast_table.add_column("Low", style="blue")

    for day in data["daily_forecast"][:5]:
        forecast_table.add_row(
            day["date"],
            day["condition"],
            f"{day['max_temp_c']}°C",
            f"{day['min_temp_c']}°C",
        )

    console.print(Panel(forecast_table, title="5-Day Forecast", expand=False, style="green on black"))
87
+
52
88
  # Initialize CLI app
53
89
  app = CLI(name="webscout", help="Search the web with a rich UI", version=__version__)
54
90
 
@@ -272,6 +308,19 @@ def suggestions(keywords: str, region: str, proxy: str = None):
272
308
  except Exception as e:
273
309
  raise e
274
310
 
311
@app.command()
@option("--location", "-l", help="Location to get weather for", required=True)
@option("--language", "-lang", help="Language code (e.g. 'en', 'es')", default="en")
@option("--proxy", "-p", help="Proxy URL to use for requests")
def weather(location: str, language: str, proxy: str = None):
    """Get weather information for a location from DuckDuckGo."""
    # The original wrapped the call in `try: ... except Exception as e: raise e`,
    # which handles nothing and only adds a stack frame; let errors propagate
    # to the CLI's own error handling directly.
    webs = WEBS(proxy=proxy)
    results = webs.weather(location, language)
    _print_weather(results)
323
+
275
324
  def main():
276
325
  """Main entry point for the CLI."""
277
326
  try:
@@ -340,21 +340,26 @@ class LitAgent:
340
340
  Returns:
341
341
  Dictionary with fingerprinting headers
342
342
  """
343
- browser = browser.lower() if browser else random.choice(list(BROWSERS.keys()))
344
- if browser not in BROWSERS:
345
- browser = 'chrome'
346
-
347
- version = random.randint(*BROWSERS[browser])
348
- user_agent = self.custom(browser=browser, version=str(version))
343
+ # Get a random user agent using the random() method
344
+ user_agent = self.random()
345
+
346
+ # If browser is specified, try to get a matching one
347
+ if browser:
348
+ browser = browser.lower()
349
+ if browser in BROWSERS:
350
+ user_agent = self.browser(browser)
349
351
 
350
352
  accept_language = random.choice(FINGERPRINTS["accept_language"])
351
353
  accept = random.choice(FINGERPRINTS["accept"])
352
354
  platform = random.choice(FINGERPRINTS["platforms"])
353
355
 
354
- # Generate sec-ch-ua
356
+ # Generate sec-ch-ua based on the user agent
355
357
  sec_ch_ua = ""
356
- if browser in FINGERPRINTS["sec_ch_ua"]:
357
- sec_ch_ua = FINGERPRINTS["sec_ch_ua"][browser].format(version, version)
358
+ for browser_name in FINGERPRINTS["sec_ch_ua"]:
359
+ if browser_name in user_agent.lower():
360
+ version = random.randint(*BROWSERS[browser_name])
361
+ sec_ch_ua = FINGERPRINTS["sec_ch_ua"][browser_name].format(version, version)
362
+ break
358
363
 
359
364
  fingerprint = {
360
365
  "user_agent": user_agent,
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
1
- __version__ = "7.6"
1
+ __version__ = "7.7"
2
2
  __prog__ = "webscout"