webscout 7.9__py3-none-any.whl → 8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (69):
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  8. webscout/Provider/AISEARCH/ISou.py +1 -1
  9. webscout/Provider/AISEARCH/__init__.py +6 -1
  10. webscout/Provider/AISEARCH/felo_search.py +1 -1
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/AISEARCH/hika_search.py +194 -0
  13. webscout/Provider/AISEARCH/iask_search.py +436 -0
  14. webscout/Provider/AISEARCH/monica_search.py +246 -0
  15. webscout/Provider/AISEARCH/scira_search.py +320 -0
  16. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  17. webscout/Provider/AllenAI.py +255 -122
  18. webscout/Provider/DeepSeek.py +1 -2
  19. webscout/Provider/Deepinfra.py +17 -9
  20. webscout/Provider/ExaAI.py +261 -0
  21. webscout/Provider/ExaChat.py +8 -1
  22. webscout/Provider/GithubChat.py +2 -1
  23. webscout/Provider/Jadve.py +2 -2
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OPENAI/__init__.py +17 -0
  26. webscout/Provider/OPENAI/base.py +46 -0
  27. webscout/Provider/OPENAI/c4ai.py +347 -0
  28. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  29. webscout/Provider/OPENAI/deepinfra.py +284 -0
  30. webscout/Provider/OPENAI/exaai.py +419 -0
  31. webscout/Provider/OPENAI/exachat.py +421 -0
  32. webscout/Provider/OPENAI/freeaichat.py +355 -0
  33. webscout/Provider/OPENAI/glider.py +314 -0
  34. webscout/Provider/OPENAI/heckai.py +337 -0
  35. webscout/Provider/OPENAI/llmchatco.py +325 -0
  36. webscout/Provider/OPENAI/netwrck.py +348 -0
  37. webscout/Provider/OPENAI/scirachat.py +459 -0
  38. webscout/Provider/OPENAI/sonus.py +294 -0
  39. webscout/Provider/OPENAI/typegpt.py +361 -0
  40. webscout/Provider/OPENAI/utils.py +211 -0
  41. webscout/Provider/OPENAI/venice.py +428 -0
  42. webscout/Provider/OPENAI/wisecat.py +381 -0
  43. webscout/Provider/OPENAI/x0gpt.py +389 -0
  44. webscout/Provider/OPENAI/yep.py +329 -0
  45. webscout/Provider/OpenGPT.py +199 -0
  46. webscout/Provider/PI.py +39 -24
  47. webscout/Provider/Venice.py +1 -1
  48. webscout/Provider/Youchat.py +326 -296
  49. webscout/Provider/__init__.py +16 -6
  50. webscout/Provider/ai4chat.py +58 -56
  51. webscout/Provider/akashgpt.py +34 -22
  52. webscout/Provider/freeaichat.py +1 -1
  53. webscout/Provider/labyrinth.py +121 -20
  54. webscout/Provider/llmchatco.py +306 -0
  55. webscout/Provider/scira_chat.py +274 -0
  56. webscout/Provider/typefully.py +280 -0
  57. webscout/Provider/typegpt.py +3 -184
  58. webscout/prompt_manager.py +2 -1
  59. webscout/version.py +1 -1
  60. webscout/webscout_search.py +118 -54
  61. webscout/webscout_search_async.py +109 -45
  62. webscout-8.1.dist-info/METADATA +683 -0
  63. {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
  64. webscout/Provider/flowith.py +0 -207
  65. webscout-7.9.dist-info/METADATA +0 -995
  66. {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  67. {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  68. {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  69. {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,347 @@
1
+ import time
2
+ import uuid
3
+ import requests
4
+ import json
5
+ import re
6
+ from typing import List, Dict, Optional, Union, Generator, Any
7
+
8
+ # Import base classes and utility structures
9
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
10
+ from .utils import (
11
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
12
+ ChatCompletionMessage, CompletionUsage,
13
+ get_system_prompt, get_last_user_message, format_prompt # Import format_prompt
14
+ )
15
+
16
+ # Import LitAgent for browser fingerprinting
17
+ from webscout.litagent import LitAgent
18
+
19
class Completions(BaseCompletions):
    """Chat-completions interface for the C4AI provider.

    Mirrors ``openai.chat.completions.create``: builds a prompt from the
    OpenAI-style message list, manages the provider-side conversation, and
    returns either a ``ChatCompletion`` or a generator of
    ``ChatCompletionChunk`` objects.
    """

    def __init__(self, client: 'C4AI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2000,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create.

        Args:
            model: C4AI model id (see ``C4AI.AVAILABLE_MODELS``).
            messages: OpenAI-style list of ``{"role", "content"}`` dicts.
            max_tokens: Max tokens to sample; falls back to the client
                default when None.
            stream: When True, yield ``ChatCompletionChunk`` objects.
            temperature: Sampling temperature; provider default 0.7 when None.
            top_p: Nucleus-sampling value; provider default 1 when None.
            **kwargs: Accepted for signature compatibility; ignored.

        Returns:
            ``ChatCompletion``, or a generator of ``ChatCompletionChunk``
            when ``stream`` is True.

        Raises:
            ValueError: If no user content can be extracted from *messages*.
            IOError: If the conversation cannot be created or a request fails.
        """
        # The system prompt is sent separately to C4AI as "preprompt".
        system_prompt = get_system_prompt(messages)
        if not system_prompt:
            system_prompt = "You are a helpful assistant."

        # Format the conversation history using format_prompt.
        # Note: C4AI API might expect only the *last* user message here.
        # Sending the full history might cause issues.
        # We exclude the system prompt from format_prompt as it's sent separately.
        # We also set do_continue=True as C4AI adds its own assistant prompt implicitly.
        conversation_prompt = format_prompt(messages, include_system=False, do_continue=True)

        if not conversation_prompt:
            # Fallback to last user message if formatted prompt is empty
            last_user_message = get_last_user_message(messages)
            if not last_user_message:
                raise ValueError("No user message found or formatted prompt is empty.")
            conversation_prompt = last_user_message

        # Create a fresh conversation for a new model, or reuse the cached
        # one (refreshing its message id, which advances on every turn).
        if model not in self._client._conversation_data:
            conversation_id = self._client.create_conversation(model, system_prompt)
            if not conversation_id:
                raise IOError(f"Failed to create conversation with model {model}")
        else:
            conversation_id = self._client._conversation_data[model]["conversationId"]
            self._client._conversation_data[model]["messageId"] = self._client.fetch_message_id(conversation_id)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        # Pass the formatted conversation prompt and the caller's sampling
        # overrides (previously these were accepted but silently ignored).
        if stream:
            return self._create_stream(
                request_id, created_time, model, conversation_id,
                conversation_prompt, system_prompt,
                max_tokens=max_tokens, temperature=temperature, top_p=top_p,
            )
        else:
            return self._create_non_stream(
                request_id, created_time, model, conversation_id,
                conversation_prompt, system_prompt,
                max_tokens=max_tokens, temperature=temperature, top_p=top_p,
            )

    def _build_payload(
        self,
        conversation_id: str,
        message_id: str,
        model: str,
        prompt: str,
        system_prompt: str,
        stream: bool,
        max_tokens: Optional[int],
        temperature: Optional[float],
        top_p: Optional[float],
    ) -> Dict[str, Any]:
        """Assemble the C4AI message payload, applying caller overrides.

        Falls back to the historical defaults (temperature 0.7, top_p 1,
        client max_tokens) when an override is None, so callers that never
        passed these parameters see identical requests.
        """
        return {
            "conversationId": conversation_id,
            "messageId": message_id,
            "model": model,
            "prompt": prompt,  # Use the formatted conversation history as prompt
            "preprompt": system_prompt,
            "temperature": 0.7 if temperature is None else temperature,
            "top_p": 1 if top_p is None else top_p,
            "top_k": 50,
            "max_tokens": self._client.max_tokens_to_sample if max_tokens is None else max_tokens,
            "stop": [],
            "stream": stream,
        }

    def _create_stream(
        self, request_id: str, created_time: int, model: str, conversation_id: str,
        prompt: str, system_prompt: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Stream completion chunks from the C4AI SSE endpoint.

        Raises:
            IOError: On any request/parse failure (chained from the cause).
        """
        try:
            message_id = self._client._conversation_data[model]["messageId"]
            url = f"{self._client.url}/api/chat/message"
            payload = self._build_payload(
                conversation_id, message_id, model, prompt, system_prompt,
                True, max_tokens, temperature, top_p,
            )

            response = self._client.session.post(
                url,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            )
            response.raise_for_status()

            # The server resends the full accumulated text on each event;
            # emit only the newly appended suffix as the chunk delta.
            full_text = ""
            for line in response.iter_lines():
                if not line:
                    continue
                line = line.decode('utf-8')
                if not line.startswith('data: '):
                    continue
                data = line[6:]
                if data == "[DONE]":
                    break
                try:
                    json_data = json.loads(data)
                except json.JSONDecodeError:
                    # Skip keep-alive / partial frames.
                    continue
                delta_text = json_data.get('text', '')
                new_content = delta_text[len(full_text):]
                full_text = delta_text
                delta = ChoiceDelta(content=new_content)
                choice = Choice(index=0, delta=delta, finish_reason=None)
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model
                )

            # Terminal chunk signalling completion.
            delta = ChoiceDelta(content=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            yield ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model
            )

        except Exception as e:
            print(f"Error during C4AI stream request: {e}")
            raise IOError(f"C4AI request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, conversation_id: str,
        prompt: str, system_prompt: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> ChatCompletion:
        """Perform a blocking completion request and return the full result.

        Raises:
            IOError: On any request/parse failure (chained from the cause).
        """
        try:
            message_id = self._client._conversation_data[model]["messageId"]
            url = f"{self._client.url}/api/chat/message"
            payload = self._build_payload(
                conversation_id, message_id, model, prompt, system_prompt,
                False, max_tokens, temperature, top_p,
            )

            response = self._client.session.post(
                url,
                headers=self._client.headers,
                json=payload,
                timeout=self._client.timeout
            )
            response.raise_for_status()

            data = response.json()
            response_text = data.get('text', '')
            message = ChatCompletionMessage(role="assistant", content=response_text)
            choice = Choice(index=0, message=message, finish_reason="stop")
            # Rough token estimate (~4 chars/token) based on the formatted prompt;
            # the upstream API does not report usage.
            prompt_tokens = len(prompt) // 4
            completion_tokens = len(response_text) // 4
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )
            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage
            )

        except Exception as e:
            print(f"Error during C4AI non-stream request: {e}")
            raise IOError(f"C4AI request failed: {e}") from e
196
+
197
class Chat(BaseChat):
    """Namespace exposing ``client.chat.completions``, OpenAI-SDK style."""

    def __init__(self, client: 'C4AI'):
        # All completion calls are delegated to the Completions helper.
        self.completions = Completions(client)
200
+
201
class C4AI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for C4AI API.

    Usage:
        client = C4AI()
        response = client.chat.completions.create(
            model="command-a-03-2025",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """

    # Static fallback; refreshed from the server by update_available_models().
    AVAILABLE_MODELS = [
        'command-a-03-2025',
        'command-r-plus-08-2024',
        'command-r-08-2024',
        'command-r-plus',
        'command-r',
        'command-r7b-12-2024',
        'command-r7b-arabic-02-2025'
    ]

    def __init__(
        self,
        timeout: Optional[int] = None,
        browser: str = "chrome"
    ):
        """
        Initialize the C4AI client.

        Args:
            timeout: Request timeout in seconds (None means no timeout).
            browser: Browser name for LitAgent to generate User-Agent.
        """
        self.timeout = timeout
        self.url = "https://cohereforai-c4ai-command.hf.space"
        self.session = requests.Session()
        self.max_tokens_to_sample = 2000

        # Generate a browser-like fingerprint for the request headers.
        agent = LitAgent()
        fingerprint = agent.generate_fingerprint(browser)

        self.headers = {
            "Content-Type": "application/json",
            "User-Agent": fingerprint["user_agent"],
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": fingerprint["accept_language"],
            "Origin": "https://cohereforai-c4ai-command.hf.space",
            "Referer": "https://cohereforai-c4ai-command.hf.space/",
            "Sec-Ch-Ua": fingerprint["sec_ch_ua"] or "\"Chromium\";v=\"120\"",
            "Sec-Ch-Ua-Mobile": "?0",
            "Sec-Ch-Ua-Platform": f"\"{fingerprint['platform']}\"",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "DNT": "1",
            "Priority": "u=1, i"
        }

        # Per-model cache of {"conversationId": ..., "messageId": ...}.
        self._conversation_data = {}
        self.chat = Chat(self)
        self.update_available_models()

    def update_available_models(self):
        """Update the list of available models from the server.

        Best-effort: scrapes the space's bootstrap script for the model
        list; on any failure the static fallback list is kept.
        """
        try:
            # Use the shared session, fingerprinted headers, and client
            # timeout — the previous bare requests.get() bypassed all three
            # and could hang the constructor indefinitely.
            response = self.session.get(
                f"{self.url}/",
                headers=self.headers,
                timeout=self.timeout
            )
            text = response.text
            models_match = re.search(r'models:(\[.+?\]),oldModels:', text)

            if not models_match:
                return

            models_text = models_match.group(1)
            # Drop the non-JSON "parameters" objects and normalise the
            # remaining JavaScript literal into parseable JSON.
            models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
            models_text = models_text.replace('void 0', 'null')

            def add_quotation_mark(match):
                # Quote bare JS object keys so json.loads accepts them.
                return f'{match.group(1)}"{match.group(2)}":'

            models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)

            models_data = json.loads(models_text)
            self.AVAILABLE_MODELS = [model["id"] for model in models_data]
        except Exception:
            # Deliberate best-effort: keep the static model list on failure.
            pass

    def create_conversation(self, model: str, system_prompt: str):
        """Create a new conversation with the specified model.

        Args:
            model: Model id to open the conversation with.
            system_prompt: Sent as the conversation "preprompt".

        Returns:
            The conversation id string, or None on failure.
        """
        url = f"{self.url}/api/conversation"
        payload = {
            "model": model,
            "preprompt": system_prompt,
        }

        try:
            response = self.session.post(
                url,
                headers=self.headers,
                json=payload,
                timeout=self.timeout
            )
            response.raise_for_status()

            data = response.json()
            conversation_id = data.get("conversationId")

            if conversation_id:
                # Cache the id plus the initial message id for this model.
                self._conversation_data[model] = {
                    "conversationId": conversation_id,
                    "messageId": self.fetch_message_id(conversation_id)
                }
                return conversation_id

            return None

        except Exception as e:
            print(f"Error creating conversation: {e}")
            return None

    def fetch_message_id(self, conversation_id: str):
        """Fetch the latest message ID for a conversation.

        Falls back to a random UUID whenever the response cannot be parsed
        (the upstream API appears to tolerate unknown message ids —
        TODO confirm).
        """
        url = f"{self.url}/api/conversation/{conversation_id}"

        try:
            response = self.session.get(
                url,
                headers=self.headers,
                timeout=self.timeout
            )
            response.raise_for_status()

            json_data = response.json()

            # A trailing error node means there is no usable message id.
            if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
                return str(uuid.uuid4())

            # The payload is a flat array with index-based references
            # (SvelteKit-style serialisation, presumably); follow the
            # indirections down to the last message's id.
            data = json_data["nodes"][1]["data"]
            keys = data[data[0]["messages"]]
            message_keys = data[keys[-1]]
            message_id = data[message_keys["id"]]

            return message_id

        except Exception:
            return str(uuid.uuid4())