webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl

This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (87)
  1. inferno/lol.py +589 -0
  2. webscout/AIutel.py +226 -14
  3. webscout/Bard.py +579 -206
  4. webscout/DWEBS.py +78 -35
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AllenAI.py +163 -126
  8. webscout/Provider/ChatGPTClone.py +96 -84
  9. webscout/Provider/Deepinfra.py +95 -67
  10. webscout/Provider/ElectronHub.py +55 -0
  11. webscout/Provider/GPTWeb.py +96 -46
  12. webscout/Provider/Groq.py +194 -91
  13. webscout/Provider/HeckAI.py +89 -47
  14. webscout/Provider/HuggingFaceChat.py +113 -106
  15. webscout/Provider/Hunyuan.py +94 -83
  16. webscout/Provider/Jadve.py +107 -75
  17. webscout/Provider/LambdaChat.py +106 -64
  18. webscout/Provider/Llama3.py +94 -39
  19. webscout/Provider/MCPCore.py +318 -0
  20. webscout/Provider/Marcus.py +85 -36
  21. webscout/Provider/Netwrck.py +76 -43
  22. webscout/Provider/OPENAI/__init__.py +4 -1
  23. webscout/Provider/OPENAI/ai4chat.py +286 -0
  24. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  25. webscout/Provider/OPENAI/deepinfra.py +37 -0
  26. webscout/Provider/OPENAI/groq.py +354 -0
  27. webscout/Provider/OPENAI/heckai.py +6 -2
  28. webscout/Provider/OPENAI/mcpcore.py +376 -0
  29. webscout/Provider/OPENAI/multichat.py +368 -0
  30. webscout/Provider/OPENAI/netwrck.py +3 -1
  31. webscout/Provider/OpenGPT.py +48 -38
  32. webscout/Provider/PI.py +168 -92
  33. webscout/Provider/PizzaGPT.py +66 -36
  34. webscout/Provider/TeachAnything.py +85 -51
  35. webscout/Provider/TextPollinationsAI.py +109 -51
  36. webscout/Provider/TwoAI.py +109 -60
  37. webscout/Provider/Venice.py +93 -56
  38. webscout/Provider/VercelAI.py +2 -2
  39. webscout/Provider/WiseCat.py +65 -28
  40. webscout/Provider/Writecream.py +37 -11
  41. webscout/Provider/WritingMate.py +135 -63
  42. webscout/Provider/__init__.py +3 -21
  43. webscout/Provider/ai4chat.py +6 -7
  44. webscout/Provider/copilot.py +0 -3
  45. webscout/Provider/elmo.py +101 -58
  46. webscout/Provider/granite.py +91 -46
  47. webscout/Provider/hermes.py +87 -47
  48. webscout/Provider/koala.py +1 -1
  49. webscout/Provider/learnfastai.py +104 -50
  50. webscout/Provider/llama3mitril.py +86 -51
  51. webscout/Provider/llmchat.py +88 -46
  52. webscout/Provider/llmchatco.py +74 -49
  53. webscout/Provider/meta.py +41 -37
  54. webscout/Provider/multichat.py +54 -25
  55. webscout/Provider/scnet.py +93 -43
  56. webscout/Provider/searchchat.py +82 -75
  57. webscout/Provider/sonus.py +103 -51
  58. webscout/Provider/toolbaz.py +132 -77
  59. webscout/Provider/turboseek.py +92 -41
  60. webscout/Provider/tutorai.py +82 -64
  61. webscout/Provider/typefully.py +75 -33
  62. webscout/Provider/typegpt.py +96 -35
  63. webscout/Provider/uncovr.py +112 -62
  64. webscout/Provider/x0gpt.py +69 -26
  65. webscout/Provider/yep.py +79 -66
  66. webscout/conversation.py +35 -21
  67. webscout/exceptions.py +20 -0
  68. webscout/prompt_manager.py +56 -42
  69. webscout/version.py +1 -1
  70. webscout/webscout_search.py +65 -47
  71. webscout/webscout_search_async.py +81 -126
  72. webscout/yep_search.py +93 -43
  73. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
  74. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
  75. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
  76. webscout/Provider/C4ai.py +0 -432
  77. webscout/Provider/ChatGPTES.py +0 -237
  78. webscout/Provider/DeepSeek.py +0 -196
  79. webscout/Provider/Llama.py +0 -200
  80. webscout/Provider/Phind.py +0 -535
  81. webscout/Provider/WebSim.py +0 -228
  82. webscout/Provider/labyrinth.py +0 -340
  83. webscout/Provider/lepton.py +0 -194
  84. webscout/Provider/llamatutor.py +0 -192
  85. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
  86. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
  87. {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
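Of the 87 files, nine provider modules (entries 76-84, each at +0) are deleted outright: C4ai, ChatGPTES, DeepSeek, Llama, Phind, WebSim, labyrinth, lepton, and llamatutor. Downstream code that imports any of them will raise ImportError after upgrading to 8.2.4. A minimal defensive-import sketch; the module path comes from the file list above, while the fallback behavior is an assumption, not part of this diff:

```python
# Sketch: tolerate the removal of webscout/Provider/Phind.py in 8.2.4.
# PhindSearch shipped through webscout 8.2.3; after upgrading, the import fails.
try:
    from webscout.Provider.Phind import PhindSearch
except ImportError:  # module deleted in webscout 8.2.4
    PhindSearch = None  # pick a surviving provider from webscout.Provider instead
```

The largest of these removals, webscout/Provider/Phind.py (535 lines), is shown in full below.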
webscout/Provider/Phind.py (deleted)
@@ -1,535 +0,0 @@
-import requests
-import re
-import json
-import yaml
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider
-
-from webscout import exceptions
-from typing import Union, Any, AsyncGenerator, Dict
-
-
-#------------------------------------------------------phind-------------------------------------------------------------
-class PhindSearch:
-    # Available models for Phind
-    AVAILABLE_MODELS = [
-        "Phind Model",
-        "Claude 3.7 Sonnet",
-        "Claude Opus",
-        "GPT-4o",
-        "o3-mini",
-        "Phind-405B",
-        "Phind-70B"
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 8000,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "Phind Model",
-        quiet: bool = False,
-    ):
-        """Instantiates PHIND
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): Model name. Defaults to "Phind Model".
-            quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.session = requests.Session()
-        self.max_tokens_to_sample = max_tokens
-        self.is_conversation = is_conversation
-        self.chat_endpoint = "https://https.extension.phind.com/agent/"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.quiet = quiet
-
-        self.headers = {
-            "Content-Type": "application/json",
-            "User-Agent": "",
-            "Accept": "*/*",
-            "Accept-Encoding": "Identity",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
-            "object": "chat.completion.chunk",
-            "created": 1706775384,
-            "model": "trt-llm-phind-model-serving",
-            "choices": [
-                {
-                    "index": 0,
-                    "delta": {
-                        "content": "Hello! How can I assist you with your programming today?"
-                    },
-                    "finish_reason": null
-                }
-            ]
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        self.session.headers.update(self.headers)
-        payload = {
-            "additional_extension_context": "",
-            "allow_magic_buttons": True,
-            "is_vscode_extension": True,
-            "message_history": [
-                {"content": conversation_prompt, "metadata": {}, "role": "user"}
-            ],
-            "requested_model": self.model,
-            "user_input": prompt,
-        }
-
-        def for_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-            )
-            if (
-                not response.ok
-                or not response.headers.get("Content-Type")
-                == "text/event-stream; charset=utf-8"
-            ):
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-            streaming_text = ""
-            for value in response.iter_lines(
-                decode_unicode=True,
-                chunk_size=self.stream_chunk_size,
-            ):
-                try:
-                    modified_value = re.sub("data:", "", value)
-                    json_modified_value = json.loads(modified_value)
-                    retrieved_text = self.get_message(json_modified_value)
-                    if not retrieved_text:
-                        continue
-                    streaming_text += retrieved_text
-                    json_modified_value["choices"][0]["delta"][
-                        "content"
-                    ] = streaming_text
-                    self.last_response.update(json_modified_value)
-                    yield value if raw else json_modified_value
-                except json.decoder.JSONDecodeError:
-                    pass
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        if response.get("type", "") == "metadata":
-            return
-
-        delta: dict = response["choices"][0]["delta"]
-
-        if not delta:
-            return ""
-
-        elif delta.get("function_call"):
-            if self.quiet:
-                return ""
-
-            function_call: dict = delta["function_call"]
-            if function_call.get("name"):
-                return function_call["name"]
-            elif function_call.get("arguments"):
-                return function_call.get("arguments")
-
-        elif delta.get("metadata"):
-            if self.quiet:
-                return ""
-            return yaml.dump(delta["metadata"])
-
-        else:
-            return (
-                response["choices"][0]["delta"].get("content")
-                if response["choices"][0].get("finish_reason") is None
-                else ""
-            )
-
-class Phindv2(Provider):
-    # Available models for Phindv2
-    AVAILABLE_MODELS = [
-        "Claude 3.7 Sonnet",
-        "Claude Opus",
-        "GPT-4o",
-        "o3-mini",
-        "Phind-405B",
-        "Phind-70B"
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 8000,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "Claude 3.7 Sonnet",
-        quiet: bool = False,
-        system_prompt: str = "Be Helpful and Friendly",
-    ):
-        """Instantiates Phindv2
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): Model name. Defaults to "Phind Model".
-            quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
-            system_prompt (str, optional): System prompt for Phindv2. Defaults to "Be Helpful and Friendly".
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.session = requests.Session()
-        self.max_tokens_to_sample = max_tokens
-        self.is_conversation = is_conversation
-        self.chat_endpoint = "https://https.extension.phind.com/agent/"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.quiet = quiet
-        self.system_prompt = system_prompt
-
-        self.headers = {
-            "Content-Type": "application/json",
-            "User-Agent": "",
-            "Accept": "*/*",
-            "Accept-Encoding": "Identity",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
-            "object": "chat.completion.chunk",
-            "created": 1706775384,
-            "model": "trt-llm-phind-model-serving",
-            "choices": [
-                {
-                    "index": 0,
-                    "delta": {
-                        "content": "Hello! How can I assist you with your programming today?"
-                    },
-                    "finish_reason": null
-                }
-            ]
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        self.session.headers.update(self.headers)
-        payload = {
-            "additional_extension_context": "",
-            "allow_magic_buttons": True,
-            "is_vscode_extension": True,
-            "message_history": [
-                {"content": self.system_prompt, "metadata": {}, "role": "system"},
-                {"content": conversation_prompt, "metadata": {}, "role": "user"}
-            ],
-            "requested_model": self.model,
-            "user_input": prompt,
-        }
-
-        def for_stream():
-            response = self.session.post(
-                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-            )
-            if (
-                not response.ok
-                or not response.headers.get("Content-Type")
-                == "text/event-stream; charset=utf-8"
-            ):
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-            streaming_text = ""
-            for value in response.iter_lines(
-                decode_unicode=True,
-                chunk_size=self.stream_chunk_size,
-            ):
-                try:
-                    modified_value = re.sub("data:", "", value)
-                    json_modified_value = json.loads(modified_value)
-                    retrieved_text = self.get_message(json_modified_value)
-                    if not retrieved_text:
-                        continue
-                    streaming_text += retrieved_text
-                    json_modified_value["choices"][0]["delta"][
-                        "content"
-                    ] = streaming_text
-                    self.last_response.update(json_modified_value)
-                    yield value if raw else json_modified_value
-                except json.decoder.JSONDecodeError:
-                    pass
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        if response.get("type", "") == "metadata":
-            return
-
-        delta: dict = response["choices"][0]["delta"]
-
-        if not delta:
-            return ""
-
-        elif delta.get("function_call"):
-            if self.quiet:
-                return ""
-
-            function_call: dict = delta["function_call"]
-            if function_call.get("name"):
-                return function_call["name"]
-            elif function_call.get("arguments"):
-                return function_call.get("arguments")
-
-        elif delta.get("metadata"):
-            if self.quiet:
-                return ""
-            return yaml.dump(delta["metadata"])
-
-        else:
-            return (
-                response["choices"][0]["delta"].get("content")
-                if response["choices"][0].get("finish_reason") is None
-                else ""
-            )
-
-if __name__ == "__main__":
-    from rich import print
-
-    ai = Phindv2()
-    print(ai.chat("Who are u"))
-    # Returns the chat response from the Phindv2 API.
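For downstream reference, the removed classes exposed the same ask/chat/get_message interface as the surviving providers. A minimal streaming usage sketch, as it worked through webscout 8.2.3 and based only on the code above; note that chat(stream=True) yields the cumulative text so far, because ask() overwrites delta["content"] with the accumulated streaming_text:

```python
# Sketch: streaming chat against the removed Phindv2 provider (webscout <= 8.2.3).
from webscout.Provider.Phind import Phindv2  # deleted in 8.2.4

ai = Phindv2(model="Claude 3.7 Sonnet", quiet=True)
printed = ""
for text in ai.chat("Explain Python generators briefly", stream=True):
    # Each yielded string is the full response so far, not a delta,
    # so print only the newly appended tail.
    print(text[len(printed):], end="", flush=True)
    printed = text
print()
```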