webscout-7.1-py3-none-any.whl → webscout-7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (144)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +3 -3
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +3 -4
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +3 -3
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +3 -3
  45. webscout/Provider/Groq.py +5 -1
  46. webscout/Provider/Jadve.py +3 -3
  47. webscout/Provider/Marcus.py +191 -192
  48. webscout/Provider/Netwrck.py +3 -3
  49. webscout/Provider/PI.py +2 -2
  50. webscout/Provider/PizzaGPT.py +2 -3
  51. webscout/Provider/QwenLM.py +311 -0
  52. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  53. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  54. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  55. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  56. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  57. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  58. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  59. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  60. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  61. webscout/Provider/TTI/artbit/__init__.py +22 -22
  62. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  63. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  64. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  65. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  66. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  67. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  68. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  69. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  70. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  71. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  72. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  73. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  74. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  75. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  76. webscout/Provider/TTI/talkai/__init__.py +4 -4
  77. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  78. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  79. webscout/Provider/TTS/deepgram.py +182 -182
  80. webscout/Provider/TTS/elevenlabs.py +136 -136
  81. webscout/Provider/TTS/gesserit.py +150 -150
  82. webscout/Provider/TTS/murfai.py +138 -138
  83. webscout/Provider/TTS/parler.py +133 -134
  84. webscout/Provider/TTS/streamElements.py +360 -360
  85. webscout/Provider/TTS/utils.py +280 -280
  86. webscout/Provider/TTS/voicepod.py +116 -116
  87. webscout/Provider/TextPollinationsAI.py +2 -3
  88. webscout/Provider/WiseCat.py +193 -0
  89. webscout/Provider/__init__.py +144 -134
  90. webscout/Provider/cerebras.py +242 -227
  91. webscout/Provider/chatglm.py +204 -204
  92. webscout/Provider/dgaf.py +2 -3
  93. webscout/Provider/gaurish.py +2 -3
  94. webscout/Provider/geminiapi.py +208 -208
  95. webscout/Provider/granite.py +223 -0
  96. webscout/Provider/hermes.py +218 -218
  97. webscout/Provider/llama3mitril.py +179 -179
  98. webscout/Provider/llamatutor.py +3 -3
  99. webscout/Provider/llmchat.py +2 -3
  100. webscout/Provider/meta.py +794 -794
  101. webscout/Provider/multichat.py +331 -331
  102. webscout/Provider/typegpt.py +359 -359
  103. webscout/Provider/yep.py +2 -2
  104. webscout/__main__.py +5 -5
  105. webscout/cli.py +319 -319
  106. webscout/conversation.py +241 -242
  107. webscout/exceptions.py +328 -328
  108. webscout/litagent/__init__.py +28 -28
  109. webscout/litagent/agent.py +2 -3
  110. webscout/litprinter/__init__.py +0 -58
  111. webscout/scout/__init__.py +8 -8
  112. webscout/scout/core.py +884 -884
  113. webscout/scout/element.py +459 -459
  114. webscout/scout/parsers/__init__.py +69 -69
  115. webscout/scout/parsers/html5lib_parser.py +172 -172
  116. webscout/scout/parsers/html_parser.py +236 -236
  117. webscout/scout/parsers/lxml_parser.py +178 -178
  118. webscout/scout/utils.py +38 -38
  119. webscout/swiftcli/__init__.py +811 -811
  120. webscout/update_checker.py +2 -12
  121. webscout/version.py +1 -1
  122. webscout/webscout_search.py +5 -4
  123. webscout/zeroart/__init__.py +54 -54
  124. webscout/zeroart/base.py +60 -60
  125. webscout/zeroart/effects.py +99 -99
  126. webscout/zeroart/fonts.py +816 -816
  127. {webscout-7.1.dist-info → webscout-7.2.dist-info}/METADATA +4 -3
  128. webscout-7.2.dist-info/RECORD +217 -0
  129. webstoken/__init__.py +30 -30
  130. webstoken/classifier.py +189 -189
  131. webstoken/keywords.py +216 -216
  132. webstoken/language.py +128 -128
  133. webstoken/ner.py +164 -164
  134. webstoken/normalizer.py +35 -35
  135. webstoken/processor.py +77 -77
  136. webstoken/sentiment.py +206 -206
  137. webstoken/stemmer.py +73 -73
  138. webstoken/tagger.py +60 -60
  139. webstoken/tokenizer.py +158 -158
  140. webscout-7.1.dist-info/RECORD +0 -198
  141. {webscout-7.1.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  142. {webscout-7.1.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  143. {webscout-7.1.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  144. {webscout-7.1.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webscout/Provider/llama3mitril.py
@@ -1,180 +1,180 @@

The hunk removes and re-adds lines 1-179 with identical content (line 180 is unchanged context), so the underlying edit is whitespace- or line-ending-only. The file is shown once instead of twice:

import requests
import json
import re
from typing import Any, Dict, Optional, Generator
from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions
from webscout import LitAgent as Lit

class Llama3Mitril(Provider):
    """
    A class to interact with the Llama3 Mitril API. Implements the WebScout provider interface.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful, respectful and honest assistant.",
        temperature: float = 0.8,
    ):
        """Initializes the Llama3Mitril API."""
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.api_endpoint = "https://llama3.mithrilsecurity.io/generate_stream"
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.headers = {
            "Content-Type": "application/json",
            "DNT": "1",
            "User-Agent": Lit().random(),
        }
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def _format_prompt(self, prompt: str) -> str:
        """Format the prompt for the Llama3 model"""
        return (
            f"<|begin_of_text|>"
            f"<|start_header_id|>system<|end_header_id|>{self.system_prompt}<|eot_id|>"
            f"<|start_header_id|>user<|end_header_id|>{prompt}<|eot_id|>"
            f"<|start_header_id|>assistant<|end_header_id|><|eot_id|>"
            f"<|start_header_id|>assistant<|end_header_id|>"
        )

    def ask(
        self,
        prompt: str,
        stream: bool = True,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
        """Sends a prompt to the Llama3 Mitril API and returns the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        data = {
            "inputs": self._format_prompt(conversation_prompt),
            "parameters": {
                "max_new_tokens": self.max_tokens,
                "temperature": self.temperature,
                "return_full_text": False
            }
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint,
                headers=self.headers,
                json=data,
                stream=True,
                timeout=self.timeout
            )
            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            streaming_response = ""
            for line in response.iter_lines(decode_unicode=True):
                if line:
                    try:
                        chunk = json.loads(line.split('data: ')[1])
                        if token_text := chunk.get('token', {}).get('text'):
                            if '<|eot_id|>' not in token_text:
                                streaming_response += token_text
                                yield token_text if raw else {"text": token_text}
                    except (json.JSONDecodeError, IndexError) as e:
                        continue

            self.last_response.update({"text": streaming_response})
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            full_response = ""
            for chunk in for_stream():
                full_response += chunk if raw else chunk['text']
            return {"text": full_response}

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """Generates a response from the Llama3 Mitril API."""

        def for_stream():
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Extracts the message from the API response."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    from rich import print

    ai = Llama3Mitril(
        max_tokens=2048,
        temperature=0.8,
        timeout=30
    )

    for response in ai.chat("Hello", stream=True):
        print(response, end="", flush=True)
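
Because the hunk above deletes and re-adds every line unchanged, the 7.1 → 7.2 edit to this file is most plausibly a whitespace or line-ending normalization. A minimal sketch to confirm that locally, assuming both wheels have been downloaded into the current directory (the filenames are the ones from the title, not part of the diff):

import zipfile

PATH = "webscout/Provider/llama3mitril.py"

# A wheel is a zip archive; read the same member out of each version.
with zipfile.ZipFile("webscout-7.1-py3-none-any.whl") as old_whl:
    old_bytes = old_whl.read(PATH)
with zipfile.ZipFile("webscout-7.2-py3-none-any.whl") as new_whl:
    new_bytes = new_whl.read(PATH)

print("byte-identical:", old_bytes == new_bytes)
# If only the line endings changed, normalizing CRLF to LF makes them equal.
print("equal after EOL normalization:",
      old_bytes.replace(b"\r\n", b"\n") == new_bytes.replace(b"\r\n", b"\n"))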
webscout/Provider/llamatutor.py
@@ -8,7 +8,7 @@ from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout import LitAgent as Lit
-from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+from webscout.Litlogger import Logger, LogFormat
 
 class LlamaTutor(Provider):
     """
@@ -32,10 +32,10 @@ class LlamaTutor(Provider):
         """
         Initializes the LlamaTutor API with given parameters and logging capabilities.
         """
-        self.logger = LitLogger(
+        self.logger = Logger(
             name="LlamaTutor",
             format=LogFormat.MODERN_EMOJI,
-            color_scheme=ColorScheme.CYBERPUNK
+
         ) if logging else None
 
         if self.logger:
webscout/Provider/llmchat.py
@@ -8,7 +8,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+from webscout.Litlogger import Logger, LogFormat
 from webscout import LitAgent as Lit
 
 class LLMChat(Provider):
@@ -43,10 +43,9 @@ class LLMChat(Provider):
         """
         Initializes the LLMChat API with given parameters and logging capabilities.
         """
-        self.logger = LitLogger(
+        self.logger = Logger(
             name="LLMChat",
             format=LogFormat.MODERN_EMOJI,
-            color_scheme=ColorScheme.CYBERPUNK
         ) if logging else None
 
         if self.logger:
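
Both hunks apply the same Litlogger migration that accompanies the package's restructuring (note the new Litlogger/core, handlers, styles, and utils modules in the file list): LitLogger becomes Logger, and the color_scheme / ColorScheme argument disappears. A minimal before/after sketch using only the names visible in the diff; the logger.info call is a hypothetical stand-in for whatever logging methods the class actually exposes:

from webscout.Litlogger import Logger, LogFormat

# webscout <= 7.1 (removed in 7.2):
#   from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
#   logger = LitLogger(name="MyProvider", format=LogFormat.MODERN_EMOJI,
#                      color_scheme=ColorScheme.CYBERPUNK)

# webscout >= 7.2: same name/format keywords, no color scheme.
logger = Logger(name="MyProvider", format=LogFormat.MODERN_EMOJI)
logger.info("provider initialized")  # hypothetical method name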