webscout 6.3-py3-none-any.whl → 6.5-py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: this version of webscout might be problematic.

Files changed (131)
  1. webscout/AIauto.py +191 -176
  2. webscout/AIbase.py +0 -197
  3. webscout/AIutel.py +441 -1130
  4. webscout/DWEBS.py +189 -35
  5. webscout/{YTdownloader.py → Extra/YTToolkit/YTdownloader.py} +990 -1103
  6. webscout/Extra/YTToolkit/__init__.py +3 -0
  7. webscout/{transcriber.py → Extra/YTToolkit/transcriber.py} +479 -551
  8. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  10. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  11. webscout/Extra/YTToolkit/ytapi/extras.py +45 -0
  12. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  13. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  14. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  15. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  16. webscout/Extra/YTToolkit/ytapi/query.py +37 -0
  17. webscout/Extra/YTToolkit/ytapi/stream.py +60 -0
  18. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  19. webscout/Extra/YTToolkit/ytapi/video.py +102 -0
  20. webscout/Extra/__init__.py +3 -1
  21. webscout/Extra/autocoder/__init__.py +9 -0
  22. webscout/Extra/autocoder/autocoder_utiles.py +121 -0
  23. webscout/Extra/autocoder/rawdog.py +680 -0
  24. webscout/Extra/autollama.py +246 -195
  25. webscout/Extra/gguf.py +81 -56
  26. webscout/Extra/markdownlite/__init__.py +862 -0
  27. webscout/Extra/weather_ascii.py +2 -2
  28. webscout/LLM.py +206 -43
  29. webscout/Litlogger/__init__.py +681 -0
  30. webscout/Provider/DARKAI.py +1 -1
  31. webscout/Provider/EDITEE.py +1 -1
  32. webscout/Provider/NinjaChat.py +1 -1
  33. webscout/Provider/PI.py +120 -35
  34. webscout/Provider/Perplexity.py +590 -598
  35. webscout/Provider/Reka.py +0 -1
  36. webscout/Provider/RoboCoders.py +206 -0
  37. webscout/Provider/TTI/AiForce/__init__.py +22 -0
  38. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -0
  39. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -0
  40. webscout/Provider/TTI/Nexra/__init__.py +22 -0
  41. webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
  42. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
  43. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
  44. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -0
  45. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -0
  46. webscout/Provider/TTI/__init__.py +2 -4
  47. webscout/Provider/TTI/artbit/__init__.py +22 -0
  48. webscout/Provider/TTI/artbit/async_artbit.py +184 -0
  49. webscout/Provider/TTI/artbit/sync_artbit.py +176 -0
  50. webscout/Provider/TTI/blackbox/__init__.py +4 -0
  51. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -0
  52. webscout/Provider/TTI/{blackboximage.py → blackbox/sync_blackbox.py} +199 -153
  53. webscout/Provider/TTI/deepinfra/__init__.py +4 -0
  54. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -0
  55. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -0
  56. webscout/Provider/TTI/huggingface/__init__.py +22 -0
  57. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
  58. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
  59. webscout/Provider/TTI/imgninza/__init__.py +4 -0
  60. webscout/Provider/TTI/imgninza/async_ninza.py +214 -0
  61. webscout/Provider/TTI/{imgninza.py → imgninza/sync_ninza.py} +209 -136
  62. webscout/Provider/TTI/talkai/__init__.py +4 -0
  63. webscout/Provider/TTI/talkai/async_talkai.py +229 -0
  64. webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
  65. webscout/Provider/TTS/__init__.py +5 -1
  66. webscout/Provider/TTS/deepgram.py +183 -0
  67. webscout/Provider/TTS/elevenlabs.py +137 -0
  68. webscout/Provider/TTS/gesserit.py +151 -0
  69. webscout/Provider/TTS/murfai.py +139 -0
  70. webscout/Provider/TTS/parler.py +134 -107
  71. webscout/Provider/TTS/streamElements.py +360 -275
  72. webscout/Provider/TTS/utils.py +280 -0
  73. webscout/Provider/TTS/voicepod.py +116 -116
  74. webscout/Provider/__init__.py +8 -1
  75. webscout/Provider/askmyai.py +2 -2
  76. webscout/Provider/cerebras.py +227 -219
  77. webscout/Provider/llama3mitril.py +0 -1
  78. webscout/Provider/meta.py +794 -779
  79. webscout/Provider/mhystical.py +176 -0
  80. webscout/Provider/perplexitylabs.py +265 -0
  81. webscout/Provider/twitterclone.py +251 -245
  82. webscout/Provider/typegpt.py +358 -0
  83. webscout/__init__.py +9 -8
  84. webscout/__main__.py +5 -5
  85. webscout/cli.py +252 -280
  86. webscout/conversation.py +227 -0
  87. webscout/exceptions.py +161 -29
  88. webscout/litagent/__init__.py +172 -0
  89. webscout/litprinter/__init__.py +832 -0
  90. webscout/optimizers.py +270 -0
  91. webscout/prompt_manager.py +279 -0
  92. webscout/scout/__init__.py +11 -0
  93. webscout/scout/core.py +884 -0
  94. webscout/scout/element.py +459 -0
  95. webscout/scout/parsers/__init__.py +69 -0
  96. webscout/scout/parsers/html5lib_parser.py +172 -0
  97. webscout/scout/parsers/html_parser.py +236 -0
  98. webscout/scout/parsers/lxml_parser.py +178 -0
  99. webscout/scout/utils.py +38 -0
  100. webscout/swiftcli/__init__.py +810 -0
  101. webscout/update_checker.py +125 -0
  102. webscout/version.py +1 -1
  103. webscout/zeroart/__init__.py +55 -0
  104. webscout/zeroart/base.py +61 -0
  105. webscout/zeroart/effects.py +99 -0
  106. webscout/zeroart/fonts.py +816 -0
  107. webscout/zerodir/__init__.py +225 -0
  108. {webscout-6.3.dist-info → webscout-6.5.dist-info}/METADATA +37 -112
  109. webscout-6.5.dist-info/RECORD +179 -0
  110. webscout/Agents/Onlinesearcher.py +0 -182
  111. webscout/Agents/__init__.py +0 -2
  112. webscout/Agents/functioncall.py +0 -248
  113. webscout/Bing_search.py +0 -154
  114. webscout/Provider/TTI/AIuncensoredimage.py +0 -103
  115. webscout/Provider/TTI/Nexra.py +0 -120
  116. webscout/Provider/TTI/PollinationsAI.py +0 -138
  117. webscout/Provider/TTI/WebSimAI.py +0 -142
  118. webscout/Provider/TTI/aiforce.py +0 -160
  119. webscout/Provider/TTI/artbit.py +0 -141
  120. webscout/Provider/TTI/deepinfra.py +0 -148
  121. webscout/Provider/TTI/huggingface.py +0 -155
  122. webscout/Provider/TTI/talkai.py +0 -116
  123. webscout/g4f.py +0 -666
  124. webscout/models.py +0 -23
  125. webscout/requestsHTMLfix.py +0 -775
  126. webscout/webai.py +0 -2590
  127. webscout-6.3.dist-info/RECORD +0 -124
  128. {webscout-6.3.dist-info → webscout-6.5.dist-info}/LICENSE.md +0 -0
  129. {webscout-6.3.dist-info → webscout-6.5.dist-info}/WHEEL +0 -0
  130. {webscout-6.3.dist-info → webscout-6.5.dist-info}/entry_points.txt +0 -0
  131. {webscout-6.3.dist-info → webscout-6.5.dist-info}/top_level.txt +0 -0
webscout/g4f.py DELETED
@@ -1,666 +0,0 @@
-import g4f
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider, AsyncProvider
-from webscout.AIutel import available_providers
-from typing import Any, AsyncGenerator
-
-g4f.debug.version_check = False
-
-working_providers = available_providers
-
-completion_allowed_models = [
-    "code-davinci-002",
-    "text-ada-001",
-    "text-babbage-001",
-    "text-curie-001",
-    "text-davinci-002",
-    "text-davinci-003",
-]
-
-default_models = {
-    "completion": "text-davinci-003",
-    "chat_completion": "gpt-3.5-turbo",
-}
-
-default_provider = "Koala"
-
-class AsyncGPT4FREE(AsyncProvider):
-    def __init__(
-        self,
-        provider: str = default_provider,
-        is_conversation: bool = True,
-        auth: str = None,
-        max_tokens: int = 600,
-        model: str = None,
-        ignore_working: bool = False,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-    ):
-        """Initialies GPT4FREE
-
-        Args:
-            provider (str, optional): gpt4free based provider name. Defaults to Koala.
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            auth (str, optional): Authentication value for the provider incase it needs. Defaults to None.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            model (str, optional): LLM model name. Defaults to text-davinci-003|gpt-3.5-turbo.
-            ignore_working (bool, optional): Ignore working status of the provider. Defaults to False.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-        """
-        assert provider in available_providers, (
-            f"Provider '{provider}' is not yet supported. "
-            f"Try others like {', '.join(available_providers)}"
-        )
-        if model is None:
-            model = default_models["chat_completion"]
-
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation,
-            self.max_tokens_to_sample,
-            filepath,
-            update_file,
-        )
-        self.conversation.history_offset = history_offset
-        self.model = model
-        self.provider = provider
-        self.ignore_working = ignore_working
-        self.auth = auth
-        self.proxy = None if not proxies else list(proxies.values())[0]
-
-    def __str__(self):
-        return f"AsyncGPTFREE(provider={self.provider})"
-
-    async def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict | AsyncGenerator:
-        """Chat with AI asynchronously.
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict|AsyncGenerator : ai content
-        ```json
-        {
-            "text" : "How may I help you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        payload = dict(
-            model=self.model,
-            provider=self.provider,  # g4f.Provider.Aichat,
-            messages=[{"role": "user", "content": conversation_prompt}],
-            stream=True,
-            ignore_working=self.ignore_working,
-            auth=self.auth,
-            proxy=self.proxy,
-            timeout=self.timeout,
-        )
-
-        async def format_response(response):
-            return dict(text=response)
-
-        async def for_stream():
-            previous_chunks = ""
-            response = g4f.ChatCompletion.create_async(**payload)
-
-            async for chunk in response:
-                previous_chunks += chunk
-                formatted_resp = await format_response(previous_chunks)
-                self.last_response.update(formatted_resp)
-                yield previous_chunks if raw else formatted_resp
-
-            self.conversation.update_chat_history(
-                prompt,
-                previous_chunks,
-            )
-
-        async def for_non_stream():
-            async for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else await for_non_stream()
-
-    async def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict | AsyncGenerator:
-        """Generate response `str` asynchronously.
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str|AsyncGenerator: Response generated
-        """
-
-        async def for_stream():
-            async_ask = await self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            )
-            async for response in async_ask:
-                yield await self.get_message(response)
-
-        async def for_non_stream():
-            return await self.get_message(
-                await self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else await for_non_stream()
-
-    async def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-class GPT4FREE(Provider):
-    def __init__(
-        self,
-        provider: str = default_provider,
-        is_conversation: bool = True,
-        auth: str = None,
-        max_tokens: int = 600,
-        model: str = None,
-        chat_completion: bool = True,
-        ignore_working: bool = True,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-    ):
-        """Initialies GPT4FREE
-
-        Args:
-            provider (str, optional): gpt4free based provider name. Defaults to Koala.
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            auth (str, optional): Authentication value for the provider incase it needs. Defaults to None.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            model (str, optional): LLM model name. Defaults to text-davinci-003|gpt-3.5-turbo.
-            chat_completion(bool, optional): Provide native auto-contexting (conversationally). Defaults to False.
-            ignore_working (bool, optional): Ignore working status of the provider. Defaults to False.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-        """
-        assert provider in available_providers, (
-            f"Provider '{provider}' is not yet supported. "
-            f"Try others like {', '.join(available_providers)}"
-        )
-        if model is None:
-            model = (
-                default_models["chat_completion"]
-                if chat_completion
-                else default_models["completion"]
-            )
-
-        elif not chat_completion:
-            assert model in completion_allowed_models, (
-                f"Model '{model}' is not yet supported for completion. "
-                f"Try other models like {', '.join(completion_allowed_models)}"
-            )
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            False if chat_completion else is_conversation,
-            self.max_tokens_to_sample,
-            filepath,
-            update_file,
-        )
-        self.conversation.history_offset = history_offset
-        self.model = model
-        self.provider = provider
-        self.chat_completion = chat_completion
-        self.ignore_working = ignore_working
-        self.auth = auth
-        self.proxy = None if not proxies else list(proxies.values())[0]
-        self.__chat_class = g4f.ChatCompletion if chat_completion else g4f.Completion
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I help you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        def payload():
-            if self.chat_completion:
-                return dict(
-                    model=self.model,
-                    provider=self.provider,  # g4f.Provider.Aichat,
-                    messages=[{"role": "user", "content": conversation_prompt}],
-                    stream=stream,
-                    ignore_working=self.ignore_working,
-                    auth=self.auth,
-                    proxy=self.proxy,
-                    timeout=self.timeout,
-                )
-
-            else:
-                return dict(
-                    model=self.model,
-                    prompt=conversation_prompt,
-                    provider=self.provider,
-                    stream=stream,
-                    ignore_working=self.ignore_working,
-                    auth=self.auth,
-                    proxy=self.proxy,
-                    timeout=self.timeout,
-                )
-
-        def format_response(response):
-            return dict(text=response)
-
-        def for_stream():
-            previous_chunks = ""
-            response = self.__chat_class.create(**payload())
-
-            for chunk in response:
-                previous_chunks += chunk
-                formatted_resp = format_response(previous_chunks)
-                self.last_response.update(formatted_resp)
-                yield previous_chunks if raw else formatted_resp
-
-            self.conversation.update_chat_history(
-                prompt,
-                previous_chunks,
-            )
-
-        def for_non_stream():
-            response = self.__chat_class.create(**payload())
-            formatted_resp = format_response(response)
-
-            self.last_response.update(formatted_resp)
-            self.conversation.update_chat_history(prompt, response)
-
-            return response if raw else formatted_resp
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-from pathlib import Path
-from webscout.AIutel import default_path
-from json import dump, load
-from time import time
-from threading import Thread as thr
-from functools import wraps
-from rich.progress import Progress
-import logging
-
-results_path = Path(default_path) / "provider_test.json"
-
-
-def exception_handler(func):
-
-    @wraps(func)
-    def decorator(*args, **kwargs):
-        try:
-            return func(*args, **kwargs)
-        except Exception as e:
-            pass
-
-    return decorator
-
-
-@exception_handler
-def is_working(provider: str) -> bool:
-    """Test working status of a provider
-
-    Args:
-        provider (str): Provider name
-
-    Returns:
-        bool: is_working status
-    """
-    bot = GPT4FREE(provider=provider, is_conversation=False)
-    text = bot.chat("hello")
-    assert isinstance(text, str)
-    assert bool(text.strip())
-    assert "</" not in text
-    assert ":" not in text
-    assert len(text) > 2
-    return True
-
-
-class TestProviders:
-
-    def __init__(
-        self,
-        test_at_once: int = 5,
-        quiet: bool = False,
-        timeout: int = 20,
-        selenium: bool = False,
-        do_log: bool = True,
-    ):
-        """Constructor
-
-        Args:
-            test_at_once (int, optional): Test n providers at once. Defaults to 5.
-            quiet (bool, optinal): Disable stdout. Defaults to False.
-            timout (int, optional): Thread timeout for each provider. Defaults to 20.
-            selenium (bool, optional): Test even selenium dependent providers. Defaults to False.
-            do_log (bool, optional): Flag to control logging. Defaults to True.
-        """
-        self.test_at_once: int = test_at_once
-        self.quiet = quiet
-        self.timeout = timeout
-        self.do_log = do_log
-        self.__logger = logging.getLogger(__name__)
-        self.working_providers: list = [
-            provider.__name__
-            for provider in g4f.Provider.__providers__
-            if provider.working
-        ]
-
-        if not selenium:
-            import g4f.Provider.selenium as selenium_based
-            from g4f import webdriver
-
-            webdriver.has_requirements = False
-            selenium_based_providers: list = dir(selenium_based)
-            for provider in self.working_providers:
-                try:
-                    selenium_based_providers.index(provider)
-                except ValueError:
-                    pass
-                else:
-                    self.__log(
-                        10, f"Dropping provider - {provider} - [Selenium dependent]"
-                    )
-                    self.working_providers.remove(provider)
-
-        self.results_path: Path = results_path
-        self.__create_empty_file(ignore_if_found=True)
-        self.results_file_is_empty: bool = False
-
-    def __log(
-        self,
-        level: int,
-        message: str,
-    ):
-        """class logger"""
-        if self.do_log:
-            self.__logger.log(level, message)
-        else:
-            pass
-
-    def __create_empty_file(self, ignore_if_found: bool = False):
-        if ignore_if_found and self.results_path.is_file():
-            return
-        with self.results_path.open("w") as fh:
-            dump({"results": []}, fh)
-        self.results_file_is_empty = True
-
-    def test_provider(self, name: str):
-        """Test each provider and save successful ones
-
-        Args:
-            name (str): Provider name
-        """
-
-        try:
-            bot = GPT4FREE(provider=name, is_conversation=False)
-            start_time = time()
-            text = bot.chat("hello there")
-            assert isinstance(text, str), "Non-string response returned"
-            assert bool(text.strip()), "Empty string"
-            assert "</" not in text, "Html code returned."
-            assert ":" not in text, "Json formatted response returned"
-            assert len(text) > 2
-        except Exception as e:
-            pass
-        else:
-            self.results_file_is_empty = False
-            with self.results_path.open() as fh:
-                current_results = load(fh)
-            new_result = dict(time=time() - start_time, name=name)
-            current_results["results"].append(new_result)
-            self.__log(20, f"Test result - {new_result['name']} - {new_result['time']}")
-
-            with self.results_path.open("w") as fh:
-                dump(current_results, fh)
-
-    @exception_handler
-    def main(
-        self,
-    ):
-        self.__create_empty_file()
-        threads = []
-        # Create a progress bar
-        total = len(self.working_providers)
-        with Progress() as progress:
-            self.__log(20, f"Testing {total} providers : {self.working_providers}")
-            task = progress.add_task(
-                f"[cyan]Testing...[{self.test_at_once}]",
-                total=total,
-                visible=self.quiet == False,
-            )
-            while not progress.finished:
-                for count, provider in enumerate(self.working_providers, start=1):
-                    t1 = thr(
-                        target=self.test_provider,
-                        args=(provider,),
-                    )
-                    t1.start()
-                    if count % self.test_at_once == 0 or count == len(provider):
-                        for t in threads:
-                            try:
-                                t.join(self.timeout)
-                            except Exception as e:
-                                pass
-                        threads.clear()
-                    else:
-                        threads.append(t1)
-                    progress.update(task, advance=1)
-
-    def get_results(self, run: bool = False, best: bool = False) -> list[dict]:
-        """Get test results
-
-        Args:
-            run (bool, optional): Run the test first. Defaults to False.
-            best (bool, optional): Return name of the best provider. Defaults to False.
-
-        Returns:
-            list[dict]|str: Test results.
-        """
-        if run or self.results_file_is_empty:
-            self.main()
-
-        with self.results_path.open() as fh:
-            results: dict = load(fh)
-
-        results = results["results"]
-        if not results:
-            if run:
-                raise Exception("Unable to find working g4f provider")
-            else:
-                self.__log(30, "Hunting down working g4f providers.")
-                return self.get_results(run=True, best=best)
-
-        time_list = []
-
-        sorted_list = []
-        for entry in results:
-            time_list.append(entry["time"])
-
-        time_list.sort()
-
-        for time_value in time_list:
-            for entry in results:
-                if entry["time"] == time_value:
-                    sorted_list.append(entry)
-        return sorted_list[0]["name"] if best else sorted_list
-
-    @property
-    def best(self):
-        """Fastest provider overally"""
-        return self.get_results(run=False, best=True)
-
-    @property
-    def auto(self):
-        """Best working provider"""
-        for result in self.get_results(run=False, best=False):
-            self.__log(20, "Confirming working status of provider : " + result["name"])
-            if is_working(result["name"]):
-                return result["name"]
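
Note: the removed module exposed a synchronous GPT4FREE provider plus a TestProviders benchmark. A minimal sketch of how code depending on it was typically driven, based only on the constructor and chat() signatures visible in the deleted diff above (this is illustrative usage, not documented API, and the import path no longer exists in 6.5):

# Sketch based on the deleted code above; runs on webscout < 6.5 only,
# since webscout/g4f.py has been removed from this release.
from webscout.g4f import GPT4FREE, TestProviders

# Benchmark the g4f providers and pick the fastest one recorded in
# provider_test.json (the tests run first if no results are recorded yet).
best_provider = TestProviders(test_at_once=5, timeout=20).best

bot = GPT4FREE(provider=best_provider, is_conversation=False)

# Non-streaming call: chat() returns the extracted message text.
print(bot.chat("hello"))

# Streaming call: each yielded item is the progressively longer text so far.
for text in bot.chat("hello", stream=True):
    print(text)

The file list above shows no renamed successor for g4f.py, so code written against this module has to move to the classes that remain under webscout/Provider/.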
webscout/models.py DELETED
@@ -1,23 +0,0 @@
-from dataclasses import dataclass
-from typing import Dict, Optional
-
-
-@dataclass
-class MapsResult:
-    """Represents a result from the maps search."""
-
-    title: Optional[str] = None
-    address: Optional[str] = None
-    country_code: Optional[str] = None
-    latitude: Optional[str] = None
-    longitude: Optional[str] = None
-    url: Optional[str] = None
-    desc: Optional[str] = None
-    phone: Optional[str] = None
-    image: Optional[str] = None
-    source: Optional[str] = None
-    hours: Optional[Dict[str, str]] = None
-    category: Optional[str] = None
-    facebook: Optional[str] = None
-    instagram: Optional[str] = None
-    twitter: Optional[str] = None
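
The removed MapsResult was a plain dataclass with every field optional. A minimal sketch of how such results were constructed and consumed, valid only before 6.5; the field names come from the deleted definition above, while the sample values are invented:

# Sketch only: webscout/models.py is deleted in 6.5, so this import
# works on webscout < 6.5 only. The sample values below are invented.
from dataclasses import asdict
from webscout.models import MapsResult

result = MapsResult(
    title="Example Cafe",
    address="1 Example Street",
    latitude="37.7749",
    longitude="-122.4194",
    hours={"Mon": "7am-5pm"},
)

# Every field defaults to None, so partial results convert cleanly to dicts.
print(asdict(result)["title"])  # -> "Example Cafe"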