webscout-7.6-py3-none-any.whl → webscout-7.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (124)
  1. webscout/AIutel.py +2 -1
  2. webscout/Bard.py +14 -11
  3. webscout/DWEBS.py +431 -415
  4. webscout/Extra/autocoder/autocoder_utiles.py +183 -47
  5. webscout/Extra/autocoder/rawdog.py +848 -649
  6. webscout/Extra/gguf.py +682 -652
  7. webscout/Provider/AI21.py +1 -1
  8. webscout/Provider/AISEARCH/DeepFind.py +2 -2
  9. webscout/Provider/AISEARCH/ISou.py +2 -23
  10. webscout/Provider/AISEARCH/felo_search.py +6 -6
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/Aitopia.py +292 -0
  13. webscout/Provider/AllenAI.py +5 -22
  14. webscout/Provider/Andi.py +3 -3
  15. webscout/Provider/C4ai.py +1 -1
  16. webscout/Provider/ChatGPTClone.py +226 -0
  17. webscout/Provider/ChatGPTES.py +3 -5
  18. webscout/Provider/ChatGPTGratis.py +4 -4
  19. webscout/Provider/Chatify.py +2 -2
  20. webscout/Provider/Cloudflare.py +3 -2
  21. webscout/Provider/DARKAI.py +3 -2
  22. webscout/Provider/DeepSeek.py +2 -2
  23. webscout/Provider/Deepinfra.py +1 -1
  24. webscout/Provider/EDITEE.py +1 -1
  25. webscout/Provider/ElectronHub.py +178 -96
  26. webscout/Provider/ExaChat.py +310 -0
  27. webscout/Provider/Free2GPT.py +2 -2
  28. webscout/Provider/Gemini.py +5 -19
  29. webscout/Provider/GithubChat.py +1 -1
  30. webscout/Provider/Glider.py +12 -8
  31. webscout/Provider/Groq.py +3 -3
  32. webscout/Provider/HF_space/qwen_qwen2.py +1 -1
  33. webscout/Provider/HeckAI.py +1 -1
  34. webscout/Provider/HuggingFaceChat.py +1 -1
  35. webscout/Provider/Hunyuan.py +272 -0
  36. webscout/Provider/Jadve.py +3 -3
  37. webscout/Provider/Koboldai.py +3 -3
  38. webscout/Provider/LambdaChat.py +391 -0
  39. webscout/Provider/Llama.py +3 -5
  40. webscout/Provider/Llama3.py +4 -12
  41. webscout/Provider/Marcus.py +3 -3
  42. webscout/Provider/OLLAMA.py +260 -36
  43. webscout/Provider/Openai.py +7 -3
  44. webscout/Provider/PI.py +1 -1
  45. webscout/Provider/Perplexitylabs.py +1 -1
  46. webscout/Provider/Phind.py +1 -1
  47. webscout/Provider/PizzaGPT.py +1 -1
  48. webscout/Provider/QwenLM.py +4 -7
  49. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +21 -46
  50. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +37 -49
  51. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  52. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  53. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  54. webscout/Provider/TTI/__init__.py +3 -1
  55. webscout/Provider/TTI/artbit/async_artbit.py +4 -33
  56. webscout/Provider/TTI/artbit/sync_artbit.py +4 -32
  57. webscout/Provider/TTI/fastflux/async_fastflux.py +6 -2
  58. webscout/Provider/TTI/fastflux/sync_fastflux.py +7 -2
  59. webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
  60. webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
  61. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  62. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  63. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  64. webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
  65. webscout/Provider/TTS/utils.py +1 -1
  66. webscout/Provider/TeachAnything.py +1 -1
  67. webscout/Provider/TextPollinationsAI.py +4 -4
  68. webscout/Provider/TwoAI.py +1 -2
  69. webscout/Provider/Venice.py +4 -2
  70. webscout/Provider/VercelAI.py +234 -0
  71. webscout/Provider/WebSim.py +228 -0
  72. webscout/Provider/WiseCat.py +10 -12
  73. webscout/Provider/Youchat.py +1 -1
  74. webscout/Provider/__init__.py +22 -1
  75. webscout/Provider/ai4chat.py +1 -1
  76. webscout/Provider/aimathgpt.py +2 -6
  77. webscout/Provider/akashgpt.py +1 -1
  78. webscout/Provider/askmyai.py +4 -4
  79. webscout/Provider/asksteve.py +203 -0
  80. webscout/Provider/bagoodex.py +2 -2
  81. webscout/Provider/cerebras.py +1 -1
  82. webscout/Provider/chatglm.py +4 -4
  83. webscout/Provider/cleeai.py +1 -0
  84. webscout/Provider/copilot.py +427 -415
  85. webscout/Provider/elmo.py +1 -1
  86. webscout/Provider/flowith.py +14 -3
  87. webscout/Provider/freeaichat.py +57 -31
  88. webscout/Provider/gaurish.py +3 -5
  89. webscout/Provider/geminiprorealtime.py +1 -1
  90. webscout/Provider/granite.py +4 -4
  91. webscout/Provider/hermes.py +5 -5
  92. webscout/Provider/julius.py +1 -1
  93. webscout/Provider/koala.py +1 -1
  94. webscout/Provider/labyrinth.py +239 -0
  95. webscout/Provider/learnfastai.py +28 -15
  96. webscout/Provider/lepton.py +1 -1
  97. webscout/Provider/llama3mitril.py +4 -4
  98. webscout/Provider/llamatutor.py +1 -1
  99. webscout/Provider/llmchat.py +3 -3
  100. webscout/Provider/meta.py +1 -1
  101. webscout/Provider/multichat.py +10 -10
  102. webscout/Provider/promptrefine.py +1 -1
  103. webscout/Provider/searchchat.py +293 -0
  104. webscout/Provider/sonus.py +208 -0
  105. webscout/Provider/talkai.py +2 -2
  106. webscout/Provider/turboseek.py +1 -1
  107. webscout/Provider/tutorai.py +1 -1
  108. webscout/Provider/typegpt.py +6 -43
  109. webscout/Provider/uncovr.py +299 -0
  110. webscout/Provider/x0gpt.py +1 -1
  111. webscout/__init__.py +36 -36
  112. webscout/cli.py +293 -283
  113. webscout/litagent/agent.py +14 -9
  114. webscout/tempid.py +11 -11
  115. webscout/utils.py +2 -2
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +1282 -1223
  118. webscout/webscout_search_async.py +813 -692
  119. {webscout-7.6.dist-info → webscout-7.8.dist-info}/METADATA +76 -44
  120. {webscout-7.6.dist-info → webscout-7.8.dist-info}/RECORD +124 -106
  121. {webscout-7.6.dist-info → webscout-7.8.dist-info}/LICENSE.md +0 -0
  122. {webscout-7.6.dist-info → webscout-7.8.dist-info}/WHEEL +0 -0
  123. {webscout-7.6.dist-info → webscout-7.8.dist-info}/entry_points.txt +0 -0
  124. {webscout-7.6.dist-info → webscout-7.8.dist-info}/top_level.txt +0 -0
webscout/Provider/OLLAMA.py CHANGED
@@ -3,8 +3,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
+from typing import Any, AsyncGenerator, Dict, List, Optional, Union
 import ollama
+from ollama import AsyncClient, Client, ResponseError
+import asyncio
+import base64
+from pathlib import Path
 
 class OLLAMA(Provider):
     def __init__(
@@ -19,7 +23,9 @@ class OLLAMA(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful and friendly AI assistant.",
+        system_prompt: str = "You are a helpful and friendly AI assistant.",
+        host: str = 'http://localhost:11434',
+        headers: Optional[Dict] = None,
     ):
         """Instantiates Ollama
 
@@ -34,7 +40,9 @@ class OLLAMA(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
+            system_prompt (str, optional): System prompt for Ollama. Defaults to "You are a helpful and friendly AI assistant.".
+            host (str, optional): Ollama host URL. Defaults to 'http://localhost:11434'.
+            headers (dict, optional): Custom headers for requests. Defaults to None.
         """
         self.model = model
         self.is_conversation = is_conversation
@@ -42,6 +50,8 @@ class OLLAMA(Provider):
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
+        self.client = Client(host=host, headers=headers)
+        self.async_client = AsyncClient(host=host, headers=headers)
 
         self.__available_optimizers = (
             method
@@ -67,7 +77,9 @@ class OLLAMA(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict | AsyncGenerator:
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
+    ) -> Union[dict, AsyncGenerator]:
         """Chat with AI
 
         Args:
@@ -76,13 +88,10 @@ class OLLAMA(Provider):
             raw (bool, optional): Stream back raw response as received. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            tools (List[Dict], optional): List of tools/functions to use. Defaults to None.
+            images (List[str], optional): List of image paths or base64 encoded images. Defaults to None.
         Returns:
-            dict|AsyncGenerator : ai content
-        ```json
-        {
-            "text" : "print('How may I help you today?')"
-        }
-        ```
+            Union[dict, AsyncGenerator] : ai content
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -95,29 +104,101 @@ class OLLAMA(Provider):
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
 
-        def for_stream():
-            # Correctly call ollama.chat with stream=True
-            stream = ollama.chat(model=self.model, messages=[
-                {'role': 'system', 'content': self.system_prompt},
-                {'role': 'user', 'content': conversation_prompt}
-            ], stream=True)
+        messages = [
+            {'role': 'system', 'content': self.system_prompt},
+            {'role': 'user', 'content': conversation_prompt}
+        ]
 
-            # Yield each chunk directly
-            for chunk in stream:
-                yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
+        if images:
+            messages[-1]['images'] = images
 
-        def for_non_stream():
-            response = ollama.chat(model=self.model, messages=[
-                {'role': 'system', 'content': self.system_prompt},  # Add system message
-                {'role': 'user', 'content': conversation_prompt}
-            ])
-            self.last_response.update(dict(text=response['message']['content']))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-            return self.last_response
+        try:
+            def for_stream():
+                stream = self.client.chat(
+                    model=self.model,
+                    messages=messages,
+                    stream=True,
+                    tools=tools
+                )
+                for chunk in stream:
+                    yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
 
-        return for_stream() if stream else for_non_stream()
+            def for_non_stream():
+                response = self.client.chat(
+                    model=self.model,
+                    messages=messages,
+                    tools=tools
+                )
+                self.last_response.update(dict(text=response['message']['content']))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+                return self.last_response
+
+            return for_stream() if stream else for_non_stream()
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
+
+    async def aask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
+    ) -> Union[dict, AsyncGenerator]:
+        """Async version of ask method"""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        messages = [
+            {'role': 'system', 'content': self.system_prompt},
+            {'role': 'user', 'content': conversation_prompt}
+        ]
+
+        if images:
+            messages[-1]['images'] = images
+
+        try:
+            async def for_stream():
+                stream = await self.async_client.chat(
+                    model=self.model,
+                    messages=messages,
+                    stream=True,
+                    tools=tools
+                )
+                async for chunk in stream:
+                    yield chunk['message']['content'] if raw else dict(text=chunk['message']['content'])
+
+            async def for_non_stream():
+                response = await self.async_client.chat(
+                    model=self.model,
+                    messages=messages,
+                    tools=tools
+                )
+                self.last_response.update(dict(text=response['message']['content']))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+                return self.last_response
+
+            return for_stream() if stream else for_non_stream()
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
 
     def chat(
         self,
@@ -125,20 +206,24 @@ class OLLAMA(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str | AsyncGenerator:
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
+    ) -> Union[str, AsyncGenerator]:
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
             stream (bool, optional): Flag for streaming response. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            tools (List[Dict], optional): List of tools/functions to use. Defaults to None.
+            images (List[str], optional): List of image paths or base64 encoded images. Defaults to None.
         Returns:
-            str: Response generated
+            Union[str, AsyncGenerator]: Response generated
         """
-
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                tools=tools, images=images
             ):
                 yield self.get_message(response)
 
@@ -149,6 +234,39 @@ class OLLAMA(Provider):
                 False,
                 optimizer=optimizer,
                 conversationally=conversationally,
+                tools=tools,
+                images=images
+            )
+        )
+
+        return for_stream() if stream else for_non_stream()
+
+    async def achat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        tools: Optional[List[Dict]] = None,
+        images: Optional[List[str]] = None,
+    ) -> Union[str, AsyncGenerator]:
+        """Async version of chat method"""
+        async def for_stream():
+            async for response in await self.aask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                tools=tools, images=images
+            ):
+                yield self.get_message(response)
+
+        async def for_non_stream():
+            return self.get_message(
+                await self.aask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                    tools=tools,
+                    images=images
                 )
             )
 
@@ -165,8 +283,114 @@ class OLLAMA(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
+
+    def generate(
+        self,
+        prompt: str,
+        stream: bool = False,
+        **kwargs
+    ) -> Union[dict, AsyncGenerator]:
+        """Generate text using the model"""
+        try:
+            if stream:
+                return self.client.generate(model=self.model, prompt=prompt, stream=True, **kwargs)
+            return self.client.generate(model=self.model, prompt=prompt, **kwargs)
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
+
+    async def agenerate(
+        self,
+        prompt: str,
+        stream: bool = False,
+        **kwargs
+    ) -> Union[dict, AsyncGenerator]:
+        """Async version of generate method"""
+        try:
+            if stream:
+                return await self.async_client.generate(model=self.model, prompt=prompt, stream=True, **kwargs)
+            return await self.async_client.generate(model=self.model, prompt=prompt, **kwargs)
+        except ResponseError as e:
+            if e.status_code == 404:
+                raise Exception(f"Model {self.model} not found. Please pull it first using `ollama pull {self.model}`")
+            raise e
+
+    def list_models(self) -> List[dict]:
+        """List all available models"""
+        return self.client.list()
+
+    def show_model(self, model: str = None) -> dict:
+        """Show model details"""
+        model = model or self.model
+        return self.client.show(model)
+
+    def pull_model(self, model: str = None) -> None:
+        """Pull a model from Ollama"""
+        model = model or self.model
+        self.client.pull(model)
+
+    def delete_model(self, model: str = None) -> None:
+        """Delete a model"""
+        model = model or self.model
+        self.client.delete(model)
+
+    def embed(
+        self,
+        input: Union[str, List[str]],
+        model: str = None
+    ) -> List[float]:
+        """Generate embeddings for input text"""
+        model = model or self.model
+        return self.client.embed(model=model, input=input)
+
+    async def aembed(
+        self,
+        input: Union[str, List[str]],
+        model: str = None
+    ) -> List[float]:
+        """Async version of embed method"""
+        model = model or self.model
+        return await self.async_client.embed(model=model, input=input)
+
 if __name__ == "__main__":
-    ai = OLLAMA(model="llama3.2:1b")
+    # Example usage
+    ai = OLLAMA(model="qwen2.5:0.5b")
+    # ai.pull_model("qwen2.5:0.5b")
+    # Basic chat
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
+
+    # Vision example
+    # response = ai.chat(
+    #     "What's in this image?",
+    #     images=["path/to/image.jpg"]
+    # )
+    # print(response)
+
+    # Tools example
+    def add_numbers(a: int, b: int) -> int:
+        return a + b
+
+    tools = [{
+        'type': 'function',
+        'function': {
+            'name': 'add_numbers',
+            'description': 'Add two numbers',
+            'parameters': {
+                'type': 'object',
+                'properties': {
+                    'a': {'type': 'integer'},
+                    'b': {'type': 'integer'}
+                },
+                'required': ['a', 'b']
+            }
+        }
+    }]
+
+    response = ai.chat(
+        "What is 5 plus 3?",
+        tools=tools
+    )
+    print(response)
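Taken together, these hunks replace module-level `ollama.chat` calls with per-instance `Client`/`AsyncClient` objects and add an async mirror (`aask`/`achat`) plus thin wrappers over the client's model-management calls. A minimal sketch of driving the new surface, assuming `OLLAMA` is importable from `webscout.Provider` (inferred from the package layout, not stated in this diff) and a local Ollama server with the model already pulled:

```python
# Sketch only: exercises the 7.8 additions shown in the diff above.
import asyncio
from webscout.Provider import OLLAMA  # assumed export path

ai = OLLAMA(model="qwen2.5:0.5b", host="http://localhost:11434")

async def main():
    # achat() is new in 7.8; with stream=True it returns an async generator,
    # so it is awaited once and then iterated with `async for`.
    async for chunk in await ai.achat("Name three HTTP verbs", stream=True):
        print(chunk, end="", flush=True)
    print()
    # The new helpers are thin wrappers over the ollama client:
    print(ai.list_models())         # -> self.client.list()
    print(ai.embed("hello world"))  # -> self.client.embed(model=..., input=...)

asyncio.run(main())
```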
webscout/Provider/Openai.py CHANGED
@@ -5,7 +5,7 @@ from ..AIutel import AwesomePrompts, sanitize_stream
 from ..AIbase import Provider, AsyncProvider
 
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
+from typing import Any, AsyncGenerator, Dict, List, Optional, Union
 import requests
 import httpx
 #----------------------------------------------------------OpenAI-----------------------------------
@@ -332,7 +332,8 @@ class AsyncOPENAI(AsyncProvider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict | AsyncGenerator:
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[dict, AsyncGenerator]:
         """Chat with AI asynchronously.
 
         Args:
@@ -341,6 +342,7 @@ class AsyncOPENAI(AsyncProvider):
             raw (bool, optional): Stream back raw response as received. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            tools (Optional[List[Dict[str, Any]]], optional): List of tools to be used. Defaults to None.
         Returns:
             dict|AsyncGenerator : ai content.
         ```json
@@ -444,13 +446,15 @@ class AsyncOPENAI(AsyncProvider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str | AsyncGenerator:
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[str, AsyncGenerator]:
         """Generate response `str` asynchronously.
         Args:
             prompt (str): Prompt to be send.
             stream (bool, optional): Flag for streaming response. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            tools (Optional[List[Dict[str, Any]]], optional): List of tools to be used. Defaults to None.
         Returns:
             str|AsyncGenerator: Response generated
         """
webscout/Provider/PI.py CHANGED
@@ -9,7 +9,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from typing import Dict, Union, Any, Optional
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 class PiAI(Provider):
     """
webscout/Provider/Perplexitylabs.py CHANGED
@@ -13,7 +13,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent
+from webscout.litagent import LitAgent
 
 class PerplexityLabs(Provider):
     """
webscout/Provider/Phind.py CHANGED
@@ -8,7 +8,7 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider
 
 from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
+from typing import Union, Any, AsyncGenerator, Dict
 
 
 #------------------------------------------------------phind-------------------------------------------------------------
webscout/Provider/PizzaGPT.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Dict, Optional, Union, Generator
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout import LitAgent as Lit
+from webscout.litagent import LitAgent as Lit
 
 class PIZZAGPT(Provider):
     """
webscout/Provider/QwenLM.py CHANGED
@@ -1,13 +1,10 @@
-import requests
 import json
-from typing import Any, Dict, Generator, Optional
-import uuid
-import re
+from typing import Union, Any, Dict, Generator, Optional
 
 import cloudscraper
 
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
-from webscout.AIbase import Provider, AsyncProvider
+from webscout.AIbase import Provider
 from webscout import exceptions
 
 class QwenLM(Provider):
@@ -126,7 +123,7 @@ class QwenLM(Provider):
         raw: bool = False,
         optimizer: Optional[str] = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Chat with AI."""
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
@@ -229,7 +226,7 @@ class QwenLM(Provider):
         stream: bool = False,
         optimizer: Optional[str] = None,
         conversationally: bool = False,
-    ) -> str | Generator[str, None, None]:
+    ) -> Union[str, Generator[str, None, None]]:
         """Generate response string from chat."""
 
         def for_stream() -> Generator[str, None, None]:
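A pattern worth noting, here and in OLLAMA.py, Openai.py, and Phind.py: every PEP 604 annotation (`X | Y`) is rewritten as `typing.Union[X, Y]`. Annotations are evaluated at function-definition time, so the `|` spelling raises `TypeError` on Python 3.9 and earlier, which makes this read like a compatibility fix rather than a stylistic one. A self-contained illustration:

```python
from typing import Any, Dict, Generator, Union

# On Python <= 3.9 this definition fails at import time with
# "TypeError: unsupported operand type(s) for |" because the annotation
# is evaluated when the function object is created:
#
#     def ask() -> Dict[str, Any] | Generator[Any, None, None]: ...
#
# The Union spelling the diff switches to works on all supported versions:
def ask() -> Union[Dict[str, Any], Generator[Any, None, None]]:
    return {"text": "ok"}

print(ask())
```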
webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py CHANGED
@@ -7,11 +7,7 @@ from random import choice
 import aiofiles
 
 from webscout.AIbase import AsyncImageProvider
-from webscout.litagent import LitAgent  # Import our fire user agent generator 🔥
-from webscout.Litlogger import Logger  # For that cyberpunk logging swag ⚡
-
-# Initialize our fire logger 🚀
-logger = Logger("AsyncFreeAIPlayground")
+from webscout.litagent import LitAgent
 
 class AsyncFreeAIImager(AsyncImageProvider):
     """
@@ -25,15 +21,16 @@ class AsyncFreeAIImager(AsyncImageProvider):
         "Flux Pro Ultra Raw",
         "Flux Schnell",
         "Flux Realism",
-        "grok-2-aurora"
+        "grok-2-aurora",
+        "Flux Dev",
+
     ]
 
     def __init__(
         self,
-        model: str = "dall-e-3",  # Updated default model
+        model: str = "dall-e-3",
         timeout: int = 60,
-        proxies: dict = {},
-        logging: bool = True
+        proxies: dict = {}
     ):
         """Initialize your async FreeAIPlayground provider with custom settings! ⚙️
 
@@ -41,14 +38,13 @@ class AsyncFreeAIImager(AsyncImageProvider):
             model (str): Which model to use (default: dall-e-3)
             timeout (int): Request timeout in seconds (default: 60)
             proxies (dict): Proxy settings for requests (default: {})
-            logging (bool): Enable fire logging (default: True)
         """
         self.image_gen_endpoint: str = "https://api.freeaichatplayground.com/v1/images/generations"
         self.headers = {
             "Accept": "application/json",
             "Accept-Language": "en-US,en;q=0.9",
             "Content-Type": "application/json",
-            "User-Agent": LitAgent().random(),  # Using our fire random agent! 🔥
+            "User-Agent": LitAgent().random(),
             "Origin": "https://freeaichatplayground.com",
             "Referer": "https://freeaichatplayground.com/",
         }
@@ -57,9 +53,6 @@ class AsyncFreeAIImager(AsyncImageProvider):
         self.proxies = proxies
         self.prompt: str = "AI-generated image - webscout"
         self.image_extension: str = "png"
-        self.logging = logging
-        if self.logging:
-            logger.info("AsyncFreeAIPlayground initialized! Ready to create some fire art! 🚀")
 
     async def generate(
         self, prompt: str, amount: int = 1, additives: bool = True,
@@ -93,9 +86,6 @@ class AsyncFreeAIImager(AsyncImageProvider):
             + choice(punctuation)
         )
 
-        if self.logging:
-            logger.info(f"Generating {amount} images... 🎨")
-
         self.prompt = prompt
         response = []
 
@@ -115,28 +105,24 @@ class AsyncFreeAIImager(AsyncImageProvider):
                 async with session.post(self.image_gen_endpoint, json=payload) as resp:
                     resp.raise_for_status()
                     data = await resp.json()
-                    image_url = data['data'][0]['url']
-
-                    # Get the image data from the URL
-                    async with session.get(image_url) as img_resp:
-                        img_resp.raise_for_status()
-                        image_bytes = await img_resp.read()
-                        response.append(image_bytes)
+                    if 'data' in data and len(data['data']) > 0:
+                        image_url = data['data'][0]['url']
 
-                    if self.logging:
-                        logger.success(f"Generated image {len(response)}/{amount}! 🎨")
-                    break
+                        async with session.get(image_url) as img_resp:
+                            img_resp.raise_for_status()
+                            image_bytes = await img_resp.read()
+                            response.append(image_bytes)
+                        break
+                    else:
+                        print(f"Warning: No image data in response: {data}")
+                        if attempt == max_retries - 1:
+                            raise Exception("No image data received after all retries")
             except Exception as e:
+                print(f"Error generating image (attempt {attempt + 1}/{max_retries}): {str(e)}")
                 if attempt == max_retries - 1:
-                    if self.logging:
-                        logger.error(f"Failed to generate image after {max_retries} attempts: {e} 😢")
                     raise
-                if self.logging:
-                    logger.warning(f"Attempt {attempt + 1} failed, retrying in {retry_delay}s... 🔄")
                 await asyncio.sleep(retry_delay)
 
-        if self.logging:
-            logger.success("All images generated successfully! 🎉")
         return response
 
     async def save(
@@ -159,8 +145,6 @@ class AsyncFreeAIImager(AsyncImageProvider):
         """
         if not os.path.exists(dir):
            os.makedirs(dir)
-            if self.logging:
-                logger.info(f"Created directory: {dir} 📁")
 
         name = self.prompt if name is None else name
         saved_paths = []
@@ -171,9 +155,6 @@ class AsyncFreeAIImager(AsyncImageProvider):
 
             async with aiofiles.open(filepath, "wb") as f:
                 await f.write(image_bytes)
-
-            if self.logging:
-                logger.success(f"Saved image to: {filepath} 💾")
             return filename
 
         if isinstance(response, list):
@@ -181,14 +162,9 @@ class AsyncFreeAIImager(AsyncImageProvider):
             image_list = response
         else:
             image_list = [chunk async for chunk in response]
-        if self.logging:
-            logger.info(f"Saving {len(image_list)} images... 💾")
-
         tasks = [save_single_image(img, i) for i, img in enumerate(image_list)]
         saved_paths = await asyncio.gather(*tasks)
 
-        if self.logging:
-            logger.success(f"All images saved successfully! Check {dir} 🎉")
         return saved_paths
 
 
@@ -199,8 +175,7 @@ if __name__ == "__main__":
         resp = await bot.generate("A shiny red sports car speeding down a scenic mountain road", 1)
         paths = await bot.save(resp)
         print(paths)
-    except Exception as e:
-        if bot.logging:
-            logger.error(f"An error occurred: {e} 😢")
+    except Exception:
+        pass
 
     asyncio.run(main())
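With the logger gone, failures in this provider now surface as plain prints plus a raised exception after `max_retries`, and callers no longer pass a `logging` flag. A minimal sketch of the 7.8 surface, assuming `AsyncFreeAIImager` is exported where the file layout suggests; "Flux Dev" is the model this release adds to the list:

```python
# Sketch: driving the reworked async imager. The import path is inferred
# from the file location; only generate()/save() appear in this diff.
import asyncio
from webscout.Provider.TTI import AsyncFreeAIImager  # assumed export path

async def main():
    bot = AsyncFreeAIImager(model="Flux Dev", timeout=60)  # no `logging` kwarg in 7.8
    images = await bot.generate("A lighthouse at dusk", amount=1)
    paths = await bot.save(images)  # save() also takes name/dir per the hunks above
    print(paths)

asyncio.run(main())
```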