webscout 5.8-py3-none-any.whl → 6.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (40)
  1. webscout/Provider/Amigo.py +267 -0
  2. webscout/Provider/ChatHub.py +209 -0
  3. webscout/Provider/Chatify.py +3 -3
  4. webscout/Provider/Cloudflare.py +3 -3
  5. webscout/Provider/DARKAI.py +1 -1
  6. webscout/Provider/Deepinfra.py +95 -389
  7. webscout/Provider/Deepseek.py +4 -6
  8. webscout/Provider/DiscordRocks.py +3 -3
  9. webscout/Provider/Free2GPT.py +3 -3
  10. webscout/Provider/OLLAMA.py +4 -4
  11. webscout/Provider/RUBIKSAI.py +3 -3
  12. webscout/Provider/TTI/WebSimAI.py +142 -0
  13. webscout/Provider/TTI/__init__.py +3 -1
  14. webscout/Provider/TTI/amigo.py +148 -0
  15. webscout/Provider/TTS/__init__.py +2 -1
  16. webscout/Provider/TTS/parler.py +108 -0
  17. webscout/Provider/Youchat.py +4 -5
  18. webscout/Provider/__init__.py +10 -5
  19. webscout/Provider/ai4chat.py +3 -2
  20. webscout/Provider/bagoodex.py +145 -0
  21. webscout/Provider/bixin.py +3 -3
  22. webscout/Provider/cleeai.py +3 -3
  23. webscout/Provider/elmo.py +2 -5
  24. webscout/Provider/julius.py +6 -40
  25. webscout/Provider/learnfastai.py +253 -0
  26. webscout/Provider/llamatutor.py +2 -2
  27. webscout/Provider/prefind.py +232 -0
  28. webscout/Provider/promptrefine.py +3 -3
  29. webscout/Provider/turboseek.py +1 -1
  30. webscout/Provider/twitterclone.py +25 -41
  31. webscout/Provider/upstage.py +3 -3
  32. webscout/Provider/x0gpt.py +6 -6
  33. webscout/version.py +1 -1
  34. {webscout-5.8.dist-info → webscout-6.0.dist-info}/METADATA +187 -121
  35. {webscout-5.8.dist-info → webscout-6.0.dist-info}/RECORD +39 -32
  36. {webscout-5.8.dist-info → webscout-6.0.dist-info}/WHEEL +1 -1
  37. webscout/Provider/Poe.py +0 -208
  38. {webscout-5.8.dist-info → webscout-6.0.dist-info}/LICENSE.md +0 -0
  39. {webscout-5.8.dist-info → webscout-6.0.dist-info}/entry_points.txt +0 -0
  40. {webscout-5.8.dist-info → webscout-6.0.dist-info}/top_level.txt +0 -0
webscout/Provider/Deepinfra.py:

@@ -1,19 +1,23 @@
-
 import requests
-
-from ..AIutel import Optimizers
-from ..AIutel import Conversation
-from ..AIutel import AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
+import json
+import os
+from typing import Any, Dict, Optional, Generator, Union
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-from typing import Any, AsyncGenerator
-import httpx
 
 class DeepInfra(Provider):
+    """
+    A class to interact with the DeepInfra API.
+    """
+
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 2049,  # Set a reasonable default
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -21,58 +25,29 @@ class DeepInfra(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "Qwen/Qwen2.5-72B-Instruct",
-        system_prompt: str = "You are a Helpful AI."
+        model: str = "Qwen/Qwen2.5-72B-Instruct",
     ):
-        """Instantiates DeepInfra
+        """Initializes the DeepInfra API client."""
+        self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
+        self.headers = {
+            "Accept": "text/event-stream, application/json",
 
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): DeepInfra model name. Defaults to "meta-llama/Meta-Llama-3-70B-Instruct".
-            system_prompt (str, optional): System prompt for DeepInfra. Defaults to "You are a Helpful AI.".
-        """
+        }
         self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.chat_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
         self.timeout = timeout
         self.last_response = {}
         self.model = model
-        self.system_prompt = system_prompt
-
-        self.headers = {
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://deepinfra.com',
-            'Pragma': 'no-cache',
-            'Referer': 'https://deepinfra.com/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'X-Deepinfra-Source': 'web-embed',
-            'accept': 'text/event-stream',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"'
-        }
 
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        self.session.headers.update(self.headers)
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -80,29 +55,21 @@ class DeepInfra(Provider):
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
 
     def ask(
         self,
         prompt: str,
+        stream: bool = False,
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
+    ) -> Union[Dict[str, Any], Generator]:
 
-        Args:
-            prompt (str): Prompt to be sent.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -110,357 +77,96 @@ class DeepInfra(Provider):
                     conversation_prompt if conversationally else prompt
                 )
             else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-        self.session.headers.update(self.headers)
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Payload construction
         payload = {
-            'model': self.model,
-            'messages': [
-                {"role": "system", "content": self.system_prompt},
+            "model": self.model,
+            "messages": [
+                {"role": "system", "content": "You are a helpful assistant."},
                 {"role": "user", "content": conversation_prompt},
             ],
-            'temperature': 0.7,
-            'max_tokens': 8028,
-            'stop': []
+            "stream": stream
         }
 
-        response = self.session.post(
-            self.chat_endpoint, json=payload, timeout=self.timeout
-        )
-        if not response.ok:
-            raise Exception(
-                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-            )
+        def for_stream():
+            try:
+                with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):  # Decode lines
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]  # Remove "data: " prefix
+                                if json_str == "[DONE]":
+                                    break
+                                try:
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+
+                                            # Yield ONLY the new content:
+                                            resp = dict(text=content)
+                                            yield resp if raw else resp
+                                except json.JSONDecodeError:
+                                    pass  # Or handle the error as needed
+                    self.conversation.update_chat_history(prompt, streaming_text)  # Update history *after* streaming
+            except requests.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+
+        def for_non_stream():
+            # let's make use of stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+
+        return for_stream() if stream else for_non_stream()
+
 
-        resp = response.json()
-        message_load = self.get_message(resp)
-        self.conversation.update_chat_history(
-            prompt, message_load
-        )
-        return resp
 
     def chat(
         self,
         prompt: str,
+        stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-        return self.get_message(
-            self.ask(
-                prompt,
-                optimizer=optimizer,
-                conversationally=conversationally,
-            )
-        )
 
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        try:
-            return response["choices"][0]["message"]["content"]
-        except KeyError:
-            return ""
-
-class AsyncDeepInfra(AsyncProvider):
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "meta-llama/Meta-Llama-3-70B-Instruct",
-        system_prompt: str = "You are a Helpful AI."
-    ):
-        """Instantiates DeepInfra
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): DeepInfra model name. Defaults to "meta-llama/Meta-Llama-3-70B-Instruct".
-            system_prompt (str, optional): System prompt for DeepInfra. Defaults to "You are a Helpful AI.".
-        """
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.chat_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.system_prompt = system_prompt
-
-        self.headers = {
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://deepinfra.com',
-            'Pragma': 'no-cache',
-            'Referer': 'https://deepinfra.com/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'X-Deepinfra-Source': 'web-embed',
-            'accept': 'text/event-stream',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"'
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.client = httpx.AsyncClient(proxies=proxies, headers=self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    async def ask(
-        self,
-        prompt: str,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be sent.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
                 )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-        payload = {
-            'model': self.model,
-            'messages': [
-                {"role": "system", "content": self.system_prompt},
-                {"role": "user", "content": conversation_prompt},
-            ],
-            'temperature': 0.7,
-            'max_tokens': 8028,
-            'stop': []
-        }
-
-        response = await self.client.post(self.chat_endpoint, json=payload, timeout=self.timeout)
-        if response.status_code != 200:
-            raise Exception(
-                f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
             )
 
-        resp = response.json()
-        message_load = self.get_message(resp)
-        self.conversation.update_chat_history(
-            prompt, message_load
-        )
-        return resp
-
-    async def chat(
-        self,
-        prompt: str,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-        return self.get_message(
-            await self.ask(
-                prompt,
-                optimizer=optimizer,
-                conversationally=conversationally,
-            )
-        )
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        try:
-            return response["choices"][0]["message"]["content"]
-        except KeyError:
-            return ""
-import requests
-import base64
-from typing import List, Dict, Union, Any
-
-class VLM:
-    def __init__(
-        self,
-        model: str = "llava-hf/llava-1.5-7b-hf",
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        system_prompt: str = "You are a Helpful AI.",
-        proxies: dict = {}
-    ):
-        """Instantiates VLM
-
-        Args:
-            model (str): VLM model name.
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            system_prompt (str, optional): System prompt for VLM. Defaults to "You are a Helpful AI.".
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-        """
-        self.model = model
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.system_prompt = system_prompt
-        self.headers = {
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://deepinfra.com',
-            'Pragma': 'no-cache',
-            'Referer': 'https://deepinfra.com/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'X-Deepinfra-Source': 'web-embed',
-            'accept': 'text/event-stream',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"'
-        }
-
-        self.session = requests.Session()
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
-
-    def encode_image_to_base64(self, image_path: str) -> str:
-        with open(image_path, "rb") as image_file:
-            return base64.b64encode(image_file.read()).decode("utf-8")
+        return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        try:
-            return response["choices"][0]["message"]["content"]
-        except KeyError:
-            return ""
-
-    def ask(
-        self,
-        prompt: Union[str, Dict[str, str]],
-        raw: bool = False
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (Union[str, Dict[str, str]]): Prompt to be sent, can be text or a dict with base64 image.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-
-        Returns:
-            dict: Response from the API
-        """
-        messages = [
-            {"role": "system", "content": self.system_prompt},
-            {"role": "user", "content": prompt if isinstance(prompt, str) else prompt['content']}
-        ]
-
-        payload = {
-            'model': self.model,
-            'messages': messages,
-            'temperature': 0.7,
-            'max_tokens': self.max_tokens_to_sample,
-            'stop': [],
-            'stream': False
-        }
-
-        response = self.session.post(
-            "https://api.deepinfra.com/v1/openai/chat/completions",
-            json=payload,
-            timeout=self.timeout
-        )
-        if not response.ok:
-            raise Exception(
-                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-            )
+        return response["text"]
 
-        return response.json()
-
-    def chat(
-        self,
-        prompt: Union[str, Dict[str, str]]
-    ) -> str:
-        """Generate response `str`
 
-        Args:
-            prompt (Union[str, Dict[str, str]]): Prompt to be sent, can be text or a dict with base64 image.
 
-        Returns:
-            str: Response generated
-        """
-        return self.get_message(self.ask(prompt))
-
+if __name__ == "__main__":
+    from rich import print
+    ai = DeepInfra(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
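The reworked provider now talks to DeepInfra's OpenAI-compatible chat-completions endpoint directly and parses the server-sent event stream itself. Below is a minimal standalone sketch of that parsing loop, assuming the endpoint, unauthenticated request, payload shape, and "data: " framing shown in the hunk above; the helper name stream_deepinfra and the hard-coded model are illustrative only, not part of the package:

    # Sketch of the SSE loop the new DeepInfra.ask() implements (assumptions noted above).
    import json
    import requests

    def stream_deepinfra(prompt: str, model: str = "Qwen/Qwen2.5-72B-Instruct"):
        url = "https://api.deepinfra.com/v1/openai/chat/completions"
        payload = {
            "model": model,
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
            "stream": True,
        }
        # No API key is sent here, mirroring the diff above.
        with requests.post(url, json=payload, stream=True, timeout=30) as response:
            response.raise_for_status()
            for line in response.iter_lines(decode_unicode=True):
                if not line or not line.startswith("data: "):
                    continue
                data = line[6:]  # strip the "data: " prefix
                if data == "[DONE]":
                    break
                delta = json.loads(data)["choices"][0].get("delta", {}).get("content")
                if delta:
                    yield delta  # only the newly received text

    # Usage: for piece in stream_deepinfra("write a poem about AI"): print(piece, end="", flush=True)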
webscout/Provider/Deepseek.py:

@@ -151,16 +151,14 @@ class DeepSeek(Provider):
                 f"Failed to generate response - ({response.status_code}, {response.reason})"
             )
         streaming_response = ""
-        collected_messages = []
         for line in response.iter_lines():
             if line:
                 json_line = json.loads(line.decode('utf-8').split('data: ')[1])
                 if 'choices' in json_line and len(json_line['choices']) > 0:
                     delta_content = json_line['choices'][0].get('delta', {}).get('content')
                     if delta_content:
-                        collected_messages.append(delta_content)
-                        streaming_response = ''.join(collected_messages)
-                        yield delta_content if raw else dict(text=streaming_response)
+                        streaming_response += delta_content
+                        yield delta_content if raw else dict(text=delta_content)
         self.last_response.update(dict(text=streaming_response))
         self.conversation.update_chat_history(
             prompt, self.get_message(self.last_response)
@@ -222,7 +220,7 @@ class DeepSeek(Provider):
 
 if __name__ == '__main__':
     from rich import print
-    ai = DeepSeek(api_key="")
-    response = ai.chat("tell me about india")
+    ai = DeepSeek(api_key="", timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/DiscordRocks.py:

@@ -167,7 +167,7 @@ class DiscordRocks(Provider):
                     content = json_data['choices'][0]['delta'].get('content', '')
                     if content:
                         full_content += content
-                        yield content if raw else dict(text=full_content)
+                        yield content if raw else dict(text=content)
                 except json.JSONDecodeError:
                     print(f'Error decoding JSON: {decoded_line}')
             except KeyError:
@@ -247,7 +247,7 @@ class DiscordRocks(Provider):
 
 if __name__ == '__main__':
     from rich import print
-    ai = DiscordRocks()
-    response = ai.chat(input(">>> "))
+    ai = DiscordRocks(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/Free2GPT.py:

@@ -160,7 +160,7 @@ class Free2GPT(Provider):
             for chunk in response.iter_content(chunk_size=self.stream_chunk_size):
                 if chunk:
                     full_response += chunk.decode('utf-8')
-                    yield chunk.decode('utf-8') if raw else dict(text=full_response)
+                    yield chunk.decode('utf-8') if raw else dict(text=chunk.decode('utf-8'))
 
             self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
@@ -228,7 +228,7 @@ class Free2GPT(Provider):
 if __name__ == "__main__":
     from rich import print
 
-    ai = Free2GPT()
-    response = ai.chat('hi')
+    ai = Free2GPT(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
    for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/OLLAMA.py:

@@ -166,7 +166,7 @@ class OLLAMA(Provider):
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 if __name__ == "__main__":
-    ollama_provider = OLLAMA(model="qwen2:0.5b")
-    response = ollama_provider.chat("hi", stream=True)
-    for r in response:
-        print(r, end="", flush=True)
+    ai = OLLAMA(model="qwen2:0.5b")
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
webscout/Provider/RUBIKSAI.py:

@@ -146,7 +146,7 @@ class RUBIKSAI(Provider):
                     if "choices" in data and len(data["choices"]) > 0:
                         content = data["choices"][0]["delta"].get("content", "")
                         streaming_response += content
-                        yield content if raw else dict(text=streaming_response)
+                        yield content if raw else dict(text=content)
                 except json.decoder.JSONDecodeError:
                     continue
 
@@ -211,7 +211,7 @@ class RUBIKSAI(Provider):
 if __name__ == '__main__':
 
     from rich import print
-    ai = RUBIKSAI()
-    response = ai.chat("hi")
+    ai = RUBIKSAI(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
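The recurring one-line change in the Deepseek, DiscordRocks, Free2GPT, and RUBIKSAI hunks above mirrors the Deepinfra rewrite: each streaming generator now yields only the newly received fragment as dict(text=...) instead of the text accumulated so far, so a consumer that joins or prints the chunks no longer sees repeated prefixes. A toy illustration of the difference (the deltas are invented; only the dict(text=...) shape comes from the diff):

    deltas = ["Roses ", "are ", "red"]

    def old_style():  # 5.8 behaviour: cumulative text on every yield
        buffer = ""
        for d in deltas:
            buffer += d
            yield dict(text=buffer)

    def new_style():  # 6.0 behaviour: only the new fragment
        for d in deltas:
            yield dict(text=d)

    print("".join(c["text"] for c in old_style()))  # Roses Roses are Roses are red
    print("".join(c["text"] for c in new_style()))  # Roses are red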