webscout 4.7-py3-none-any.whl → 4.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (53)
  1. webscout/Agents/functioncall.py +97 -37
  2. webscout/Bard.py +365 -0
  3. webscout/Bing_search.py +124 -0
  4. webscout/DWEBS.py +141 -777
  5. webscout/Local/_version.py +1 -1
  6. webscout/Provider/Andi.py +7 -1
  7. webscout/Provider/BasedGPT.py +11 -5
  8. webscout/Provider/Berlin4h.py +11 -5
  9. webscout/Provider/Blackboxai.py +10 -4
  10. webscout/Provider/Cloudflare.py +286 -0
  11. webscout/Provider/Cohere.py +11 -5
  12. webscout/Provider/DARKAI.py +25 -7
  13. webscout/Provider/Deepinfra.py +2 -1
  14. webscout/Provider/Deepseek.py +25 -9
  15. webscout/Provider/DiscordRocks.py +389 -0
  16. webscout/Provider/Farfalle.py +227 -0
  17. webscout/Provider/Gemini.py +1 -1
  18. webscout/Provider/Groq.py +244 -110
  19. webscout/Provider/Llama.py +13 -5
  20. webscout/Provider/Llama3.py +15 -2
  21. webscout/Provider/OLLAMA.py +8 -7
  22. webscout/Provider/{Geminiflash.py → PI.py} +96 -40
  23. webscout/Provider/Perplexity.py +422 -52
  24. webscout/Provider/Phind.py +6 -5
  25. webscout/Provider/PizzaGPT.py +7 -1
  26. webscout/Provider/Youchat.py +98 -76
  27. webscout/Provider/__init__.py +26 -31
  28. webscout/Provider/ai4chat.py +193 -0
  29. webscout/Provider/{VTLchat.py → felo_search.py} +62 -76
  30. webscout/Provider/julius.py +263 -0
  31. webscout/Provider/koala.py +11 -5
  32. webscout/Provider/liaobots.py +268 -0
  33. webscout/Provider/meta.py +2 -1
  34. webscout/Provider/{ChatGPTUK.py → turboseek.py} +79 -56
  35. webscout/Provider/{FreeGemini.py → xdash.py} +51 -18
  36. webscout/Provider/yep.py +258 -0
  37. webscout/__init__.py +1 -59
  38. webscout/version.py +1 -1
  39. webscout/webai.py +2 -64
  40. webscout/webscout_search.py +1 -1
  41. {webscout-4.7.dist-info → webscout-4.9.dist-info}/METADATA +249 -323
  42. webscout-4.9.dist-info/RECORD +83 -0
  43. webscout/GoogleS.py +0 -342
  44. webscout/Provider/Geminipro.py +0 -152
  45. webscout/Provider/Leo.py +0 -469
  46. webscout/Provider/OpenGPT.py +0 -867
  47. webscout/Provider/Xjai.py +0 -230
  48. webscout/Provider/Yepchat.py +0 -478
  49. webscout-4.7.dist-info/RECORD +0 -80
  50. {webscout-4.7.dist-info → webscout-4.9.dist-info}/LICENSE.md +0 -0
  51. {webscout-4.7.dist-info → webscout-4.9.dist-info}/WHEEL +0 -0
  52. {webscout-4.7.dist-info → webscout-4.9.dist-info}/entry_points.txt +0 -0
  53. {webscout-4.7.dist-info → webscout-4.9.dist-info}/top_level.txt +0 -0
webscout/Provider/DiscordRocks.py
@@ -0,0 +1,389 @@
+ import requests
+ import json
+ from typing import Any, AsyncGenerator, Dict
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+
+
+ class DiscordRocks(Provider):
+     """
+     A class to interact with the DiscordRocks API.
+     """
+
+     AVAILABLE_MODELS = [
+         "claude-3-haiku-20240307",
+         "claude-3-sonnet-20240229",
+         "claude-3-5-sonnet-20240620",
+         "claude-3-opus-20240229",
+         "gpt-4",
+         "gpt-4-0613",
+         "gpt-4-turbo",
+         "gpt-4o-mini-2024-07-18",
+         "gpt-4o-mini",
+         "gpt-3.5-turbo",
+         "gpt-3.5-turbo-0125",
+         "gpt-3.5-turbo-1106",
+         "gpt-3.5-turbo-16k",
+         "gpt-3.5-turbo-0613",
+         "gpt-3.5-turbo-16k-0613",
+         "gpt-4o",
+         "llama-3-70b-chat",
+         "llama-3-70b-chat-turbo",
+         "llama-3-8b-chat",
+         "llama-3-8b-chat-turbo",
+         "llama-3-70b-chat-lite",
+         "llama-3-8b-chat-lite",
+         "llama-2-70b-chat",
+         "llama-2-13b-chat",
+         "llama-2-7b-chat",
+         "llama-3.1-405b-turbo",
+         "llama-3.1-70b-turbo",
+         "llama-3.1-8b-turbo",
+         "LlamaGuard-2-8b",
+         "Yi-34B-Chat",
+         "Yi-34B",
+         "Yi-6B",
+         "Mixtral-8x7B-v0.1",
+         "Mixtral-8x22B",
+         "Mixtral-8x7B-Instruct-v0.1",
+         "Mixtral-8x22B-Instruct-v0.1",
+         "Mistral-7B-Instruct-v0.1",
+         "Mistral-7B-Instruct-v0.2",
+         "Mistral-7B-Instruct-v0.3",
+         "openchat-3.5",
+         "WizardLM-13B-V1.2",
+         "WizardCoder-Python-34B-V1.0",
+         "Qwen1.5-0.5B-Chat",
+         "Qwen1.5-1.8B-Chat",
+         "Qwen1.5-4B-Chat",
+         "Qwen1.5-7B-Chat",
+         "Qwen1.5-14B-Chat",
+         "Qwen1.5-72B-Chat",
+         "Qwen1.5-110B-Chat",
+         "Qwen2-72B-Instruct",
+         "gemma-2b-it",
+         "gemma-7b-it",
+         "gemma-2b",
+         "gemma-7b",
+         "dbrx-instruct",
+         "vicuna-7b-v1.5",
+         "vicuna-13b-v1.5",
+         "dolphin-2.5-mixtral-8x7b",
+         "deepseek-coder-33b-instruct",
+         "deepseek-coder-67b-instruct",
+         "deepseek-llm-67b-chat",
+         "Nous-Capybara-7B-V1p9",
+         "Nous-Hermes-2-Mixtral-8x7B-DPO",
+         "Nous-Hermes-2-Mixtral-8x7B-SFT",
+         "Nous-Hermes-llama-2-7b",
+         "Nous-Hermes-Llama2-13b",
+         "Nous-Hermes-2-Yi-34B",
+         "Mistral-7B-OpenOrca",
+         "alpaca-7b",
+         "OpenHermes-2-Mistral-7B",
+         "OpenHermes-2.5-Mistral-7B",
+         "phi-2",
+         "phi-3",
+         "WizardLM-2-8x22B",
+         "NexusRaven-V2-13B",
+         "Phind-CodeLlama-34B-v2",
+         "CodeLlama-7b-Python-hf",
+         "CodeLlama-7b-Python",
+         "CodeLlama-13b-Python-hf",
+         "CodeLlama-34b-Python-hf",
+         "CodeLlama-70b-Python-hf",
+         "snowflake-arctic-instruct",
+         "SOLAR-10.7B-Instruct-v1.0",
+         "StripedHyena-Hessian-7B",
+         "StripedHyena-Nous-7B",
+         "Llama-2-7B-32K-Instruct",
+         "CodeLlama-13b-Instruct",
+         "evo-1-131k-base",
+         "OLMo-7B-Instruct",
+         "Platypus2-70B-instruct",
+         "Snorkel-Mistral-PairRM-DPO",
+         "ReMM-SLERP-L2-13B",
+         "MythoMax-L2-13b",
+         "chronos-hermes-13b",
+         "Llama-Guard-7b",
+         "gemma-2-9b-it",
+         "gemma-2-27b-it",
+         "Toppy-M-7B",
+         "gemini-1.5-flash",
+         "gemini-1.5-pro",
+         "gemini-1.0-pro",
+         "command-r+",
+         "sparkdesk"
+     ]
+
+     def __init__(
+         self,
+         model: str = "gpt-4o",
+         max_tokens: int = 4096,
+         temperature: float = 1,
+         top_p: float = 1,
+         is_conversation: bool = True,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         system_prompt: str = None,
+     ):
+         """
+         Initializes the DiscordRocks API with given parameters.
+
+         Args:
+             model (str): The AI model to use for text generation. Defaults to "gpt-4o".
+             max_tokens (int): The maximum number of tokens to generate. Defaults to 4096.
+             temperature (float): The temperature parameter for the model. Defaults to 1.
+             top_p (float): The top_p parameter for the model. Defaults to 1.
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             system_prompt (str, optional): System prompt to guide the AI's behavior. Defaults to None.
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model name. Available models are: {self.AVAILABLE_MODELS}")
+
+         self.model = model
+         self.max_tokens = max_tokens
+         self.temperature = temperature
+         self.top_p = top_p
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.chat_completions_url = "https://api.discord.rocks/chat/completions"
+         self.images_generations_url = "https://api.discord.rocks/images/generations"
+         self.headers = {
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://llmplayground.net",
+             "priority": "u=1, i",
+             "referer": "https://llmplayground.net/",
+             "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "cross-site",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+         }
+         # A tuple (not a generator) so membership can be tested on every ask() call.
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+         self.system_prompt = system_prompt  # Store the system prompt
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         """
+         Sends a prompt to the DiscordRocks AI API and returns the response.
+
+         Args:
+             prompt: The text prompt to generate text from.
+             stream (bool, optional): Whether to stream the response. Defaults to False.
+             raw (bool, optional): Whether to return the raw response. Defaults to False.
+             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+         Returns:
+             The response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         # Only attach a system message when a system prompt was actually provided,
+         # so a None content is never sent to the API.
+         messages = [{"role": "user", "content": conversation_prompt}]
+         if self.system_prompt:
+             messages.insert(0, {"role": "system", "content": self.system_prompt})
+
+         payload = {
+             "messages": messages,
+             "model": self.model,
+             "max_tokens": self.max_tokens,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+             "stream": stream
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_completions_url, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+             )
+
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     try:
+                         # decode_unicode=True already yields str, so no extra .decode() is needed
+                         json_line = json.loads(line.split('data: ', 1)[1])
+                         content = json_line['choices'][0]['delta']['content']
+                         streaming_response += content
+                         yield content if raw else dict(text=streaming_response)
+                     except (IndexError, KeyError, TypeError, json.JSONDecodeError):
+                         continue
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             response = self.session.post(
+                 self.chat_completions_url, json=payload, headers=self.headers, timeout=self.timeout
+             )
+
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+             response_data = response.json()
+             full_content = ''.join([choice['message']['content'] for choice in response_data['choices']])
+             self.last_response.update(dict(text=full_content))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+     def generate_image(
+         self,
+         prompt: str,
+         model: str = "dall-e-3",
+         n: int = 1,
+         quality: str = "hd",
+         response_format: str = "url",
+         size: str = "1024x1024",
+     ) -> list:
+         """
+         Generates an image using the DiscordRocks API.
+
+         Args:
+             prompt (str): The prompt describing the image to generate.
+             model (str, optional): The image generation model to use. Defaults to "dall-e-3".
+             n (int, optional): The number of images to generate. Defaults to 1.
+             quality (str, optional): The quality of the generated images ("standard", "hd"). Defaults to "hd".
+             response_format (str, optional): The response format ("url", "b64_json"). Defaults to "url".
+             size (str, optional): The size of the generated images ("256x256", "512x512", "1024x1024").
+                 Defaults to "1024x1024".
+
+         Returns:
+             list: The "data" list from the API response, containing the generated image URLs or payloads.
+         """
+         payload = {
+             "prompt": prompt,
+             "model": model,
+             "n": n,
+             "quality": quality,
+             "response_format": response_format,
+             "size": size
+         }
+
+         response = self.session.post(self.images_generations_url, headers=self.headers, json=payload, timeout=self.timeout)
+         if not response.ok:
+             raise exceptions.FailedToGenerateResponseError(
+                 f"Failed to generate image - ({response.status_code}, {response.reason})"
+             )
+
+         return response.json().get("data", [])
+
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = DiscordRocks()
+     response = ai.chat("tell me about india", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
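
The demo above only exercises chat. For completeness, a minimal sketch of the new images endpoint follows, written against the generate_image signature added in this file; the "dall-e-3" model name and the "url" key in each result entry are assumptions carried over from the OpenAI-style payload the method builds, not documented behavior of api.discord.rocks.

    from webscout.Provider.DiscordRocks import DiscordRocks

    # Sketch only: assumes api.discord.rocks mirrors the OpenAI images API,
    # as the payload keys built in generate_image() suggest.
    ai = DiscordRocks()
    data = ai.generate_image("a lighthouse at dusk", n=1, size="1024x1024")
    for item in data:
        # Each entry is expected to carry a "url" (or "b64_json") field.
        print(item.get("url"))
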
webscout/Provider/Farfalle.py
@@ -0,0 +1,227 @@
+ import requests
+ import json
+ from typing import Any, Dict, Optional
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+
+
+ class Farfalle(Provider):
+     """
+     A class to interact with the Farfalle.dev API.
+     """
+
+     AVAILABLE_MODELS = [
+         "gpt-3.5-turbo",
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "gpt-3.5-turbo",
+     ) -> None:
+         """
+         Initializes the Farfalle.dev API with given parameters.
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion.
+                 Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts.
+                 Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): AI model to use. Defaults to "gpt-3.5-turbo".
+                 Options: "gpt-3.5-turbo"
+         """
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://api.farfalle.dev/chat"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.headers = {
+             "accept": "text/event-stream",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://www.farfalle.dev",
+             "priority": "u=1, i",
+             "referer": "https://www.farfalle.dev/",
+             "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-site",
+             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+         }
+
+         # A tuple (not a generator) so membership can be tested on every ask() call.
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         """
+         Sends a prompt to the Farfalle.dev API and returns the response.
+
+         Args:
+             prompt: The text prompt to generate text from.
+             stream (bool, optional): Whether to stream the response. Defaults to False.
+             raw (bool, optional): Whether to return the raw response. Defaults to False.
+             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+         Returns:
+             The response from the API.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "query": conversation_prompt,
+             "model": self.model
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+             )
+
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 )
+
+             streaming_response = ""
+             for line in response.iter_lines():
+                 if line:
+                     decoded_line = line.decode('utf-8')
+                     if decoded_line.startswith("data:"):
+                         data = decoded_line[len("data:"):].strip()
+                         if data:
+                             try:
+                                 event = json.loads(data)
+                                 if event.get("event") == "final-response":
+                                     message = event['data'].get('message', '')
+                                     streaming_response += message
+                                     yield message if raw else dict(text=streaming_response)
+                             except json.JSONDecodeError:
+                                 continue
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     from rich import print
+
+     ai = Farfalle()
+     # Stream the response
+     response = ai.chat(input(">>> "), stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
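
For reference, the stream parser in ask() above expects server-sent events shaped as below. This frame is reconstructed from the "final-response" handling in the code, not from any Farfalle documentation, so treat the exact schema as an assumption.

    import json

    # Hypothetical SSE frame, shaped to match what Farfalle.ask() parses.
    frame = 'data: {"event": "final-response", "data": {"message": "Hello from Farfalle"}}'

    if frame.startswith("data:"):
        event = json.loads(frame[len("data:"):].strip())
        if event.get("event") == "final-response":
            print(event["data"].get("message", ""))  # -> Hello from Farfalle
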
webscout/Provider/Gemini.py
@@ -28,7 +28,7 @@ from webscout import exceptions
  from typing import Any, AsyncGenerator, Dict
  import logging
  import httpx
- from Bard import Chatbot
+ from ..Bard import Chatbot
  import logging
  from os import path
  from json import load
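
This one-line change repoints Gemini.py from a separately installed top-level Bard module to the copy now vendored into the package (webscout/Bard.py, +365 lines in this release). Assuming the vendored module exposes the same Chatbot class, the relative import resolves as in this sketch:

    # Inside webscout/Provider/Gemini.py, "from ..Bard import Chatbot" now means:
    from webscout.Bard import Chatbot  # the vendored webscout/Bard.py

    # rather than the previously required external package:
    # from Bard import Chatbot        # no longer needed after this release
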