webscout 4.7__py3-none-any.whl → 4.8__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (43)
  1. webscout/Agents/functioncall.py +97 -37
  2. webscout/Bard.py +365 -0
  3. webscout/Local/_version.py +1 -1
  4. webscout/Provider/Andi.py +7 -1
  5. webscout/Provider/BasedGPT.py +11 -5
  6. webscout/Provider/Berlin4h.py +11 -5
  7. webscout/Provider/Blackboxai.py +10 -4
  8. webscout/Provider/Cohere.py +11 -5
  9. webscout/Provider/DARKAI.py +25 -7
  10. webscout/Provider/Deepinfra.py +2 -1
  11. webscout/Provider/Deepseek.py +25 -9
  12. webscout/Provider/DiscordRocks.py +389 -0
  13. webscout/Provider/{ChatGPTUK.py → Farfalle.py} +80 -67
  14. webscout/Provider/Gemini.py +1 -1
  15. webscout/Provider/Groq.py +244 -110
  16. webscout/Provider/Llama.py +13 -5
  17. webscout/Provider/Llama3.py +15 -2
  18. webscout/Provider/OLLAMA.py +8 -7
  19. webscout/Provider/Perplexity.py +422 -52
  20. webscout/Provider/Phind.py +6 -5
  21. webscout/Provider/PizzaGPT.py +7 -1
  22. webscout/Provider/__init__.py +12 -31
  23. webscout/Provider/ai4chat.py +193 -0
  24. webscout/Provider/koala.py +11 -5
  25. webscout/Provider/{VTLchat.py → liaobots.py} +120 -104
  26. webscout/Provider/meta.py +2 -1
  27. webscout/version.py +1 -1
  28. webscout/webai.py +2 -64
  29. webscout/webscout_search.py +1 -1
  30. {webscout-4.7.dist-info → webscout-4.8.dist-info}/METADATA +227 -252
  31. {webscout-4.7.dist-info → webscout-4.8.dist-info}/RECORD +35 -40
  32. webscout/Provider/FreeGemini.py +0 -169
  33. webscout/Provider/Geminiflash.py +0 -152
  34. webscout/Provider/Geminipro.py +0 -152
  35. webscout/Provider/Leo.py +0 -469
  36. webscout/Provider/OpenGPT.py +0 -867
  37. webscout/Provider/Xjai.py +0 -230
  38. webscout/Provider/Yepchat.py +0 -478
  39. webscout/Provider/Youchat.py +0 -225
  40. {webscout-4.7.dist-info → webscout-4.8.dist-info}/LICENSE.md +0 -0
  41. {webscout-4.7.dist-info → webscout-4.8.dist-info}/WHEEL +0 -0
  42. {webscout-4.7.dist-info → webscout-4.8.dist-info}/entry_points.txt +0 -0
  43. {webscout-4.7.dist-info → webscout-4.8.dist-info}/top_level.txt +0 -0
webscout/Provider/OpenGPT.py
@@ -1,867 +0,0 @@
- import time
- import uuid
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.support.ui import WebDriverWait
- import click
- import requests
- from requests import get
- from uuid import uuid4
- from re import findall
- from requests.exceptions import RequestException
- from curl_cffi.requests import get, RequestsError
- import g4f
- from random import randint
- from PIL import Image
- import io
- import re
- import json
- import yaml
- from ..AIutel import Optimizers
- from ..AIutel import Conversation
- from ..AIutel import AwesomePrompts, sanitize_stream
- from ..AIbase import Provider, AsyncProvider
- from Helpingai_T2 import Perplexity
- from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
- import logging
- import httpx
- #------------------------------------------------------OpenGPT-----------------------------------------------------------
- class OPENGPT:
-     def __init__(
-         self,
-         assistant_id,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiates OPENGPT
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.session = requests.Session()
-         self.max_tokens_to_sample = max_tokens
-         self.is_conversation = is_conversation
-         self.chat_endpoint = (
-             "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
-         )
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.assistant_id = assistant_id
-         self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
-
-         self.headers = {
-             "authority": self.authority,
-             "accept": "text/event-stream",
-             "accept-language": "en-US,en;q=0.7",
-             "cache-control": "no-cache",
-             "content-type": "application/json",
-             "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
-             "pragma": "no-cache",
-             "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
-             "sec-fetch-site": "same-origin",
-             "sec-gpc": "1",
-             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "messages": [
-                 {
-                     "content": "Hello there",
-                     "additional_kwargs": {},
-                     "type": "human",
-                     "example": false
-                 },
-                 {
-                     "content": "Hello! How can I assist you today?",
-                     "additional_kwargs": {
-                         "agent": {
-                             "return_values": {
-                                 "output": "Hello! How can I assist you today?"
-                             },
-                             "log": "Hello! How can I assist you today?",
-                             "type": "AgentFinish"
-                         }
-                     },
-                     "type": "ai",
-                     "example": false
-                 }]
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         self.session.headers.update(self.headers)
-         self.session.headers.update(
-             dict(
-                 cookie=f"opengpts_user_id={uuid4().__str__()}",
-             )
-         )
-         payload = {
-             "input": [
-                 {
-                     "content": conversation_prompt,
-                     "additional_kwargs": {},
-                     "type": "human",
-                     "example": False,
-                 },
-             ],
-             "assistant_id": self.assistant_id,
-             "thread_id": "",
-         }
-
-         def for_stream():
-             response = self.session.post(
-                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
-             )
-             if (
-                 not response.ok
-                 or not response.headers.get("Content-Type")
-                 == "text/event-stream; charset=utf-8"
-             ):
-                 raise Exception(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-
-             for value in response.iter_lines(
-                 decode_unicode=True,
-                 chunk_size=self.stream_chunk_size,
-             ):
-                 try:
-                     modified_value = re.sub("data:", "", value)
-                     resp = json.loads(modified_value)
-                     if len(resp) == 1:
-                         continue
-                     self.last_response.update(resp[1])
-                     yield value if raw else resp[1]
-                 except json.decoder.JSONDecodeError:
-                     pass
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["content"]
- class AsyncOPENGPT(AsyncProvider):
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiates OPENGPT
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.max_tokens_to_sample = max_tokens
-         self.is_conversation = is_conversation
-         self.chat_endpoint = (
-             "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
-         )
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.last_response = {}
-         self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88"
-         self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
-
-         self.headers = {
-             "authority": self.authority,
-             "accept": "text/event-stream",
-             "accept-language": "en-US,en;q=0.7",
-             "cache-control": "no-cache",
-             "content-type": "application/json",
-             "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
-             "pragma": "no-cache",
-             "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
-             "sec-fetch-site": "same-origin",
-             "sec-gpc": "1",
-             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
-
-     async def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict | AsyncGenerator:
-         """Chat with AI asynchronously
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict|AsyncGenerator : ai content.
-         ```json
-         {
-             "messages": [
-                 {
-                     "content": "Hello there",
-                     "additional_kwargs": {},
-                     "type": "human",
-                     "example": false
-                 },
-                 {
-                     "content": "Hello! How can I assist you today?",
-                     "additional_kwargs": {
-                         "agent": {
-                             "return_values": {
-                                 "output": "Hello! How can I assist you today?"
-                             },
-                             "log": "Hello! How can I assist you today?",
-                             "type": "AgentFinish"
-                         }
-                     },
-                     "type": "ai",
-                     "example": false
-                 }]
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-         self.headers.update(
-             dict(
-                 cookie=f"opengpts_user_id={uuid4().__str__()}",
-             )
-         )
-         payload = {
-             "input": [
-                 {
-                     "content": conversation_prompt,
-                     "additional_kwargs": {},
-                     "type": "human",
-                     "example": False,
-                 },
-             ],
-             "assistant_id": self.assistant_id,
-             "thread_id": "",
-         }
-
-         async def for_stream():
-             async with self.session.stream(
-                 "POST",
-                 self.chat_endpoint,
-                 json=payload,
-                 timeout=self.timeout,
-                 headers=self.headers,
-             ) as response:
-                 if (
-                     not response.is_success
-                     or not response.headers.get("Content-Type")
-                     == "text/event-stream; charset=utf-8"
-                 ):
-                     raise exceptions.FailedToGenerateResponseError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
-                     )
-
-                 async for value in response.aiter_lines():
-                     try:
-                         modified_value = re.sub("data:", "", value)
-                         resp = json.loads(modified_value)
-                         if len(resp) == 1:
-                             continue
-                         self.last_response.update(resp[1])
-                         yield value if raw else resp[1]
-                     except json.decoder.JSONDecodeError:
-                         pass
-
-             self.conversation.update_chat_history(
-                 prompt, await self.get_message(self.last_response)
-             )
-
-         async def for_non_stream():
-             async for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str | AsyncGenerator:
-         """Generate response `str` asynchronously.
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str|AsyncGenerator: Response generated
-         """
-
-         async def for_stream():
-             async_ask = await self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             )
-             async for response in async_ask:
-                 yield await self.get_message(response)
-
-         async def for_non_stream():
-             return await self.get_message(
-                 await self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else await for_non_stream()
-
-     async def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["content"]
-
- class OPENGPTv2(Provider):
-     def __init__(
-         self,
-         generate_new_agents: bool = False,
-         assistant_name: str = "webscout",
-         retrieval_description: str = (
-             "Can be used to look up information that was uploaded to this assistant.\n"
-             "If the user is referencing particular files, that is often a good hint that information may be here.\n"
-             "If the user asks a vague question, they are likely meaning to look up info from this retriever, "
-             "and you should call it!"
-         ),
-         agent_system_message: str = "You are a helpful assistant.",
-         chat_retrieval_llm_type: str = "GPT 3.5 Turbo",
-         chat_retrieval_system_message: str = "You are a helpful assistant.",
-         chatbot_llm_type: str = "GPT 3.5 Turbo",
-         chatbot_system_message: str = "You are a helpful assistant.",
-         enable_action_server: bool = False,
-         enable_ddg_search: bool = False,
-         enable_arxiv: bool = False,
-         enable_press_releases: bool = False,
-         enable_pubmed: bool = False,
-         enable_sec_filings: bool = False,
-         enable_retrieval: bool = False,
-         enable_search_tavily: bool = False,
-         enable_search_short_answer_tavily: bool = False,
-         enable_you_com_search: bool = False,
-         enable_wikipedia: bool = False,
-         is_public: bool = True,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-
-     ):
-         """
-         Initializes the OPENGPTv2 Provider.
-
-         Args:
-             api_endpoint: The API endpoint for OpenGPTs.
-             generate_new_agents: If True, generates new assistant and user IDs.
-             assistant_name: The name of the assistant to create if generating new IDs.
-             agent_type: The type of agent to create.
-             retrieval_description: Description of the retrieval tool.
-             agent_system_message: System message for the agent.
-             chat_retrieval_llm_type: LLM type for chat retrieval.
-             chat_retrieval_system_message: System message for chat retrieval.
-             chatbot_llm_type: LLM type for the chatbot.
-             chatbot_system_message: System message for the chatbot.
-             enable_action_server: Whether to enable the "Action Server by Robocorp" tool.
-             enable_ddg_search: Whether to enable the "Duck Duck Go Search" tool.
-             enable_arxiv: Whether to enable the "Arxiv" tool.
-             enable_press_releases: Whether to enable the "Press Releases (Kay.ai)" tool.
-             enable_pubmed: Whether to enable the "PubMed" tool.
-             enable_sec_filings: Whether to enable the "SEC Filings (Kay.ai)" tool.
-             enable_retrieval: Whether to enable the "Retrieval" tool.
-             enable_search_tavily: Whether to enable the "Search (Tavily)" tool.
-             enable_search_short_answer_tavily: Whether to enable the "Search (short answer, Tavily)" tool.
-             enable_you_com_search: Whether to enable the "You.com Search" tool.
-             enable_wikipedia: Whether to enable the "Wikipedia" tool.
-             is_public: Whether the assistant should be public.
-             is_conversation: Whether to maintain conversation history.
-             max_tokens: Maximum tokens for responses.
-             timeout: Timeout for API requests.
-             intro: Initial prompt.
-             filepath: Path to store conversation history.
-             update_file: Whether to update the conversation history file.
-             proxies: Proxies to use for requests.
-             history_offset: Maximum conversation history size.
-             act: Key for Awesome Prompts to use as intro.
-         """
-         self.api_endpoint = "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
-         self.session = requests.Session()
-         self.ids_file = "openGPT_IDs.txt"
-         agent_type="GPT 3.5 Turbo"
-         (
-             self.assistant_id,
-             self.user_id,
-         ) = self._manage_assistant_and_user_ids(
-             generate_new_agents,
-             assistant_name,
-             agent_type,
-             retrieval_description,
-             agent_system_message,
-             chat_retrieval_llm_type,
-             chat_retrieval_system_message,
-             chatbot_llm_type,
-             chatbot_system_message,
-             enable_action_server,
-             enable_ddg_search,
-             enable_arxiv,
-             enable_press_releases,
-             enable_pubmed,
-             enable_sec_filings,
-             enable_retrieval,
-             enable_search_tavily,
-             enable_search_short_answer_tavily,
-             enable_you_com_search,
-             enable_wikipedia,
-             is_public,
-         )
-         self.last_response = {}
-         self.max_tokens_to_sample = max_tokens
-         self.stream_chunk_size = 64
-         self.timeout = timeout
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation,
-             self.max_tokens_to_sample,
-             filepath,
-             update_file,
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies.update(proxies)
-
-     def _manage_assistant_and_user_ids(
-         self,
-         generate_new_agents: bool = False,
-         assistant_name: str = "New Assistant",
-         agent_type: str = "GPT 3.5 Turbo",
-         retrieval_description: str = (
-             "Can be used to look up information that was uploaded to this assistant.\n"
-             "If the user is referencing particular files, that is often a good hint that information may be here.\n"
-             "If the user asks a vague question, they are likely meaning to look up info from this retriever, "
-             "and you should call it!"
-         ),
-         agent_system_message: str = "You are a helpful assistant.",
-         chat_retrieval_llm_type: str = "GPT 3.5 Turbo",
-         chat_retrieval_system_message: str = "You are a helpful assistant.",
-         chatbot_llm_type: str = "GPT 3.5 Turbo",
-         chatbot_system_message: str = "You are a helpful assistant.",
-         enable_action_server: bool = False,
-         enable_ddg_search: bool = False,
-         enable_arxiv: bool = False,
-         enable_press_releases: bool = False,
-         enable_pubmed: bool = False,
-         enable_sec_filings: bool = False,
-         enable_retrieval: bool = False,
-         enable_search_tavily: bool = False,
-         enable_search_short_answer_tavily: bool = False,
-         enable_you_com_search: bool = False,
-         enable_wikipedia: bool = False,
-         is_public: bool = True,
-     ) -> tuple[str, str]:
-         """
-         Generates or retrieves assistant and user IDs.
-
-         If 'generate_new_agents' is True, new IDs are created and saved to 'openGPT_IDs.txt'.
-         Otherwise, IDs are loaded from 'openGPT_IDs.txt'.
-
-         Args:
-             generate_new_agents: If True, generate new IDs; otherwise, load from the file.
-             assistant_name: The name of the assistant (used when generating new IDs).
-             agent_type: The type of the agent.
-             retrieval_description: Description for the retrieval tool.
-             agent_system_message: The system message for the agent.
-             chat_retrieval_llm_type: The LLM type for chat retrieval.
-             chat_retrieval_system_message: The system message for chat retrieval.
-             chatbot_llm_type: The LLM type for the chatbot.
-             chatbot_system_message: The system message for the chatbot.
-             enable_action_server: Whether to enable the "Action Server by Robocorp" tool.
-             enable_ddg_search: Whether to enable the "Duck Duck Go Search" tool.
-             enable_arxiv: Whether to enable the "Arxiv" tool.
-             enable_press_releases: Whether to enable the "Press Releases (Kay.ai)" tool.
-             enable_pubmed: Whether to enable the "PubMed" tool.
-             enable_sec_filings: Whether to enable the "SEC Filings (Kay.ai)" tool.
-             enable_retrieval: Whether to enable the "Retrieval" tool.
-             enable_search_tavily: Whether to enable the "Search (Tavily)" tool.
-             enable_search_short_answer_tavily: Whether to enable the "Search (short answer, Tavily)" tool.
-             enable_you_com_search: Whether to enable the "You.com Search" tool.
-             enable_wikipedia: Whether to enable the "Wikipedia" tool.
-             is_public: Whether the assistant should be public.
-
-         Returns:
-             A tuple containing the assistant ID and user ID.
-         """
-
-         if generate_new_agents:
-             user_id = str(uuid.uuid4())
-             assistant_url = f"https://opengpts-example-vz4y4ooboq-uc.a.run.app/assistants/{str(uuid.uuid4())}"
-
-             headers = {"Cookie": f"opengpts_user_id={user_id}"}
-
-             tools = []
-             if enable_action_server:
-                 tools.append("Action Server by Robocorp")
-             if enable_ddg_search:
-                 tools.append("DDG Search")
-             if enable_arxiv:
-                 tools.append("Arxiv")
-             if enable_press_releases:
-                 tools.append("Press Releases (Kay.ai)")
-             if enable_pubmed:
-                 tools.append("PubMed")
-             if enable_sec_filings:
-                 tools.append("SEC Filings (Kay.ai)")
-             if enable_retrieval:
-                 tools.append("Retrieval")
-             if enable_search_tavily:
-                 tools.append("Search (Tavily)")
-             if enable_search_short_answer_tavily:
-                 tools.append("Search (short answer, Tavily)")
-             if enable_you_com_search:
-                 tools.append("You.com Search")
-             if enable_wikipedia:
-                 tools.append("Wikipedia")
-
-             payload = {
-                 "name": assistant_name,
-                 "config": {
-                     "configurable": {
-                         "thread_id": "",
-                         "type": "agent",
-                         "type==agent/agent_type": agent_type,
-                         "type==agent/retrieval_description": retrieval_description,
-                         "type==agent/system_message": agent_system_message,
-                         "type==agent/tools": tools,
-                         "type==chat_retrieval/llm_type": chat_retrieval_llm_type,
-                         "type==chat_retrieval/system_message": chat_retrieval_system_message,
-                         "type==chatbot/llm_type": chatbot_llm_type,
-                         "type==chatbot/system_message": chatbot_system_message,
-                     },
-                     "public": is_public,
-                 },
-             }
-
-             response = requests.put(assistant_url, headers=headers, json=payload)
-             response.raise_for_status()
-
-             json_data = response.json()
-             assistant_id = json_data["assistant_id"]
-
-             with open(self.ids_file, "w") as f:  # Overwrite the file with new IDs
-                 f.write(f"Assistant ID: {assistant_id}\nUser ID: {user_id}\n")
-
-             return assistant_id, user_id
-         else:
-             try:
-                 with open(self.ids_file, "r") as f:
-                     lines = f.readlines()
-                     assistant_id = lines[0].split(":")[1].strip()
-                     user_id = lines[1].split(":")[1].strip()
-                     return assistant_id, user_id
-             except FileNotFoundError:
-                 return None, None
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "print('How may I help you today?')"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-         headers = {"Cookie": f"opengpts_user_id={self.user_id}"}
-         payload = {
-             "input": [
-                 {
-                     "content": conversation_prompt,
-                     "additional_kwargs": {},
-                     "type": "human",
-                     "example": False,
-                 },
-             ],
-             "assistant_id": self.assistant_id,
-             "thread_id": "",
-         }
-
-         response = self.session.post(
-             self.api_endpoint, headers=headers, json=payload, stream=stream, timeout=self.timeout
-         )
-         complete_response = ""
-         printed_length = 0
-         initial_responses_to_ignore = 2
-
-         for line in response.iter_lines(decode_unicode=True, chunk_size=1):
-             if line:
-                 try:
-                     content = json.loads(re.sub("data:", "", line))[-1]["content"]
-                     if initial_responses_to_ignore > 0:
-                         initial_responses_to_ignore -= 1
-                     else:
-                         if stream:
-                             print(content[printed_length:], end="", flush=True)
-                             printed_length = len(content)
-                         complete_response = content
-                 except:
-                     continue
-         self.conversation.update_chat_history(prompt, complete_response)
-         self.last_response.update(dict(text=complete_response))
-         return self.last_response
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
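The hunk above removes webscout/Provider/OpenGPT.py entirely, and the file list shows the same for FreeGemini, Geminiflash, Geminipro, Leo, Xjai, Yepchat and Youchat. A minimal upgrade-guard sketch, assuming only the 4.7 module path shown in this diff (the diff does not say what, if anything, replaces these providers in 4.8):

```python
# Hypothetical guard for code that imported the removed provider.
# webscout/Provider/OpenGPT.py exists in 4.7 but is deleted in 4.8,
# so the import below raises ImportError after upgrading.
try:
    from webscout.Provider.OpenGPT import OPENGPT  # present in 4.7 only
except ImportError:
    OPENGPT = None  # removed in webscout 4.8

if OPENGPT is not None:
    # assistant_id is required by OPENGPT.__init__; the UUID below is the
    # default baked into AsyncOPENGPT in the deleted file, used here purely
    # as an illustration.
    bot = OPENGPT(assistant_id="bca37014-6f97-4f2b-8928-81ea8d478d88")
    print(bot.chat("Hello"))
```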