webscout-4.1.tar.gz → webscout-4.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (75)
  1. {webscout-4.1/webscout.egg-info → webscout-4.3}/PKG-INFO +38 -1
  2. {webscout-4.1 → webscout-4.3}/README.md +37 -0
  3. {webscout-4.1 → webscout-4.3}/setup.py +1 -1
  4. {webscout-4.1 → webscout-4.3}/webscout/AIauto.py +2 -2
  5. {webscout-4.1 → webscout-4.3}/webscout/AIutel.py +8 -9
  6. {webscout-4.1 → webscout-4.3}/webscout/Local/_version.py +1 -1
  7. webscout-4.3/webscout/Provider/FreeGemini.py +169 -0
  8. webscout-4.3/webscout/Provider/Llama.py +211 -0
  9. webscout-4.3/webscout/Provider/OLLAMA.py +187 -0
  10. {webscout-4.1 → webscout-4.3}/webscout/Provider/__init__.py +6 -5
  11. {webscout-4.1 → webscout-4.3}/webscout/__init__.py +2 -1
  12. {webscout-4.1 → webscout-4.3}/webscout/async_providers.py +0 -2
  13. {webscout-4.1 → webscout-4.3}/webscout/version.py +1 -1
  14. {webscout-4.1 → webscout-4.3}/webscout/webai.py +14 -0
  15. {webscout-4.1 → webscout-4.3/webscout.egg-info}/PKG-INFO +38 -1
  16. {webscout-4.1 → webscout-4.3}/webscout.egg-info/SOURCES.txt +3 -1
  17. webscout-4.1/webscout/Provider/Llama2.py +0 -437
  18. {webscout-4.1 → webscout-4.3}/LICENSE.md +0 -0
  19. {webscout-4.1 → webscout-4.3}/setup.cfg +0 -0
  20. {webscout-4.1 → webscout-4.3}/webscout/AIbase.py +0 -0
  21. {webscout-4.1 → webscout-4.3}/webscout/DWEBS.py +0 -0
  22. {webscout-4.1 → webscout-4.3}/webscout/Extra/__init__.py +0 -0
  23. {webscout-4.1 → webscout-4.3}/webscout/Extra/autollama.py +0 -0
  24. {webscout-4.1 → webscout-4.3}/webscout/Extra/gguf.py +0 -0
  25. {webscout-4.1 → webscout-4.3}/webscout/Extra/weather.py +0 -0
  26. {webscout-4.1 → webscout-4.3}/webscout/Extra/weather_ascii.py +0 -0
  27. {webscout-4.1 → webscout-4.3}/webscout/LLM.py +0 -0
  28. {webscout-4.1 → webscout-4.3}/webscout/Local/__init__.py +0 -0
  29. {webscout-4.1 → webscout-4.3}/webscout/Local/formats.py +0 -0
  30. {webscout-4.1 → webscout-4.3}/webscout/Local/model.py +0 -0
  31. {webscout-4.1 → webscout-4.3}/webscout/Local/rawdog.py +0 -0
  32. {webscout-4.1 → webscout-4.3}/webscout/Local/samplers.py +0 -0
  33. {webscout-4.1 → webscout-4.3}/webscout/Local/thread.py +0 -0
  34. {webscout-4.1 → webscout-4.3}/webscout/Local/utils.py +0 -0
  35. {webscout-4.1 → webscout-4.3}/webscout/Provider/BasedGPT.py +0 -0
  36. {webscout-4.1 → webscout-4.3}/webscout/Provider/Berlin4h.py +0 -0
  37. {webscout-4.1 → webscout-4.3}/webscout/Provider/Blackboxai.py +0 -0
  38. {webscout-4.1 → webscout-4.3}/webscout/Provider/ChatGPTUK.py +0 -0
  39. {webscout-4.1 → webscout-4.3}/webscout/Provider/Cohere.py +0 -0
  40. {webscout-4.1 → webscout-4.3}/webscout/Provider/Deepinfra.py +0 -0
  41. {webscout-4.1 → webscout-4.3}/webscout/Provider/Deepseek.py +0 -0
  42. {webscout-4.1 → webscout-4.3}/webscout/Provider/Gemini.py +0 -0
  43. {webscout-4.1 → webscout-4.3}/webscout/Provider/Geminiflash.py +0 -0
  44. {webscout-4.1 → webscout-4.3}/webscout/Provider/Geminipro.py +0 -0
  45. {webscout-4.1 → webscout-4.3}/webscout/Provider/Groq.py +0 -0
  46. {webscout-4.1 → webscout-4.3}/webscout/Provider/Koboldai.py +0 -0
  47. {webscout-4.1 → webscout-4.3}/webscout/Provider/Leo.py +0 -0
  48. {webscout-4.1 → webscout-4.3}/webscout/Provider/OpenGPT.py +0 -0
  49. {webscout-4.1 → webscout-4.3}/webscout/Provider/Openai.py +0 -0
  50. {webscout-4.1 → webscout-4.3}/webscout/Provider/Perplexity.py +0 -0
  51. {webscout-4.1 → webscout-4.3}/webscout/Provider/Phind.py +0 -0
  52. {webscout-4.1 → webscout-4.3}/webscout/Provider/Poe.py +0 -0
  53. {webscout-4.1 → webscout-4.3}/webscout/Provider/Reka.py +0 -0
  54. {webscout-4.1 → webscout-4.3}/webscout/Provider/ThinkAnyAI.py +0 -0
  55. {webscout-4.1 → webscout-4.3}/webscout/Provider/VTLchat.py +0 -0
  56. {webscout-4.1 → webscout-4.3}/webscout/Provider/Xjai.py +0 -0
  57. {webscout-4.1 → webscout-4.3}/webscout/Provider/Yepchat.py +0 -0
  58. {webscout-4.1 → webscout-4.3}/webscout/Provider/Youchat.py +0 -0
  59. {webscout-4.1 → webscout-4.3}/webscout/YTdownloader.py +0 -0
  60. {webscout-4.1 → webscout-4.3}/webscout/__main__.py +0 -0
  61. {webscout-4.1 → webscout-4.3}/webscout/cli.py +0 -0
  62. {webscout-4.1 → webscout-4.3}/webscout/exceptions.py +0 -0
  63. {webscout-4.1 → webscout-4.3}/webscout/g4f.py +0 -0
  64. {webscout-4.1 → webscout-4.3}/webscout/models.py +0 -0
  65. {webscout-4.1 → webscout-4.3}/webscout/tempid.py +0 -0
  66. {webscout-4.1 → webscout-4.3}/webscout/transcriber.py +0 -0
  67. {webscout-4.1 → webscout-4.3}/webscout/utils.py +0 -0
  68. {webscout-4.1 → webscout-4.3}/webscout/voice.py +0 -0
  69. {webscout-4.1 → webscout-4.3}/webscout/webscout_search.py +0 -0
  70. {webscout-4.1 → webscout-4.3}/webscout/webscout_search_async.py +0 -0
  71. {webscout-4.1 → webscout-4.3}/webscout/websx_search.py +0 -0
  72. {webscout-4.1 → webscout-4.3}/webscout.egg-info/dependency_links.txt +0 -0
  73. {webscout-4.1 → webscout-4.3}/webscout.egg-info/entry_points.txt +0 -0
  74. {webscout-4.1 → webscout-4.3}/webscout.egg-info/requires.txt +0 -0
  75. {webscout-4.1 → webscout-4.3}/webscout.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 4.1
+ Version: 4.3
  Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -1463,6 +1463,43 @@ print(response)
  
  ### 21. GeminiFlash and geminipro
  **Usage similar to other providers**
+ 
+ ### 22. `Ollama` - chat will AI models locally
+ ```python
+ from webscout import OLLAMA
+ ollama_provider = OLLAMA(model="qwen2:0.5b")
+ response = ollama_provider.chat("What is the meaning of life?")
+ print(response)
+ ```
+ 
+ ### 22. GROQ
+ ```python
+ from webscout import GROQ
+ ai = GROQ(api_key="")
+ response = ai.chat("What is the meaning of life?")
+ print(response)
+ 
+ ```
+ 
+ ### 23. Freegemini - chat with gemini for free
+ ```python
+ from webscout import FreeGemini
+ ai = FreeGemini()
+ response = ai.chat("What is the meaning of life?")
+ print(response)
+ ```
+ 
+ ### 24. LLama 70b - chat with meta's llama 3 70b
+ ```python
+ 
+ from webscout import LLAMA
+ 
+ llama = LLAMA()
+ 
+ r = llama.chat("What is the meaning of life?")
+ print(r)
+ ```
+ 
  ### `LLM`
  ```python
  from webscout.LLM import LLM
@@ -1397,6 +1397,43 @@ print(response)
  
  ### 21. GeminiFlash and geminipro
  **Usage similar to other providers**
+ 
+ ### 22. `Ollama` - chat will AI models locally
+ ```python
+ from webscout import OLLAMA
+ ollama_provider = OLLAMA(model="qwen2:0.5b")
+ response = ollama_provider.chat("What is the meaning of life?")
+ print(response)
+ ```
+ 
+ ### 22. GROQ
+ ```python
+ from webscout import GROQ
+ ai = GROQ(api_key="")
+ response = ai.chat("What is the meaning of life?")
+ print(response)
+ 
+ ```
+ 
+ ### 23. Freegemini - chat with gemini for free
+ ```python
+ from webscout import FreeGemini
+ ai = FreeGemini()
+ response = ai.chat("What is the meaning of life?")
+ print(response)
+ ```
+ 
+ ### 24. LLama 70b - chat with meta's llama 3 70b
+ ```python
+ 
+ from webscout import LLAMA
+ 
+ llama = LLAMA()
+ 
+ r = llama.chat("What is the meaning of life?")
+ print(r)
+ ```
+ 
  ### `LLM`
  ```python
  from webscout.LLM import LLM
@@ -5,7 +5,7 @@ with open("README.md", encoding="utf-8") as f:
  
  setup(
      name="webscout",
-     version="4.1",
+     version="4.3",
      description="Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more",
      long_description=README,
      long_description_content_type="text/markdown",
@@ -1,8 +1,8 @@
  from webscout.AIbase import Provider, AsyncProvider
  from webscout.Provider.ThinkAnyAI import ThinkAnyAI
  from webscout.Provider.Xjai import Xjai
- from webscout.Provider.Llama2 import LLAMA2
- from webscout.Provider.Llama2 import AsyncLLAMA2
+ from webscout.Provider.Llama import LLAMA2
+ from webscout.Provider.Llama import AsyncLLAMA2
  from webscout.Provider.Leo import LEO
  from webscout.Provider.Leo import AsyncLEO
  from webscout.Provider.Koboldai import KOBOLDAI
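
Note that only the module was renamed (Llama2.py → Llama.py) while the `LLAMA2` and `AsyncLLAMA2` class names were kept, so code importing from the old module path breaks on 4.3. A minimal compatibility shim (hypothetical, not part of the package) that works on both sides of the rename:

```python
# Hypothetical shim: prefer the new module path (webscout >= 4.3) and fall
# back to the old one (webscout <= 4.1); the class names did not change.
try:
    from webscout.Provider.Llama import LLAMA2, AsyncLLAMA2
except ImportError:
    from webscout.Provider.Llama2 import LLAMA2, AsyncLLAMA2
```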
@@ -52,6 +52,7 @@ webai = [
      "vtlchat",
      "geminiflash",
      "geminipro",
+     "ollama"
  ]
  
  gpt4free_providers = [
@@ -196,7 +197,7 @@ class Conversation:
          """
          self.status = status
          self.max_tokens_to_sample = max_tokens
-         self.chat_history = ""
+         self.chat_history = self.intro
          self.history_format = "\nUser : %(user)s\nLLM :%(llm)s"
          self.file = filepath
          self.update_file = update_file
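
The effect of this one-line change: the in-memory history now starts seeded with the intro prompt rather than an empty string, so the intro reaches the model even when no history file is configured. A rough sketch of the observable difference (the positional constructor arguments are inferred from the call sites elsewhere in this diff):

```python
from webscout.AIutel import Conversation

# No filepath, so nothing is loaded from disk.
conv = Conversation(True, 600, None, True)

# webscout 4.1: conv.chat_history == ""
# webscout 4.3: conv.chat_history == conv.intro (default or AwesomePrompts intro)
print(conv.chat_history[:60])
```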
@@ -219,17 +220,16 @@ class Conversation:
              ), f"File '{filepath}' does not exist"
          if not os.path.isfile(filepath):
              logging.debug(f"Creating new chat-history file - '{filepath}'")
-             with open(filepath, "w") as fh:  # Try creating new file
-                 # lets add intro here
+             with open(filepath, "w", encoding="utf-8") as fh:  # Try creating new file with UTF-8 encoding
                  fh.write(self.intro)
          else:
              logging.debug(f"Loading conversation from '{filepath}'")
-             with open(filepath) as fh:
+             with open(filepath, encoding="utf-8") as fh:  # Open with UTF-8 encoding
                  file_contents = fh.readlines()
                  if file_contents:
                      self.intro = file_contents[0]  # Presume first line is the intro.
                  self.chat_history = "\n".join(file_contents[1:])
- 
+ 
      def __trim_chat_history(self, chat_history: str, intro: str) -> str:
          """Ensures the len(prompt) and max_tokens_to_sample is not > 4096"""
          len_of_intro = len(intro)
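
The common thread in this and the later history-file hunks is adding `encoding="utf-8"` to every `open()` that touches the chat history. Without it Python falls back to the locale's preferred encoding (often cp1252 on Windows), and writing non-ASCII model output raises `UnicodeEncodeError`. A standalone illustration of the failure mode, independent of webscout:

```python
# A model reply containing non-ASCII text.
reply = "naïve résumé 你好"

# 4.3 behavior: explicit UTF-8 succeeds on every platform.
with open("history.txt", "w", encoding="utf-8") as fh:
    fh.write(reply)

# 4.1 behavior: encoding defaults to locale.getpreferredencoding();
# under a cp1252 locale the CJK characters raise UnicodeEncodeError.
# with open("history.txt", "w") as fh:
#     fh.write(reply)
```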
@@ -242,7 +242,6 @@ class Conversation:
              # Remove head of total (n) of chat_history
              trimmed_chat_history = chat_history[truncate_at:]
              return "... " + trimmed_chat_history
-             # print(len(self.chat_history))
          else:
              return chat_history
  
@@ -280,12 +279,12 @@ class Conversation:
          new_history = self.history_format % dict(user=prompt, llm=response)
          if self.file and self.update_file:
              if os.path.exists(self.file):
-                 with open(self.file, "w") as fh:
+                 with open(self.file, "w", encoding="utf-8") as fh:  # Specify UTF-8 encoding
                      fh.write(self.intro + "\n" + new_history)
              else:
-                 with open(self.file, "a") as fh:
+                 with open(self.file, "a", encoding="utf-8") as fh:  # Specify UTF-8 encoding
                      fh.write(new_history)
-         self.chat_history += new_history
+         self.chat_history += new_history
  
  
  
@@ -1,3 +1,3 @@
  from llama_cpp import __version__ as __llama_cpp_version__
  
- __version__ = '4.1'
+ __version__ = '4.3'
@@ -0,0 +1,169 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+ 
+ class FreeGemini(Provider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 60,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiates FreeGemini
+ 
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.chat_endpoint = "https://api.safone.dev/bard"
+         self.timeout = timeout
+         self.last_response = {}
+ 
+         self.headers = {
+             "accept": "application/json",
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+         }
+ 
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+ 
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+ 
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+ 
+         self.session.headers.update(self.headers)
+         payload = {"message": conversation_prompt}
+ 
+         response = self.session.post(
+             self.chat_endpoint, json=payload, timeout=self.timeout
+         )
+ 
+         if not response.ok:
+             raise exceptions.FailedToGenerateResponseError(
+                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+             )
+ 
+         resp = response.json()
+         message_load = self.get_message(resp)
+         self.conversation.update_chat_history(
+             prompt, message_load
+         )
+         return resp
+ 
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+         return self.get_message(
+             self.ask(
+                 prompt,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+             )
+         )
+ 
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+ 
+         Args:
+             response (dict): Response generated by `self.ask`
+ 
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["message"]
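
Going by the class as added above, `ask()` posts to the `api.safone.dev/bard` endpoint and returns the raw JSON dict, while `chat()` extracts its `"message"` field. A minimal usage sketch based on that:

```python
from webscout import FreeGemini

ai = FreeGemini(timeout=60)

raw = ai.ask("What is the meaning of life?")       # full JSON payload
print(raw["message"])                              # same field chat() extracts
print(ai.chat("Summarize that in one sentence."))  # message string only
```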
@@ -0,0 +1,211 @@
+ import time
+ import uuid
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+ import click
+ import requests
+ from requests import get
+ from uuid import uuid4
+ from re import findall
+ from requests.exceptions import RequestException
+ from curl_cffi.requests import get, RequestsError
+ import g4f
+ from random import randint
+ from PIL import Image
+ import io
+ import re
+ import json
+ import yaml
+ from ..AIutel import Optimizers
+ from ..AIutel import Conversation
+ from ..AIutel import AwesomePrompts, sanitize_stream
+ from ..AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from typing import Any, AsyncGenerator, Dict
+ import logging
+ import httpx
+ 
+ class LLAMA(Provider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiates LLAMA
+ 
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             model (str, optional): LLM model name. Defaults to "llama3-70b-8192".
+         """
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = "llama3-70b-8192",
+         self.api_endpoint = "https://api.safone.dev/llama"
+         self.headers = {
+             "accept": "application/json",
+         }
+ 
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session = requests.Session()
+         self.session.proxies = proxies
+ 
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict | AsyncGenerator:
+         """Chat with AI
+ 
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict|AsyncGenerator : ai content
+         ```json
+         {
+             "text" : "print('How may I help you today?')"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+ 
+         self.session.headers.update(self.headers)
+         payload = {
+             "message": conversation_prompt
+         }
+ 
+         def for_stream():
+             response = self.session.get(
+                 self.api_endpoint, params=payload, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+ 
+             message_load = ""
+             for chunk in response.iter_lines():
+                 try:
+                     resp = json.loads(chunk)
+                     message_load += resp['message']
+                     yield chunk if raw else dict(text=message_load)
+                     self.last_response.update(resp)
+                 except:
+                     pass
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+ 
+         def for_non_stream():
+             response = self.session.get(
+                 self.api_endpoint, params=payload, stream=False, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+             resp = response.json()
+             self.last_response.update(resp)
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+             return resp
+ 
+         return for_stream() if stream else for_non_stream()
+ 
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str | AsyncGenerator:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+ 
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+ 
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+ 
+         return for_stream() if stream else for_non_stream()
+ 
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+ 
+         Args:
+             response (dict): Response generated by `self.ask`
+ 
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["message"]
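
One wrinkle in the code above: the streaming path of `ask()` yields dicts keyed `"text"` (the reply accumulated so far), while `get_message()` reads `"message"`, so `chat(stream=True)` would fail on that key as written; the non-streaming path round-trips cleanly. A usage sketch reflecting what the diff shows:

```python
from webscout import LLAMA

llama = LLAMA()

# Non-streaming: chat() returns the reply string via get_message().
print(llama.chat("What is the meaning of life?"))

# Streaming: iterate ask(stream=True) directly; each yielded dict carries
# the progressively accumulated reply under "text".
for chunk in llama.ask("Tell me a short story.", stream=True):
    print(chunk["text"])
```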