webscout 1.2.4__py3-none-any.whl → 1.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of webscout might be problematic.

webscout/AI.py CHANGED
@@ -21,10 +21,200 @@ import yaml
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
  from Helpingai_T2 import Perplexity
  from typing import Any
  import logging
  #------------------------------------------------------OpenGPT-----------------------------------------------------------
+ class KOBOLDAI(Provider):
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         temperature: float = 1,
+         top_p: float = 1,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+     ):
+         """Instantiate KOBOLDAI
+
+         Args:
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+             temperature (float, optional): Charge of the generated text's randomness. Defaults to 1.
+             top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies (socks). Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.temperature = temperature
+         self.top_p = top_p
+         self.chat_endpoint = (
+             "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
+         )
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.headers = {
+             "Content-Type": "application/json",
+             "Accept": "application/json",
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "token" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         self.session.headers.update(self.headers)
+         payload = {
+             "prompt": conversation_prompt,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 raise Exception(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             message_load = ""
+             for value in response.iter_lines(
+                 decode_unicode=True,
+                 delimiter="" if raw else "event: message\ndata:",
+                 chunk_size=self.stream_chunk_size,
+             ):
+                 try:
+                     resp = json.loads(value)
+                     message_load += self.get_message(resp)
+                     resp["token"] = message_load
+                     self.last_response.update(resp)
+                     yield value if raw else resp
+                 except json.decoder.JSONDecodeError:
+                     pass
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             # let's make use of stream
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("token")
+ #------------------------------------------------------OpenGPT-----------------------------------------------------------
  class OPENGPT:
      def __init__(
          self,
@@ -1237,5 +1427,19 @@ def opengpt(prompt, stream):
      else:
          response_str = opengpt.chat(prompt)
          print(response_str)
+
+ @cli.command()
+ @click.option('--prompt', prompt='Enter your prompt', help='The prompt to send.')
+ @click.option('--stream', is_flag=True, help='Flag for streaming response.')
+ @click.option('--raw', is_flag=True, help='Stream back raw response as received.')
+ @click.option('--optimizer', type=str, help='Prompt optimizer name.')
+ @click.option('--conversationally', is_flag=True, help='Chat conversationally when using optimizer.')
+ def koboldai_cli(prompt, stream, raw, optimizer, conversationally):
+     """Chat with KOBOLDAI using the provided prompt."""
+     koboldai_instance = KOBOLDAI()  # Initialize a KOBOLDAI instance
+     response = koboldai_instance.ask(prompt, stream, raw, optimizer, conversationally)
+     processed_response = koboldai_instance.get_message(response)  # Process the response
+     print(processed_response)
+
  if __name__ == '__main__':
      cli()
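
The new provider exposes the same `Provider` surface as the other webscout AI classes. A minimal usage sketch against the code added above; it assumes network access to the KoboldAI Hugging Face space, and note that each streamed dict carries the message accumulated so far in its `token` field:

```python
from webscout.AI import KOBOLDAI

bot = KOBOLDAI()  # defaults: is_conversation=True, max_tokens=600, timeout=30

# Non-streaming: ask() drains the stream internally and returns the final dict
response = bot.ask("Say hello in one sentence.")
print(bot.get_message(response))  # get_message() reads the "token" key

# Streaming: ask(stream=True) yields dicts whose "token" field grows with
# every chunk, so each print shows the message generated so far
for chunk in bot.ask("Count to five.", stream=True):
    print(bot.get_message(chunk))
```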
webscout/AIutel.py CHANGED
@@ -11,7 +11,7 @@ import click
  from rich.markdown import Markdown
  from rich.console import Console

- appdir = appdirs.AppDirs("pytgpt", "Smartwa")
+ appdir = appdirs.AppDirs("AIWEBS", "vortex")

  default_path = appdir.user_cache_dir

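The practical effect of this rename is that cached files (conversation history, awesome-prompt data) move to a new per-user directory. A minimal sketch to inspect both locations, assuming the `appdirs` package is installed:

```python
import appdirs

# Old cache location (1.2.4) vs. new location (1.2.5)
old_dir = appdirs.AppDirs("pytgpt", "Smartwa").user_cache_dir
new_dir = appdirs.AppDirs("AIWEBS", "vortex").user_cache_dir
print(old_dir)  # e.g. ~/.cache/pytgpt on Linux
print(new_dir)  # e.g. ~/.cache/AIWEBS on Linux
```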
webscout/__init__.py CHANGED
@@ -1,7 +1,7 @@
  """Webscout.

  Search for words, documents, images, videos, news, maps and text translation
- using the DuckDuckGo.com search engine.
+ using Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc. Also contains AI models.
  """

  import logging
@@ -9,7 +9,8 @@ from .webscout_search import WEBS
  from .webscout_search_async import AsyncWEBS
  from .version import __version__
  from .DWEBS import DeepWEBS
- from .offlineAI import GPT4ALL
+ from .AIutel import appdir
+
  __all__ = ["WEBS", "AsyncWEBS", "__version__", "cli"]

  logging.getLogger("webscout").addHandler(logging.NullHandler())
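
One consequence of this change: `from webscout import GPT4ALL` no longer works in 1.2.5, while the replacement provider lives in `webscout.AI`. A quick sketch of the new import surface, as implied by the diff above:

```python
# Imports that remain valid in 1.2.5
from webscout import WEBS, AsyncWEBS, __version__
from webscout.AI import KOBOLDAI

# The offline provider was removed along with webscout/offlineAI.py
try:
    from webscout import GPT4ALL
except ImportError:
    print("GPT4ALL was dropped in 1.2.5; pin webscout==1.2.4 if you need it")
```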
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
- __version__ = "1.2.4"
+ __version__ = "1.2.5"

webscout-1.2.4.dist-info/METADATA → webscout-1.2.5.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 1.2.4
+ Version: 1.2.5
  Summary: Search for words, documents, images, videos, news, maps and text translation using Google, DuckDuckGo.com, yep.com, phind.com, you.com, etc. Also contains AI models
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -45,7 +45,6 @@ Requires-Dist: sse-starlette
  Requires-Dist: termcolor
  Requires-Dist: tiktoken
  Requires-Dist: tldextract
- Requires-Dist: gpt4all
  Requires-Dist: orjson
  Provides-Extra: dev
  Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
@@ -94,7 +93,7 @@ Also contains AI models that you can use
  - [6. `BlackBox` - Search/chat With BlackBox](#6-blackbox---searchchat-with-blackbox)
  - [7. `PERPLEXITY` - Search With PERPLEXITY](#7-perplexity---search-with-perplexity)
  - [8. `OpenGPT` - chat With OPENGPT](#8-opengpt---chat-with-opengpt)
- - [9. `GPT4ALL` - chat offline with Language models using gpt4all from webscout](#9-gpt4all---chat-offline-with-language-models-using-gpt4all-from-webscout)
+ - [9. `KOBOLDAI` - chat with KOBOLDAI](#9-koboldai---chat-with-koboldai)
  - [usage of special .LLM file from webscout (webscout.LLM)](#usage-of-special-llm-file-from-webscout-webscoutllm)
  - [`LLM`](#llm)
@@ -608,36 +607,23 @@ prompt = "tell me about india"
  response_str = opengpt.chat(prompt)
  print(response_str)
  ```
- ### 9. `GPT4ALL` - chat offline with Language models using gpt4all from webscout
+ ### 9. `KOBOLDAI` - chat with KOBOLDAI
  ```python
- from webscout import GPT4ALL
+ from webscout.AI import KOBOLDAI

- # Initialize the GPT4ALL class with your model path and other optional parameters
- gpt4all_instance = GPT4ALL(
-     model="path/to/your/model/file",  # Replace with the actual path to your model file
-     is_conversation=True,
-     max_tokens=800,
-     temperature=0.7,
-     presence_penalty=0,
-     frequency_penalty=1.18,
-     top_p=0.4,
-     intro="Hello, how can I assist you today?",
-     filepath="path/to/conversation/history/file",  # Optional, for conversation history
-     update_file=True,
-     history_offset=10250,
-     act=None  # Optional, for using an awesome prompt as intro
- )
+ # Instantiate the KOBOLDAI class with default parameters
+ koboldai = KOBOLDAI()

- # Generate a response from the AI model
- response = gpt4all_instance.chat(
-     prompt="What is the weather like today?",
-     stream=False,  # Set to True if you want to stream the response
-     optimizer=None,  # Optional, specify an optimizer if needed
-     conversationally=False  # Set to True for conversationally generated responses
- )
+ # Define a prompt to send to the AI
+ prompt = "What is the capital of France?"
+
+ # Use the 'ask' method to get a response from the AI
+ response = koboldai.ask(prompt)
+
+ # Extract and print the message from the response
+ message = koboldai.get_message(response)
+ print(message)

- # Print the generated response
- print(response)
  ```

  ## usage of special .LLM file from webscout (webscout.LLM)
webscout-1.2.4.dist-info/RECORD → webscout-1.2.5.dist-info/RECORD RENAMED
@@ -10,25 +10,24 @@ DeepWEBS/networks/webpage_fetcher.py,sha256=d5paDTB3wa_w6YWmLV7RkpAj8Lh8ztuUuyfe
  DeepWEBS/utilsdw/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  DeepWEBS/utilsdw/enver.py,sha256=vstxg_5P3Rwo1en6oPcuc2SBiATJqxi4C7meGmw5w0M,1754
  DeepWEBS/utilsdw/logger.py,sha256=Z0nFUcEGyU8r28yKiIyvEtO26xxpmJgbvNToTfwZecc,8174
- webscout/AI.py,sha256=CwUCeGnNRL9STd5bAZSyIiLysorBMu065HrkY8UCzAQ,49618
+ webscout/AI.py,sha256=RmDi_24_dPY_LuK1Kjh2xZFMkf_W9Y3VOI2HfFPSs8k,57984
  webscout/AIbase.py,sha256=vQi2ougu5bG-QdmoYmxCQsOg7KTEgG7EF6nZh5qqUGw,2343
- webscout/AIutel.py,sha256=cvsuw57hq3GirAiT-PjqwhAiLPf1urOzDb2szJ4bwmo,24124
+ webscout/AIutel.py,sha256=fNN4mmjXcxjJGq2CVJP1MU2oQ78p8OyExQBjVif6e-k,24123
  webscout/DWEBS.py,sha256=QT-7-dUgWhQ_H7EVZD53AVyXxyskoPMKCkFIpzkN56Q,7332
  webscout/HelpingAI.py,sha256=YeZw0zYVHMcBFFPNdd3_Ghpm9ebt_EScQjHO_IIs4lg,8103
  webscout/LLM.py,sha256=XByJPiATLA_57FBWKw18Xx_PGRCPOj-GJE96aQH1k2Y,3309
- webscout/__init__.py,sha256=auv4OtSXPzH_Bcocya1179UvX4CTLmUqVg3cVXszjaA,457
+ webscout/__init__.py,sha256=iH5ifPtGDQiKL3Uf7EvrQe0U4pkLlFD7abaEhSzhW4A,507
  webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
  webscout/cli.py,sha256=F888fdrFUQgczMBN4yMOSf6Nh-IbvkqpPhDsbnA2FtQ,17059
  webscout/exceptions.py,sha256=4AOO5wexeL96nvUS-badcckcwrPS7UpZyAgB9vknHZE,276
  webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
- webscout/offlineAI.py,sha256=ieF9fQU-bWFZz5aBAQ8ZNxaCj1O1mI_w5AaAM9E3e8Y,7607
  webscout/utils.py,sha256=c_98M4oqpb54pUun3fpGGlCerFD6ZHUbghyp5b7Mwgo,2605
- webscout/version.py,sha256=w3Y48JpCJLB-DvbXBfEkRgyEnrQoRiXGnyHDTl9pG5M,25
+ webscout/version.py,sha256=A8Q2L1VEOcJRDSGB5aSTAg0BmarwaLZZ6kYz3fu5qfk,25
  webscout/webscout_search.py,sha256=3_lli-hDb8_kCGwscK29xuUcOS833ROgpNhDzrxh0dk,3085
  webscout/webscout_search_async.py,sha256=Y5frH0k3hLqBCR-8dn7a_b7EvxdYxn6wHiKl3jWosE0,40670
- webscout-1.2.4.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
- webscout-1.2.4.dist-info/METADATA,sha256=Zh6yfh9n8U_C2QZUYkpluwAk04H7Hj2bcsyd0EHfP9w,23100
- webscout-1.2.4.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- webscout-1.2.4.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
- webscout-1.2.4.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
- webscout-1.2.4.dist-info/RECORD,,
+ webscout-1.2.5.dist-info/LICENSE.md,sha256=mRVwJuT4SXC5O93BFdsfWBjlXjGn2Np90Zm5SocUzM0,3150
+ webscout-1.2.5.dist-info/METADATA,sha256=tSAdxg14bWG8fdDK9r_xHP62eWb6WoiEUEnDcud_488,22276
+ webscout-1.2.5.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ webscout-1.2.5.dist-info/entry_points.txt,sha256=8-93eRslYrzTHs5E-6yFRJrve00C9q-SkXJD113jzRY,197
+ webscout-1.2.5.dist-info/top_level.txt,sha256=OD5YKy6Y3hldL7SmuxsiEDxAG4LgdSSWwzYk22MF9fk,18
+ webscout-1.2.5.dist-info/RECORD,,
webscout/offlineAI.py DELETED
@@ -1,206 +0,0 @@
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from gpt4all import GPT4All
- from gpt4all.gpt4all import empty_chat_session
- from gpt4all.gpt4all import append_extension_if_missing
-
-
- import logging
-
- my_logger = logging.getLogger("gpt4all")
- my_logger.setLevel(logging.CRITICAL)
-
-
- class GPT4ALL(Provider):
-     def __init__(
-         self,
-         model: str,
-         is_conversation: bool = True,
-         max_tokens: int = 800,
-         temperature: float = 0.7,
-         presence_penalty: int = 0,
-         frequency_penalty: int = 1.18,
-         top_p: float = 0.4,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         history_offset: int = 10250,
-         act: str = None,
-     ):
-         """Instantiates GPT4ALL
-
-         Args:
-             model (str, optional): Path to LLM model (.gguf or .bin).
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800.
-             temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.7.
-             presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0.
-             frequency_penalty (int, optional): Chances of word being repeated. Defaults to 1.18.
-             top_p (float, optional): Sampling threshold during inference time. Defaults to 0.4.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-         """
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.model = model
-         self.temperature = temperature
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.top_p = top_p
-         self.last_response = {}
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-         def get_model_name_path():
-             import os
-             from pathlib import Path
-
-             initial_model_path = Path(append_extension_if_missing(model))
-             if initial_model_path.exists:
-                 if not initial_model_path.is_absolute():
-                     initial_model_path = Path(os.getcwd()) / initial_model_path
-                 return os.path.split(initial_model_path.as_posix())
-             else:
-                 raise FileNotFoundError(
-                     "File does not exist " + initial_model_path.as_posix()
-                 )
-
-         model_dir, model_name = get_model_name_path()
-
-         self.gpt4all = GPT4All(
-             model_name=model_name,
-             model_path=model_dir,
-             allow_download=False,
-             verbose=False,
-         )
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "How may I help you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         def for_stream():
-             response = self.gpt4all.generate(
-                 prompt=conversation_prompt,
-                 max_tokens=self.max_tokens_to_sample,
-                 temp=self.temperature,
-                 top_p=self.top_p,
-                 repeat_penalty=self.frequency_penalty,
-                 streaming=True,
-             )
-
-             message_load: str = ""
-             for token in response:
-                 message_load += token
-                 resp: dict = dict(text=message_load)
-                 yield token if raw else resp
-                 self.last_response.update(resp)
-
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-             self.gpt4all.current_chat_session = empty_chat_session()
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (str): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
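
Anyone who depended on the deleted offline provider can call the gpt4all library directly, since the removed class was a thin wrapper around it. A minimal sketch under that assumption; "model.gguf" is a placeholder for whatever local model file you have:

```python
from gpt4all import GPT4All

# "model.gguf" is a placeholder; point model_name/model_path at a local file,
# mirroring the allow_download=False behaviour of the removed wrapper
model = GPT4All(model_name="model.gguf", model_path=".", allow_download=False)
print(model.generate("Hello there!", max_tokens=64, temp=0.7, top_p=0.4))
```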