webscout 1.1.5 (py3-none-any.whl) → 1.1.7 (py3-none-any.whl)
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AI.py +938 -40
- webscout/AIbase.py +70 -0
- webscout/AIutel.py +655 -0
- webscout/HelpingAI.py +192 -192
- webscout/LLM.py +67 -67
- webscout/utils.py +52 -11
- webscout/version.py +1 -1
- {webscout-1.1.5.dist-info → webscout-1.1.7.dist-info}/METADATA +92 -17
- webscout-1.1.7.dist-info/RECORD +20 -0
- webscout-1.1.5.dist-info/RECORD +0 -18
- {webscout-1.1.5.dist-info → webscout-1.1.7.dist-info}/LICENSE.md +0 -0
- {webscout-1.1.5.dist-info → webscout-1.1.7.dist-info}/WHEEL +0 -0
- {webscout-1.1.5.dist-info → webscout-1.1.7.dist-info}/entry_points.txt +0 -0
- {webscout-1.1.5.dist-info → webscout-1.1.7.dist-info}/top_level.txt +0 -0
webscout/AI.py
CHANGED
@@ -4,10 +4,8 @@ from selenium.webdriver.chrome.options import Options
 from selenium.webdriver.common.by import By
 from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.support.ui import WebDriverWait
-from halo import Halo
 import click
 import requests
-import json
 from requests import get
 from uuid import uuid4
 from re import findall
@@ -17,47 +15,908 @@ import g4f
 from random import randint
 from PIL import Image
 import io
+import re
+import json
+import yaml
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from Helpingai_T2 import Perplexity
+from typing import Any
+import logging
+#------------------------------------------------------OpenGPT-----------------------------------------------------------
+class OPENGPT:
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates OPENGPT
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.max_tokens_to_sample = max_tokens
+        self.is_conversation = is_conversation
+        self.chat_endpoint = (
+            "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
+        )
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88"
+        self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
+
+        self.headers = {
+            "authority": self.authority,
+            "accept": "text/event-stream",
+            "accept-language": "en-US,en;q=0.7",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
+            "pragma": "no-cache",
+            "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
+            "sec-fetch-site": "same-origin",
+            "sec-gpc": "1",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "messages": [
+                {
+                    "content": "Hello there",
+                    "additional_kwargs": {},
+                    "type": "human",
+                    "example": false
+                },
+                {
+                    "content": "Hello! How can I assist you today?",
+                    "additional_kwargs": {
+                        "agent": {
+                            "return_values": {
+                                "output": "Hello! How can I assist you today?"
+                            },
+                            "log": "Hello! How can I assist you today?",
+                            "type": "AgentFinish"
+                        }
+                    },
+                    "type": "ai",
+                    "example": false
+                }]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        self.session.headers.update(
+            dict(
+                cookie=f"opengpts_user_id={uuid4().__str__()}",
+            )
+        )
+        payload = {
+            "input": [
+                {
+                    "content": conversation_prompt,
+                    "additional_kwargs": {},
+                    "type": "human",
+                    "example": False,
+                },
+            ],
+            "assistant_id": self.assistant_id,
+            "thread_id": "",
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if (
+                not response.ok
+                or not response.headers.get("Content-Type")
+                == "text/event-stream; charset=utf-8"
+            ):
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    modified_value = re.sub("data:", "", value)
+                    resp = json.loads(modified_value)
+                    if len(resp) == 1:
+                        continue
+                    self.last_response.update(resp[1])
+                    yield value if raw else resp[1]
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["content"]
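
The OPENGPT class above establishes the ask/chat/get_message pattern that every provider added in this release follows. A minimal usage sketch, not part of the diff, assuming webscout 1.1.7 is installed and the hard-coded opengpts demo endpoint is still reachable:

```python
# Sketch only: exercising the OPENGPT class exactly as added in this diff.
from webscout.AI import OPENGPT

bot = OPENGPT(is_conversation=True, max_tokens=600, timeout=30)

# Non-streaming: ask() drains the event stream internally and returns the
# last accumulated response dict; get_message() reads its "content" key.
response = bot.ask("Hello there")
print(bot.get_message(response))

# Streaming: chat(stream=True) yields the extracted message per SSE event.
# Depending on the backend, each event may carry the message state so far
# rather than an increment, so chunks can repeat earlier text.
for chunk in bot.chat("Hello there", stream=True):
    print(chunk)
```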
+#------------------------------------------------------PERPLEXITY--------------------------------------------------------
+class PERPLEXITY:
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 8000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        quiet: bool = False,
+    ):
+        """Instantiates PERPLEXITY
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
+        """
+        logging.getLogger("websocket").setLevel(logging.ERROR)
+        self.session = requests.Session()
+        self.max_tokens_to_sample = max_tokens
+        self.is_conversation = is_conversation
+        self.last_response = {}
+        self.web_results: dict = {}
+        self.quiet = quiet
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "status": "pending",
+            "uuid": "3604dfcc-611f-4b7d-989d-edca2a7233c7",
+            "read_write_token": null,
+            "frontend_context_uuid": "f6d43119-5231-481d-b692-f52e1f52d2c6",
+            "final": false,
+            "backend_uuid": "a6d6ec9e-da69-4841-af74-0de0409267a8",
+            "media_items": [],
+            "widget_data": [],
+            "knowledge_cards": [],
+            "expect_search_results": "false",
+            "mode": "concise",
+            "search_focus": "internet",
+            "gpt4": false,
+            "display_model": "turbo",
+            "attachments": null,
+            "answer": "",
+            "web_results": [],
+            "chunks": [],
+            "extra_web_results": []
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        def for_stream():
+            for response in Perplexity().generate_answer(conversation_prompt):
+                yield json.dumps(response) if raw else response
+                self.last_response.update(response)
+
+            self.conversation.update_chat_history(
+                prompt,
+                self.get_message(self.last_response),
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        text_str: str = response.get("answer", "")
+
+        def update_web_results(web_results: list) -> None:
+            for index, results in enumerate(web_results, start=1):
+                self.web_results[str(index) + ". " + results["name"]] = dict(
+                    url=results.get("url"), snippet=results.get("snippet")
+                )
+
+        if response.get("text"):
+            # last chunk
+            target: dict[str, Any] = json.loads(response.get("text"))
+            text_str = target.get("answer")
+            web_results: list[dict] = target.get("web_results")
+            self.web_results.clear()
+            update_web_results(web_results)
+
+            return (
+                text_str
+                if self.quiet or not self.web_results
+                else text_str + "\n\n# WEB-RESULTS\n\n" + yaml.dump(self.web_results)
+            )
+
+        else:
+            if str(response.get("expect_search_results")).lower() == "true":
+                return (
+                    text_str
+                    if self.quiet
+                    else text_str
+                    + "\n\n# WEB-RESULTS\n\n"
+                    + yaml.dump(response.get("web_results"))
+                )
+            else:
+                return text_str
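
PERPLEXITY differs from the other additions in that it delegates the network work to the third-party Helpingai_T2 package imported at the top of the file, wrapping it only with conversation history and web-result formatting. A hedged sketch of typical use, assuming Helpingai_T2 is installed:

```python
# Sketch only: PERPLEXITY calls Perplexity().generate_answer() from
# Helpingai_T2 under the hood, so that package must be importable.
from webscout.AI import PERPLEXITY

ai = PERPLEXITY(quiet=False)  # quiet=True drops the "# WEB-RESULTS" YAML block
print(ai.chat("What is the capital of France?"))
```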
+#------------------------------------------------------BLACKBOXAI--------------------------------------------------------
+class BLACKBOXAI:
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 8000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = None,
+    ):
+        """Instantiates BLACKBOXAI
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): Model name. Defaults to "Phind Model".
+        """
+        self.session = requests.Session()
+        self.max_tokens_to_sample = max_tokens
+        self.is_conversation = is_conversation
+        self.chat_endpoint = "https://www.blackbox.ai/api/chat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.previewToken: str = None
+        self.userId: str = ""
+        self.codeModelMode: bool = True
+        self.id: str = ""
+        self.agentMode: dict = {}
+        self.trendingAgentMode: dict = {}
+        self.isMicMode: bool = False
 
-
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": "",
+            "Accept": "*/*",
+            "Accept-Encoding": "Identity",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "print('How may I help you today?')"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+            "messages": [
+                # json.loads(prev_messages),
+                {"content": conversation_prompt, "role": "user"}
+            ],
+            "id": self.id,
+            "previewToken": self.previewToken,
+            "userId": self.userId,
+            "codeModelMode": self.codeModelMode,
+            "agentMode": self.agentMode,
+            "trendingAgentMode": self.trendingAgentMode,
+            "isMicMode": self.isMicMode,
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if (
+                not response.ok
+                or not response.headers.get("Content-Type")
+                == "text/plain; charset=utf-8"
+            ):
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            streaming_text = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+                delimiter="\n",
+            ):
+                try:
+                    if bool(value):
+                        streaming_text += value + ("\n" if stream else "")
+
+                        resp = dict(text=streaming_text)
+                        self.last_response.update(resp)
+                        yield value if raw else resp
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+    @staticmethod
+    def chat_cli(prompt):
+        """Sends a request to the BLACKBOXAI API and processes the response."""
+        blackbox_ai = BLACKBOXAI()  # Initialize a BLACKBOXAI instance
+        response = blackbox_ai.ask(prompt)  # Perform a chat with the given prompt
+        processed_response = blackbox_ai.get_message(response)  # Process the response
+        print(processed_response)
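
Note how BLACKBOXAI's for_stream() appends each received line to streaming_text and yields a dict built from the accumulated string, so streamed chunks are cumulative rather than incremental. A short sketch, not part of the diff, assuming the hard-coded blackbox.ai endpoint responds:

```python
# Sketch only: exercising BLACKBOXAI as added above.
from webscout.AI import BLACKBOXAI

ai = BLACKBOXAI()
print(ai.chat("Write a hello-world in Python"))  # non-streaming: final text

# Streaming: each yielded string is the full text received so far,
# because get_message() reads the cumulative "text" key.
for text_so_far in ai.chat("Explain recursion briefly", stream=True):
    print(text_so_far)
```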
+#------------------------------------------------------phind-------------------------------------------------------------
 class PhindSearch:
-    def __init__(
-        self
-
-
-
-
-
-
-
-
-
-
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 8000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "Phind Model",
+        quiet: bool = False,
+    ):
+        """Instantiates PHIND
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): Model name. Defaults to "Phind Model".
+            quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
+        """
+        self.session = requests.Session()
+        self.max_tokens_to_sample = max_tokens
+        self.is_conversation = is_conversation
+        self.chat_endpoint = "https://https.extension.phind.com/agent/"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.quiet = quiet
 
-
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": "",
+            "Accept": "*/*",
+            "Accept-Encoding": "Identity",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
+            "object": "chat.completion.chunk",
+            "created": 1706775384,
+            "model": "trt-llm-phind-model-serving",
+            "choices": [
+                {
+                    "index": 0,
+                    "delta": {
+                        "content": "Hello! How can I assist you with your programming today?"
+                    },
+                    "finish_reason": null
+                }
+            ]
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self.session.headers.update(self.headers)
+        payload = {
+            "additional_extension_context": "",
+            "allow_magic_buttons": True,
+            "is_vscode_extension": True,
+            "message_history": [
+                {"content": conversation_prompt, "metadata": {}, "role": "user"}
+            ],
+            "requested_model": self.model,
+            "user_input": prompt,
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if (
+                not response.ok
+                or not response.headers.get("Content-Type")
+                == "text/event-stream; charset=utf-8"
+            ):
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            streaming_text = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                chunk_size=self.stream_chunk_size,
+            ):
+                try:
+                    modified_value = re.sub("data:", "", value)
+                    json_modified_value = json.loads(modified_value)
+                    retrieved_text = self.get_message(json_modified_value)
+                    if not retrieved_text:
+                        continue
+                    streaming_text += retrieved_text
+                    json_modified_value["choices"][0]["delta"][
+                        "content"
+                    ] = streaming_text
+                    self.last_response.update(json_modified_value)
+                    yield value if raw else json_modified_value
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
 
-
-        time.sleep(15)
-        answer_elements = self.driver.find_elements(By.CSS_SELECTOR, "main div.fs-5")
+        return for_stream() if stream else for_non_stream()
 
-
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        if response.get("type", "") == "metadata":
+            return
+
+        delta: dict = response["choices"][0]["delta"]
 
-
-
-        return text
+        if not delta:
+            return ""
 
-
-        self.
+        elif delta.get("function_call"):
+            if self.quiet:
+                return ""
 
-
-
+            function_call: dict = delta["function_call"]
+            if function_call.get("name"):
+                return function_call["name"]
+            elif function_call.get("arguments"):
+                return function_call.get("arguments")
 
+        elif delta.get("metadata"):
+            if self.quiet:
+                return ""
+            return yaml.dump(delta["metadata"])
+
+        else:
+            return (
+                response["choices"][0]["delta"].get("content")
+                if response["choices"][0].get("finish_reason") is None
+                else ""
+            )
     @staticmethod
-    def
-    """
-
-
-
+    def chat_cli(prompt):
+        """Sends a request to the Phind API and processes the response."""
+        phind_search = PhindSearch()  # Initialize a PhindSearch instance
+        response = phind_search.ask(prompt)  # Perform a search with the given prompt
+        processed_response = phind_search.get_message(response)  # Process the response
+        print(processed_response)
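
This hunk replaces the old Selenium-driven PhindSearch (note the removed time.sleep(15) and driver.find_elements calls, shown truncated by the diff viewer) with a direct HTTP client for the Phind extension endpoint. A usage sketch under the same installation assumptions as above:

```python
# Sketch only: the rewritten PhindSearch talks to the Phind extension
# endpoint over HTTPS instead of driving a browser. quiet=True suppresses
# the function_call/metadata chunks that get_message() would otherwise render.
from webscout.AI import PhindSearch

ph = PhindSearch(model="Phind Model", quiet=True)
print(ph.chat("How do I reverse a list in Python?"))

# Streaming ask() yields OpenAI-style chunk dicts; for_stream() rewrites each
# chunk's delta content to the accumulated text before yielding it.
for chunk in ph.ask("Same question, streamed", stream=True):
    print(ph.get_message(chunk))
```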
+#-------------------------------------------------------yep.com--------------------------------------------------------
 class YepChat:
     def __init__(self, message="hello"):
         self.url = "https://api.yep.com/v1/chat/completions"
@@ -114,7 +973,7 @@ class YepChat:
         response = yep_chat.send_request()
         processed_response = yep_chat.process_response(response)
         print(processed_response)
-
+#-------------------------------------------------------youchat--------------------------------------------------------
 class youChat:
     """
     This class provides methods for generating completions based on prompts.
@@ -173,7 +1032,7 @@ class youChat:
         you_chat = youChat()
         completion = you_chat.create(prompt)
         print(completion)
-
+#-------------------------------------------------------Gemini--------------------------------------------------------
 class Gemini:
     def __init__(self):
         self.messages = []
@@ -201,7 +1060,7 @@ class Gemini:
         """Generate completion based on the provided message"""
         gemini = Gemini()
         return gemini.chat(message)
-
+#-------------------------------------------------------Prodia-------------------------------------------------------------------------
 class Prodia:
     """
     This class provides methods for generating images based on prompts.
@@ -308,12 +1167,17 @@ class Pollinations:
 
 @click.group()
 def cli():
+    """Webscout AI command-line interface."""
     pass
 
 @cli.command()
-@click.option('--
-def phindsearch(
-    PhindSearch.
+@click.option('--prompt', prompt='Enter your search prompt', help='The prompt to send.')
+def phindsearch(prompt):
+    """Perform a search with the given prompt using PhindSearch."""
+    phind_search = PhindSearch()  # Initialize a PhindSearch instance
+    response = phind_search.ask(prompt)  # Perform a search with the given prompt
+    processed_response = phind_search.get_message(response)  # Process the response
+    print(processed_response)
 
 @cli.command()
 @click.option('--message', prompt='Enter your message', help='The message to send.')
@@ -329,15 +1193,49 @@ def youchat(prompt):
 @click.option('--message', prompt='Enter your message', help='The message to send.')
 def gemini(message):
     Gemini.chat_cli(message)
+
 @cli.command()
 @click.option('--prompt', prompt='Enter your prompt', help='The prompt for generating the image.')
 def prodia(prompt):
     """Generate an image based on the provided prompt."""
     Prodia.prodia_cli(prompt)
+
 @cli.command()
 @click.option('--prompt', prompt='Enter your prompt', help='The prompt for generating the image.')
 def pollinations(prompt):
     """Generate an image based on the provided prompt."""
     Pollinations.pollinations_cli(prompt)
+
+@cli.command()
+@click.option('--prompt', prompt='Enter your prompt', help='The prompt to send.')
+def blackboxai(prompt):
+    """Chat with BLACKBOXAI using the provided prompt."""
+    BLACKBOXAI.chat_cli(prompt)
+
+@cli.command()
+@click.option('--prompt', prompt='Enter your prompt', help='The prompt to send.')
+@click.option('--stream', is_flag=True, help='Flag for streaming response.')
+@click.option('--raw', is_flag=True, help='Stream back raw response as received.')
+@click.option('--optimizer', type=str, help='Prompt optimizer name.')
+@click.option('--conversationally', is_flag=True, help='Chat conversationally when using optimizer.')
+def perplexity(prompt, stream, raw, optimizer, conversationally):
+    """Chat with PERPLEXITY using the provided prompt."""
+    perplexity_instance = PERPLEXITY()  # Initialize a PERPLEXITY instance
+    response = perplexity_instance.ask(prompt, stream, raw, optimizer, conversationally)
+    processed_response = perplexity_instance.get_message(response)  # Process the response
+    print(processed_response)
+
+@cli.command()
+@click.option('--prompt', prompt='Enter your search prompt', help='The prompt to send.')
+@click.option('--stream', is_flag=True, help='Flag for streaming response.')
+def opengpt(prompt, stream):
+    """Chat with OPENGPT using the provided prompt."""
+    opengpt = OPENGPT(is_conversation=True, max_tokens=8000, timeout=30)
+    if stream:
+        for response in opengpt.chat(prompt, stream=True):
+            print(response)
+    else:
+        response_str = opengpt.chat(prompt)
+        print(response_str)
 if __name__ == '__main__':
-    cli()
+    cli()
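
The diff never shows the console-script name registered in entry_points.txt, so the safest way to illustrate the new subcommands is click's own test runner, which invokes a click group without any installed entry point:

```python
# Sketch only: CliRunner is part of click's public testing API, so the new
# blackboxai/perplexity/opengpt/phindsearch commands can be exercised directly.
from click.testing import CliRunner

from webscout.AI import cli

runner = CliRunner()
result = runner.invoke(cli, ["opengpt", "--prompt", "Hello"])
print(result.output)

result = runner.invoke(cli, ["blackboxai", "--prompt", "Hello"])
print(result.output)
```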