webscout 5.3__py3-none-any.whl → 5.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +8 -12
- webscout/Agents/Onlinesearcher.py +5 -5
- webscout/Agents/functioncall.py +123 -97
- webscout/Local/_version.py +2 -2
- webscout/Provider/Andi.py +1 -21
- webscout/Provider/BasedGPT.py +1 -21
- webscout/Provider/Blackboxai.py +1 -21
- webscout/Provider/Chatify.py +3 -2
- webscout/Provider/Cloudflare.py +1 -22
- webscout/Provider/Cohere.py +2 -23
- webscout/Provider/DARKAI.py +0 -1
- webscout/Provider/Deepinfra.py +2 -16
- webscout/Provider/EDITEE.py +3 -26
- webscout/Provider/Gemini.py +1 -24
- webscout/Provider/Groq.py +0 -2
- webscout/Provider/Koboldai.py +0 -21
- webscout/Provider/Llama.py +4 -21
- webscout/Provider/OLLAMA.py +0 -17
- webscout/Provider/Openai.py +2 -22
- webscout/Provider/Perplexity.py +1 -2
- webscout/Provider/Phind.py +3 -508
- webscout/Provider/Reka.py +4 -21
- webscout/Provider/TTS/streamElements.py +0 -17
- webscout/Provider/TTS/voicepod.py +0 -1
- webscout/Provider/ThinkAnyAI.py +17 -78
- webscout/Provider/Youchat.py +3 -20
- webscout/Provider/__init__.py +12 -5
- webscout/Provider/cleeai.py +212 -0
- webscout/Provider/elmo.py +237 -0
- webscout/Provider/felo_search.py +4 -22
- webscout/Provider/geminiapi.py +198 -0
- webscout/Provider/genspark.py +222 -0
- webscout/Provider/julius.py +3 -20
- webscout/Provider/koala.py +1 -1
- webscout/Provider/lepton.py +194 -0
- webscout/Provider/turboseek.py +4 -21
- webscout/Provider/x0gpt.py +3 -2
- webscout/Provider/xdash.py +2 -22
- webscout/Provider/yep.py +391 -149
- webscout/YTdownloader.py +2 -3
- webscout/version.py +1 -1
- {webscout-5.3.dist-info → webscout-5.4.dist-info}/METADATA +37 -63
- {webscout-5.3.dist-info → webscout-5.4.dist-info}/RECORD +47 -42
- {webscout-5.3.dist-info → webscout-5.4.dist-info}/LICENSE.md +0 -0
- {webscout-5.3.dist-info → webscout-5.4.dist-info}/WHEEL +0 -0
- {webscout-5.3.dist-info → webscout-5.4.dist-info}/entry_points.txt +0 -0
- {webscout-5.3.dist-info → webscout-5.4.dist-info}/top_level.txt +0 -0
webscout/Provider/ThinkAnyAI.py
CHANGED
@@ -1,41 +1,21 @@
-
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
+
 import requests
-
-from
-from
-from
-from
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
-import json
-import yaml
-from ..AIutel import Optimizers
-from ..AIutel import Conversation
-from ..AIutel import AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
-from Helpingai_T2 import Perplexity
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-
-import httpx
+
 #------------------------------------ThinkAnyAI------------
 class ThinkAnyAI(Provider):
     def __init__(
         self,
         model: str = "claude-3-haiku",
         locale: str = "en",
-        web_search: bool =
-        chunk_size: int =
+        web_search: bool = True,
+        chunk_size: int = 64,
         streaming: bool = True,
         is_conversation: bool = True,
         max_tokens: int = 600,
@@ -101,54 +81,7 @@ class ThinkAnyAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict | AsyncGenerator:
-        """Chat with AI asynchronously.
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defeaults to None
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "content": "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]",
-            "conversation_id": "c_f13f6217f9a997aa",
-            "response_id": "r_d3665f95975c368f",
-            "factualityQueries": null,
-            "textQuery": [
-                "hello there",
-                1
-            ],
-            "choices": [
-                {
-                    "id": "rc_ea075c9671bfd8cb",
-                    "content": [
-                        "General Kenobi! \n\n(I couldn't help but respond with the iconic Star Wars greeting since you used it first. )\n\nIs there anything I can help you with today?\n[Image of Hello there General Kenobi]"
-                    ]
-                },
-                {
-                    "id": "rc_de6dd3fb793a5402",
-                    "content": [
-                        "General Kenobi! (or just a friendly hello, whichever you prefer!). \n\nI see you're a person of culture as well. *Star Wars* references are always appreciated. \n\nHow can I help you today?\n"
-                    ]
-                },
-                {
-                    "id": "rc_a672ac089caf32db",
-                    "content": [
-                        "General Kenobi! (or just a friendly hello if you're not a Star Wars fan!). \n\nHow can I help you today? Feel free to ask me anything, or tell me what you'd like to chat about. I'm here to assist in any way I can.\n[Image of Obi-Wan Kenobi saying hello there]"
-                    ]
-                }
-            ],
 
-            "images": [
-                "https://i.pinimg.com/originals/40/74/60/407460925c9e419d82b93313f0b42f71.jpg"
-            ]
-        }
-
-        ```
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -204,7 +137,7 @@ class ThinkAnyAI(Provider):
         conversation_uuid = initiate_conversation(conversation_prompt)
         web_search_result, links = RAG_search(conversation_uuid)
         if not web_search_result:
-            print("Failed to generate WEB response. Making normal
+            print("Failed to generate WEB response. Making normal Querywebscout..")
 
         url = f"{self.base_url}/chat"
         payload = {
@@ -277,4 +210,10 @@ class ThinkAnyAI(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"]
+if __name__ == "__main__":
+    from rich import print
+    ai = ThinkAnyAI()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
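The net change here swaps ThinkAnyAI's relative imports (and a pile of unused selenium/g4f/PIL imports) for absolute webscout imports, pins web_search=True and chunk_size=64 as defaults, drops the oversized docstring, and appends a self-test block. A minimal usage sketch mirroring the new __main__ block (only the constructor defaults and the iterable chat() call are taken from the diff; the prompt is illustrative):

    from webscout.Provider import ThinkAnyAI

    # Defaults shown here are the ones introduced in 5.4.
    ai = ThinkAnyAI(model="claude-3-haiku", web_search=True, chunk_size=64)
    # chat() is iterated chunk by chunk, as in the __main__ block the diff adds.
    for chunk in ai.chat("Hello there"):
        print(chunk, end="", flush=True)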
webscout/Provider/Youchat.py
CHANGED
@@ -1,32 +1,15 @@
-
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
-import requests
-from requests import get
+
 from uuid import uuid4
 from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
 import json
-
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-
-import httpx
+
 import cloudscraper
 
 
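Besides dropping the unused selenium, g4f, PIL, and httpx imports, this hunk retires the requests/curl_cffi fetchers in favour of cloudscraper. For context, a minimal sketch of what that swap buys (create_scraper() is the standard cloudscraper entry point and returns a requests-compatible session; the URL is illustrative, not from the diff):

    import cloudscraper

    # Drop-in replacement for requests.Session() that transparently
    # solves Cloudflare's anti-bot challenges before returning.
    scraper = cloudscraper.create_scraper()
    resp = scraper.get("https://you.com")  # illustrative target
    print(resp.status_code)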
webscout/Provider/__init__.py
CHANGED
@@ -15,9 +15,7 @@ from .Perplexity import *
 from .Blackboxai import BLACKBOXAI
 from .Blackboxai import AsyncBLACKBOXAI
 from .Phind import PhindSearch
-from .Phind import AsyncPhindSearch
 from .Phind import Phindv2
-from .Phind import AsyncPhindv2
 from .ai4chat import *
 from .Gemini import GEMINI
 from .Poe import POE
@@ -25,6 +23,7 @@ from .BasedGPT import BasedGPT
 from .Deepseek import DeepSeek
 from .Deepinfra import DeepInfra, VLM, AsyncDeepInfra
 from .Farfalle import *
+from .cleeai import *
 from .OLLAMA import OLLAMA
 from .Andi import AndiSearch
 from .PizzaGPT import *
@@ -48,6 +47,10 @@ from .AI21 import *
 from .Chatify import *
 from .x0gpt import *
 from .cerebras import *
+from .lepton import *
+from .geminiapi import *
+from .elmo import *
+from .genspark import *
 __all__ = [
     'ThinkAnyAI',
     'Farfalle',
@@ -64,7 +67,6 @@ __all__ = [
     'BLACKBOXAI',
     'AsyncBLACKBOXAI',
     'PhindSearch',
-    'AsyncPhindSearch',
     'Felo',
     'GEMINI',
     'POE',
@@ -74,7 +76,6 @@ __all__ = [
     'VLM',
     'AsyncDeepInfra',
     'AI4Chat',
-    'AsyncPhindv2',
     'Phindv2',
     'OLLAMA',
     'AndiSearch',
@@ -98,5 +99,11 @@ __all__ = [
     'AI21',
     'Chatify',
     'X0GPT',
-    'Cerebras'
+    'Cerebras',
+    'Lepton',
+    'GEMINIAPI',
+    'Cleeai',
+    'Elmo',
+    'Genspark'
+
 ]
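Given the new star-imports and __all__ entries above, the providers added in 5.4 should be importable straight from the package namespace. A sketch (class names are taken from the __all__ additions; the modules behind Lepton, GEMINIAPI, and Genspark are not shown in this diff, so those imports are assumptions):

    # Assumes webscout 5.4, with each new module exporting the class
    # named in the __all__ additions above.
    from webscout.Provider import Cleeai, Elmo, Lepton, GEMINIAPI, Genspark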
webscout/Provider/cleeai.py
ADDED
@@ -0,0 +1,212 @@
+import requests
+import json
+from uuid import uuid4
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+
+class Cleeai(Provider):
+    """
+    A class to interact with the Cleeai.com API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ) -> None:
+        """Instantiates Cleeai
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://qna-api.cleeai.com/open_research"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://www.cleeai.com",
+            "priority": "u=1, i",
+            "referer": "https://www.cleeai.com/",
+            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "data": {
+                "question": conversation_prompt,
+                "question_id": 69237,
+                "query_id": uuid4().hex,
+                "source_list": [],
+                "followup_qas": [],
+                "with_upload": True,
+            }
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint,
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout,
+            )
+            if not response.ok:
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            full_response = ''
+            for chunk in response.iter_content(chunk_size=self.stream_chunk_size):
+                full_response += chunk.decode('utf-8')
+                yield chunk.decode('utf-8') if raw else dict(text=full_response)
+
+            self.last_response.update(dict(text=full_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = Cleeai()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
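Cleeai follows the Provider pattern used across webscout: ask() yields dicts whose "text" key holds the reply accumulated so far, while chat() unwraps them to plain strings. A short sketch of both paths, using only the methods defined above (the prompt is illustrative):

    ai = Cleeai(timeout=60)

    # Non-streaming: chat() returns the complete reply as a str.
    print(ai.chat("Who maintains CPython?"))

    # Streaming: each yielded dict carries the text accumulated so far,
    # so the last value seen is the full reply.
    for part in ai.ask("Who maintains CPython?", stream=True):
        last = part["text"]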
webscout/Provider/elmo.py
ADDED
@@ -0,0 +1,237 @@
+import requests
+import json
+import textwrap
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+
+
+class Elmo(Provider):
+    """
+    A class to interact with the Elmo.chat API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful AI assistant. Provide clear, concise, and well-structured information. Organize your responses into paragraphs for better readability.",
+
+    ) -> None:
+        """Instantiates Elmo
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            system_prompt (str, optional): System prompt for Elmo. Defaults to the provided string.
+            web_search (bool, optional): Enables web search mode when True. Defaults to False.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.elmo.chat/api/v1/prompt"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-length": "763",
+            "content-type": "text/plain;charset=UTF-8",
+            "dnt": "1",
+            "origin": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm",
+            "priority": "u=1, i",
+            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+
+        payload = {
+            "metadata": {
+                "system": {"language": "en-US"},
+                "website": {
+                    "url": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm/options.html",
+                    "origin": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm",
+                    "title": "Elmo Chat - Your AI Web Copilot",
+                    "xpathIndexLength": 0,
+                    "favicons": [],
+                    "language": "en",
+                    "content": "",
+                    "type": "html",
+                    "selection": "",
+                    "hash": "d41d8cd98f00b204e9800998ecf8427e",
+                },
+            },
+            "regenerate": True,
+            "conversation": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "enableCache": False,
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint,
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout,
+            )
+            if not response.ok:
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            full_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.startswith('0:'):
+                        chunk = line.split(':"')[1].strip('"')
+                        formatted_output = (
+                            chunk.replace("\\n", "\n").replace("\\n\\n", "\n\n")
+                        )
+                        full_response += formatted_output
+                        self.last_response.update(dict(text=full_response))
+                        yield formatted_output if raw else dict(text=full_response)
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = Elmo()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
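Elmo streams a vendored line format in which content records arrive as 0:"..." lines; ask() above unescapes them and yields the text accumulated so far. The one provider-level knob exposed here is system_prompt. A usage sketch (note that with stream=True, chat() yields the accumulated text after each parsed record, not deltas, so only the last value is the full reply):

    ai = Elmo(system_prompt="Answer in one short paragraph.")
    final = ""
    for text in ai.chat("Explain HTTP keep-alive.", stream=True):
        final = text  # each yield is the full text so far
    print(final)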