webscout 4.6__py3-none-any.whl → 4.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Agents/functioncall.py +97 -37
- webscout/Bard.py +365 -0
- webscout/Local/_version.py +1 -1
- webscout/Provider/Andi.py +7 -1
- webscout/Provider/BasedGPT.py +11 -5
- webscout/Provider/Berlin4h.py +11 -5
- webscout/Provider/Blackboxai.py +10 -4
- webscout/Provider/Cohere.py +11 -5
- webscout/Provider/DARKAI.py +25 -7
- webscout/Provider/Deepinfra.py +2 -1
- webscout/Provider/Deepseek.py +25 -9
- webscout/Provider/DiscordRocks.py +389 -0
- webscout/Provider/{ChatGPTUK.py → Farfalle.py} +80 -67
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/Groq.py +244 -110
- webscout/Provider/Llama.py +13 -5
- webscout/Provider/Llama3.py +15 -2
- webscout/Provider/OLLAMA.py +8 -7
- webscout/Provider/Perplexity.py +422 -52
- webscout/Provider/Phind.py +6 -5
- webscout/Provider/PizzaGPT.py +7 -1
- webscout/Provider/__init__.py +15 -31
- webscout/Provider/ai4chat.py +193 -0
- webscout/Provider/koala.py +11 -5
- webscout/Provider/{VTLchat.py → liaobots.py} +120 -104
- webscout/Provider/meta.py +779 -0
- webscout/exceptions.py +6 -0
- webscout/version.py +1 -1
- webscout/webai.py +2 -64
- webscout/webscout_search.py +1 -1
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/METADATA +254 -297
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/RECORD +36 -40
- webscout/Provider/FreeGemini.py +0 -169
- webscout/Provider/Geminiflash.py +0 -152
- webscout/Provider/Geminipro.py +0 -152
- webscout/Provider/Leo.py +0 -469
- webscout/Provider/OpenGPT.py +0 -867
- webscout/Provider/Xjai.py +0 -230
- webscout/Provider/Yepchat.py +0 -478
- webscout/Provider/Youchat.py +0 -225
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/LICENSE.md +0 -0
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/WHEEL +0 -0
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/entry_points.txt +0 -0
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/top_level.txt +0 -0
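The per-file diffs below can be reproduced from the published wheels using only the Python standard library. A minimal sketch, assuming both wheels have already been downloaded into the working directory (e.g. via `pip download webscout==4.6 --no-deps` and `pip download webscout==4.8 --no-deps`; the exact wheel filenames are assumptions):

import difflib
import zipfile

# A wheel is a zip archive; open both published versions.
old = zipfile.ZipFile("webscout-4.6-py3-none-any.whl")
new = zipfile.ZipFile("webscout-4.8-py3-none-any.whl")

# Compare one member file across the two versions.
name = "webscout/Provider/__init__.py"
a = old.read(name).decode("utf-8").splitlines(keepends=True)
b = new.read(name).decode("utf-8").splitlines(keepends=True)
print("".join(difflib.unified_diff(a, b, fromfile="4.6/" + name, tofile="4.8/" + name)))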
webscout/Provider/__init__.py
CHANGED
@@ -1,7 +1,7 @@
 # webscout/providers/__init__.py
 
 from .ThinkAnyAI import ThinkAnyAI
-
+
 from .Llama import LLAMA
 from .Cohere import Cohere
 from .Reka import REKA
@@ -9,44 +9,36 @@ from .Groq import GROQ
 from .Groq import AsyncGROQ
 from .Openai import OPENAI
 from .Openai import AsyncOPENAI
-from .Leo import LEO
-from .Leo import AsyncLEO
 from .Koboldai import KOBOLDAI
 from .Koboldai import AsyncKOBOLDAI
-from .
-from .OpenGPT import OPENGPTv2
-from .OpenGPT import AsyncOPENGPT
-from .Perplexity import PERPLEXITY
+from .Perplexity import *
 from .Blackboxai import BLACKBOXAI
 from .Blackboxai import AsyncBLACKBOXAI
 from .Phind import PhindSearch
 from .Phind import AsyncPhindSearch
 from .Phind import Phindv2
 from .Phind import AsyncPhindv2
-from .
-from .Yepchat import AsyncYEPCHAT
-from .Youchat import YouChat
+from .ai4chat import *
 from .Gemini import GEMINI
 from .Berlin4h import Berlin4h
-from .ChatGPTUK import ChatGPTUK
 from .Poe import POE
 from .BasedGPT import BasedGPT
 from .Deepseek import DeepSeek
 from .Deepinfra import DeepInfra, VLM, AsyncDeepInfra
-from .
-from .Geminipro import GEMINIPRO
-from .Geminiflash import GEMINIFLASH
+from .Farfalle import *
 from .OLLAMA import OLLAMA
-from .FreeGemini import FreeGemini
 from .Andi import AndiSearch
 from .PizzaGPT import *
 from .Llama3 import *
 from .DARKAI import *
 from .koala import *
 from .RUBIKSAI import *
+from .meta import *
+from .liaobots import *
+from .DiscordRocks import *
 __all__ = [
     'ThinkAnyAI',
-    '
+    'Farfalle',
     'LLAMA',
     'Cohere',
     'REKA',
@@ -54,41 +46,33 @@ __all__ = [
     'AsyncGROQ',
     'OPENAI',
     'AsyncOPENAI',
-    'LEO',
-    'AsyncLEO',
     'KOBOLDAI',
     'AsyncKOBOLDAI',
-    '
-    'AsyncOPENGPT',
-    'PERPLEXITY',
+    'Perplexity',
     'BLACKBOXAI',
     'AsyncBLACKBOXAI',
     'PhindSearch',
     'AsyncPhindSearch',
-
-    'AsyncYEPCHAT',
-    'YouChat',
+
     'GEMINI',
     'Berlin4h',
-    'ChatGPTUK',
     'POE',
     'BasedGPT',
     'DeepSeek',
     'DeepInfra',
     'VLM',
     'AsyncDeepInfra',
-    '
+    'AI4Chat',
     'AsyncPhindv2',
     'Phindv2',
-    'OPENGPTv2',
-    'GEMINIPRO',
-    'GEMINIFLASH',
     'OLLAMA',
-    'FreeGemini',
     'AndiSearch',
     'PIZZAGPT',
     'LLAMA3',
     'DARKAI',
     'KOALA',
-    'RUBIKSAI'
+    'RUBIKSAI',
+    'Meta',
+    'LiaoBots',
+    'DiscordRocks',
 ]
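Net effect of the `__init__.py` changes: the Leo, OpenGPT, Yepchat, Youchat, ChatGPTUK, Geminipro, Geminiflash, and FreeGemini exports are gone, and the new modules are pulled in via wildcard imports. A minimal sketch of the resulting import surface, using names from the updated `__all__` (untested against the published wheel):

# New exports available in 4.8 (via the wildcard imports above):
from webscout.Provider import AI4Chat, LiaoBots

# Exports removed in 4.8; these imports now raise ImportError:
# from webscout.Provider import LEO, OPENGPTv2, YouChat, ChatGPTUK, FreeGemini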
webscout/Provider/ai4chat.py
ADDED
@@ -0,0 +1,193 @@
+import requests
+import json
+import html
+from re import sub
+from typing import Any, Dict
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+
+class AI4Chat(Provider):
+    """
+    A class to interact with the AI4Chat API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful and informative AI assistant.",
+    ) -> None:
+        """
+        Initializes the AI4Chat API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            system_prompt (str, optional): System prompt to guide the AI's behavior. Defaults to "You are a helpful and informative AI assistant.".
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.ai4chat.co/generate-response"
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "authority": "www.ai4chat.co",
+            "method": "POST",
+            "path": "/generate-response",
+            "scheme": "https",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "cookie": "messageCount=1",
+            "dnt": "1",
+            "origin": "https://www.ai4chat.co",
+            "priority": "u=1, i",
+            "referer": "https://www.ai4chat.co/gpt/talkdirtytome",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0"
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+        self.system_prompt = system_prompt
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,  # Streaming is not supported by AI4Chat
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the AI4Chat API and returns the response.
+
+        Args:
+            prompt: The text prompt to generate text from.
+            stream (bool, optional): Not used (AI4Chat doesn't support streaming).
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+        Returns:
+            dict: A dictionary containing the AI's response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ]
+        }
+
+        response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout)
+        if not response.ok:
+            raise Exception(f"Failed to generate response: {response.status_code} - {response.reason}")
+
+        response_data = response.json()
+        message_content = response_data.get('message', 'No message found')
+
+        # Decode HTML entities and remove HTML tags
+        decoded_message = html.unescape(message_content)
+        cleaned_text = sub('<[^<]+?>', '', decoded_message)
+
+        self.last_response.update(dict(text=cleaned_text))
+        self.conversation.update_chat_history(prompt, cleaned_text)
+        return self.last_response
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,  # Streaming is not supported by AI4Chat
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the AI4Chat API.
+
+        Args:
+            prompt (str): The prompt to send to the AI.
+            stream (bool, optional): Not used (AI4Chat doesn't support streaming).
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+        Returns:
+            str: The response generated by the AI.
+        """
+        return self.get_message(
+            self.ask(
+                prompt,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+if __name__ == "__main__":
+    from rich import print
+
+    ai = AI4Chat()
+    # Stream the response
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
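A hedged usage sketch of the new provider, following the `ask`/`chat`/`get_message` contract defined above (the class takes no API key; note that since `chat()` returns a plain `str`, the `for chunk in response` loop in the module's `__main__` block iterates characters rather than streamed chunks):

from webscout.Provider.ai4chat import AI4Chat

ai = AI4Chat()
# chat() returns the full reply as a string; AI4Chat does not stream.
print(ai.chat("What is a Python wheel?"))

# Lower-level: ask() returns a dict and get_message() extracts its "text" field.
raw = ai.ask("What is a Python wheel?")
print(ai.get_message(raw))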
webscout/Provider/koala.py
CHANGED
@@ -1,10 +1,10 @@
 import requests
 import json
 from typing import Any, Dict, Optional
-from
-from
-from
-from
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
 from webscout import exceptions
 
 class KOALA(Provider):
@@ -236,4 +236,10 @@ class KOALA(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"]
+if __name__ == '__main__':
+    from rich import print
+    ai = KOALA()
+    response = ai.chat("tell me about india")
+    for chunk in response:
+        print(chunk, end="", flush=True)
webscout/Provider/{VTLchat.py → liaobots.py}
RENAMED
@@ -1,47 +1,42 @@
-import
+import json
+import re
 import uuid
-
-
-from
-
-from selenium.webdriver.support.ui import WebDriverWait
-import click
+import gzip
+import zlib
+from typing import Any, Dict, Generator, Union
+
 import requests
-
-from
-from
-from
-from
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
-import json
-import yaml
-from ..AIutel import Optimizers
-from ..AIutel import Conversation
-from ..AIutel import AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
-from Helpingai_T2 import Perplexity
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
 from webscout import exceptions
-
-
-import httpx
-class VTLchat(Provider):
+
+class LiaoBots(Provider):
     """
-    A class to interact with the
+    A class to interact with the LiaoBots API.
     """
 
+    # List of available models
+    AVAILABLE_MODELS = [
+        "gpt-4o-mini",
+        "gpt-4o-free",
+        "gpt-4o-mini-free",
+        "gpt-4-turbo-2024-04-09",
+        "gpt-4o",
+        "gpt-4-0613",
+        "claude-3-5-sonnet-20240620",
+        "gemini-1.5-pro-latest",
+        "gemini-1.5-flash-latest"
+    ]
+
     def __init__(
         self,
+        auth_code: str = "G3USRn7M5zsXn",
+        cookie: str = "gkp2=pevIjZCYj8wMcrWPEAq6",
         is_conversation: bool = True,
         max_tokens: int = 600,
-        temperature: float = 0.9,
-        presence_penalty: float = 0,
-        frequency_penalty: float = 0,
-        top_p: float = 1,
-        model: str = "gpt-3.5-turbo",
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -49,19 +44,17 @@ class VTLchat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-
+        model: str = "claude-3-5-sonnet-20240620",
+        system_prompt: str = "You are a helpful assistant."
     ) -> None:
         """
-        Initializes the
+        Initializes the LiaoBots API with given parameters.
 
         Args:
+            auth_code (str): The auth code for authentication.
+            cookie (str): The cookie for authentication.
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            temperature (float, optional): Controls randomness. Default is 0.9.
-            presence_penalty (float, optional): Controls repetition. Default is 0.
-            frequency_penalty (float, optional): Controls frequency of token usage. Default is 0.
-            top_p (float, optional): Controls diversity. Default is 1.
-            model (str, optional): The AI model to use. Default is 'gpt-3.5-turbo'.
             timeout (int, optional): Http request timeout. Defaults to 30.
             intro (str, optional): Conversation introductory prompt. Defaults to None.
             filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -69,23 +62,44 @@ class VTLchat(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-
+            model (str, optional): AI model to use for text generation. Defaults to "claude-3-5-sonnet-20240620".
+            system_prompt (str, optional): System prompt for LiaoBots. Defaults to "You are a helpful assistant.".
         """
+
+        # Check if the chosen model is available
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.auth_code = auth_code
+        self.cookie = cookie
+        self.api_endpoint = "https://liaobots.work/api/chat"
+        self.model = model
+        self.system_prompt = system_prompt
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://vtlchat-g1.vercel.app/api/openai/v1/chat/completions"
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.
-
-
-
-
-
-
-
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "cookie": self.cookie,
+            "dnt": "1",
+            "origin": "https://liaobots.work",
+            "priority": "u=1, i",
+            "referer": "https://liaobots.work/en",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
+            "x-Auth-Code": self.auth_code,
+        }
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
@@ -112,40 +126,19 @@ class VTLchat(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) ->
-        """
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the LiaoBots API and returns the response.
 
         Args:
-            prompt
-            stream (bool, optional):
-            raw (bool, optional):
-            optimizer (str, optional):
-            conversationally (bool, optional):
+            prompt: The text prompt to generate text from.
+            stream (bool, optional): Whether to stream the response. Defaults to False.
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
         Returns:
-
-            ```json
-            {
-                "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
-                "object": "chat.completion",
-                "created": 1704623244,
-                "model": "gpt-3.5-turbo",
-                "usage": {
-                    "prompt_tokens": 0,
-                    "completion_tokens": 0,
-                    "total_tokens": 0
-                },
-                "choices": [
-                    {
-                        "message": {
-                            "role": "assistant",
-                            "content": "Hello! How can I assist you today?"
-                        },
-                        "finish_reason": "stop",
-                        "index": 0
-                    }
-                ]
-            }
-            ```
+            The response from the API.
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -158,45 +151,61 @@ class VTLchat(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
 
-
-
+        payload: Dict[str, any] = {
+            "conversationId": str(uuid.uuid4()),
+            "model": {
+                "id": self.model
+            },
             "messages": [
-                {
-
+                {
+                    "role": "user",
+                    "content": conversation_prompt
+                }
             ],
-            "
-            "
-            "temperature": self.temperature,
-            "presence_penalty": self.presence_penalty,
-            "frequency_penalty": self.frequency_penalty,
-            "top_p": self.top_p
+            "key": "",
+            "prompt": self.system_prompt
         }
 
         def for_stream():
             response = self.session.post(
-                self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
             )
+
             if not response.ok:
                 raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason})
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
                 )
 
             streaming_response = ""
-
-
-
+            content_encoding = response.headers.get('Content-Encoding')
+            # Stream the response
+            for chunk in response.iter_content():
+                if chunk:
                     try:
-
-
-
-
-
-
+                        # Decompress the chunk if necessary
+                        if content_encoding == 'gzip':
+                            chunk = gzip.decompress(chunk)
+                        elif content_encoding == 'deflate':
+                            chunk = zlib.decompress(chunk)
+
+                        # Decode the chunk
+                        decoded_chunk = chunk.decode('utf-8')
+                        streaming_response += decoded_chunk
+                    except UnicodeDecodeError:
+                        # Handle non-textual data
+                        pass
+                else:
+                    pass
             self.last_response.update(dict(text=streaming_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
 
+            if stream:
+                yield from []  # Yield nothing when streaming, focus on side effects
+            else:
+                return []  # Return empty list for non-streaming case
+
         def for_non_stream():
             for _ in for_stream():
                 pass
@@ -249,4 +258,11 @@ class VTLchat(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"]
+
+if __name__ == '__main__':
+    from rich import print
+    liaobots = LiaoBots()
+    response = liaobots.chat("tell me about india")
+    for chunk in response:
+        print(chunk, end="", flush=True)
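A similar hedged sketch for the renamed provider, using the constructor and model list shown in the diff (the default `auth_code` and `cookie` are hard-coded in the source and may expire; an unknown model raises `ValueError` before any request is made):

from webscout.Provider.liaobots import LiaoBots

ai = LiaoBots(model="gpt-4o-mini")  # must appear in LiaoBots.AVAILABLE_MODELS
try:
    print(ai.chat("tell me about india"))
except Exception as err:
    # Network or auth failures surface as FailedToGenerateResponseError.
    print(f"Request failed: {err}")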