webscout-7.8-py3-none-any.whl → webscout-8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this release of webscout has been flagged as potentially problematic.
- webscout/Bard.py +5 -25
- webscout/DWEBS.py +476 -476
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -103
- webscout/Extra/__init__.py +2 -0
- webscout/Extra/autocoder/__init__.py +1 -1
- webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
- webscout/Extra/tempmail/__init__.py +26 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +156 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Provider/AISEARCH/__init__.py +5 -1
- webscout/Provider/AISEARCH/hika_search.py +194 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +320 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/AllenAI.py +255 -122
- webscout/Provider/DeepSeek.py +1 -2
- webscout/Provider/Deepinfra.py +296 -286
- webscout/Provider/ElectronHub.py +709 -716
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +28 -6
- webscout/Provider/Gemini.py +167 -165
- webscout/Provider/GithubChat.py +2 -1
- webscout/Provider/Groq.py +38 -24
- webscout/Provider/LambdaChat.py +2 -1
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OpenGPT.py +199 -0
- webscout/Provider/PI.py +39 -24
- webscout/Provider/TextPollinationsAI.py +232 -230
- webscout/Provider/Youchat.py +326 -296
- webscout/Provider/__init__.py +10 -4
- webscout/Provider/ai4chat.py +58 -56
- webscout/Provider/akashgpt.py +34 -22
- webscout/Provider/copilot.py +427 -427
- webscout/Provider/freeaichat.py +9 -2
- webscout/Provider/labyrinth.py +121 -20
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/scira_chat.py +271 -0
- webscout/Provider/typefully.py +280 -0
- webscout/Provider/uncovr.py +312 -299
- webscout/Provider/yep.py +64 -12
- webscout/__init__.py +38 -36
- webscout/cli.py +293 -293
- webscout/conversation.py +350 -17
- webscout/litprinter/__init__.py +59 -667
- webscout/optimizers.py +419 -419
- webscout/update_checker.py +14 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1346 -1282
- webscout/webscout_search_async.py +877 -813
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/METADATA +44 -39
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/RECORD +63 -46
- webscout/Provider/DARKAI.py +0 -225
- webscout/Provider/EDITEE.py +0 -192
- webscout/litprinter/colors.py +0 -54
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/LICENSE.md +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/WHEEL +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/entry_points.txt +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/top_level.txt +0 -0
webscout/Provider/Gemini.py
CHANGED
@@ -1,165 +1,167 @@

The module was rewritten wholesale (the model-alias table is the main addition); the new contents:

from os import path
from json import load, dumps
import warnings
from typing import Union, Any, Dict

# Import internal modules and dependencies
from ..AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
from ..AIbase import Provider, AsyncProvider
from ..Bard import Chatbot, Model

warnings.simplefilter("ignore", category=UserWarning)

# Define model aliases for easy usage
MODEL_ALIASES: Dict[str, Model] = {
    "unspecified": Model.UNSPECIFIED,
    "gemini-2.0-flash": Model.G_2_0_FLASH,
    "gemini-2.0-flash-thinking": Model.G_2_0_FLASH_THINKING,
    "gemini-2.5-pro": Model.G_2_5_PRO,
    "gemini-2.0-exp-advanced": Model.G_2_0_EXP_ADVANCED,
    "gemini-2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
    # Add shorter aliases for convenience
    "flash": Model.G_2_0_FLASH,
    "thinking": Model.G_2_0_FLASH_THINKING,
    "pro": Model.G_2_5_PRO,
    "advanced": Model.G_2_0_EXP_ADVANCED,
    "advanced-2.5": Model.G_2_5_EXP_ADVANCED,
}

# List of available models (friendly names)
AVAILABLE_MODELS = list(MODEL_ALIASES.keys())

class GEMINI(Provider):
    def __init__(
        self,
        cookie_file: str,
        model: str = "flash",  # Accepts either a Model enum or a str alias.
        proxy: dict = {},
        timeout: int = 30,
    ):
        """
        Initializes GEMINI with model support.

        Args:
            cookie_file (str): Path to the cookies JSON file.
            model (Model or str): Selected model for the session. Can be a Model enum
                or a string alias. Available aliases: flash, flash-exp, thinking, thinking-with-apps,
                exp-advanced, 2.5-exp-advanced, 2.5-pro, 1.5-flash, 1.5-pro, 1.5-pro-research.
            proxy (dict, optional): HTTP request proxy. Defaults to {}.
            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
        """
        self.conversation = Conversation(False)

        # Ensure cookie_file existence.
        if not isinstance(cookie_file, str):
            raise TypeError(f"cookie_file should be of type str, not '{type(cookie_file)}'")
        if not path.isfile(cookie_file):
            raise Exception(f"{cookie_file} is not a valid file path")

        # If model is provided as alias (str), convert to Model enum.
        if isinstance(model, str):
            alias = model.lower()
            if alias in MODEL_ALIASES:
                selected_model = MODEL_ALIASES[alias]
            else:
                raise Exception(f"Unknown model alias: '{model}'. Available aliases: {', '.join(AVAILABLE_MODELS)}")
        elif isinstance(model, Model):
            selected_model = model
        else:
            raise TypeError("model must be a string alias or an instance of Model")

        # Initialize the Chatbot session using the cookie file.
        self.session = Chatbot(cookie_file, proxy, timeout, selected_model)
        self.last_response = {}
        self.__available_optimizers = (
            method for method in dir(Optimizers) if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        # Store cookies from Chatbot for later use (e.g. image generation)
        self.session_auth1 = self.session.secure_1psid
        self.session_auth2 = self.session.secure_1psidts

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name (e.g., 'code', 'shell_command'). Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            dict: Response generated by the underlying Chatbot.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {', '.join(self.__available_optimizers)}")

        def for_stream():
            response = self.session.ask(prompt)
            self.last_response.update(response)
            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
            yield dumps(response) if raw else response

        def for_non_stream():
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response text.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            str: Response generated.
        """
        def for_stream():
            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally))

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message content from the response.

        Args:
            response (dict): Response generated by `self.ask`.

        Returns:
            str: Extracted message content.
        """
        if not isinstance(response, dict):
            raise TypeError("Response should be of type dict")
        return response["content"]

    def reset(self):
        """Reset the current conversation."""
        self.session.async_chatbot.conversation_id = ""
        self.session.async_chatbot.response_id = ""
        self.session.async_chatbot.choice_id = ""
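For readers tracking the API change, a minimal usage sketch of the new GEMINI provider, derived from the code above. The import path is assumed from the file listing, and "cookies.json" is a hypothetical path to a cookie export from gemini.google.com (obtaining it is a prerequisite of the underlying Bard Chatbot, not shown in this diff):

from webscout.Provider.Gemini import GEMINI  # import path assumed from the file listing

# "cookies.json" is a hypothetical cookie export from gemini.google.com.
bot = GEMINI(cookie_file="cookies.json", model="pro")  # any key of MODEL_ALIASES works

# Non-streaming call: chat() returns the full reply as a string.
print(bot.chat("Summarize the webscout project in one sentence."))

# Streaming call: chat() yields message chunks when stream=True.
for chunk in bot.chat("Now in three sentences.", stream=True):
    print(chunk, end="", flush=True)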
webscout/Provider/GithubChat.py
CHANGED
webscout/Provider/Groq.py
CHANGED
@@ -16,26 +16,31 @@ class GROQ(Provider):

GROQ.AVAILABLE_MODELS is reordered and expanded (new entries include the Llama 4, Whisper, PlayAI TTS, Qwen QwQ, Qwen 2.5, and Mistral Saba models); the resulting list:

    AVAILABLE_MODELS = [
        "distil-whisper-large-v3-en",
        "gemma2-9b-it",
        "llama-3.3-70b-versatile",
        "llama-3.1-8b-instant",
        "llama-guard-3-8b",
        "llama3-70b-8192",
        "llama3-8b-8192",
        "whisper-large-v3",
        "whisper-large-v3-turbo",
        "meta-llama/llama-4-scout-17b-16e-instruct",
        "meta-llama/llama-4-maverick-17b-128e-instruct",
        "playai-tts",
        "playai-tts-arabic",
        "qwen-qwq-32b",
        "mistral-saba-24b",
        "qwen-2.5-coder-32b",
        "qwen-2.5-32b",
        "deepseek-r1-distill-qwen-32b",
        "deepseek-r1-distill-llama-70b",
        "llama-3.3-70b-specdec",
        "llama-3.2-1b-preview",
        "llama-3.2-3b-preview",
        "llama-3.2-11b-vision-preview",
        "llama-3.2-90b-vision-preview",
        "mixtral-8x7b-32768"
    ]

@@ -345,22 +350,31 @@ class AsyncGROQ(AsyncProvider):

AsyncGROQ.AVAILABLE_MODELS receives the identical update.
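Because AVAILABLE_MODELS is a plain class attribute shared by GROQ and AsyncGROQ, the new entries can be validated without constructing a client; a small sketch (import path assumed from the file listing):

from webscout.Provider.Groq import GROQ  # import path assumed from the file listing

requested = "meta-llama/llama-4-scout-17b-16e-instruct"  # new in 8.0
if requested not in GROQ.AVAILABLE_MODELS:
    raise ValueError(
        f"Unknown model '{requested}'; choose one of: {', '.join(GROQ.AVAILABLE_MODELS)}"
    )
print(f"{requested} is listed in webscout 8.0")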
webscout/Provider/LambdaChat.py
CHANGED
webscout/Provider/Netwrck.py
CHANGED
webscout/Provider/OpenGPT.py
ADDED

@@ -0,0 +1,199 @@

import requests
import json
from typing import Dict, Generator, Union

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

class OpenGPT(Provider):
    """
    A class to interact with the Open-GPT API.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        app_id: str = "clf3yg8730000ih08ndbdi2v4",
    ):
        """Initializes the OpenGPT API client.

        Args:
            is_conversation (bool, optional): Whether to maintain conversation history. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            app_id (str, optional): The OpenGPT application ID. Defaults to "clf3yg8730000ih08ndbdi2v4".
        """
        self.session = requests.Session()
        self.agent = LitAgent()

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.app_id = app_id

        # Set up headers with dynamic user agent
        self.headers = {
            "Content-Type": "application/json",
            "User-Agent": self.agent.random(),
            "Referer": f"https://open-gpt.app/id/app/{app_id}"
        }

        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)

        # Initialize optimizers
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Setup conversation
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            ) if act else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict, Generator]:
        """
        Send a prompt to the OpenGPT API and get a response.

        Args:
            prompt: The user input/prompt for the API.
            stream: Whether to stream the response.
            raw: Whether to return the raw API response.
            optimizer: Optimizer to use on the prompt.
            conversationally: Whether to apply the optimizer on the full conversation prompt.

        Returns:
            A dictionary or generator with the response.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)

        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the request body payload
        payload = {
            "userInput": conversation_prompt,
            "id": self.app_id,
            "userKey": ""  # Assuming userKey is meant to be empty as in the original code
        }

        def for_non_stream():
            try:
                response = self.session.post(
                    "https://open-gpt.app/api/generate",
                    data=json.dumps(payload),
                    timeout=self.timeout
                )

                # Raise an exception for bad status codes
                response.raise_for_status()

                response_text = response.text
                self.last_response = {"text": response_text}
                self.conversation.update_chat_history(prompt, response_text)

                return {"text": response_text} if not raw else {"raw": response_text}

            except requests.exceptions.RequestException as e:
                # Handle potential errors during the request
                error_msg = f"Error fetching data: {e}"
                raise exceptions.FailedToGenerateResponseError(error_msg)
            except Exception as e:
                # Catch any other unexpected errors
                error_msg = f"An unexpected error occurred: {e}"
                raise exceptions.FailedToGenerateResponseError(error_msg)

        # This provider doesn't support streaming, so just return non-stream
        return for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Send a prompt to the OpenGPT API and get a text response.

        Args:
            prompt: The user input/prompt for the API.
            stream: Whether to stream the response (not supported).
            optimizer: Optimizer to use on the prompt.
            conversationally: Whether to apply the optimizer on the full conversation prompt.

        Returns:
            A string with the response text.
        """
        response = self.ask(
            prompt, False, optimizer=optimizer, conversationally=conversationally
        )
        return self.get_message(response)

    def get_message(self, response: dict) -> str:
        """
        Extract the message from the response dictionary.

        Args:
            response: Response dictionary from the ask method.

        Returns:
            The text response as a string.
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    # Test the provider
    print("-" * 80)
    print("Testing OpenGPT provider")
    print("-" * 80)

    try:
        test_ai = OpenGPT()
        response = test_ai.chat("Explain quantum physics simply.")
        print(response)
    except Exception as e:
        print(f"Error: {e}")
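The __main__ block above only exercises the default path; a sketch of the optimizer path that OpenGPT.ask also supports (the optimizer name "code" is an assumption borrowed from the Gemini.py docstring; ask() raises if the name is not defined on webscout.AIutel.Optimizers):

from webscout.Provider.OpenGPT import OpenGPT  # import path assumed from the file listing

ai = OpenGPT(timeout=60)
# "code" is a hypothetical optimizer name (hinted at in Gemini.py's docstring);
# chat()/ask() raise if it is not a method of Optimizers.
reply = ai.chat("Write a function that reverses a string.", optimizer="code")
print(reply)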