webscout 8.3.6__py3-none-any.whl → 8.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +7 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +3 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/TogetherAI.py +2 -2
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -58
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +6 -6
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +1 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -11
- webscout/Provider/OPENAI/toolbaz.py +14 -11
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTS/__init__.py +18 -10
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -12
- webscout/Provider/TogetherAI.py +86 -87
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -86
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +115 -9
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -12
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/FreeGemini.py
DELETED
@@ -1,250 +0,0 @@

#!/usr/bin/env python3
"""
FreeGemini API client for the free-gemini.vercel.app service.
Supports streaming responses from Gemini 2.0 Flash model.
"""

import json
from curl_cffi.requests import Session
from curl_cffi import CurlError
from typing import Dict, Generator, Any, Union, Optional

from webscout import exceptions
from webscout.AIutel import Optimizers, AwesomePrompts, sanitize_stream
from webscout.conversation import Conversation
from webscout.litagent import LitAgent
from webscout.AIbase import Provider


class FreeGemini(Provider):
    """
    A class to interact with the free-gemini.vercel.app API,
    which provides access to Gemini models.
    """
    AVAILABLE_MODELS = ["gemini-2.0-flash"]

    @staticmethod
    def _gemini_extractor(data: Dict) -> Optional[str]:
        """Extract text content from Gemini API response."""
        try:
            if "candidates" in data and data["candidates"]:
                candidate = data["candidates"][0]
                if "content" in candidate and "parts" in candidate["content"]:
                    parts = candidate["content"]["parts"]
                    if parts and "text" in parts[0]:
                        return parts[0]["text"]
        except (KeyError, IndexError, TypeError):
            pass
        return None

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 4000,
        temperature: float = 0.5,
        top_p: float = 1.0,
        timeout: int = 120,  # Default timeout for this specific API
        proxies: dict = {},  # Standard proxy support
        filepath: str = None,
        update_file: bool = True,
        history_offset: int = 10250,
        intro: str = None,
        act: str = None,
        model: str = "gemini-2.0-flash",
        system_prompt: str = "You are a helpful assistant.",  # For consistency, though not directly used in payload
    ):
        """Initialize the FreeGemini client.

        Args:
            is_conversation (bool): Enable conversation history. Defaults to True.
            max_tokens (int): Maximum tokens to sample. Defaults to 4000.
            temperature (float): Sampling temperature. Defaults to 0.5.
            top_p (float): Nucleus sampling parameter. Defaults to 1.0.
            timeout (int): Request timeout in seconds. Defaults to 120.
            proxies (dict): HTTP proxies. Defaults to {}.
            filepath (str, optional): Path to save conversation history. Defaults to None.
            update_file (bool): Update conversation history file. Defaults to True.
            history_offset (int): Limit conversation history. Defaults to 10250.
            intro (str, optional): Introduction for the conversation.
            act (str, optional): Act for AwesomePrompts.
            model (str): Model to use. Defaults to "gemini-2.0-flash".
            system_prompt (str): System prompt (primarily for API consistency).
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.session = Session()
        self.model = model
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens  # Consistent naming
        self.temperature = temperature
        self.top_p = top_p
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt  # Stored for consistency

        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse"

        self.agent = LitAgent()
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "application/json, text/event-stream",
            "User-Agent": self.agent.random(),
            "Origin": "https://free-gemini.vercel.app",
            "Referer": "https://free-gemini.vercel.app/",
        }

        self.session.headers.update(self.headers)
        self.session.proxies = proxies

        self.__available_optimizers = (
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
            if act else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,  # Default to False for consistency
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Sends a prompt to the FreeGemini API and returns the response.

        Args:
            prompt (str): The prompt to send to the model.
            stream (bool): Whether to stream the response. Defaults to False.
            raw (bool): Return raw response instead of parsed text. Defaults to False.
            optimizer (str, optional): Optimizer to use for the prompt.
            conversationally (bool, optional): Whether to apply optimizer conversationally.

        Returns:
            Union[Dict[str, Any], Generator[Dict[str, Any], None, None]]:
                The generated response as a dictionary or generator.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        payload = {
            "contents": [{"role": "user", "parts": [{"text": conversation_prompt}]}],
            "generationConfig": {
                "temperature": self.temperature,
                "maxOutputTokens": self.max_tokens_to_sample,
                "topP": self.top_p
            },
            "safetySettings": [
                # Default safety settings from original class
                {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"}
            ]
        }

        # Internal generator for handling API call and history update
        def _generate_content_and_update_history():
            streaming_text_accumulator = ""
            try:
                response = self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,  # API always streams
                    timeout=self.timeout,
                    impersonate="chrome120"
                )
                response.raise_for_status()

                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    content_extractor=self._gemini_extractor,
                    yield_raw_on_error=False
                )

                for content_chunk_str in processed_stream:  # yields string
                    if content_chunk_str and isinstance(content_chunk_str, str):
                        streaming_text_accumulator += content_chunk_str
                        yield content_chunk_str  # Yield the raw text chunk

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
            finally:
                if streaming_text_accumulator:
                    self.last_response = {"text": streaming_text_accumulator}
                    self.conversation.update_chat_history(prompt, streaming_text_accumulator)

        if stream:
            def stream_wrapper():
                for text_chunk in _generate_content_and_update_history():
                    yield {"text": text_chunk} if not raw else text_chunk
            return stream_wrapper()
        else:  # Not streaming from the perspective of the caller of `ask`
            full_text_response = ""
            for text_chunk in _generate_content_and_update_history():
                full_text_response += text_chunk

            # self.last_response and history are updated by the generator's `finally`
            return {"text": full_text_response} if not raw else full_text_response

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generates a response from the FreeGemini API.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Whether to stream the response.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            Union[str, Generator[str, None, None]]: The API response.
        """
        def for_stream_chat():
            gen = self.ask(
                prompt, stream=True, raw=False,  # Ensure ask yields dicts
                optimizer=optimizer, conversationally=conversationally
            )
            for response_dict in gen:
                yield self.get_message(response_dict)

        def for_non_stream_chat():
            response_data = self.ask(
                prompt, stream=False, raw=False,  # Ensure ask returns dict
                optimizer=optimizer, conversationally=conversationally
            )
            return self.get_message(response_data)

        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")

if __name__ == "__main__":
    # Example usage
    free_gemini = FreeGemini()
    response = free_gemini.chat("how many r in strawberry", stream=False)
    print(response)  # Should print the response from the API
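For context, the streaming path this class advertised could be driven as below. A minimal sketch, assuming webscout==8.3.6 is installed (the class is removed in 8.3.7); per the code above, chat(stream=True) yields plain text chunks extracted from the SSE stream.

# Minimal sketch, assuming webscout==8.3.6 (FreeGemini no longer exists in 8.3.7).
from webscout.Provider.FreeGemini import FreeGemini

bot = FreeGemini(timeout=60)
# chat(stream=True) returns a generator of text chunks
for chunk in bot.chat("Explain SSE in one sentence", stream=True):
    print(chunk, end="", flush=True)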
webscout/Provider/GptOss.py
DELETED
@@ -1,207 +0,0 @@

import requests
from typing import Any, Dict, Generator, Optional, Union, List
from webscout.litagent import LitAgent
from webscout.AIutel import sanitize_stream, Optimizers, Conversation, AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions

class GptOss(Provider):
    """
    Provider for GPT-OSS API.
    """
    AVAILABLE_MODELS = ["gpt-oss-20b", "gpt-oss-120b"]

    def __init__(
        self,
        model: str = "gpt-oss-120b",
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful assistant.",
        reasoning_effort: str = "high"
    ):
        self.api_endpoint = "https://api.gpt-oss.com/chatkit"
        self.model = model if model in self.AVAILABLE_MODELS else self.AVAILABLE_MODELS[0]
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.reasoning_effort = reasoning_effort
        self.agent = LitAgent()
        self.proxies = proxies
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        data = {
            "op": "threads.create",
            "params": {
                "input": {
                    "text": conversation_prompt,
                    "content": [{"type": "input_text", "text": conversation_prompt}],
                    "quoted_text": "",
                    "attachments": []
                }
            }
        }
        headers = self.agent.generate_fingerprint()
        headers.update({
            "accept": "text/event-stream",
            "x-reasoning-effort": self.reasoning_effort,
            "x-selected-model": self.model,
            "x-show-reasoning": "true"
        })
        cookies = {}

        def for_stream():
            full_response_content = ""
            try:
                with requests.post(
                    self.api_endpoint,
                    headers=headers,
                    cookies=cookies,
                    json=data,
                    stream=True,
                    proxies=self.proxies if self.proxies else None,
                    timeout=self.timeout
                ) as response:
                    response.raise_for_status()
                    for chunk in sanitize_stream(
                        response.iter_lines(),
                        intro_value="data: ",
                        to_json=True,
                        skip_markers=["[DONE]"],
                        strip_chars=None,
                        content_extractor=lambda d: d.get('update', {}).get('delta') if d.get('type') == 'thread.item_updated' and d.get('update', {}).get('type') == 'assistant_message.content_part.text_delta' else None,
                        yield_raw_on_error=False,
                        encoding="utf-8",
                        raw=raw
                    ):
                        if chunk:
                            yield chunk
                            full_response_content += chunk
                self.last_response.update(dict(text=full_response_content))
                self.conversation.update_chat_history(
                    prompt, self.get_message(self.last_response)
                )
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        def for_non_stream():
            result = ""
            try:
                with requests.post(
                    self.api_endpoint,
                    headers=headers,
                    cookies=cookies,
                    json=data,
                    stream=False,
                    proxies=self.proxies if self.proxies else None,
                    timeout=self.timeout
                ) as response:
                    response.raise_for_status()
                    # The API is event-stream only, so we simulate non-stream by joining all chunks
                    for chunk in sanitize_stream(
                        response.iter_lines(),
                        intro_value="data: ",
                        to_json=True,
                        skip_markers=["[DONE]"],
                        strip_chars=None,
                        content_extractor=lambda d: d.get('update', {}).get('delta') if d.get('type') == 'thread.item_updated' and d.get('update', {}).get('type') == 'assistant_message.content_part.text_delta' else None,
                        yield_raw_on_error=False,
                        encoding="utf-8",
                        raw=raw
                    ):
                        if chunk:
                            result += chunk
                self.last_response.update(dict(text=result))
                self.conversation.update_chat_history(
                    prompt, self.get_message(self.last_response)
                )
                return self.last_response
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        def for_stream():
            for response in self.ask(
                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
            ):
                yield response

        def for_non_stream():
            result = self.ask(
                prompt,
                False,
                raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
            )
            return self.get_message(result)

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        text = response.get("text", "")
        return text

if __name__ == "__main__":
    from webscout.AIutel import timeIt
    from rich import print
    ai = GptOss(timeout=30)
    @timeIt
    def get_response():
        response = ai.chat("write a poem about AI", stream=True, raw=False)
        for chunk in response:
            print(chunk, end="", flush=True)
    get_response()
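The only events this provider kept were text deltas on assistant messages; thread-lifecycle events, reasoning updates, and the [DONE] marker were all dropped by the content_extractor lambda passed to sanitize_stream. A standalone sketch of that filter follows; the event payload shape is inferred from the lambda itself, not from any GPT-OSS API documentation.

# Sketch of the event filtering performed by the content_extractor above.
# The event dict shape is an assumption inferred from the lambda's key lookups.
def extract_delta(d: dict):
    # Keep only text deltas attached to assistant-message updates.
    if (
        d.get("type") == "thread.item_updated"
        and d.get("update", {}).get("type") == "assistant_message.content_part.text_delta"
    ):
        return d.get("update", {}).get("delta")
    return None

event = {
    "type": "thread.item_updated",
    "update": {"type": "assistant_message.content_part.text_delta", "delta": "Hello"},
}
print(extract_delta(event))                        # -> "Hello"
print(extract_delta({"type": "thread.created"}))   # -> None (event ignored)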