webscout 8.2.5-py3-none-any.whl → 8.2.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of webscout might be problematic.
- webscout/AIauto.py +112 -22
- webscout/AIutel.py +240 -344
- webscout/Extra/autocoder/autocoder.py +66 -5
- webscout/Provider/AISEARCH/scira_search.py +2 -1
- webscout/Provider/GizAI.py +6 -4
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/OPENAI/scirachat.py +2 -1
- webscout/Provider/TeachAnything.py +8 -5
- webscout/Provider/WiseCat.py +1 -1
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/__init__.py +4 -6
- webscout/Provider/ai4chat.py +5 -3
- webscout/Provider/akashgpt.py +59 -66
- webscout/Provider/freeaichat.py +57 -43
- webscout/Provider/scira_chat.py +2 -1
- webscout/Provider/scnet.py +4 -1
- webscout/__init__.py +0 -1
- webscout/conversation.py +305 -446
- webscout/swiftcli/__init__.py +80 -794
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/METADATA +1 -1
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/RECORD +41 -28
- webscout/LLM.py +0 -442
- webscout/Provider/PizzaGPT.py +0 -228
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/tutorai.py +0 -270
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/WHEEL +0 -0
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -0
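The listing above can be reproduced offline. Below is a minimal sketch (not part of the release) that downloads both wheels with pip and diffs one of the deleted modules; the helper names are illustrative, but `pip download`, `zipfile`, and `difflib` behave as shown:

```python
# Sketch: reproduce the wheel-to-wheel diff locally.
# Assumes `pip` is on PATH and both releases are still available on PyPI.
import difflib
import subprocess
import zipfile
from pathlib import Path

def fetch_wheel(version: str, dest: Path) -> Path:
    """Download the webscout wheel for `version` into `dest` and return its path."""
    dest.mkdir(parents=True, exist_ok=True)
    subprocess.run(
        ["pip", "download", f"webscout=={version}",
         "--no-deps", "--only-binary", ":all:", "-d", str(dest)],
        check=True,
    )
    return next(dest.glob("webscout-*.whl"))

def read_member(wheel: Path, name: str) -> list:
    """Return the lines of `name` inside `wheel`, or [] if absent (added/deleted file)."""
    with zipfile.ZipFile(wheel) as zf:
        if name not in zf.namelist():
            return []
        return zf.read(name).decode("utf-8", errors="replace").splitlines(keepends=True)

old = fetch_wheel("8.2.5", Path("old"))
new = fetch_wheel("8.2.6", Path("new"))
member = "webscout/Provider/promptrefine.py"  # present in 8.2.5 only, per the list above
for line in difflib.unified_diff(
    read_member(old, member), read_member(new, member),
    fromfile=f"8.2.5/{member}", tofile=f"8.2.6/{member}",
):
    print(line, end="")
```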
webscout/Provider/promptrefine.py
DELETED

@@ -1,193 +0,0 @@
-import requests
-import uuid
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout.litagent import LitAgent as UserAgent
-
-class PromptRefine(Provider):
-    """
-    A class to interact with the PromptRefine API.
-    """
-    AVAILABLE_MODELS = ["openai/gpt-4", "openai/gpt-4o", "openai/gpt-4-1106-preview"]
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-        model: str = "openai/gpt-4o",  # Default model
-    ):
-        """
-        Initializes the PromptRefine API with given parameters.
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for PromptRefine. Defaults to "You are a helpful AI assistant.".
-            model (str, optional): Model to use for generation. Defaults to "openai/gpt-4o".
-        """
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = 'https://www.promptrefine.com/api/completion'
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.system_prompt = system_prompt
-        self.model = model
-        self.headers = {
-            'origin': 'https://www.promptrefine.com',
-            'referer': 'https://www.promptrefine.com/prompt/new',
-            'user-agent': UserAgent().random()
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-        if self.model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {self.model}. Available models: {', '.join(self.AVAILABLE_MODELS)}")
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with PromptRefine
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        payload = {
-            "messages": [
-                {"role": "system", "content": self.system_prompt},
-                {"role": "user", "content": conversation_prompt}
-            ],
-            "variables": {},
-            "parameters": {},
-            "model": self.model,
-            "userId": str(uuid.uuid4()),
-        }
-
-        def for_stream():
-            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            full_response = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    full_response += line  # No need to decode here
-                    yield full_response if raw else dict(text=line)
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-        return self.get_message(
-            self.ask(
-                prompt,
-                optimizer=optimizer,
-                conversationally=conversationally,
-            )
-        )

-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == '__main__':
-    from rich import print
-    ai = PromptRefine(timeout=5000)
-    response = ai.chat("write a poem about AI", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
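For readers migrating off the removed provider, a minimal usage sketch based on the `ask`/`chat`/`get_message` interface shown above; it assumes webscout==8.2.5, the last release that shipped this module:

```python
# Hypothetical usage of the deleted PromptRefine provider (webscout==8.2.5 only).
from webscout.Provider.promptrefine import PromptRefine

ai = PromptRefine(model="openai/gpt-4o", timeout=30)
# ask(stream=True) yields {"text": "<chunk>"} dicts; get_message() extracts the text.
for chunk in ai.ask("write a poem about AI", stream=True):
    print(ai.get_message(chunk), end="", flush=True)
# chat() returns the full reply as a single string.
print(ai.chat("write a haiku about diffs"))
```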
webscout/Provider/tutorai.py
DELETED

@@ -1,270 +0,0 @@
-from curl_cffi.requests import Session
-from curl_cffi import CurlError
-import os
-from typing import Union, List, Optional
-from string import punctuation
-from random import choice
-import json
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class TutorAI(Provider):
-    """
-    A class to interact with the TutorAI.me API.
-    """
-    AVAILABLE_MODELS = ["gpt-4o"]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-    ):
-        """
-        Initializes the TutorAI.me API with given parameters.
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 1024.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for TutorAI.
-                Defaults to "You are a helpful AI assistant.".
-        """
-        # Initialize curl_cffi Session
-        self.session = Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://ai-tutor.ai/api/generate-homeworkify-response"
-        self.stream_chunk_size = 1024
-        self.timeout = timeout
-        self.last_response = {}
-        # Remove Cookie header, curl_cffi doesn't use it directly like this
-        self.headers = {
-            "Accept": "*/*",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "DNT": "1",
-            "Origin": "https://tutorai.me",
-            "Priority": "u=1, i",
-            "Referer": "https://tutorai.me/homeworkify?ref=taaft&utm_source=taaft&utm_medium=referral",
-            "Sec-Ch-Ua": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
-            "Sec-Ch-Ua-Mobile": "?0",
-            "Sec-Ch-Ua-Platform": '"Windows"',
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": LitAgent().random()
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        # Update curl_cffi session headers and proxies
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies  # Assign proxies directly
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,  # Note: API doesn't seem to truly stream text chunks
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        attachment_path: Optional[str] = None
-    ) -> dict:
-        """Chat with TutorAI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-            attachment_path (str, optional): Path to attachment file. Defaults to None.
-
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        form_data = {
-            "inputMessage": conversation_prompt,
-            "attachmentsCount": "1" if attachment_path else "0"
-        }
-        files = {}
-        file_handle = None  # To ensure file is closed
-        if attachment_path:
-            if not os.path.isfile(attachment_path):
-                raise FileNotFoundError(f"Error: The file '{attachment_path}' does not exist.")
-            try:
-                # Open file handle to pass to curl_cffi
-                file_handle = open(attachment_path, 'rb')
-                files["attachment0"] = (os.path.basename(attachment_path), file_handle, 'image/png')  # Adjust mime type if needed
-            except Exception as e:
-                if file_handle: file_handle.close()  # Close if opened
-                raise exceptions.FailedToGenerateResponseError(f"Error opening the file: {e}")
-
-        # The API doesn't seem to support streaming text chunks based on the original code.
-        # Both stream=True and stream=False resulted in processing the full response.
-        # We will implement the non-stream logic for both cases.
-        try:
-            # Use curl_cffi session post with impersonate
-            # Pass data and files for multipart/form-data
-            response = self.session.post(
-                self.api_endpoint,
-                # headers are set on the session
-                data=form_data,
-                files=files,
-                timeout=self.timeout,
-                impersonate="chrome120",  # Try a different impersonation profile
-            )
-            response.raise_for_status()  # Check for HTTP errors
-
-            try:
-                response_data = response.json()
-            except json.JSONDecodeError as json_err:
-                raise exceptions.FailedToGenerateResponseError(f"Error decoding JSON: {json_err} - Response text: {response.text}")
-
-            homeworkify_html = response_data.get("homeworkifyResponse", "")
-            if not homeworkify_html:
-                # Return empty if no content, consistent with original non-stream logic
-                clean_text = ""
-            else:
-                # Assuming the response is HTML that needs cleaning/parsing
-                # For now, just return the raw HTML content as text
-                clean_text = homeworkify_html
-
-            self.last_response = {"text": clean_text}
-            self.conversation.update_chat_history(prompt, clean_text)
-            return self.last_response  # Return the full response content
-
-        except CurlError as e:  # Catch CurlError
-            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-        except Exception as e:  # Catch other potential exceptions
-            # Include response text if available in HTTP errors
-            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-            raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}")
-        finally:
-            if file_handle:  # Ensure file is closed
-                file_handle.close()
-
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,  # Keep stream param for interface consistency, though API might not support it
-        optimizer: str = None,
-        conversationally: bool = False,
-        attachment_path: Optional[str] = None,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-            attachment_path (str, optional): Path to attachment file. Defaults to None.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally, attachment_path=attachment_path,
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            for response in self.ask(
-                prompt, False, optimizer=optimizer, conversationally=conversationally, attachment_path=attachment_path,
-            ):
-                yield self.get_message(response)
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == "__main__":
-    from rich import print
-
-    try:  # Add try-except block for testing
-        ai = TutorAI(timeout=120)  # Increased timeout for potential uploads
-        # Test without attachment first
-        print("[bold blue]Testing Text Prompt:[/bold blue]")
-        response_gen = ai.chat("hello buddy", stream=True)  # Test stream interface
-        full_response = ""
-        for chunk in response_gen:
-            print(chunk, end="", flush=True)
-            full_response += chunk
-        print("\n[bold green]Text Test Complete.[/bold green]\n")
-
-        # Optional: Test with attachment (replace with a valid image path)
-        # attachment_file = "path/to/your/image.png"
-        # if os.path.exists(attachment_file):
-        #     print(f"[bold blue]Testing with Attachment ({attachment_file}):[/bold blue]")
-        #     response_gen_attach = ai.chat("Describe this image", stream=True, attachment_path=attachment_file)
-        #     full_response_attach = ""
-        #     for chunk in response_gen_attach:
-        #         print(chunk, end="", flush=True)
-        #         full_response_attach += chunk
-        #     print("\n[bold green]Attachment Test Complete.[/bold green]")
-        # else:
-        #     print(f"[bold yellow]Skipping attachment test: File not found at {attachment_file}[/bold yellow]")
-
-    except exceptions.FailedToGenerateResponseError as e:
-        print(f"\n[bold red]API Error:[/bold red] {e}")
-    except FileNotFoundError as e:
-        print(f"\n[bold red]File Error:[/bold red] {e}")
-    except Exception as e:
-        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
File without changes: {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/WHEEL
File without changes: {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/entry_points.txt
File without changes: {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md
File without changes: {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/top_level.txt
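The four unchanged files can be cross-checked offline: every wheel ships a RECORD manifest listing a sha256 digest for each archived file. A minimal sketch, assuming both wheel files are in the current directory:

```python
# Sketch: verify the "File without changes" entries via each wheel's RECORD manifest.
import csv
import zipfile

def record_hashes(wheel_path: str, version: str) -> dict:
    """Map each archived file to its digest from {dist-info}/RECORD,
    with the versioned dist-info directory name normalized away."""
    dist_info = f"webscout-{version}.dist-info"
    with zipfile.ZipFile(wheel_path) as zf:
        rows = csv.reader(zf.read(f"{dist_info}/RECORD").decode("utf-8").splitlines())
        return {
            path.replace(dist_info, "dist-info"): digest
            for path, digest, _size in rows
            if digest  # RECORD lists itself with an empty digest
        }

old = record_hashes("webscout-8.2.5-py3-none-any.whl", "8.2.5")
new = record_hashes("webscout-8.2.6-py3-none-any.whl", "8.2.6")
for name in ("dist-info/WHEEL", "dist-info/entry_points.txt",
             "dist-info/licenses/LICENSE.md", "dist-info/top_level.txt"):
    status = "unchanged" if old.get(name) == new.get(name) else "CHANGED"
    print(f"{name}: {status}")
```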