webscout-7.0-py3-none-any.whl → webscout-7.1-py3-none-any.whl
- webscout/LLM.py +1 -1
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/Cloudflare.py +92 -78
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +26 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +65 -10
- webscout/Provider/Netwrck.py +61 -49
- webscout/Provider/PI.py +77 -122
- webscout/Provider/PizzaGPT.py +129 -82
- webscout/Provider/TextPollinationsAI.py +75 -47
- webscout/Provider/__init__.py +1 -3
- webscout/Provider/dgaf.py +68 -39
- webscout/Provider/gaurish.py +106 -66
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +61 -35
- webscout/Provider/meta.py +6 -6
- webscout/Provider/multichat.py +205 -104
- webscout/Provider/typegpt.py +26 -23
- webscout/Provider/yep.py +3 -3
- webscout/version.py +1 -1
- webscout/webscout_search.py +1141 -1140
- webscout/webscout_search_async.py +635 -635
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/METADATA +18 -26
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/RECORD +31 -32
- webscout/Provider/RUBIKSAI.py +0 -272
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.1.dist-info}/top_level.txt +0 -0
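The change common to nearly every provider in this list is a new opt-in `logging: bool = False` constructor flag (wiring up webscout's LitLogger) and a switch from hardcoded user-agent strings to randomized LitAgent ones; RUBIKSAI.py is removed outright. A minimal usage sketch of the new flag, assembled from the `__main__` blocks in the diffs below (it assumes webscout 7.1 is installed; the prompt text is illustrative):

# Usage sketch based on the __main__ blocks shown in this diff.
from webscout.Provider.TextPollinationsAI import TextPollinationsAI

# logging=True enables the new LitLogger output; the default (False) keeps it off.
ai = TextPollinationsAI(model="deepseek-r1", logging=True)

# chat(..., stream=True) yields message chunks as they arrive.
for chunk in ai.chat("write a poem about AI", stream=True):
    print(chunk, end="", flush=True)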
webscout/Provider/TextPollinationsAI.py
CHANGED

@@ -1,41 +1,28 @@
+
 import requests
 import json
 from typing import Any, Dict, Generator
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-
-
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+from webscout import LitAgent as Lit
 class TextPollinationsAI(Provider):
     """
-    A class to interact with the Pollinations AI API.
+    A class to interact with the Pollinations AI API with comprehensive logging.
     """

     AVAILABLE_MODELS = [
-        "openai",
-        "
-        "
-        "
-        "llama",
-        "mistral",
-        "unity",
-        "midijourney",
-        "rtist",
-        "searchgpt",
-        "evil",
-        "deepseek",
-        "claude-hybridspace",
-        "deepseek-r1",
-        "llamalight"
+        "openai", "openai-large", "qwen", "qwen-coder", "llama", "mistral",
+        "unity", "midijourney", "rtist", "searchgpt", "evil", "deepseek",
+        "claude-hybridspace", "deepseek-r1", "llamalight", "llamaguard",
+        "gemini", "gemini-thinking", "hormoz"
     ]

     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int =
+        max_tokens: int = 8096,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -43,13 +30,23 @@ class TextPollinationsAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "openai",
+        model: str = "openai-large",
         system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
     ):
-        """Initializes the TextPollinationsAI API client."""
+        """Initializes the TextPollinationsAI API client with logging capabilities."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

+        self.logger = LitLogger(
+            name="TextPollinationsAI",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing TextPollinationsAI with model: {model}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -59,20 +56,22 @@ class TextPollinationsAI(Provider):
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+
         self.headers = {
             'Accept': '*/*',
             'Accept-Language': 'en-US,en;q=0.9',
-            'User-Agent':
+            'User-Agent': Lit().random(),
             'Content-Type': 'application/json',
         }
+
         self.session.headers.update(self.headers)
         self.session.proxies = proxies

         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -80,11 +79,15 @@ class TextPollinationsAI(Provider):
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset

+        if self.logger:
+            self.logger.info("TextPollinationsAI initialized successfully")
+
     def ask(
         self,
         prompt: str,
@@ -93,26 +96,23 @@ class TextPollinationsAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
-        """Chat with AI
-
-
-            stream
-
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            Union[Dict, Generator[Dict, None, None]]: Response generated
-        """
+        """Chat with AI with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-
-                    f"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

         payload = {
             "messages": [
@@ -124,26 +124,40 @@ class TextPollinationsAI(Provider):
         }

         def for_stream():
+            if self.logger:
+                self.logger.debug("Initiating streaming request to API")
+
             response = self.session.post(
-                self.api_endpoint,
+                self.api_endpoint,
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout
             )
+
             if not response.ok:
+                if self.logger:
+                    self.logger.error(f"API request failed. Status: {response.status_code}, Reason: {response.reason}")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
+
+            if self.logger:
+                self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
             full_response = ""
             for line in response.iter_lines():
                 if line:
                     line = line.decode('utf-8').strip()
-                    # Break if the stream signals completion
                     if line == "data: [DONE]":
+                        if self.logger:
+                            self.logger.debug("Stream completed")
                         break
                     if line.startswith('data: '):
                         try:
                             json_data = json.loads(line[6:])
                             if 'choices' in json_data and len(json_data['choices']) > 0:
                                 choice = json_data['choices'][0]
-                                # Handle delta responses from streaming output
                                 if 'delta' in choice and 'content' in choice['delta']:
                                     content = choice['delta']['content']
                                 else:
@@ -151,13 +165,21 @@ class TextPollinationsAI(Provider):
                                 full_response += content
                                 yield content if raw else dict(text=content)
                         except json.JSONDecodeError as e:
-
+                            if self.logger:
+                                self.logger.error(f"JSON parsing error: {str(e)}")
+                            continue
+
             self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )

+            if self.logger:
+                self.logger.debug("Response processing completed")
+
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
@@ -171,12 +193,16 @@ class TextPollinationsAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generate response as a string"""
+        """Generate response as a string with logging"""
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
             ):
                 yield self.get_message(response)
+
         def for_non_stream():
             return self.get_message(
                 self.ask(
@@ -186,6 +212,7 @@ class TextPollinationsAI(Provider):
                     conversationally=conversationally,
                 )
             )
+
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:
@@ -195,7 +222,8 @@ class TextPollinationsAI(Provider):

 if __name__ == "__main__":
     from rich import print
-
+    # Enable logging for testing
+    ai = TextPollinationsAI(model="deepseek-r1", logging=True)
     response = ai.chat(input(">>> "), stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/Provider/__init__.py
CHANGED

@@ -22,7 +22,6 @@ from .PizzaGPT import *
 from .Llama3 import *
 from .DARKAI import *
 from .koala import *
-from .RUBIKSAI import *
 from .meta import *
 from .DiscordRocks import *
 from .julius import *
@@ -87,10 +86,9 @@ __all__ = [
     'OLLAMA',
     'AndiSearch',
     'PIZZAGPT',
-    '
+    'Sambanova',
     'DARKAI',
     'KOALA',
-    'RUBIKSAI',
     'Meta',
     'AskMyAI',
     'DiscordRocks',
webscout/Provider/dgaf.py
CHANGED

@@ -8,10 +8,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
+from webscout import LitAgent
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme

 class DGAFAI(Provider):
     """
-    A class to interact with the DGAF.ai API.
+    A class to interact with the DGAF.ai API with logging capabilities.
     """

     def __init__(
@@ -25,10 +27,10 @@ class DGAFAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
     ):
-        """Initializes the DGAFAI API client."""
+        """Initializes the DGAFAI API client with logging support."""
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -37,6 +39,7 @@ class DGAFAI(Provider):
         self.timeout = timeout
         self.last_response = {}
         self.system_prompt = system_prompt
+
         self.headers = {
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
@@ -52,17 +55,13 @@ class DGAFAI(Provider):
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
-            "user-agent": (
-                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
-                "AppleWebKit/537.36 (KHTML, like Gecko) "
-                "Chrome/132.0.0.0 Safari/537.36 Edg/132.0.0.0"
-            ),
+            "user-agent": LitAgent().random(),
         }
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
+
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
         Conversation.intro = (
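This hunk replaces a pinned Chrome/Edge user-agent with a randomized one. A small sketch of the resulting session setup; that `LitAgent().random()` returns a user-agent string is taken from the hunk itself:

# Sketch of the randomized user-agent setup after this change.
import requests
from webscout import LitAgent

session = requests.Session()
session.headers.update({
    "accept": "*/*",
    # A randomly chosen User-Agent per client, replacing the hardcoded Chrome/Edge string.
    "user-agent": LitAgent().random(),
})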
@@ -77,6 +76,16 @@ class DGAFAI(Provider):
         )
         self.conversation.history_offset = history_offset

+        # Initialize logger if enabled
+        self.logger = LitLogger(
+            name="DGAFAI",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("DGAFAI initialized successfully")
+
     def ask(
         self,
         prompt: str,
@@ -85,39 +94,49 @@ class DGAFAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any] | Generator[str, None, None]:
-        """Chat with AI
+        """Chat with AI.
+
         Args:
-            prompt (str): Prompt to be
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional):
-            optimizer (str, optional): Prompt optimizer name
+            raw (bool, optional): Return raw streaming response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
         Returns:
-            Union[Dict, Generator[Dict, None, None]]:
+            Union[Dict, Generator[Dict, None, None]]: Generated response.
         """
+        if self.logger:
+            self.logger.debug(f"Processing ask call with prompt: {prompt[:50]}...")
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
+                    f"Optimizer is not one of {list(self.__available_optimizers)}"
                 )

         payload = {
             "messages": [
-
+                {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt}
             ]
         }

         def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to DGAF.ai API...")
             try:
                 with self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
-                    response.raise_for_status()
-
+                    response.raise_for_status()  # Check for HTTP errors
+                    if self.logger:
+                        self.logger.debug(response.text)
                     streaming_text = ""
                     for line in response.iter_lines(decode_unicode=True):
                         if line:
@@ -126,14 +145,18 @@ class DGAFAI(Provider):
                                 content = match.group(1)
                                 if content:
                                     streaming_text += content
+                                    # if self.logger:
+                                    #     self.logger.debug(f"Received content: {content[:30]}...")
                                     yield content if raw else dict(text=content)
-
                 self.last_response.update(dict(text=streaming_text))
                 self.conversation.update_chat_history(
                     prompt, self.get_message(self.last_response)
                 )
-
+                if self.logger:
+                    self.logger.info("Streaming response completed successfully")
             except requests.exceptions.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request error: {e}")
                 raise exceptions.ProviderConnectionError(f"Request failed: {e}")

         def for_non_stream():
@@ -151,36 +174,42 @@ class DGAFAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generate response
+        """Generate chat response as a string.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversational mode when using optimizer. Defaults to False.
+        Returns:
+            str or Generator[str, None, None]: Generated response.
+        """
+        if self.logger:
+            self.logger.debug(f"Chat method invoked with prompt: {prompt[:50]}...")
         def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                 yield self.get_message(response)
         def for_non_stream():
             return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
             )
         return for_stream() if stream else for_non_stream()

     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
+        """Retrieves message only from response.
+
+        Args:
+            response (dict): Response from the ask method.
+        Returns:
+            str: Extracted message.
+        """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')

-    # @staticmethod
-    # def clean_content(text: str) -> str:
-    #     cleaned_text = re.sub(r'\[REF\]\(https?://[^\s]*\)', '', text)
-    #     return cleaned_text

 if __name__ == "__main__":
     from rich import print
-    ai = DGAFAI()
+    ai = DGAFAI(logging=False)
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)