webscout-7.8-py3-none-any.whl → webscout-8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Bard.py +5 -25
- webscout/DWEBS.py +476 -476
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -103
- webscout/Extra/__init__.py +2 -0
- webscout/Extra/autocoder/__init__.py +1 -1
- webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
- webscout/Extra/tempmail/__init__.py +26 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +156 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Provider/AISEARCH/__init__.py +5 -1
- webscout/Provider/AISEARCH/hika_search.py +194 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +320 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/AllenAI.py +255 -122
- webscout/Provider/DeepSeek.py +1 -2
- webscout/Provider/Deepinfra.py +296 -286
- webscout/Provider/ElectronHub.py +709 -716
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +28 -6
- webscout/Provider/Gemini.py +167 -165
- webscout/Provider/GithubChat.py +2 -1
- webscout/Provider/Groq.py +38 -24
- webscout/Provider/LambdaChat.py +2 -1
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OpenGPT.py +199 -0
- webscout/Provider/PI.py +39 -24
- webscout/Provider/TextPollinationsAI.py +232 -230
- webscout/Provider/Youchat.py +326 -296
- webscout/Provider/__init__.py +10 -4
- webscout/Provider/ai4chat.py +58 -56
- webscout/Provider/akashgpt.py +34 -22
- webscout/Provider/copilot.py +427 -427
- webscout/Provider/freeaichat.py +9 -2
- webscout/Provider/labyrinth.py +121 -20
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/scira_chat.py +271 -0
- webscout/Provider/typefully.py +280 -0
- webscout/Provider/uncovr.py +312 -299
- webscout/Provider/yep.py +64 -12
- webscout/__init__.py +38 -36
- webscout/cli.py +293 -293
- webscout/conversation.py +350 -17
- webscout/litprinter/__init__.py +59 -667
- webscout/optimizers.py +419 -419
- webscout/update_checker.py +14 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1346 -1282
- webscout/webscout_search_async.py +877 -813
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/METADATA +44 -39
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/RECORD +63 -46
- webscout/Provider/DARKAI.py +0 -225
- webscout/Provider/EDITEE.py +0 -192
- webscout/litprinter/colors.py +0 -54
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/LICENSE.md +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/WHEEL +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/entry_points.txt +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/top_level.txt +0 -0
webscout/Provider/PI.py
CHANGED
@@ -50,7 +50,7 @@ class PiAI(Provider):
     ):
         """
         Initializes PiAI with voice support.
-
+
         Args:
             voice (bool): Enable/disable voice output
             voice_name (str): Name of the voice to use (if None, uses default)
@@ -66,7 +66,9 @@ class PiAI(Provider):
 
         # Initialize other attributes
         self.scraper = cloudscraper.create_scraper()
-        self.url = 'https://pi.ai/api/chat'
+        self.primary_url = 'https://pi.ai/api/chat'
+        self.fallback_url = 'https://pi.ai/api/v2/chat'
+        self.url = self.primary_url
         self.headers = {
             'Accept': 'text/event-stream',
             'Accept-Encoding': 'gzip, deflate, br, zstd',
@@ -115,7 +117,7 @@ class PiAI(Provider):
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
-
+
         if self.is_conversation:
             self.start_conversation()
 
@@ -130,13 +132,13 @@ class PiAI(Provider):
             json={},
             timeout=self.timeout
         )
-
+
         if not response.ok:
             raise Exception(f"Failed to start conversation: {response.status_code}")
-
+
         data = response.json()
         self.conversation_id = data['conversations'][0]['sid']
-
+
         return self.conversation_id
 
     def ask(
@@ -152,7 +154,7 @@ class PiAI(Provider):
     ) -> dict:
         """
         Interact with Pi.ai by sending a prompt and receiving a response.
-
+
         Args:
             prompt (str): The prompt to send
             stream (bool): Whether to stream the response
@@ -186,15 +188,28 @@ class PiAI(Provider):
         }
 
         def process_stream():
+            # Try primary URL first
             response = self.scraper.post(
-                self.url,
-                headers=self.headers,
-                cookies=self.cookies,
-                json=data,
-                stream=True,
+                self.url,
+                headers=self.headers,
+                cookies=self.cookies,
+                json=data,
+                stream=True,
                 timeout=self.timeout
             )
-
+
+            # If primary URL fails, try fallback URL
+            if not response.ok and self.url == self.primary_url:
+                self.url = self.fallback_url
+                response = self.scraper.post(
+                    self.url,
+                    headers=self.headers,
+                    cookies=self.cookies,
+                    json=data,
+                    stream=True,
+                    timeout=self.timeout
+                )
+
             if not response.ok:
                 raise Exception(f"API request failed: {response.status_code}")
 
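The substance of this hunk is a primary/fallback retry: process_stream() now posts to https://pi.ai/api/chat first and, on a non-OK status, switches self.url to https://pi.ai/api/v2/chat and retries once. Isolated from the class, the pattern looks roughly like this (a minimal sketch using plain requests; the name post_with_fallback is invented for illustration and is not webscout code):

import requests

PRIMARY_URL = 'https://pi.ai/api/chat'      # endpoints taken from the diff above
FALLBACK_URL = 'https://pi.ai/api/v2/chat'

def post_with_fallback(session: requests.Session, payload: dict, timeout: int = 30) -> requests.Response:
    # Try the primary endpoint first.
    response = session.post(PRIMARY_URL, json=payload, stream=True, timeout=timeout)
    if not response.ok:
        # One retry against the fallback, mirroring the diff's logic.
        response = session.post(FALLBACK_URL, json=payload, stream=True, timeout=timeout)
    if not response.ok:
        raise Exception(f"API request failed: {response.status_code}")
    return response

Note that the actual change mutates self.url, so after the first failover every subsequent request goes straight to the fallback endpoint.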
@@ -204,7 +219,7 @@ class PiAI(Provider):
 
             if voice and voice_name and second_sid:
                 threading.Thread(
-                    target=self.download_audio_threaded,
+                    target=self.download_audio_threaded,
                     args=(voice_name, second_sid, output_file)
                 ).start()
 
@@ -245,7 +260,7 @@ class PiAI(Provider):
     ) -> str:
         """
         Generates a response based on the provided prompt.
-
+
         Args:
             prompt (str): The prompt to send
             stream (bool): Whether to stream the response
@@ -300,24 +315,24 @@ class PiAI(Provider):
             'voice': f'voice{self.AVAILABLE_VOICES[voice_name]}',
             'messageSid': second_sid,
         }
-
+
         try:
             audio_response = self.scraper.get(
-                'https://pi.ai/api/chat/voice',
-                params=params,
-                cookies=self.cookies,
-                headers=self.headers,
+                'https://pi.ai/api/chat/voice',
+                params=params,
+                cookies=self.cookies,
+                headers=self.headers,
                 timeout=self.timeout
             )
-
+
             if not audio_response.ok:
                 return
-
+
             audio_response.raise_for_status()
-
+
             with open(output_file, "wb") as file:
                 file.write(audio_response.content)
-
+
         except requests.exceptions.RequestException:
             pass
 
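The remaining PI.py hunks appear to be whitespace-only reindentation of the voice-download path: audio is fetched on a background thread via threading.Thread(target=self.download_audio_threaded, ...).start(), and network failures are deliberately swallowed so voice output stays best-effort. Stripped to its skeleton (a sketch; fetch_audio is a stand-in name, not webscout code):

import threading
import requests

def fetch_audio(url: str, params: dict, output_file: str, timeout: int = 30) -> None:
    # Best-effort download: any HTTP or network error is silently ignored,
    # matching the try/except requests.exceptions.RequestException above.
    try:
        response = requests.get(url, params=params, timeout=timeout)
        if not response.ok:
            return
        with open(output_file, "wb") as f:
            f.write(response.content)
    except requests.exceptions.RequestException:
        pass

# Fire-and-forget, as in the diff's threading.Thread(...).start() call.
threading.Thread(
    target=fetch_audio,
    args=("https://pi.ai/api/chat/voice", {}, "voice.mp3"),
).start()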
webscout/Provider/TextPollinationsAI.py
CHANGED
@@ -1,230 +1,232 @@
(The 230 removed lines are truncated in this extracted diff view; the legible fragments match the opening of the rewritten 232-line file shown below.)
+import requests
+import json
+from typing import Union, Any, Dict, Generator
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent as Lit
+
+class TextPollinationsAI(Provider):
+    """
+    A class to interact with the Pollinations AI API.
+    """
+
+    AVAILABLE_MODELS = [
+        "openai",                  # OpenAI GPT-4o-mini
+        "openai-large",            # OpenAI GPT-4o
+        "openai-reasoning",        # OpenAI o3-mini
+        "qwen-coder",              # Qwen 2.5 Coder 32B
+        "llama",                   # Llama 3.3 70B
+        "mistral",                 # Mistral Small 3
+        "unity",                   # Unity Mistral Large
+        "midijourney",             # Midijourney
+        "rtist",                   # Rtist
+        "searchgpt",               # SearchGPT
+        "evil",                    # Evil
+        "deepseek-reasoning",      # DeepSeek-R1 Distill Qwen 32B
+        "deepseek-reasoning-large",# DeepSeek R1 - Llama 70B
+        # "llamalight",            # Llama 3.1 8B Instruct # >>> NOT WORKING
+        "phi",                     # Phi-4 Instruct
+        "llama-vision",            # Llama 3.2 11B Vision
+        "pixtral",                 # Pixtral 12B
+        "gemini",                  # Gemini 2.0 Flash
+        "gemini-reasoning",        # Gemini 2.0 Flash Thinking
+        "hormoz",                  # Hormoz 8b
+        "hypnosis-tracy",          # Hypnosis Tracy 7B
+        "mistral-roblox",          # Mistral Roblox on Scaleway
+        "roblox-rp",               # Roblox Roleplay Assistant
+        "deepseek",                # DeepSeek-V3
+        "qwen-reasoning",          # Qwen QWQ 32B - Advanced Reasoning
+        "sur",                     # Sur AI Assistant (Mistral)
+        "llama-scaleway",          # Llama (Scaleway)
+        "openai-audio",            # OpenAI GPT-4o-audio-preview
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 8096,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "openai-large",
+        system_prompt: str = "You are a helpful AI assistant.",
+    ):
+        """Initializes the TextPollinationsAI API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://text.pollinations.ai/openai"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'User-Agent': Lit().random(),
+            'Content-Type': 'application/json',
+        }
+
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+        """Chat with AI"""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "model": self.model,
+            "stream": stream,
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint,
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout
+            )
+
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    line = line.decode('utf-8').strip()
+                    if line == "data: [DONE]":
+                        break
+                    if line.startswith('data: '):
+                        try:
+                            json_data = json.loads(line[6:])
+                            if 'choices' in json_data and len(json_data['choices']) > 0:
+                                choice = json_data['choices'][0]
+                                if 'delta' in choice and 'content' in choice['delta']:
+                                    content = choice['delta']['content']
+                                else:
+                                    content = ""
+                                full_response += content
+                                yield content if raw else dict(text=content)
+                        except json.JSONDecodeError:
+                            continue
+
+            self.last_response.update(dict(text=full_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate response as a string"""
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(TextPollinationsAI.AVAILABLE_MODELS)
+
+    for model in TextPollinationsAI.AVAILABLE_MODELS:
+        try:
+            test_ai = TextPollinationsAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")