webscout 7.0-py3-none-any.whl → 7.2-py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +91 -78
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +30 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +191 -137
- webscout/Provider/Netwrck.py +62 -50
- webscout/Provider/PI.py +79 -124
- webscout/Provider/PizzaGPT.py +129 -83
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +74 -47
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -136
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +67 -39
- webscout/Provider/gaurish.py +105 -66
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +60 -35
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -230
- webscout/Provider/typegpt.py +359 -356
- webscout/Provider/yep.py +5 -5
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1142 -1140
- webscout/webscout_search_async.py +635 -635
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout/Provider/RUBIKSAI.py +0 -272
- webscout-7.0.dist-info/RECORD +0 -199
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webscout/Provider/typegpt.py
CHANGED
@@ -1,357 +1,360 @@
(Removed: the 357-line 7.0 version of this file; its content is not recoverable from this rendering. Added: the 360-line 7.2 version, shown below.)

import requests
import json
from typing import *
import requests.exceptions

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

class TypeGPT(Provider):
    """
    A class to interact with the TypeGPT.net API. Improved to match webscout standards.
    """
    url = "https://chat.typegpt.net"
    working = True
    supports_message_history = True

    models = [
        # OpenAI Models
        "gpt-3.5-turbo",
        "chatgpt-4o-latest",
        "gpt-3.5-turbo-202201",
        "gpt-4o",
        "gpt-4o-2024-05-13",
        "o1-preview",

        # Claude Models
        "claude",
        "claude-3-5-sonnet",
        "claude-sonnet-3.5",
        "claude-3-5-sonnet-20240620",

        # Meta/LLaMA Models
        "@cf/meta/llama-2-7b-chat-fp16",
        "@cf/meta/llama-2-7b-chat-int8",
        "@cf/meta/llama-3-8b-instruct",
        "@cf/meta/llama-3.1-8b-instruct",
        "@cf/meta-llama/llama-2-7b-chat-hf-lora",
        "llama-3.1-405b",
        "llama-3.1-70b",
        "llama-3.1-8b",
        "meta-llama/Llama-2-7b-chat-hf",
        "meta-llama/Llama-3.1-70B-Instruct",
        "meta-llama/Llama-3.1-8B-Instruct",
        "meta-llama/Llama-3.2-11B-Vision-Instruct",
        "meta-llama/Llama-3.2-1B-Instruct",
        "meta-llama/Llama-3.2-3B-Instruct",
        "meta-llama/Llama-3.2-90B-Vision-Instruct",
        "meta-llama/Llama-Guard-3-8B",
        "meta-llama/Meta-Llama-3-70B-Instruct",
        "meta-llama/Meta-Llama-3-8B-Instruct",
        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",

        # Mistral Models
        "mistral",
        "mistral-large",
        "@cf/mistral/mistral-7b-instruct-v0.1",
        "@cf/mistral/mistral-7b-instruct-v0.2-lora",
        "@hf/mistralai/mistral-7b-instruct-v0.2",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "mistralai/Mistral-7B-Instruct-v0.3",
        "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",

        # Qwen Models
        "@cf/qwen/qwen1.5-0.5b-chat",
        "@cf/qwen/qwen1.5-1.8b-chat",
        "@cf/qwen/qwen1.5-7b-chat-awq",
        "@cf/qwen/qwen1.5-14b-chat-awq",
        "Qwen/Qwen2.5-3B-Instruct",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",

        # Google/Gemini Models
        "@cf/google/gemma-2b-it-lora",
        "@cf/google/gemma-7b-it-lora",
        "@hf/google/gemma-7b-it",
        "google/gemma-1.1-2b-it",
        "google/gemma-1.1-7b-it",
        "gemini-pro",
        "gemini-1.5-pro",
        "gemini-1.5-pro-latest",
        "gemini-1.5-flash",

        # Cohere Models
        "c4ai-aya-23-35b",
        "c4ai-aya-23-8b",
        "command",
        "command-light",
        "command-light-nightly",
        "command-nightly",
        "command-r",
        "command-r-08-2024",
        "command-r-plus",
        "command-r-plus-08-2024",
        "rerank-english-v2.0",
        "rerank-english-v3.0",
        "rerank-multilingual-v2.0",
        "rerank-multilingual-v3.0",

        # Microsoft Models
        "@cf/microsoft/phi-2",
        "microsoft/DialoGPT-medium",
        "microsoft/Phi-3-medium-4k-instruct",
        "microsoft/Phi-3-mini-4k-instruct",
        "microsoft/Phi-3.5-mini-instruct",
        "microsoft/WizardLM-2-8x22B",

        # Yi Models
        "01-ai/Yi-1.5-34B-Chat",
        "01-ai/Yi-34B-Chat",

        # Specialized Models and Tools
        "@cf/deepseek-ai/deepseek-math-7b-base",
        "@cf/deepseek-ai/deepseek-math-7b-instruct",
        "@cf/defog/sqlcoder-7b-2",
        "@cf/openchat/openchat-3.5-0106",
        "@cf/thebloke/discolm-german-7b-v1-awq",
        "@cf/tiiuae/falcon-7b-instruct",
        "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
        "@hf/nexusflow/starling-lm-7b-beta",
        "@hf/nousresearch/hermes-2-pro-mistral-7b",
        "@hf/thebloke/deepseek-coder-6.7b-base-awq",
        "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
        "@hf/thebloke/llama-2-13b-chat-awq",
        "@hf/thebloke/llamaguard-7b-awq",
        "@hf/thebloke/neural-chat-7b-v3-1-awq",
        "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
        "@hf/thebloke/zephyr-7b-beta-awq",
        "AndroidDeveloper",
        "AngularJSAgent",
        "AzureAgent",
        "BitbucketAgent",
        "DigitalOceanAgent",
        "DockerAgent",
        "ElectronAgent",
        "ErlangAgent",
        "FastAPIAgent",
        "FirebaseAgent",
        "FlaskAgent",
        "FlutterAgent",
        "GitAgent",
        "GitlabAgent",
        "GoAgent",
        "GodotAgent",
        "GoogleCloudAgent",
        "HTMLAgent",
        "HerokuAgent",
        "ImageGeneration",
        "JavaAgent",
        "JavaScriptAgent",
        "MongoDBAgent",
        "Next.jsAgent",
        "PyTorchAgent",
        "PythonAgent",
        "ReactAgent",
        "RepoMap",
        "SwiftDeveloper",
        "XcodeAgent",
        "YoutubeAgent",
        "blackboxai",
        "blackboxai-pro",
        "builderAgent",
        "dify",
        "flux",
        "openchat/openchat-3.6-8b",
        "rtist",
        "searchgpt",
        "sur",
        "sur-mistral",
        "unity"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 4000,  # Set a reasonable default
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "gpt-4o",
        system_prompt: str = "You are a helpful assistant.",
        temperature: float = 0.5,
        presence_penalty: int = 0,
        frequency_penalty: int = 0,
        top_p: float = 1,
    ):
        """Initializes the TypeGPT API client."""
        if model not in self.models:
            raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.models)}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.top_p = top_p
        self.headers = {
            "authority": "chat.typegpt.net",
            "accept": "application/json, text/event-stream",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://chat.typegpt.net",
            "referer": "https://chat.typegpt.net/",
            "user-agent": LitAgent().random()
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator:
        """Sends a prompt to the TypeGPT.net API and returns the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "stream": stream,
            "model": self.model,
            "temperature": self.temperature,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
            "top_p": self.top_p,
            "max_tokens": self.max_tokens_to_sample,
        }

        def for_stream():
            try:
                response = self.session.post(
                    self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
                )
            except requests.exceptions.ConnectionError as ce:
                raise exceptions.FailedToGenerateResponseError(
                    f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
                ) from ce

            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )
            message_load = ""
            for line in response.iter_lines():
                if line:
                    line = line.decode("utf-8")
                    if line.startswith("data: "):
                        line = line[6:]  # Remove "data: " prefix
                        # Skip [DONE] message
                        if line.strip() == "[DONE]":
                            break
                        try:
                            data = json.loads(line)
                            # Extract and yield only new content
                            if 'choices' in data and len(data['choices']) > 0:
                                delta = data['choices'][0].get('delta', {})
                                if 'content' in delta:
                                    new_content = delta['content']
                                    message_load += new_content
                                    # Yield only the new content
                                    yield dict(text=new_content) if not raw else new_content
                                    self.last_response = dict(text=message_load)
                        except json.JSONDecodeError:
                            continue
            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))

        def for_non_stream():
            try:
                response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout)
            except requests.exceptions.ConnectionError as ce:
                raise exceptions.FailedToGenerateResponseError(
                    f"Network connection failed. Check your firewall or antivirus settings. Original error: {ce}"
                ) from ce

            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Request failed - {response.status_code}: {response.text}"
                )
            self.last_response = response.json()
            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """Generate response string or stream."""
        if stream:
            gen = self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            )
            for chunk in gen:
                yield self.get_message(chunk)  # Extract text from streamed chunks
        else:
            return self.get_message(self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally))

    def get_message(self, response: Dict[str, Any]) -> str:
        """Retrieves message from response."""
        if isinstance(response, str):  # Handle raw responses
            return response
        elif isinstance(response, dict):
            assert isinstance(response, dict), "Response should be of dict data-type only"
            return response.get("text", "")  # Extract text from dictionary response
        else:
            raise TypeError("Invalid response type. Expected str or dict.")

if __name__ == "__main__":
    ai = TypeGPT(model="chatgpt-4o-latest")
    response = ai.chat("hi", stream=True)
    for chunks in response:
        print(chunks, end="", flush=True)