webscout 8.3.6__py3-none-any.whl → 2025.10.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -58
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +33 -11
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -314
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -315
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -342
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -207
- webscout/Provider/Deepinfra.py +370 -369
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -387
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -162
- webscout/Provider/GeminiProxy.py +167 -166
- webscout/Provider/GithubChat.py +371 -370
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -379
- webscout/Provider/Jadve.py +282 -297
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +206 -384
- webscout/Provider/LambdaChat.py +423 -425
- webscout/Provider/Nemotron.py +244 -245
- webscout/Provider/Netwrck.py +248 -247
- webscout/Provider/OLLAMA.py +395 -394
- webscout/Provider/OPENAI/Cloudflare.py +394 -395
- webscout/Provider/OPENAI/FalconH1.py +452 -457
- webscout/Provider/OPENAI/FreeGemini.py +297 -299
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
- webscout/Provider/OPENAI/NEMOTRON.py +241 -244
- webscout/Provider/OPENAI/PI.py +428 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +466 -467
- webscout/Provider/OPENAI/__init__.py +33 -59
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +249 -269
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +593 -588
- webscout/Provider/OPENAI/chatgptclone.py +521 -524
- webscout/Provider/OPENAI/chatsandbox.py +202 -177
- webscout/Provider/OPENAI/deepinfra.py +319 -315
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -311
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -354
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -529
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -348
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +244 -496
- webscout/Provider/PI.py +405 -404
- webscout/Provider/Perplexitylabs.py +430 -431
- webscout/Provider/QwenLM.py +272 -254
- webscout/Provider/STT/__init__.py +32 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -12
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -10
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -236
- webscout/Provider/TextPollinationsAI.py +311 -318
- webscout/Provider/TogetherAI.py +356 -357
- webscout/Provider/TwoAI.py +313 -569
- webscout/Provider/TypliAI.py +312 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
- webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +251 -250
- webscout/Provider/VercelAI.py +256 -255
- webscout/Provider/WiseCat.py +232 -231
- webscout/Provider/WrDoChat.py +367 -366
- webscout/Provider/__init__.py +33 -86
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -334
- webscout/Provider/cerebras.py +446 -340
- webscout/Provider/chatglm.py +394 -214
- webscout/Provider/cleeai.py +211 -212
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -265
- webscout/Provider/julius.py +223 -222
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +384 -457
- webscout/Provider/searchchat.py +292 -291
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -364
- webscout/Provider/turboseek.py +274 -265
- webscout/Provider/typefully.py +208 -207
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +372 -371
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/Qwen3.py +0 -304
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- webscout/tempid.py +0 -128
- webscout-8.3.6.dist-info/RECORD +0 -327
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/typefully.py (the single +362 −362 entry in the list above)

@@ -1,362 +1,362 @@

The hunk rewrites the file in place: every one of its 362 lines is removed and re-added, and the removed and added sides are line-for-line identical. The file is shown once:

```python
import time
import uuid
import json  # imported by the published module, though unused here
import re
from typing import List, Dict, Optional, Union, Generator, Any

# Import base classes and utility structures
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from .utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage,
    format_prompt, get_system_prompt, count_tokens
)

# Import LitAgent for browser fingerprinting
from webscout.litagent import LitAgent

# Import curl_cffi for better request handling
from curl_cffi.requests import Session
from curl_cffi import CurlError

# ANSI escape codes for formatting
BOLD = "\033[1m"
RED = "\033[91m"
RESET = "\033[0m"


class Completions(BaseCompletions):
    def __init__(self, client: 'TypefullyAI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create.
        """
        # Extract the system message using the get_system_prompt utility
        system_prompt = get_system_prompt(messages) or self._client.system_prompt

        # Format the conversation using the format_prompt utility:
        # add_special_tokens=True renders it as "User: ... Assistant: ...",
        # do_continue=True makes it end with "Assistant:" so the model continues.
        conversation_prompt = format_prompt(
            messages,
            add_special_tokens=True,
            do_continue=True,
            include_system=False  # The system prompt is sent separately
        )

        # Prepare the payload for the Typefully API
        payload = {
            "prompt": conversation_prompt,
            "systemPrompt": system_prompt,
            "modelIdentifier": self._client.convert_model_name(model),
            "outputLength": max_tokens if max_tokens is not None else self._client.output_length
        }

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_streaming(request_id, created_time, model, payload, timeout, proxies)
        else:
            return self._create_non_streaming(request_id, created_time, model, payload, timeout, proxies)

    def _create_streaming(
        self,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any],
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Implementation for streaming chat completions."""
        try:
            # Make the streaming request
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None),
                impersonate="chrome120"
            )

            if not response.ok:
                raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")

            streaming_text = ""

            for raw_chunk in response.iter_content(chunk_size=None):
                if not raw_chunk:
                    continue

                chunk_str = raw_chunk.decode('utf-8', errors='replace')
                content = self._client._typefully_extractor(chunk_str)

                if content:
                    streaming_text += content

                    delta = ChoiceDelta(content=content, role="assistant")
                    choice = Choice(index=0, delta=delta, finish_reason=None)
                    chunk = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model
                    )
                    yield chunk

            # Final chunk with finish_reason="stop"
            delta = ChoiceDelta(content=None, role=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model
            )
            yield chunk

        except CurlError as e:
            print(f"{RED}Error during Typefully streaming request (CurlError): {e}{RESET}")
            raise IOError(f"Typefully streaming request failed (CurlError): {e}") from e
        except Exception as e:
            print(f"{RED}Error during Typefully streaming request: {e}{RESET}")
            raise IOError(f"Typefully streaming request failed: {e}") from e

    def _create_non_streaming(
        self,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any],
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        """Implementation for non-streaming chat completions."""
        try:
            # The request is still streamed over the wire, then aggregated below
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None),
                impersonate="chrome120"
            )

            if not response.ok:
                raise IOError(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")

            # Collect the full response
            full_text = ""
            for raw_chunk in response.iter_content(chunk_size=None):
                if not raw_chunk:
                    continue

                chunk_str = raw_chunk.decode('utf-8', errors='replace')
                content = self._client._typefully_extractor(chunk_str)

                if content:
                    full_text += content

            # Unescape newlines left over from the stream encoding
            full_text = full_text.replace('\\n', '\n')

            # Estimate token counts
            prompt_tokens = count_tokens(payload.get("prompt", "")) + count_tokens(payload.get("systemPrompt", ""))
            completion_tokens = count_tokens(full_text)
            total_tokens = prompt_tokens + completion_tokens

            message = ChatCompletionMessage(role="assistant", content=full_text)
            choice = Choice(index=0, message=message, finish_reason="stop")
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )

            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

        except CurlError as e:
            print(f"{RED}Error during Typefully non-streaming request (CurlError): {e}{RESET}")
            raise IOError(f"Typefully request failed (CurlError): {e}") from e
        except Exception as e:
            print(f"{RED}Error during Typefully non-streaming request: {e}{RESET}")
            raise IOError(f"Typefully request failed: {e}") from e


class Chat(BaseChat):
    def __init__(self, client: 'TypefullyAI'):
        self.completions = Completions(client)


class TypefullyAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for the Typefully AI API.

    Usage:
        client = TypefullyAI()
        response = client.chat.completions.create(
            model="openai:gpt-4o-mini",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = [
        "openai:gpt-4o-mini",
        "openai:gpt-4o",
        "anthropic:claude-3-5-haiku-20241022",
        "groq:llama-3.3-70b-versatile"
    ]

    def __init__(
        self,
        timeout: int = 30,
        # The published signature stops at `timeout`, yet the docstring documents
        # the three parameters below and Completions.create reads them off the
        # client; they are restored here, with assumed defaults.
        proxies: Optional[Dict[str, str]] = None,
        system_prompt: str = "You are a helpful assistant.",
        output_length: int = 4000,
    ):
        """
        Initialize the TypefullyAI client.

        Args:
            timeout: Request timeout in seconds
            proxies: Optional proxy configuration
            system_prompt: Default system prompt
            output_length: Maximum length of the generated output
        """
        self.timeout = timeout
        self.proxies = proxies
        self.system_prompt = system_prompt
        self.output_length = output_length
        self.api_endpoint = "https://typefully.com/tools/ai/api/completion"

        # Initialize curl_cffi Session
        self.session = Session()

        # Initialize LitAgent for user-agent generation
        agent = LitAgent()
        self.user_agent = agent.random()

        # Set headers
        self.headers = {
            "authority": "typefully.com",
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://typefully.com",
            "referer": "https://typefully.com/tools/ai/chat-gpt-alternative",
            "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "user-agent": self.user_agent
        }

        # Apply the headers to the curl_cffi session
        self.session.headers.update(self.headers)

        # Initialize the chat interface
        self.chat = Chat(self)

    @staticmethod
    def _typefully_extractor(chunk: str) -> Optional[str]:
        """Extract content from the Typefully stream format '0:"..."'."""
        if isinstance(chunk, str):
            match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
            if match:
                # Decode unicode escapes like \u00e9 and unescape quotes/backslashes
                content = match.group(1).encode().decode('unicode_escape')
                return content.replace('\\\\', '\\').replace('\\"', '"')
        return None

    def convert_model_name(self, model: str) -> str:
        """
        Convert a model name to one supported by Typefully.

        Args:
            model: Model name to convert

        Returns:
            Typefully model name
        """
        # If the model is already a valid Typefully model, return it
        if model in self.AVAILABLE_MODELS:
            return model

        # Map common OpenAI-style model names to Typefully identifiers
        model_mapping = {
            "gpt-4o-mini": "openai:gpt-4o-mini",
            "gpt-4o": "openai:gpt-4o",
            "claude-3-5-haiku": "anthropic:claude-3-5-haiku-20241022",
            "llama-3.3-70b": "groq:llama-3.3-70b-versatile"
        }

        if model in model_mapping:
            return model_mapping[model]

        # Fall back to the default model
        print(f"{RED}Warning: Unknown model '{model}'. Using 'openai:gpt-4o-mini' instead.{RESET}")
        return "openai:gpt-4o-mini"

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
```
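To make the wire format concrete: `create()` posts the JSON body assembled above, and the endpoint streams chunks of the form `0:"..."`, which `_typefully_extractor` unwraps. A minimal illustration (the prompt text and chunk value are hypothetical, shaped only by the payload construction and extractor regex in the file):

```python
from webscout.Provider.OPENAI.typefully import TypefullyAI

# Hypothetical request body, mirroring the fields built in Completions.create
payload = {
    "prompt": "User: Hello!\nAssistant:",
    "systemPrompt": "You are a helpful assistant.",
    "modelIdentifier": "openai:gpt-4o-mini",
    "outputLength": 4000,
}

# A raw stream chunk like '0:"Hello there!",' unwraps to plain text
assert TypefullyAI._typefully_extractor('0:"Hello there!",') == "Hello there!"
```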
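Since the class docstring only demonstrates the non-streaming call, here is a minimal streaming sketch against the same client, assuming the import path matches the file's location in the wheel:

```python
from webscout.Provider.OPENAI.typefully import TypefullyAI

client = TypefullyAI(timeout=30)

# With stream=True, create() returns a generator of ChatCompletionChunk objects;
# each carries an incremental delta, and the final chunk sets finish_reason="stop".
for chunk in client.chat.completions.create(
    model="openai:gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```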