webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +33 -15
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +703 -250
- webscout/Bard.py +441 -323
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +15 -0
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +333 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +202 -0
- webscout/Provider/AISEARCH/genspark_search.py +324 -0
- webscout/Provider/AISEARCH/hika_search.py +186 -0
- webscout/Provider/AISEARCH/iask_search.py +410 -0
- webscout/Provider/AISEARCH/monica_search.py +220 -0
- webscout/Provider/AISEARCH/scira_search.py +298 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +791 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +369 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +375 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +283 -0
- webscout/Provider/OPENAI/NEMOTRON.py +232 -0
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -0
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +40 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +969 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +494 -0
- webscout/Provider/OPENAI/chatsandbox.py +173 -0
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +322 -0
- webscout/Provider/OPENAI/e2b.py +1414 -0
- webscout/Provider/OPENAI/exaai.py +417 -0
- webscout/Provider/OPENAI/exachat.py +444 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +326 -0
- webscout/Provider/OPENAI/groq.py +364 -0
- webscout/Provider/OPENAI/heckai.py +308 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +389 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +357 -0
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +477 -0
- webscout/Provider/OPENAI/sonus.py +304 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +364 -0
- webscout/Provider/OPENAI/uncovrAI.py +463 -0
- webscout/Provider/OPENAI/utils.py +318 -0
- webscout/Provider/OPENAI/venice.py +431 -0
- webscout/Provider/OPENAI/wisecat.py +387 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +365 -0
- webscout/Provider/OPENAI/yep.py +382 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/README.md +82 -0
- webscout/Provider/TTI/__init__.py +7 -0
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +10 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +475 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +174 -0
- webscout/Provider/ai4chat.py +174 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +224 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/client.py +70 -0
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +404 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +210 -0
- webscout/scout/core/scout.py +607 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +478 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
- webscout-8.2.9.dist-info/RECORD +289 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,364 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import uuid
|
|
3
|
+
import requests
|
|
4
|
+
import json
|
|
5
|
+
from typing import List, Dict, Optional, Union, Generator, Any
|
|
6
|
+
|
|
7
|
+
# Import base classes and utility structures
|
|
8
|
+
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
9
|
+
from .utils import (
|
|
10
|
+
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
11
|
+
ChatCompletionMessage, CompletionUsage, count_tokens
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
# Attempt to import LitAgent, fallback if not available
|
|
15
|
+
try:
|
|
16
|
+
from webscout.litagent import LitAgent
|
|
17
|
+
except ImportError:
|
|
18
|
+
print("Warning: LitAgent not found. Functionality may be limited.")
|
|
19
|
+
|
|
20
|
+
# --- TypeGPT Client ---
|
|
21
|
+
|
|
22
|
+
class Completions(BaseCompletions):
    """Chat-completions interface for the TypeGPT backend.

    Mirrors ``openai.chat.completions``: :meth:`create` returns either a full
    ``ChatCompletion`` or, when ``stream=True``, a generator of
    ``ChatCompletionChunk`` objects parsed from the server's SSE stream.
    """

    def __init__(self, client: 'TypeGPT'):
        # Back-reference to the owning client; all request settings
        # (endpoint, headers, timeout, default sampling params) live there.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.

        Mimics ``openai.chat.completions.create``. Any sampling argument left
        as ``None`` falls back to the owning client's default.

        Args:
            model: Requested model name; normalized through
                ``TypeGPT.convert_model_name``.
            messages: Conversation as ``{"role": ..., "content": ...}`` dicts.
            max_tokens: Completion token limit (client default when None).
            stream: When True, return a chunk generator instead of a single
                completion object.
            temperature: Sampling temperature override.
            top_p: Nucleus-sampling override.
            presence_penalty: Presence-penalty override.
            frequency_penalty: Frequency-penalty override.
            **kwargs: Extra payload keys forwarded verbatim (existing keys
                are never clobbered).

        Raises:
            IOError: If the HTTP request fails or returns a non-2xx status.
        """
        # Prepare the payload for the TypeGPT API.
        payload = {
            "messages": messages,
            "stream": stream,
            "model": self._client.convert_model_name(model),
            "temperature": temperature if temperature is not None else self._client.temperature,
            "top_p": top_p if top_p is not None else self._client.top_p,
            "presence_penalty": presence_penalty if presence_penalty is not None else self._client.presence_penalty,
            "frequency_penalty": frequency_penalty if frequency_penalty is not None else self._client.frequency_penalty,
            "max_tokens": max_tokens if max_tokens is not None else self._client.max_tokens,
        }

        # Forward any additional parameters without overwriting core ones.
        for key, value in kwargs.items():
            if key not in payload:
                payload[key] = value

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """POST with ``stream=True`` and yield one chunk per SSE data line.

        A terminal chunk with ``finish_reason="stop"`` is always yielded after
        the server signals ``[DONE]`` (or the stream ends).

        NOTE(review): the original implementation also estimated prompt/
        completion token usage and serialized each chunk into an unused
        ``chunk_dict`` — all of that was dead code (both paths yielded the
        chunk object itself) and has been removed. Streamed chunks therefore
        carry no usage information, exactly as before.

        Raises:
            IOError: On HTTP failure or any error while reading the stream.
        """
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            )

            # Handle non-200 responses early.
            if not response.ok:
                raise IOError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            for line in response.iter_lines():
                if not line:
                    continue

                decoded_line = line.decode('utf-8').strip()
                if not decoded_line.startswith("data: "):
                    continue

                json_str = decoded_line[6:]
                if json_str == "[DONE]":
                    break

                try:
                    data = json.loads(json_str)
                except json.JSONDecodeError:
                    # Skip malformed SSE payloads rather than aborting.
                    print(f"Warning: Could not decode JSON line: {json_str}")
                    continue

                choice_data = data.get('choices', [{}])[0]
                delta_data = choice_data.get('delta', {})

                delta = ChoiceDelta(
                    content=delta_data.get('content'),
                    role=delta_data.get('role'),
                    tool_calls=delta_data.get('tool_calls')
                )
                choice = Choice(
                    index=choice_data.get('index', 0),
                    delta=delta,
                    finish_reason=choice_data.get('finish_reason'),
                    logprobs=choice_data.get('logprobs')
                )
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=data.get('system_fingerprint')
                )

            # Final chunk with finish_reason="stop".
            final_choice = Choice(
                index=0,
                delta=ChoiceDelta(content=None, role=None, tool_calls=None),
                finish_reason="stop",
                logprobs=None
            )
            yield ChatCompletionChunk(
                id=request_id,
                choices=[final_choice],
                created=created_time,
                model=model,
                system_fingerprint=None
            )

        except Exception as e:
            print(f"Error during TypeGPT stream request: {e}")
            raise IOError(f"TypeGPT request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """POST the payload and parse the full JSON body into a ChatCompletion.

        Raises:
            IOError: On HTTP failure or any error while parsing the response.
        """
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                timeout=self._client.timeout
            )

            # Handle non-200 responses.
            if not response.ok:
                raise IOError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            data = response.json()

            choices = []
            for choice_d in data.get('choices', []):
                message_d = choice_d.get('message', {})
                message = ChatCompletionMessage(
                    role=message_d.get('role', 'assistant'),
                    content=message_d.get('content', '')
                )
                choices.append(Choice(
                    index=choice_d.get('index', 0),
                    message=message,
                    finish_reason=choice_d.get('finish_reason', 'stop')
                ))

            usage_data = data.get('usage', {})
            usage = CompletionUsage(
                prompt_tokens=usage_data.get('prompt_tokens', 0),
                completion_tokens=usage_data.get('completion_tokens', 0),
                total_tokens=usage_data.get('total_tokens', 0)
            )

            return ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                model=data.get('model', model),
                usage=usage,
            )

        except Exception as e:
            print(f"Error during TypeGPT non-stream request: {e}")
            raise IOError(f"TypeGPT request failed: {e}") from e
|
|
266
|
+
|
|
267
|
+
class Chat(BaseChat):
    """Container mirroring the OpenAI client's ``chat`` namespace."""

    def __init__(self, client: 'TypeGPT'):
        # Sole member: the completions endpoint bound to the owning client,
        # enabling client.chat.completions.create(...).
        self.completions = Completions(client)
|
|
270
|
+
|
|
271
|
+
class TypeGPT(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for TypeGPT API.

    Usage:
        client = TypeGPT()
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """

    # Working Models (based on testing)
    AVAILABLE_MODELS = [
        "gpt-4o-mini-2024-07-18",
        "chatgpt-4o-latest",
        "deepseek-r1",
        "deepseek-v3",
        "uncensored-r1",
        "Image-Generator",
    ]

    def __init__(
        self,
        timeout: Optional[int] = None,
        browser: str = "chrome"
    ):
        """
        Initialize the TypeGPT client.

        Args:
            timeout: Request timeout in seconds. Falsy values (None or 0)
                fall back to the 60-second default.
            browser: Browser to emulate in the generated user agent.
        """
        self.timeout = timeout or 60  # default to 60 seconds when unset
        self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
        self.session = requests.Session()

        # Default sampling parameters, used when create() receives None.
        self.max_tokens = 4000
        self.temperature = 0.5
        self.presence_penalty = 0
        self.frequency_penalty = 0
        self.top_p = 1

        # LitAgent supplies a browser fingerprint (user agent + languages)
        # so requests look like they come from a real browser session.
        agent = LitAgent()
        self.fingerprint = agent.generate_fingerprint(browser)

        # Headers for the request.
        self.headers = {
            "authority": "chat.typegpt.net",
            "accept": "application/json, text/event-stream",
            "accept-language": self.fingerprint["accept_language"],
            "content-type": "application/json",
            "origin": "https://chat.typegpt.net",
            "referer": "https://chat.typegpt.net/",
            "user-agent": self.fingerprint["user_agent"]
        }
        self.session.headers.update(self.headers)

        # OpenAI-style entry point: client.chat.completions.create(...)
        self.chat = Chat(self)

    def convert_model_name(self, model: str) -> str:
        """
        Convert model names to ones supported by TypeGPT.

        Args:
            model: Model name to convert.

        Returns:
            The name unchanged when it is a valid TypeGPT model; otherwise
            'chatgpt-4o-latest' (reliable default), after printing a warning.
        """
        # If the model is already a valid TypeGPT model, return it.
        if model in self.AVAILABLE_MODELS:
            return model

        # Default to chatgpt-4o-latest if model not found (works reliably).
        print(f"Warning: Unknown model '{model}'. Using 'chatgpt-4o-latest' instead.")
        return "chatgpt-4o-latest"

    @classmethod
    def models(cls):
        """Return the list of available models for TypeGPT.

        NOTE(review): the original file defined ``models`` twice — a
        ``@property`` returning a ``_ModelList`` helper, then this
        classmethod. The classmethod silently shadowed the property, so the
        property was unreachable dead code and has been removed; runtime
        behavior is unchanged.
        """
        return cls.AVAILABLE_MODELS
|