webscout-8.3.7-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as potentially problematic (details are available on the registry's advisory page).
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
@@ -1,535 +1,535 @@

Both sides of this hunk are identical: all 535 lines are removed and re-added with the same content, matching `webscout/Provider/OPENAI/scirachat.py` (+535 -535) in the file list above and consistent with a formatting- or line-ending-only change. Shown once, the file reads:

```python
import time
import uuid
import requests
import json
import re
from typing import List, Dict, Optional, Union, Generator, Any

# Import base classes and utility structures
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from webscout.Provider.OPENAI.utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage, get_system_prompt, count_tokens
)

# Attempt to import LitAgent, fallback if not available
try:
    from webscout.litagent import LitAgent
except ImportError:
    pass
# --- SciraChat Client ---

class Completions(BaseCompletions):
    def __init__(self, client: 'SciraChat'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create
        """

        # Prepare the payload for SciraChat API
        payload = {
            "id": self._client.chat_id,
            "messages": messages,
            "model": self._client.convert_model_name(model),
            "group": "chat",  # Always use chat mode (no web search)
            "user_id": self._client.user_id,
            "timezone": "Asia/Calcutta"
        }

        # Add optional parameters if provided
        if max_tokens is not None and max_tokens > 0:
            payload["max_tokens"] = max_tokens

        # Add any additional parameters
        for key, value in kwargs.items():
            if key not in payload:
                payload[key] = value

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
        else:
            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )

            # Handle non-200 responses
            if not response.ok:
                # Try to refresh identity if we get a 403 or 429
                if response.status_code in [403, 429]:
                    print(f"Received status code {response.status_code}, refreshing identity...")
                    self._client.refresh_identity()
                    response = self._client.session.post(
                        self._client.api_endpoint,
                        json=payload,
                        stream=True,
                        timeout=timeout or self._client.timeout,
                        proxies=proxies or getattr(self._client, "proxies", None)
                    )
                    if not response.ok:
                        raise IOError(
                            f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
                        )
                    print("Identity refreshed successfully.")
                else:
                    raise IOError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )

            # Track token usage across chunks
            prompt_tokens = 0
            completion_tokens = 0
            total_tokens = 0

            # Estimate prompt tokens based on message length
            prompt_tokens = count_tokens(payload.get("messages", [{}])[0].get("content", ""))

            for line in response.iter_lines():
                if not line:
                    continue

                try:
                    line_str = line.decode('utf-8')

                    # Format: 0:"content" (quoted format)
                    match = re.search(r'0:"(.*?)"', line_str)
                    if match:
                        content = match.group(1)

                        # Format the content (replace escaped newlines)
                        content = self._client.format_text(content)

                        # Update token counts using count_tokens
                        completion_tokens += count_tokens(content)
                        total_tokens = prompt_tokens + completion_tokens

                        # Create the delta object
                        delta = ChoiceDelta(
                            content=content,
                            role="assistant",
                            tool_calls=None
                        )

                        # Create the choice object
                        choice = Choice(
                            index=0,
                            delta=delta,
                            finish_reason=None,
                            logprobs=None
                        )

                        # Create the chunk object
                        chunk = ChatCompletionChunk(
                            id=request_id,
                            choices=[choice],
                            created=created_time,
                            model=model,
                            system_fingerprint=None
                        )

                        # Convert chunk to dict using Pydantic's API
                        if hasattr(chunk, "model_dump"):
                            chunk_dict = chunk.model_dump(exclude_none=True)
                        else:
                            chunk_dict = chunk.dict(exclude_none=True)

                        # Add usage information to match OpenAI format
                        usage_dict = {
                            "prompt_tokens": prompt_tokens,
                            "completion_tokens": completion_tokens,
                            "total_tokens": total_tokens,
                            "estimated_cost": None
                        }

                        chunk_dict["usage"] = usage_dict

                        # Return the chunk object for internal processing
                        yield chunk
                except Exception as e:
                    print(f"Error processing chunk: {e}")
                    continue

            # Final chunk with finish_reason="stop"
            delta = ChoiceDelta(
                content=None,
                role=None,
                tool_calls=None
            )

            choice = Choice(
                index=0,
                delta=delta,
                finish_reason="stop",
                logprobs=None
            )

            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                system_fingerprint=None
            )

            if hasattr(chunk, "model_dump"):
                chunk_dict = chunk.model_dump(exclude_none=True)
            else:
                chunk_dict = chunk.dict(exclude_none=True)
            chunk_dict["usage"] = {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": total_tokens,
                "estimated_cost": None
            }

            yield chunk

        except Exception as e:
            print(f"Error during SciraChat stream request: {e}")
            raise IOError(f"SciraChat request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                json=payload,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )

            # Handle non-200 responses
            if not response.ok:
                # Try to refresh identity if we get a 403 or 429
                if response.status_code in [403, 429]:
                    print(f"Received status code {response.status_code}, refreshing identity...")
                    self._client.refresh_identity()
                    response = self._client.session.post(
                        self._client.api_endpoint,
                        json=payload,
                        timeout=timeout or self._client.timeout,
                        proxies=proxies or getattr(self._client, "proxies", None)
                    )
                    if not response.ok:
                        raise IOError(
                            f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
                        )
                    print("Identity refreshed successfully.")
                else:
                    raise IOError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )

            # Collect the full response
            full_response = ""
            for line in response.iter_lines():
                if line:
                    try:
                        line_str = line.decode('utf-8')

                        # Format: 0:"content" (quoted format)
                        match = re.search(r'0:"(.*?)"', line_str)
                        if match:
                            content = match.group(1)
                            full_response += content
                    except:
                        continue

            # Format the text (replace escaped newlines)
            full_response = self._client.format_text(full_response)

            # Estimate token counts
            prompt_tokens = count_tokens(payload.get("messages", [{}])[0].get("content", ""))
            completion_tokens = count_tokens(full_response)
            total_tokens = prompt_tokens + completion_tokens

            # Create the message object
            message = ChatCompletionMessage(
                role="assistant",
                content=full_response
            )

            # Create the choice object
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Create the usage object
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )

            # Create the completion object
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

            return completion

        except Exception as e:
            print(f"Error during SciraChat non-stream request: {e}")
            raise IOError(f"SciraChat request failed: {e}") from e

class Chat(BaseChat):
    def __init__(self, client: 'SciraChat'):
        self.completions = Completions(client)

class SciraChat(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for Scira Chat API.

    Usage:
        client = SciraChat()
        response = client.chat.completions.create(
            model="scira-default",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """
    # Model mapping: actual model names to Scira API format
    MODEL_MAPPING = {
        "grok-3-mini": "scira-default",
        "grok-3-mini-fast": "scira-x-fast-mini",
        "grok-3-fast": "scira-x-fast",
        "gpt-4.1-nano": "scira-nano",
        "grok-3": "scira-grok-3",
        "grok-4": "scira-grok-4",
        "grok-2-vision-1212": "scira-vision",
        "grok-2-latest": "scira-g2",
        "gpt-4o-mini": "scira-4o-mini",
        "o4-mini-2025-04-16": "scira-o4-mini",
        "o3": "scira-o3",
        "qwen/qwen3-32b": "scira-qwen-32b",
        "qwen3-30b-a3b": "scira-qwen-30b",
        "qwen3-4b": "scira-qwen-4b",
        "qwen3-32b": "scira-qwen-32b",
        "qwen3-4b-thinking": "scira-qwen-4b-thinking",
        "deepseek-v3-0324": "scira-deepseek-v3",
        "claude-3-5-haiku-20241022": "scira-haiku",
        "mistral-small-latest": "scira-mistral",
        "gemini-2.5-flash-lite-preview-06-17": "scira-google-lite",
        "gemini-2.5-flash": "scira-google",
        "gemini-2.5-pro": "scira-google-pro",
        "claude-sonnet-4-20250514": "scira-anthropic",
        "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
        "claude-4-opus-20250514": "scira-opus",
        "claude-4-opus-20250514-pro": "scira-opus-pro",
        "llama-4-maverick": "scira-llama-4",
        "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
        "kimi-k2-instruct": "scira-kimi-k2",
        "scira-kimi-k2": "kimi-k2-instruct",
    }
    # Reverse mapping: Scira format to actual model names
    SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
    # Add special cases for aliases and duplicate mappings
    SCIRA_TO_MODEL["scira-anthropic-thinking"] = "claude-sonnet-4-20250514"
    SCIRA_TO_MODEL["scira-opus-pro"] = "claude-4-opus-20250514"
    SCIRA_TO_MODEL["scira-x-fast"] = "grok-3-fast"
    SCIRA_TO_MODEL["scira-x-fast-mini"] = "grok-3-mini-fast"
    SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
    SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
    SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
    SCIRA_TO_MODEL["scira-qwen-4b"] = "qwen3-4b"
    SCIRA_TO_MODEL["scira-qwen-4b-thinking"] = "qwen3-4b-thinking"
    SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
    SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
    SCIRA_TO_MODEL["scira-kimi-k2"] = "kimi-k2-instruct"
    SCIRA_TO_MODEL["kimi-k2-instruct"] = "scira-kimi-k2"
    MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
    # Available models list (actual model names + scira aliases)
    AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
    # Optional: pretty display names for UI (reverse mapping)
    MODEL_DISPLAY_NAMES = {v: k for k, v in MODEL_MAPPING.items()}

    @classmethod
    def _resolve_model(cls, model: str) -> str:
        """
        Resolve a model name to its Scira API format.

        Args:
            model: Either an actual model name or a Scira alias

        Returns:
            The Scira API format model name

        Raises:
            ValueError: If the model is not supported
        """
        # If it's already a Scira format, return as-is
        if model in cls.SCIRA_TO_MODEL:
            return model
        # If it's an actual model name, convert to Scira format
        if model in cls.MODEL_MAPPING:
            return cls.MODEL_MAPPING[model]
        # Model not found
        raise ValueError(f"Invalid model: {model}. Choose from: {cls.AVAILABLE_MODELS}")

    def convert_model_name(self, model: str) -> str:
        """
        Convert model display names or internal keys to ones supported by SciraChat.
        Args:
            model: Model name or alias to convert
        Returns:
            SciraChat model name
        """
        # Use the new _resolve_model logic
        try:
            return self._resolve_model(model)
        except Exception as e:
            print(f"Warning: {e} Using 'scira-default' instead.")
            return "scira-default"

    def __init__(
        self,
        timeout: Optional[int] = None,
        browser: str = "chrome"
    ):
        """
        Initialize the SciraChat client.

        Args:
            timeout: Request timeout in seconds (None for no timeout)
            browser: Browser to emulate in user agent
        """
        self.timeout = timeout or 30  # Default to 30 seconds if None
        self.api_endpoint = "https://scira.ai/api/search"
        self.session = requests.Session()

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Use the fingerprint for headers
        self.headers = {
            **self.fingerprint,
            "Origin": "https://scira.ai",
            "Referer": "https://scira.ai/",
        }

        self.session.headers.update(self.headers)

        # Generate unique IDs for chat session and user
        self.chat_id = str(uuid.uuid4())
        self.user_id = f"user_{str(uuid.uuid4())[:8].upper()}"

        # Initialize the chat interface
        self.chat = Chat(self)

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint
        self.headers.update({
            **self.fingerprint,
        })

        # Update session headers
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    def format_text(self, text: str) -> str:
        """
        Format text by replacing escaped newlines with actual newlines.

        Args:
            text: Text to format

        Returns:
            Formatted text
        """
        # Use a more comprehensive approach to handle all escape sequences
        try:
            # First handle double backslashes to avoid issues
            text = text.replace('\\\\', '\\')

            # Handle common escape sequences
            text = text.replace('\\n', '\n')
            text = text.replace('\\r', '\r')
            text = text.replace('\\t', '\t')
            text = text.replace('\\"', '"')
            text = text.replace("\\'", "'")

            # Handle any remaining escape sequences using JSON decoding
            # This is a fallback in case there are other escape sequences
            try:
                # Add quotes to make it a valid JSON string
                json_str = f'"{text}"'
                # Use json module to decode all escape sequences
                decoded = json.loads(json_str)
                return decoded
            except json.JSONDecodeError:
                # If JSON decoding fails, return the text with the replacements we've already done
                return text
        except Exception as e:
            # If any error occurs, return the original text
            print(f"Warning: Error formatting text: {e}")
            return text


    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                # Return display names (aliases)
                return type(self).AVAILABLE_MODELS
        return _ModelList()

if __name__ == "__main__":
    ai = SciraChat()
    response = ai.chat.completions.create(
        model="grok-3-mini-fast-latest",
        messages=[
            {"role": "user", "content": "who are u?"}
        ],
        stream=True
    )
    for chunk in response:
        if hasattr(chunk, "choices") and chunk.choices and hasattr(chunk.choices[0], "delta"):
            content = getattr(chunk.choices[0].delta, "content", None)
            if content:
                print(content, end="", flush=True)
    print()
```
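The streaming loop above extracts text from lines shaped like `0:"..."`, a quoted text-delta framing that resembles (though the source does not say so) the Vercel AI SDK data-stream format. Below is a minimal standalone sketch of that extraction step, using the same regex as the provider; the sample lines are made up for illustration, not captured from the live API.

```python
# Standalone sketch of the provider's stream-line parsing (same regex as above).
# The sample lines are illustrative, not real scira.ai output.
import re

sample_lines = [
    b'0:"Hello"',
    b'0:" world\\n"',                 # escaped newline stays literal until format_text runs
    b'e:{"finishReason":"stop"}',     # non-content frames do not match the regex
]

full_text = ""
for raw in sample_lines:
    line = raw.decode("utf-8")
    match = re.search(r'0:"(.*?)"', line)
    if match:
        full_text += match.group(1)

print(full_text.replace("\\n", "\n"))  # -> Hello world
```

Note that the non-greedy `(.*?)` stops at the first `"`, so an escaped quote inside a delta would truncate the capture; the shipped code accepts that trade-off and leaves remaining backslash escapes for `format_text` to undo.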
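Finally, for orientation, a minimal non-streaming call against this provider would look like the sketch below. The import path mirrors the file being diffed; reaching the unofficial scira.ai endpoint requires network access and may stop working at any time, so treat this as illustrative rather than guaranteed.

```python
# Minimal non-streaming usage sketch for the SciraChat client in this diff.
# Assumes webscout 2025.10.11 is installed and scira.ai is reachable.
from webscout.Provider.OPENAI.scirachat import SciraChat

client = SciraChat(timeout=60)
completion = client.chat.completions.create(
    model="grok-3-mini",  # resolved to "scira-default" via MODEL_MAPPING
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=False,
)
print(completion.choices[0].message.content)
print(completion.usage)  # prompt/completion/total counts estimated with count_tokens
```

Unknown model names do not raise here: `convert_model_name` catches the `ValueError` from `_resolve_model`, prints a warning, and falls back to `"scira-default"`, which is why the `__main__` demo above still runs with the unmapped name `"grok-3-mini-fast-latest"`.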