webscout 8.2.2__py3-none-any.whl → 8.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic; see the registry's advisory page for more details.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
|
@@ -1,488 +0,0 @@
|
|
|
1
|
-
import time
|
|
2
|
-
import uuid
|
|
3
|
-
import requests
|
|
4
|
-
import json
|
|
5
|
-
import random
|
|
6
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
7
|
-
|
|
8
|
-
# Import base classes and utility structures
|
|
9
|
-
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
10
|
-
from .utils import (
|
|
11
|
-
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
12
|
-
ChatCompletionMessage, CompletionUsage
|
|
13
|
-
)
|
|
14
|
-
|
|
15
|
-
# ANSI escape codes for formatting
|
|
16
|
-
BOLD = "\033[1m"
|
|
17
|
-
RED = "\033[91m"
|
|
18
|
-
RESET = "\033[0m"
|
|
19
|
-
|
|
20
|
-
class Completions(BaseCompletions):
    """Chat-completions implementation for the OPKFC backend.

    Converts OpenAI-style ``messages`` into the ChatGPT-web payload the
    backend expects, posts it through the owning :class:`OPKFC` client's
    session, and turns the SSE event stream back into OpenAI-compatible
    ``ChatCompletion`` / ``ChatCompletionChunk`` objects.

    The streaming and non-streaming paths previously duplicated the
    header/payload construction and the SSE parsing verbatim; both now
    share the private helpers below.
    """

    # JSON-pointer path targeted by incremental "append"/"patch" delta events.
    _CONTENT_PATH = "/message/content/parts/0"

    def __init__(self, client: 'OPKFC'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion with OPKFC API.

        Args:
            model: The model to use (from AVAILABLE_MODELS)
            messages: List of message dictionaries with 'role' and 'content'
            max_tokens: Maximum number of tokens to generate
            stream: Whether to stream the response
            temperature: Sampling temperature (0-1)
            top_p: Nucleus sampling parameter (0-1)
            **kwargs: Additional parameters to pass to the API

        Returns:
            If stream=False, returns a ChatCompletion object
            If stream=True, returns a Generator yielding ChatCompletionChunk objects
        """
        if stream:
            return self._create_streaming(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                **kwargs
            )
        return self._create_non_streaming(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            **kwargs
        )

    def _build_headers(self) -> Dict[str, str]:
        """Build request headers mimicking the captured browser session.

        A fresh random 6-digit bearer token is generated per request; the
        cookie and user agent come from the owning client.
        """
        auth_token = str(random.randint(0, 999999)).zfill(6)
        return {
            "Accept": "text/event-stream",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
            "Authorization": f"Bearer {auth_token}",
            "Cache-Control": "no-cache",
            "Content-Type": "application/json",
            "Cookie": self._client.cookie,
            "DNT": "1",
            "Origin": "https://www.opkfc.com",
            "Pragma": "no-cache",
            "Referer": "https://www.opkfc.com/",
            "Sec-CH-UA": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": "\"Windows\"",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Sec-GPC": "1",
            "User-Agent": self._client.user_agent,
            "openai-sentinel-chat-requirements-token": "0cb55714-5810-47d4-a9c0-648406004279"
        }

    def _build_payload(
        self,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int],
        temperature: Optional[float],
        top_p: Optional[float],
    ) -> Dict[str, Any]:
        """Translate OpenAI-style messages into the ChatGPT-web request body."""
        payload: Dict[str, Any] = {
            "action": "next",
            "messages": [
                {
                    "id": str(uuid.uuid4()),
                    "author": {"role": msg["role"]},
                    "content": {"content_type": "text", "parts": [msg["content"]]},
                    "create_time": time.time()
                }
                for msg in messages
            ],
            "parent_message_id": str(uuid.uuid4()),
            "model": model,
            "timezone_offset_min": -330,
            "timezone": "Asia/Calcutta"
        }
        # Optional sampling parameters are only sent when explicitly given.
        if max_tokens is not None:
            payload["max_tokens"] = max_tokens
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p
        return payload

    def _post(
        self,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int],
        temperature: Optional[float],
        top_p: Optional[float],
    ):
        """Send the conversation request and return the (streaming) response.

        The backend only supports SSE responses, so even the non-streaming
        path requests ``stream=True`` and consumes the event stream.

        Raises:
            requests.HTTPError: via ``raise_for_status`` on non-2xx replies.
        """
        response = self._client.session.post(
            self._client.api_endpoint,
            headers=self._build_headers(),
            json=self._build_payload(model, messages, max_tokens, temperature, top_p),
            stream=True,
            timeout=self._client.timeout
        )
        response.raise_for_status()
        return response

    @staticmethod
    def _extract_piece(obj: Dict[str, Any], value: Any) -> Optional[str]:
        """Extract a text fragment from a single delta event, or None.

        Handles the observed event formats:
        1. bare string in ``v`` (covers path-based appends too),
        2. nested ``message.content.parts`` structure,
        3. "patch" operations carrying a list of append deltas
           (the last matching patch wins, as in the original logic).
        """
        if isinstance(value, str):
            return value
        if isinstance(value, dict):
            parts = value.get("message", {}).get("content", {}).get("parts")
            if parts and isinstance(parts[0], str):
                return parts[0]
            return None
        if obj.get("o") == "patch" and isinstance(value, list):
            piece: Optional[str] = None
            for patch in value:
                if (
                    patch.get("p") == Completions._CONTENT_PATH
                    and patch.get("o") == "append"
                    and isinstance(patch.get("v"), str)
                ):
                    piece = patch["v"]
            return piece
        return None

    def _iter_content(
        self,
        response,
        messages: List[Dict[str, str]],
        label: str,
    ) -> Generator[str, None, None]:
        """Yield assistant text fragments from an OPKFC SSE response.

        Skips everything before the first assistant-message marker and an
        initial fragment that merely echoes the user's prompt.  *label*
        ("streaming" / "non-streaming") only affects parse-error log text.
        """
        started = False
        collected = ""
        assistant_found = False
        for raw in response.iter_lines(decode_unicode=True):
            if not raw or not raw.startswith("data:"):
                continue
            part = raw[len("data:"):].strip()
            if part == "[DONE]":
                break
            # Skip the delta_encoding preamble event.
            if part == '"v1"':
                continue
            try:
                obj = json.loads(part)
                if not isinstance(obj, dict):
                    continue
                value = obj.get("v")
                # A new assistant message resets the accumulation state.
                if (
                    isinstance(value, dict)
                    and value.get("message", {}).get("author", {}).get("role") == "assistant"
                ):
                    assistant_found = True
                    collected = ""
                    started = False
                    continue
                if not assistant_found:
                    continue
                piece = self._extract_piece(obj, value)
                if not piece:
                    continue
                # Drop a first fragment that repeats the user's own message.
                if not started and collected == "" and any(
                    msg["content"] in piece for msg in messages if msg["role"] == "user"
                ):
                    continue
                started = True
                collected += piece
                yield piece
            # json.JSONDecodeError subclasses ValueError, so one clause suffices.
            except ValueError as e:
                print(f"{RED}Error parsing {label} response: {e} - {part}{RESET}")

    def _create_streaming(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Implementation for streaming chat completions.

        Raises:
            IOError: wrapping any transport or parsing failure.
        """
        try:
            request_id = str(uuid.uuid4())
            created_time = int(time.time())
            response = self._post(model, messages, max_tokens, temperature, top_p)

            for piece in self._iter_content(response, messages, "streaming"):
                delta = ChoiceDelta(content=piece)
                choice = Choice(index=0, delta=delta, finish_reason=None)
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model
                )

            # Terminal chunk signalling completion.
            delta = ChoiceDelta(content=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            yield ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model
            )
        except Exception as e:
            print(f"{RED}Error during OPKFC streaming request: {e}{RESET}")
            raise IOError(f"OPKFC streaming request failed: {e}") from e

    def _create_non_streaming(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> ChatCompletion:
        """Implementation for non-streaming chat completions.

        Consumes the full SSE stream and returns one aggregated completion.

        Raises:
            IOError: wrapping any transport or parsing failure.
        """
        try:
            request_id = str(uuid.uuid4())
            created_time = int(time.time())
            response = self._post(model, messages, max_tokens, temperature, top_p)

            full_content = "".join(
                self._iter_content(response, messages, "non-streaming")
            )

            message = ChatCompletionMessage(
                role="assistant",
                content=full_content
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Rough 4-chars-per-token estimate; the backend reports no usage.
            prompt_tokens = sum(len(msg.get("content", "")) // 4 for msg in messages)
            completion_tokens = len(full_content) // 4
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )

            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )
        except Exception as e:
            print(f"{RED}Error during OPKFC non-stream request: {e}{RESET}")
            raise IOError(f"OPKFC request failed: {e}") from e
|
|
435
|
-
|
|
436
|
-
class Chat(BaseChat):
    """Chat namespace exposing the ``completions`` endpoint for OPKFC."""

    def __init__(self, client: 'OPKFC'):
        # Wire the completions interface to the owning client instance.
        self.completions = Completions(client)
|
|
439
|
-
|
|
440
|
-
class OPKFC(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for OPKFC API.

    Usage:
        client = OPKFC()
        response = client.chat.completions.create(
            model="auto",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # Models accepted by the backend's "model" payload field.
    AVAILABLE_MODELS = [
        "auto",
        "o4-mini",
        "gpt-4o-mini",
        "gpt-4o"
    ]

    def __init__(
        self,
        timeout: int = 30,
        proxies: Optional[dict] = None
    ):
        """
        Initialize the OPKFC client.

        Args:
            timeout: Request timeout in seconds
            proxies: Optional proxy configuration (mapping passed to
                ``requests.Session.proxies``); defaults to no proxies.
        """
        self.timeout = timeout
        self.api_endpoint = "https://www.opkfc.com/backend-api/conversation"
        # Bug fix: the original used a mutable default argument
        # (``proxies: dict = {}``), which is shared across all instances.
        self.proxies = proxies if proxies is not None else {}

        # Session shared by all requests issued through this client.
        self.session = requests.Session()
        if self.proxies:
            self.session.proxies.update(self.proxies)

        # Browser fingerprint matching the captured request.
        self.user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0"

        # NOTE(review): hard-coded session cookie captured from a browser
        # session; the embedded gfsessionid/expiry values will go stale —
        # presumably this needs periodic refreshing. TODO confirm.
        self.cookie = "__vtins__KUc0LhjVWFNXQv11=%7B%22sid%22%3A%20%228fab09e3-c23e-5f60-b369-9697fbb821ce%22%2C%20%22vd%22%3A%201%2C%20%22stt%22%3A%200%2C%20%22dr%22%3A%200%2C%20%22expires%22%3A%201744896723481%2C%20%22ct%22%3A%201744894923481%7D; __51uvsct__KUc0LhjVWFNXQv11=1; __51vcke__KUc0LhjVWFNXQv11=06da852c-bb56-547c-91a8-43a0d485ffed; __51vuft__KUc0LhjVWFNXQv11=1744894923504; gfsessionid=1ochrgv17vy4sbd98xmwt6crpmkxwlqf; oai-nav-state=1; p_uv_id=ad86646801bc60d6d95f6098e4ee7450; _dd_s=rum=0&expire=1744895920821&logs=1&id=a39221c9-e8ed-44e6-a2c8-03192699c71e&created=1744894970625"

        # Public chat interface (client.chat.completions.create(...)).
        self.chat = Chat(self)
|