webscout-8.3.6-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.
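
This release also switches webscout from a SemVer-style version (8.3.6) to a date-based CalVer one (2025.10.11), so it is worth confirming that standard tooling still orders the two releases correctly. A minimal sketch using the third-party packaging library (an assumption; any PEP 440 implementation orders the release segments the same way):

# Minimal check that PEP 440 ordering treats the CalVer release as newer.
# Requires the third-party "packaging" library (pip install packaging).
from packaging.version import Version

old, new = Version("8.3.6"), Version("2025.10.11")
assert new > old  # 2025 > 8 in the leading release segment, so pip upgrades resolve
print(f"{old} -> {new}: upgrade ordering OK")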

Files changed (304)
  1. webscout/AIauto.py +250 -250
  2. webscout/AIbase.py +379 -379
  3. webscout/AIutel.py +60 -58
  4. webscout/Bard.py +1012 -1012
  5. webscout/Bing_search.py +417 -417
  6. webscout/DWEBS.py +529 -529
  7. webscout/Extra/Act.md +309 -309
  8. webscout/Extra/GitToolkit/__init__.py +10 -10
  9. webscout/Extra/GitToolkit/gitapi/README.md +110 -110
  10. webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
  11. webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
  12. webscout/Extra/GitToolkit/gitapi/user.py +96 -96
  13. webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
  14. webscout/Extra/YTToolkit/README.md +375 -375
  15. webscout/Extra/YTToolkit/YTdownloader.py +956 -956
  16. webscout/Extra/YTToolkit/__init__.py +2 -2
  17. webscout/Extra/YTToolkit/transcriber.py +475 -475
  18. webscout/Extra/YTToolkit/ytapi/README.md +44 -44
  19. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
  20. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  21. webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
  22. webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
  23. webscout/Extra/YTToolkit/ytapi/https.py +88 -88
  24. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
  25. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  26. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  27. webscout/Extra/YTToolkit/ytapi/query.py +39 -39
  28. webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
  29. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  30. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  31. webscout/Extra/autocoder/__init__.py +9 -9
  32. webscout/Extra/autocoder/autocoder.py +1105 -1105
  33. webscout/Extra/autocoder/autocoder_utiles.py +332 -332
  34. webscout/Extra/gguf.md +429 -429
  35. webscout/Extra/gguf.py +1213 -1213
  36. webscout/Extra/tempmail/README.md +487 -487
  37. webscout/Extra/tempmail/__init__.py +27 -27
  38. webscout/Extra/tempmail/async_utils.py +140 -140
  39. webscout/Extra/tempmail/base.py +160 -160
  40. webscout/Extra/tempmail/cli.py +186 -186
  41. webscout/Extra/tempmail/emailnator.py +84 -84
  42. webscout/Extra/tempmail/mail_tm.py +360 -360
  43. webscout/Extra/tempmail/temp_mail_io.py +291 -291
  44. webscout/Extra/weather.md +281 -281
  45. webscout/Extra/weather.py +193 -193
  46. webscout/Litlogger/README.md +10 -10
  47. webscout/Litlogger/__init__.py +15 -15
  48. webscout/Litlogger/formats.py +13 -13
  49. webscout/Litlogger/handlers.py +121 -121
  50. webscout/Litlogger/levels.py +13 -13
  51. webscout/Litlogger/logger.py +134 -134
  52. webscout/Provider/AISEARCH/Perplexity.py +332 -332
  53. webscout/Provider/AISEARCH/README.md +279 -279
  54. webscout/Provider/AISEARCH/__init__.py +33 -11
  55. webscout/Provider/AISEARCH/felo_search.py +206 -206
  56. webscout/Provider/AISEARCH/genspark_search.py +323 -323
  57. webscout/Provider/AISEARCH/hika_search.py +185 -185
  58. webscout/Provider/AISEARCH/iask_search.py +410 -410
  59. webscout/Provider/AISEARCH/monica_search.py +219 -219
  60. webscout/Provider/AISEARCH/scira_search.py +316 -314
  61. webscout/Provider/AISEARCH/stellar_search.py +177 -177
  62. webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
  63. webscout/Provider/Aitopia.py +314 -315
  64. webscout/Provider/Andi.py +3 -3
  65. webscout/Provider/Apriel.py +306 -0
  66. webscout/Provider/ChatGPTClone.py +236 -236
  67. webscout/Provider/ChatSandbox.py +343 -342
  68. webscout/Provider/Cloudflare.py +324 -324
  69. webscout/Provider/Cohere.py +208 -207
  70. webscout/Provider/Deepinfra.py +370 -369
  71. webscout/Provider/ExaAI.py +260 -260
  72. webscout/Provider/ExaChat.py +308 -387
  73. webscout/Provider/Flowith.py +221 -221
  74. webscout/Provider/GMI.py +293 -0
  75. webscout/Provider/Gemini.py +164 -162
  76. webscout/Provider/GeminiProxy.py +167 -166
  77. webscout/Provider/GithubChat.py +371 -370
  78. webscout/Provider/Groq.py +800 -800
  79. webscout/Provider/HeckAI.py +383 -379
  80. webscout/Provider/Jadve.py +282 -297
  81. webscout/Provider/K2Think.py +308 -0
  82. webscout/Provider/Koboldai.py +206 -384
  83. webscout/Provider/LambdaChat.py +423 -425
  84. webscout/Provider/Nemotron.py +244 -245
  85. webscout/Provider/Netwrck.py +248 -247
  86. webscout/Provider/OLLAMA.py +395 -394
  87. webscout/Provider/OPENAI/Cloudflare.py +394 -395
  88. webscout/Provider/OPENAI/FalconH1.py +452 -457
  89. webscout/Provider/OPENAI/FreeGemini.py +297 -299
  90. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
  91. webscout/Provider/OPENAI/NEMOTRON.py +241 -244
  92. webscout/Provider/OPENAI/PI.py +428 -427
  93. webscout/Provider/OPENAI/README.md +959 -959
  94. webscout/Provider/OPENAI/TogetherAI.py +345 -345
  95. webscout/Provider/OPENAI/TwoAI.py +466 -467
  96. webscout/Provider/OPENAI/__init__.py +33 -59
  97. webscout/Provider/OPENAI/ai4chat.py +313 -303
  98. webscout/Provider/OPENAI/base.py +249 -269
  99. webscout/Provider/OPENAI/chatglm.py +528 -0
  100. webscout/Provider/OPENAI/chatgpt.py +593 -588
  101. webscout/Provider/OPENAI/chatgptclone.py +521 -524
  102. webscout/Provider/OPENAI/chatsandbox.py +202 -177
  103. webscout/Provider/OPENAI/deepinfra.py +319 -315
  104. webscout/Provider/OPENAI/e2b.py +1665 -1665
  105. webscout/Provider/OPENAI/exaai.py +420 -420
  106. webscout/Provider/OPENAI/exachat.py +452 -452
  107. webscout/Provider/OPENAI/friendli.py +232 -232
  108. webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
  109. webscout/Provider/OPENAI/groq.py +364 -364
  110. webscout/Provider/OPENAI/heckai.py +314 -311
  111. webscout/Provider/OPENAI/llmchatco.py +337 -337
  112. webscout/Provider/OPENAI/netwrck.py +355 -354
  113. webscout/Provider/OPENAI/oivscode.py +290 -290
  114. webscout/Provider/OPENAI/opkfc.py +518 -518
  115. webscout/Provider/OPENAI/pydantic_imports.py +1 -1
  116. webscout/Provider/OPENAI/scirachat.py +535 -529
  117. webscout/Provider/OPENAI/sonus.py +308 -308
  118. webscout/Provider/OPENAI/standardinput.py +442 -442
  119. webscout/Provider/OPENAI/textpollinations.py +340 -348
  120. webscout/Provider/OPENAI/toolbaz.py +419 -413
  121. webscout/Provider/OPENAI/typefully.py +362 -362
  122. webscout/Provider/OPENAI/utils.py +295 -295
  123. webscout/Provider/OPENAI/venice.py +436 -436
  124. webscout/Provider/OPENAI/wisecat.py +387 -387
  125. webscout/Provider/OPENAI/writecream.py +166 -166
  126. webscout/Provider/OPENAI/x0gpt.py +378 -378
  127. webscout/Provider/OPENAI/yep.py +389 -389
  128. webscout/Provider/OpenGPT.py +230 -230
  129. webscout/Provider/Openai.py +244 -496
  130. webscout/Provider/PI.py +405 -404
  131. webscout/Provider/Perplexitylabs.py +430 -431
  132. webscout/Provider/QwenLM.py +272 -254
  133. webscout/Provider/STT/__init__.py +32 -2
  134. webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
  135. webscout/Provider/StandardInput.py +309 -309
  136. webscout/Provider/TTI/README.md +82 -82
  137. webscout/Provider/TTI/__init__.py +33 -12
  138. webscout/Provider/TTI/aiarta.py +413 -413
  139. webscout/Provider/TTI/base.py +136 -136
  140. webscout/Provider/TTI/bing.py +243 -243
  141. webscout/Provider/TTI/gpt1image.py +149 -149
  142. webscout/Provider/TTI/imagen.py +196 -196
  143. webscout/Provider/TTI/infip.py +211 -211
  144. webscout/Provider/TTI/magicstudio.py +232 -232
  145. webscout/Provider/TTI/monochat.py +219 -219
  146. webscout/Provider/TTI/piclumen.py +214 -214
  147. webscout/Provider/TTI/pixelmuse.py +232 -232
  148. webscout/Provider/TTI/pollinations.py +232 -232
  149. webscout/Provider/TTI/together.py +288 -288
  150. webscout/Provider/TTI/utils.py +12 -12
  151. webscout/Provider/TTI/venice.py +367 -367
  152. webscout/Provider/TTS/README.md +192 -192
  153. webscout/Provider/TTS/__init__.py +33 -10
  154. webscout/Provider/TTS/parler.py +110 -110
  155. webscout/Provider/TTS/streamElements.py +333 -333
  156. webscout/Provider/TTS/utils.py +280 -280
  157. webscout/Provider/TeachAnything.py +237 -236
  158. webscout/Provider/TextPollinationsAI.py +311 -318
  159. webscout/Provider/TogetherAI.py +356 -357
  160. webscout/Provider/TwoAI.py +313 -569
  161. webscout/Provider/TypliAI.py +312 -311
  162. webscout/Provider/UNFINISHED/ChatHub.py +208 -208
  163. webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
  164. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
  165. webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
  166. webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
  167. webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
  168. webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
  169. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  170. webscout/Provider/UNFINISHED/liner.py +334 -0
  171. webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
  172. webscout/Provider/UNFINISHED/puterjs.py +634 -634
  173. webscout/Provider/UNFINISHED/samurai.py +223 -223
  174. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  175. webscout/Provider/Venice.py +251 -250
  176. webscout/Provider/VercelAI.py +256 -255
  177. webscout/Provider/WiseCat.py +232 -231
  178. webscout/Provider/WrDoChat.py +367 -366
  179. webscout/Provider/__init__.py +33 -86
  180. webscout/Provider/ai4chat.py +174 -174
  181. webscout/Provider/akashgpt.py +331 -334
  182. webscout/Provider/cerebras.py +446 -340
  183. webscout/Provider/chatglm.py +394 -214
  184. webscout/Provider/cleeai.py +211 -212
  185. webscout/Provider/deepseek_assistant.py +1 -1
  186. webscout/Provider/elmo.py +282 -282
  187. webscout/Provider/geminiapi.py +208 -208
  188. webscout/Provider/granite.py +261 -261
  189. webscout/Provider/hermes.py +263 -265
  190. webscout/Provider/julius.py +223 -222
  191. webscout/Provider/learnfastai.py +309 -309
  192. webscout/Provider/llama3mitril.py +214 -214
  193. webscout/Provider/llmchat.py +243 -243
  194. webscout/Provider/llmchatco.py +290 -290
  195. webscout/Provider/meta.py +801 -801
  196. webscout/Provider/oivscode.py +309 -309
  197. webscout/Provider/scira_chat.py +384 -457
  198. webscout/Provider/searchchat.py +292 -291
  199. webscout/Provider/sonus.py +258 -258
  200. webscout/Provider/toolbaz.py +370 -364
  201. webscout/Provider/turboseek.py +274 -265
  202. webscout/Provider/typefully.py +208 -207
  203. webscout/Provider/x0gpt.py +1 -0
  204. webscout/Provider/yep.py +372 -371
  205. webscout/__init__.py +30 -31
  206. webscout/__main__.py +5 -5
  207. webscout/auth/api_key_manager.py +189 -189
  208. webscout/auth/config.py +175 -175
  209. webscout/auth/models.py +185 -185
  210. webscout/auth/routes.py +664 -664
  211. webscout/auth/simple_logger.py +236 -236
  212. webscout/cli.py +523 -523
  213. webscout/conversation.py +438 -438
  214. webscout/exceptions.py +361 -361
  215. webscout/litagent/Readme.md +298 -298
  216. webscout/litagent/__init__.py +28 -28
  217. webscout/litagent/agent.py +581 -581
  218. webscout/litagent/constants.py +59 -59
  219. webscout/litprinter/__init__.py +58 -58
  220. webscout/models.py +181 -181
  221. webscout/optimizers.py +419 -419
  222. webscout/prompt_manager.py +288 -288
  223. webscout/sanitize.py +1078 -1078
  224. webscout/scout/README.md +401 -401
  225. webscout/scout/__init__.py +8 -8
  226. webscout/scout/core/__init__.py +6 -6
  227. webscout/scout/core/crawler.py +297 -297
  228. webscout/scout/core/scout.py +706 -706
  229. webscout/scout/core/search_result.py +95 -95
  230. webscout/scout/core/text_analyzer.py +62 -62
  231. webscout/scout/core/text_utils.py +277 -277
  232. webscout/scout/core/web_analyzer.py +51 -51
  233. webscout/scout/element.py +599 -599
  234. webscout/scout/parsers/__init__.py +69 -69
  235. webscout/scout/parsers/html5lib_parser.py +172 -172
  236. webscout/scout/parsers/html_parser.py +236 -236
  237. webscout/scout/parsers/lxml_parser.py +178 -178
  238. webscout/scout/utils.py +37 -37
  239. webscout/swiftcli/Readme.md +323 -323
  240. webscout/swiftcli/__init__.py +95 -95
  241. webscout/swiftcli/core/__init__.py +7 -7
  242. webscout/swiftcli/core/cli.py +308 -308
  243. webscout/swiftcli/core/context.py +104 -104
  244. webscout/swiftcli/core/group.py +241 -241
  245. webscout/swiftcli/decorators/__init__.py +28 -28
  246. webscout/swiftcli/decorators/command.py +221 -221
  247. webscout/swiftcli/decorators/options.py +220 -220
  248. webscout/swiftcli/decorators/output.py +302 -302
  249. webscout/swiftcli/exceptions.py +21 -21
  250. webscout/swiftcli/plugins/__init__.py +9 -9
  251. webscout/swiftcli/plugins/base.py +135 -135
  252. webscout/swiftcli/plugins/manager.py +269 -269
  253. webscout/swiftcli/utils/__init__.py +59 -59
  254. webscout/swiftcli/utils/formatting.py +252 -252
  255. webscout/swiftcli/utils/parsing.py +267 -267
  256. webscout/update_checker.py +117 -117
  257. webscout/version.py +1 -1
  258. webscout/webscout_search.py +1183 -1183
  259. webscout/webscout_search_async.py +649 -649
  260. webscout/yep_search.py +346 -346
  261. webscout/zeroart/README.md +89 -89
  262. webscout/zeroart/__init__.py +134 -134
  263. webscout/zeroart/base.py +66 -66
  264. webscout/zeroart/effects.py +100 -100
  265. webscout/zeroart/fonts.py +1238 -1238
  266. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
  267. webscout-2025.10.11.dist-info/RECORD +300 -0
  268. webscout/Provider/AISEARCH/DeepFind.py +0 -254
  269. webscout/Provider/AllenAI.py +0 -440
  270. webscout/Provider/Blackboxai.py +0 -793
  271. webscout/Provider/FreeGemini.py +0 -250
  272. webscout/Provider/GptOss.py +0 -207
  273. webscout/Provider/Hunyuan.py +0 -283
  274. webscout/Provider/Kimi.py +0 -445
  275. webscout/Provider/MCPCore.py +0 -322
  276. webscout/Provider/MiniMax.py +0 -207
  277. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  278. webscout/Provider/OPENAI/MiniMax.py +0 -298
  279. webscout/Provider/OPENAI/Qwen3.py +0 -304
  280. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  281. webscout/Provider/OPENAI/copilot.py +0 -321
  282. webscout/Provider/OPENAI/gptoss.py +0 -288
  283. webscout/Provider/OPENAI/kimi.py +0 -469
  284. webscout/Provider/OPENAI/mcpcore.py +0 -431
  285. webscout/Provider/OPENAI/multichat.py +0 -378
  286. webscout/Provider/OPENAI/qodo.py +0 -630
  287. webscout/Provider/OPENAI/xenai.py +0 -514
  288. webscout/Provider/Reka.py +0 -214
  289. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  290. webscout/Provider/asksteve.py +0 -220
  291. webscout/Provider/copilot.py +0 -441
  292. webscout/Provider/freeaichat.py +0 -294
  293. webscout/Provider/koala.py +0 -182
  294. webscout/Provider/lmarena.py +0 -198
  295. webscout/Provider/monochat.py +0 -275
  296. webscout/Provider/multichat.py +0 -375
  297. webscout/Provider/scnet.py +0 -244
  298. webscout/Provider/talkai.py +0 -194
  299. webscout/tempid.py +0 -128
  300. webscout-8.3.6.dist-info/RECORD +0 -327
  301. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
  302. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
  303. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
  304. {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
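
To reproduce or audit a diff like the one below, the two wheels can be compared locally. A rough sketch, assuming pip is on PATH and both releases are still published on PyPI (the member path chosen here is just one file from the list above):

# Sketch: download both wheels and diff one member file's text across versions.
import difflib
import subprocess
import zipfile
from pathlib import Path

def fetch_wheel(spec: str, dest: Path) -> Path:
    """Download a single wheel for `spec` into `dest` and return its path."""
    dest.mkdir(parents=True, exist_ok=True)
    subprocess.run(
        ["pip", "download", spec, "--no-deps", "--only-binary=:all:", "-d", str(dest)],
        check=True,
    )
    return next(dest.glob("*.whl"))

def read_member(wheel: Path, name: str) -> list[str]:
    """Read one file out of the wheel (a zip archive) as a list of lines."""
    with zipfile.ZipFile(wheel) as zf:
        return zf.read(name).decode("utf-8").splitlines(keepends=True)

old = fetch_wheel("webscout==8.3.6", Path("old"))
new = fetch_wheel("webscout==2025.10.11", Path("new"))
member = "webscout/version.py"  # any entry from the "Files changed" list
for line in difflib.unified_diff(
    read_member(old, member),
    read_member(new, member),
    fromfile=f"8.3.6/{member}",
    tofile=f"2025.10.11/{member}",
):
    print(line, end="")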
@@ -1,519 +1,519 @@
Every changed line in this hunk removes and re-adds identical text: the file (evidently webscout/Provider/OPENAI/opkfc.py, listed above as +518 −518) is rewritten with no visible content change, which points to a line-ending or trailing-whitespace difference. The file content, shown once:

from datetime import datetime
import time
import uuid
import requests
import json
import random
from typing import List, Dict, Optional, Union, Generator, Any

# Import base classes and utility structures
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from webscout.Provider.OPENAI.utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage, count_tokens
)
from webscout.litagent import LitAgent
# ANSI escape codes for formatting
BOLD = "\033[1m"
RED = "\033[91m"
RESET = "\033[0m"

class Completions(BaseCompletions):
    def __init__(self, client: 'OPKFC'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion with OPKFC API.

        Args:
            model: The model to use (from AVAILABLE_MODELS)
            messages: List of message dictionaries with 'role' and 'content'
            max_tokens: Maximum number of tokens to generate
            stream: Whether to stream the response
            temperature: Sampling temperature (0-1)
            top_p: Nucleus sampling parameter (0-1)
            **kwargs: Additional parameters to pass to the API

        Returns:
            If stream=False, returns a ChatCompletion object
            If stream=True, returns a Generator yielding ChatCompletionChunk objects
        """
        # Use streaming implementation if requested
        if stream:
            return self._create_streaming(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                timeout=timeout,
                proxies=proxies,
                **kwargs
            )

        # Otherwise use non-streaming implementation
        return self._create_non_streaming(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            timeout=timeout,
            proxies=proxies,
            **kwargs
        )

    def _create_streaming(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Implementation for streaming chat completions."""
        try:
            # Generate request ID and timestamp
            request_id = str(uuid.uuid4())
            created_time = int(time.time())

            # Generate a random 6-digit auth token
            auth_token = str(random.randint(0, 999999)).zfill(6)

            # Prepare headers exactly as in the original script
            headers = {
                "Accept": "text/event-stream",
                "Accept-Encoding": "gzip, deflate, br, zstd",
                "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
                "Authorization": f"Bearer {auth_token}",
                "Cache-Control": "no-cache",
                "Content-Type": "application/json",
                "Cookie": self._client.cookie,
                "DNT": "1",
                "Origin": "https://www.opkfc.com",
                "Pragma": "no-cache",
                "Referer": "https://www.opkfc.com/",
                "Sec-CH-UA": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
                "Sec-CH-UA-Mobile": "?0",
                "Sec-CH-UA-Platform": "\"Windows\"",
                "Sec-Fetch-Dest": "empty",
                "Sec-Fetch-Mode": "cors",
                "Sec-Fetch-Site": "same-origin",
                "Sec-GPC": "1",
                "User-Agent": self._client.user_agent,
                "openai-sentinel-chat-requirements-token": "0cb55714-5810-47d4-a9c0-648406004279"
            }

            # Prepare payload with individual messages
            payload = {
                "action": "next",
                "messages": [
                    {
                        "id": str(uuid.uuid4()),
                        "author": {"role": msg["role"]},
                        "content": {"content_type": "text", "parts": [msg["content"]]},
                        "create_time": time.time()
                    }
                    for msg in messages
                ],
                "parent_message_id": str(uuid.uuid4()),
                "model": model,
                "timezone_offset_min": -330,
                "timezone": "Asia/Calcutta"
            }

            # Add optional parameters if provided
            if max_tokens is not None:
                payload["max_tokens"] = max_tokens
            if temperature is not None:
                payload["temperature"] = temperature
            if top_p is not None:
                payload["top_p"] = top_p

            # Make the streaming request
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )
            response.raise_for_status()

            # Process the streaming response
            content_buffer = ""
            response_started = False
            assistant_message_found = False

            for line in response.iter_lines(decode_unicode=True):
                if not line:
                    continue

                if line.startswith("data:"):
                    part = line[len("data:"):].strip()

                    if part == "[DONE]":
                        break

                    try:
                        # Skip the delta_encoding event
                        if part == '"v1"':
                            continue

                        obj = json.loads(part)
                        if isinstance(obj, dict):
                            # Check if this is an assistant message
                            if isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("author", {}).get("role") == "assistant":
                                assistant_message_found = True
                                # Reset content buffer when we find a new assistant message
                                content_buffer = ""
                                response_started = False
                                continue

                            # Skip until we find an assistant message
                            if not assistant_message_found:
                                continue

                            # Handle different response formats
                            content_to_add = None

                            # Format 1: Direct content in 'v' field
                            if isinstance(obj.get("v"), str):
                                content_to_add = obj["v"]

                            # Format 2: Path-based content with append operation
                            elif obj.get("p") == "/message/content/parts/0" and obj.get("o") == "append" and isinstance(obj.get("v"), str):
                                content_to_add = obj["v"]

                            # Format 3: Nested content in complex structure
                            elif isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("content", {}).get("parts"):
                                parts = obj["v"]["message"]["content"]["parts"]
                                if parts and isinstance(parts[0], str):
                                    content_to_add = parts[0]

                            # Format 4: Patch operation with append to content
                            elif obj.get("o") == "patch" and isinstance(obj.get("v"), list):
                                for patch in obj["v"]:
                                    if patch.get("p") == "/message/content/parts/0" and patch.get("o") == "append" and isinstance(patch.get("v"), str):
                                        content_to_add = patch["v"]

                            # If we found content to add
                            if content_to_add:
                                # Skip the first part if it's repeating the user's message
                                if not response_started and content_buffer == "" and any(msg["content"] in content_to_add for msg in messages if msg["role"] == "user"):
                                    # This is likely the user's message being echoed back, skip it
                                    continue

                                response_started = True
                                content_buffer += content_to_add

                                # Create and yield a chunk
                                delta = ChoiceDelta(content=content_to_add)
                                choice = Choice(index=0, delta=delta, finish_reason=None)
                                chunk = ChatCompletionChunk(
                                    id=request_id,
                                    choices=[choice],
                                    created=created_time,
                                    model=model
                                )

                                yield chunk
                    except (ValueError, json.JSONDecodeError) as e:
                        print(f"{RED}Error parsing streaming response: {e} - {part}{RESET}")
                        pass

            # Final chunk with finish_reason
            delta = ChoiceDelta(content=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model
            )

            yield chunk

        except Exception as e:
            print(f"{RED}Error during OPKFC streaming request: {e}{RESET}")
            raise IOError(f"OPKFC streaming request failed: {e}") from e

    def _create_non_streaming(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> ChatCompletion:
        """Implementation for non-streaming chat completions."""
        try:
            # Generate request ID and timestamp
            request_id = str(uuid.uuid4())
            created_time = int(time.time())

            # Generate a random 6-digit auth token
            auth_token = str(random.randint(0, 999999)).zfill(6)

            # Prepare headers exactly as in the original script
            headers = {
                "Accept": "text/event-stream",
                "Accept-Encoding": "gzip, deflate, br, zstd",
                "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
                "Authorization": f"Bearer {auth_token}",
                "Cache-Control": "no-cache",
                "Content-Type": "application/json",
                "Cookie": self._client.cookie,
                "DNT": "1",
                "Origin": "https://www.opkfc.com",
                "Pragma": "no-cache",
                "Referer": "https://www.opkfc.com/",
                "Sec-CH-UA": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
                "Sec-CH-UA-Mobile": "?0",
                "Sec-CH-UA-Platform": "\"Windows\"",
                "Sec-Fetch-Dest": "empty",
                "Sec-Fetch-Mode": "cors",
                "Sec-Fetch-Site": "same-origin",
                "Sec-GPC": "1",
                "User-Agent": self._client.user_agent,
                "openai-sentinel-chat-requirements-token": "0cb55714-5810-47d4-a9c0-648406004279"
            }

            # Prepare payload with individual messages
            payload = {
                "action": "next",
                "messages": [
                    {
                        "id": str(uuid.uuid4()),
                        "author": {"role": msg["role"]},
                        "content": {"content_type": "text", "parts": [msg["content"]]},
                        "create_time": time.time()
                    }
                    for msg in messages
                ],
                "parent_message_id": str(uuid.uuid4()),
                "model": model,
                "timezone_offset_min": -330,
                "timezone": "Asia/Calcutta"
            }

            # Add optional parameters if provided
            if max_tokens is not None:
                payload["max_tokens"] = max_tokens
            if temperature is not None:
                payload["temperature"] = temperature
            if top_p is not None:
                payload["top_p"] = top_p

            # Make the non-streaming request but process it as streaming
            # since the API only supports streaming responses
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )
            response.raise_for_status()

            # Process the streaming response to collect the full content
            full_content = ""
            response_started = False
            assistant_message_found = False

            for line in response.iter_lines(decode_unicode=True):
                if not line:
                    continue

                if line.startswith("data:"):
                    part = line[len("data:"):].strip()

                    if part == "[DONE]":
                        break

                    try:
                        # Skip the delta_encoding event
                        if part == '"v1"':
                            continue

                        obj = json.loads(part)
                        if isinstance(obj, dict):
                            # Check if this is an assistant message
                            if isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("author", {}).get("role") == "assistant":
                                assistant_message_found = True
                                # Reset content buffer when we find a new assistant message
                                full_content = ""
                                response_started = False
                                continue

                            # Skip until we find an assistant message
                            if not assistant_message_found:
                                continue

                            # Handle different response formats
                            content_to_add = None

                            # Format 1: Direct content in 'v' field
                            if isinstance(obj.get("v"), str):
                                content_to_add = obj["v"]

                            # Format 2: Path-based content with append operation
                            elif obj.get("p") == "/message/content/parts/0" and obj.get("o") == "append" and isinstance(obj.get("v"), str):
                                content_to_add = obj["v"]

                            # Format 3: Nested content in complex structure
                            elif isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("content", {}).get("parts"):
                                parts = obj["v"]["message"]["content"]["parts"]
                                if parts and isinstance(parts[0], str):
                                    content_to_add = parts[0]

                            # Format 4: Patch operation with append to content
                            elif obj.get("o") == "patch" and isinstance(obj.get("v"), list):
                                for patch in obj["v"]:
                                    if patch.get("p") == "/message/content/parts/0" and patch.get("o") == "append" and isinstance(patch.get("v"), str):
                                        content_to_add = patch["v"]

                            # If we found content to add
                            if content_to_add:
                                # Skip the first part if it's repeating the user's message
                                if not response_started and full_content == "" and any(msg["content"] in content_to_add for msg in messages if msg["role"] == "user"):
                                    # This is likely the user's message being echoed back, skip it
                                    continue

                                response_started = True
                                full_content += content_to_add
                    except (ValueError, json.JSONDecodeError) as e:
                        print(f"{RED}Error parsing non-streaming response: {e} - {part}{RESET}")
                        pass

            # Create the completion message
            message = ChatCompletionMessage(
                role="assistant",
                content=full_content
            )

            # Create the choice
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Estimate token usage using count_tokens
            prompt_tokens = count_tokens([msg.get("content", "") for msg in messages])
            completion_tokens = count_tokens(full_content)
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )

            # Create the completion object
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

            return completion

        except Exception as e:
            print(f"{RED}Error during OPKFC non-stream request: {e}{RESET}")
            raise IOError(f"OPKFC request failed: {e}") from e

class Chat(BaseChat):
    def __init__(self, client: 'OPKFC'):
        self.completions = Completions(client)

class OPKFC(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for OPKFC API.

    Usage:
        client = OPKFC()
        response = client.chat.completions.create(
            model="auto",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = [
        "auto",
        "o4-mini",
        "gpt-4o-mini",
        "gpt-4o",
        "gpt-4-1-mini",

    ]

    def __init__(
        self,
        timeout: int = 30,
        proxies: dict = {}
    ):
        """
        Initialize the OPKFC client.

        Args:
            timeout: Request timeout in seconds
            proxies: Optional proxy configuration
        """
        self.timeout = timeout
        self.api_endpoint = "https://www.opkfc.com/backend-api/conversation"
        self.proxies = proxies

        # Initialize session
        self.session = requests.Session()
        if proxies:
            self.session.proxies.update(proxies)

        # Set the user agent to match the original script
        self.user_agent = LitAgent().random()

        # Set the cookie from the original script
        self.cookie = f"__vtins__KUc0LhjVWFNXQv11=%7B%22sid%22%3A%20%{uuid.uuid4().hex}%22%2C%20%22vd%22%3A%201%2C%20%22stt%22%3A%200%2C%20%22dr%22%3A%200%2C%20%22expires%22%3A%201744896723481%2C%20%22ct%22%3A%201744894923481%7D; __51uvsct__KUc0LhjVWFNXQv11=1; __51vcke__KUc0LhjVWFNXQv11=06da852c-bb56-547c-91a8-43a0d485ffed; __51vuft__KUc0LhjVWFNXQv11=1744894923504; gfsessionid=1ochrgv17vy4sbd98xmwt6crpmkxwlqf; oai-nav-state=1; p_uv_id=ad86646801bc60d6d95f6098e4ee7450; _dd_s=rum=0&expire=1744895920821&logs=1&id={uuid.uuid4().hex}&created={int(datetime.utcnow().timestamp() * 1000)}"

        # Initialize chat interface
        self.chat = Chat(self)

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()

if __name__ == "__main__":
    # Example usage
    client = OPKFC()
    response = client.chat.completions.create(
        model="auto",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)
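
The class docstring above only shows the non-streaming path. A hedged sketch of the streaming path, based on the create(..., stream=True) signature and the ChatCompletionChunk/ChoiceDelta objects defined in this file (the import path webscout.Provider.OPENAI.opkfc is inferred from the "Files changed" list; adjust if the module lives elsewhere):

# Sketch: consuming the streaming path of the OPKFC client shown above.
from webscout.Provider.OPENAI.opkfc import OPKFC  # module path assumed from the file list

client = OPKFC(timeout=60)
stream = client.chat.completions.create(
    model="auto",
    messages=[{"role": "user", "content": "Summarize PEP 440 in one line."}],
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta and delta.content:  # the final chunk carries content=None with finish_reason="stop"
        print(delta.content, end="", flush=True)
print()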