webscout 8.2.8-py3-none-any.whl → 8.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/opkfc.py
@@ -1,496 +1,507 @@
- import time
- import uuid
- import requests
- import json
- import random
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage
- )
-
- # ANSI escape codes for formatting
- BOLD = "\033[1m"
- RED = "\033[91m"
- RESET = "\033[0m"
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'OPKFC'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Create a chat completion with OPKFC API.
-
-         Args:
-             model: The model to use (from AVAILABLE_MODELS)
-             messages: List of message dictionaries with 'role' and 'content'
-             max_tokens: Maximum number of tokens to generate
-             stream: Whether to stream the response
-             temperature: Sampling temperature (0-1)
-             top_p: Nucleus sampling parameter (0-1)
-             **kwargs: Additional parameters to pass to the API
-
-         Returns:
-             If stream=False, returns a ChatCompletion object
-             If stream=True, returns a Generator yielding ChatCompletionChunk objects
-         """
-         # Use streaming implementation if requested
-         if stream:
-             return self._create_streaming(
-                 model=model,
-                 messages=messages,
-                 max_tokens=max_tokens,
-                 temperature=temperature,
-                 top_p=top_p,
-                 **kwargs
-             )
-
-         # Otherwise use non-streaming implementation
-         return self._create_non_streaming(
-             model=model,
-             messages=messages,
-             max_tokens=max_tokens,
-             temperature=temperature,
-             top_p=top_p,
-             **kwargs
-         )
-
-     def _create_streaming(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         **kwargs: Any
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         """Implementation for streaming chat completions."""
-         try:
-             # Generate request ID and timestamp
-             request_id = str(uuid.uuid4())
-             created_time = int(time.time())
-
-             # Generate a random 6-digit auth token
-             auth_token = str(random.randint(0, 999999)).zfill(6)
-
-             # Prepare headers exactly as in the original script
-             headers = {
-                 "Accept": "text/event-stream",
-                 "Accept-Encoding": "gzip, deflate, br, zstd",
-                 "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-                 "Authorization": f"Bearer {auth_token}",
-                 "Cache-Control": "no-cache",
-                 "Content-Type": "application/json",
-                 "Cookie": self._client.cookie,
-                 "DNT": "1",
-                 "Origin": "https://www.opkfc.com",
-                 "Pragma": "no-cache",
-                 "Referer": "https://www.opkfc.com/",
-                 "Sec-CH-UA": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
-                 "Sec-CH-UA-Mobile": "?0",
-                 "Sec-CH-UA-Platform": "\"Windows\"",
-                 "Sec-Fetch-Dest": "empty",
-                 "Sec-Fetch-Mode": "cors",
-                 "Sec-Fetch-Site": "same-origin",
-                 "Sec-GPC": "1",
-                 "User-Agent": self._client.user_agent,
-                 "openai-sentinel-chat-requirements-token": "0cb55714-5810-47d4-a9c0-648406004279"
-             }
-
-             # Prepare payload with individual messages
-             payload = {
-                 "action": "next",
-                 "messages": [
-                     {
-                         "id": str(uuid.uuid4()),
-                         "author": {"role": msg["role"]},
-                         "content": {"content_type": "text", "parts": [msg["content"]]},
-                         "create_time": time.time()
-                     }
-                     for msg in messages
-                 ],
-                 "parent_message_id": str(uuid.uuid4()),
-                 "model": model,
-                 "timezone_offset_min": -330,
-                 "timezone": "Asia/Calcutta"
-             }
-
-             # Add optional parameters if provided
-             if max_tokens is not None:
-                 payload["max_tokens"] = max_tokens
-             if temperature is not None:
-                 payload["temperature"] = temperature
-             if top_p is not None:
-                 payload["top_p"] = top_p
-
-             # Make the streaming request
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             # Process the streaming response
-             content_buffer = ""
-             response_started = False
-             assistant_message_found = False
-
-             for line in response.iter_lines(decode_unicode=True):
-                 if not line:
-                     continue
-
-                 if line.startswith("data:"):
-                     part = line[len("data:"):].strip()
-
-                     if part == "[DONE]":
-                         break
-
-                     try:
-                         # Skip the delta_encoding event
-                         if part == '"v1"':
-                             continue
-
-                         obj = json.loads(part)
-                         if isinstance(obj, dict):
-                             # Check if this is an assistant message
-                             if isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("author", {}).get("role") == "assistant":
-                                 assistant_message_found = True
-                                 # Reset content buffer when we find a new assistant message
-                                 content_buffer = ""
-                                 response_started = False
-                                 continue
-
-                             # Skip until we find an assistant message
-                             if not assistant_message_found:
-                                 continue
-
-                             # Handle different response formats
-                             content_to_add = None
-
-                             # Format 1: Direct content in 'v' field
-                             if isinstance(obj.get("v"), str):
-                                 content_to_add = obj["v"]
-
-                             # Format 2: Path-based content with append operation
-                             elif obj.get("p") == "/message/content/parts/0" and obj.get("o") == "append" and isinstance(obj.get("v"), str):
-                                 content_to_add = obj["v"]
-
-                             # Format 3: Nested content in complex structure
-                             elif isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("content", {}).get("parts"):
-                                 parts = obj["v"]["message"]["content"]["parts"]
-                                 if parts and isinstance(parts[0], str):
-                                     content_to_add = parts[0]
-
-                             # Format 4: Patch operation with append to content
-                             elif obj.get("o") == "patch" and isinstance(obj.get("v"), list):
-                                 for patch in obj["v"]:
-                                     if patch.get("p") == "/message/content/parts/0" and patch.get("o") == "append" and isinstance(patch.get("v"), str):
-                                         content_to_add = patch["v"]
-
-                             # If we found content to add
-                             if content_to_add:
-                                 # Skip the first part if it's repeating the user's message
-                                 if not response_started and content_buffer == "" and any(msg["content"] in content_to_add for msg in messages if msg["role"] == "user"):
-                                     # This is likely the user's message being echoed back, skip it
-                                     continue
-
-                                 response_started = True
-                                 content_buffer += content_to_add
-
-                                 # Create and yield a chunk
-                                 delta = ChoiceDelta(content=content_to_add)
-                                 choice = Choice(index=0, delta=delta, finish_reason=None)
-                                 chunk = ChatCompletionChunk(
-                                     id=request_id,
-                                     choices=[choice],
-                                     created=created_time,
-                                     model=model
-                                 )
-
-                                 yield chunk
-                     except (ValueError, json.JSONDecodeError) as e:
-                         print(f"{RED}Error parsing streaming response: {e} - {part}{RESET}")
-                         pass
-
-             # Final chunk with finish_reason
-             delta = ChoiceDelta(content=None)
-             choice = Choice(index=0, delta=delta, finish_reason="stop")
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model
-             )
-
-             yield chunk
-
-         except Exception as e:
-             print(f"{RED}Error during OPKFC streaming request: {e}{RESET}")
-             raise IOError(f"OPKFC streaming request failed: {e}") from e
-
-     def _create_non_streaming(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         **kwargs: Any
-     ) -> ChatCompletion:
-         """Implementation for non-streaming chat completions."""
-         try:
-             # Generate request ID and timestamp
-             request_id = str(uuid.uuid4())
-             created_time = int(time.time())
-
-             # Generate a random 6-digit auth token
-             auth_token = str(random.randint(0, 999999)).zfill(6)
-
-             # Prepare headers exactly as in the original script
-             headers = {
-                 "Accept": "text/event-stream",
-                 "Accept-Encoding": "gzip, deflate, br, zstd",
-                 "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
-                 "Authorization": f"Bearer {auth_token}",
-                 "Cache-Control": "no-cache",
-                 "Content-Type": "application/json",
-                 "Cookie": self._client.cookie,
-                 "DNT": "1",
-                 "Origin": "https://www.opkfc.com",
-                 "Pragma": "no-cache",
-                 "Referer": "https://www.opkfc.com/",
-                 "Sec-CH-UA": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
-                 "Sec-CH-UA-Mobile": "?0",
-                 "Sec-CH-UA-Platform": "\"Windows\"",
-                 "Sec-Fetch-Dest": "empty",
-                 "Sec-Fetch-Mode": "cors",
-                 "Sec-Fetch-Site": "same-origin",
-                 "Sec-GPC": "1",
-                 "User-Agent": self._client.user_agent,
-                 "openai-sentinel-chat-requirements-token": "0cb55714-5810-47d4-a9c0-648406004279"
-             }
-
-             # Prepare payload with individual messages
-             payload = {
-                 "action": "next",
-                 "messages": [
-                     {
-                         "id": str(uuid.uuid4()),
-                         "author": {"role": msg["role"]},
-                         "content": {"content_type": "text", "parts": [msg["content"]]},
-                         "create_time": time.time()
-                     }
-                     for msg in messages
-                 ],
-                 "parent_message_id": str(uuid.uuid4()),
-                 "model": model,
-                 "timezone_offset_min": -330,
-                 "timezone": "Asia/Calcutta"
-             }
-
-             # Add optional parameters if provided
-             if max_tokens is not None:
-                 payload["max_tokens"] = max_tokens
-             if temperature is not None:
-                 payload["temperature"] = temperature
-             if top_p is not None:
-                 payload["top_p"] = top_p
-
-             # Make the non-streaming request but process it as streaming
-             # since the API only supports streaming responses
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=headers,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-             response.raise_for_status()
-
-             # Process the streaming response to collect the full content
-             full_content = ""
-             response_started = False
-             assistant_message_found = False
-
-             for line in response.iter_lines(decode_unicode=True):
-                 if not line:
-                     continue
-
-                 if line.startswith("data:"):
-                     part = line[len("data:"):].strip()
-
-                     if part == "[DONE]":
-                         break
-
-                     try:
-                         # Skip the delta_encoding event
-                         if part == '"v1"':
-                             continue
-
-                         obj = json.loads(part)
-                         if isinstance(obj, dict):
-                             # Check if this is an assistant message
-                             if isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("author", {}).get("role") == "assistant":
-                                 assistant_message_found = True
-                                 # Reset content buffer when we find a new assistant message
-                                 full_content = ""
-                                 response_started = False
-                                 continue
-
-                             # Skip until we find an assistant message
-                             if not assistant_message_found:
-                                 continue
-
-                             # Handle different response formats
-                             content_to_add = None
-
-                             # Format 1: Direct content in 'v' field
-                             if isinstance(obj.get("v"), str):
-                                 content_to_add = obj["v"]
-
-                             # Format 2: Path-based content with append operation
-                             elif obj.get("p") == "/message/content/parts/0" and obj.get("o") == "append" and isinstance(obj.get("v"), str):
-                                 content_to_add = obj["v"]
-
-                             # Format 3: Nested content in complex structure
-                             elif isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("content", {}).get("parts"):
-                                 parts = obj["v"]["message"]["content"]["parts"]
-                                 if parts and isinstance(parts[0], str):
-                                     content_to_add = parts[0]
-
-                             # Format 4: Patch operation with append to content
-                             elif obj.get("o") == "patch" and isinstance(obj.get("v"), list):
-                                 for patch in obj["v"]:
-                                     if patch.get("p") == "/message/content/parts/0" and patch.get("o") == "append" and isinstance(patch.get("v"), str):
-                                         content_to_add = patch["v"]
-
-                             # If we found content to add
-                             if content_to_add:
-                                 # Skip the first part if it's repeating the user's message
-                                 if not response_started and full_content == "" and any(msg["content"] in content_to_add for msg in messages if msg["role"] == "user"):
-                                     # This is likely the user's message being echoed back, skip it
-                                     continue
-
-                                 response_started = True
-                                 full_content += content_to_add
-                     except (ValueError, json.JSONDecodeError) as e:
-                         print(f"{RED}Error parsing non-streaming response: {e} - {part}{RESET}")
-                         pass
-
-             # Create the completion message
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_content
-             )
-
-             # Create the choice
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Estimate token usage (very rough estimate)
-             prompt_tokens = sum(len(msg.get("content", "")) // 4 for msg in messages)
-             completion_tokens = len(full_content) // 4
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=prompt_tokens + completion_tokens
-             )
-
-             # Create the completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-
-             return completion
-
-         except Exception as e:
-             print(f"{RED}Error during OPKFC non-stream request: {e}{RESET}")
-             raise IOError(f"OPKFC request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'OPKFC'):
-         self.completions = Completions(client)
-
- class OPKFC(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for OPKFC API.
-
-     Usage:
-         client = OPKFC()
-         response = client.chat.completions.create(
-             model="auto",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-
-     AVAILABLE_MODELS = [
-         "auto",
-         "o4-mini",
-         "gpt-4o-mini",
-         "gpt-4o"
-     ]
-
-     def __init__(
-         self,
-         timeout: int = 30,
-         proxies: dict = {}
-     ):
-         """
-         Initialize the OPKFC client.
-
-         Args:
-             timeout: Request timeout in seconds
-             proxies: Optional proxy configuration
-         """
-         self.timeout = timeout
-         self.api_endpoint = "https://www.opkfc.com/backend-api/conversation"
-         self.proxies = proxies
-
-         # Initialize session
-         self.session = requests.Session()
-         if proxies:
-             self.session.proxies.update(proxies)
-
-         # Set the user agent to match the original script
-         self.user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0"
-
-         # Set the cookie from the original script
-         self.cookie = "__vtins__KUc0LhjVWFNXQv11=%7B%22sid%22%3A%20%228fab09e3-c23e-5f60-b369-9697fbb821ce%22%2C%20%22vd%22%3A%201%2C%20%22stt%22%3A%200%2C%20%22dr%22%3A%200%2C%20%22expires%22%3A%201744896723481%2C%20%22ct%22%3A%201744894923481%7D; __51uvsct__KUc0LhjVWFNXQv11=1; __51vcke__KUc0LhjVWFNXQv11=06da852c-bb56-547c-91a8-43a0d485ffed; __51vuft__KUc0LhjVWFNXQv11=1744894923504; gfsessionid=1ochrgv17vy4sbd98xmwt6crpmkxwlqf; oai-nav-state=1; p_uv_id=ad86646801bc60d6d95f6098e4ee7450; _dd_s=rum=0&expire=1744895920821&logs=1&id=a39221c9-e8ed-44e6-a2c8-03192699c71e&created=1744894970625"
-
-         # Initialize chat interface
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
-
+ import time
+ import uuid
+ import requests
+ import json
+ import random
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, count_tokens
+ )
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'OPKFC'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Create a chat completion with OPKFC API.
+
+         Args:
+             model: The model to use (from AVAILABLE_MODELS)
+             messages: List of message dictionaries with 'role' and 'content'
+             max_tokens: Maximum number of tokens to generate
+             stream: Whether to stream the response
+             temperature: Sampling temperature (0-1)
+             top_p: Nucleus sampling parameter (0-1)
+             **kwargs: Additional parameters to pass to the API
+
+         Returns:
+             If stream=False, returns a ChatCompletion object
+             If stream=True, returns a Generator yielding ChatCompletionChunk objects
+         """
+         # Use streaming implementation if requested
+         if stream:
+             return self._create_streaming(
+                 model=model,
+                 messages=messages,
+                 max_tokens=max_tokens,
+                 temperature=temperature,
+                 top_p=top_p,
+                 timeout=timeout,
+                 proxies=proxies,
+                 **kwargs
+             )
+
+         # Otherwise use non-streaming implementation
+         return self._create_non_streaming(
+             model=model,
+             messages=messages,
+             max_tokens=max_tokens,
+             temperature=temperature,
+             top_p=top_p,
+             timeout=timeout,
+             proxies=proxies,
+             **kwargs
+         )
+
+     def _create_streaming(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         """Implementation for streaming chat completions."""
+         try:
+             # Generate request ID and timestamp
+             request_id = str(uuid.uuid4())
+             created_time = int(time.time())
+
+             # Generate a random 6-digit auth token
+             auth_token = str(random.randint(0, 999999)).zfill(6)
+
+             # Prepare headers exactly as in the original script
+             headers = {
+                 "Accept": "text/event-stream",
+                 "Accept-Encoding": "gzip, deflate, br, zstd",
+                 "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+                 "Authorization": f"Bearer {auth_token}",
+                 "Cache-Control": "no-cache",
+                 "Content-Type": "application/json",
+                 "Cookie": self._client.cookie,
+                 "DNT": "1",
+                 "Origin": "https://www.opkfc.com",
+                 "Pragma": "no-cache",
+                 "Referer": "https://www.opkfc.com/",
+                 "Sec-CH-UA": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
+                 "Sec-CH-UA-Mobile": "?0",
+                 "Sec-CH-UA-Platform": "\"Windows\"",
+                 "Sec-Fetch-Dest": "empty",
+                 "Sec-Fetch-Mode": "cors",
+                 "Sec-Fetch-Site": "same-origin",
+                 "Sec-GPC": "1",
+                 "User-Agent": self._client.user_agent,
+                 "openai-sentinel-chat-requirements-token": "0cb55714-5810-47d4-a9c0-648406004279"
+             }
+
+             # Prepare payload with individual messages
+             payload = {
+                 "action": "next",
+                 "messages": [
+                     {
+                         "id": str(uuid.uuid4()),
+                         "author": {"role": msg["role"]},
+                         "content": {"content_type": "text", "parts": [msg["content"]]},
+                         "create_time": time.time()
+                     }
+                     for msg in messages
+                 ],
+                 "parent_message_id": str(uuid.uuid4()),
+                 "model": model,
+                 "timezone_offset_min": -330,
+                 "timezone": "Asia/Calcutta"
+             }
+
+             # Add optional parameters if provided
+             if max_tokens is not None:
+                 payload["max_tokens"] = max_tokens
+             if temperature is not None:
+                 payload["temperature"] = temperature
+             if top_p is not None:
+                 payload["top_p"] = top_p
+
+             # Make the streaming request
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=headers,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+             response.raise_for_status()
+
+             # Process the streaming response
+             content_buffer = ""
+             response_started = False
+             assistant_message_found = False
+
+             for line in response.iter_lines(decode_unicode=True):
+                 if not line:
+                     continue
+
+                 if line.startswith("data:"):
+                     part = line[len("data:"):].strip()
+
+                     if part == "[DONE]":
+                         break
+
+                     try:
+                         # Skip the delta_encoding event
+                         if part == '"v1"':
+                             continue
+
+                         obj = json.loads(part)
+                         if isinstance(obj, dict):
+                             # Check if this is an assistant message
+                             if isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("author", {}).get("role") == "assistant":
+                                 assistant_message_found = True
+                                 # Reset content buffer when we find a new assistant message
+                                 content_buffer = ""
+                                 response_started = False
+                                 continue
+
+                             # Skip until we find an assistant message
+                             if not assistant_message_found:
+                                 continue
+
+                             # Handle different response formats
+                             content_to_add = None
+
+                             # Format 1: Direct content in 'v' field
+                             if isinstance(obj.get("v"), str):
+                                 content_to_add = obj["v"]
+
+                             # Format 2: Path-based content with append operation
+                             elif obj.get("p") == "/message/content/parts/0" and obj.get("o") == "append" and isinstance(obj.get("v"), str):
+                                 content_to_add = obj["v"]
+
+                             # Format 3: Nested content in complex structure
+                             elif isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("content", {}).get("parts"):
+                                 parts = obj["v"]["message"]["content"]["parts"]
+                                 if parts and isinstance(parts[0], str):
+                                     content_to_add = parts[0]
+
+                             # Format 4: Patch operation with append to content
+                             elif obj.get("o") == "patch" and isinstance(obj.get("v"), list):
+                                 for patch in obj["v"]:
+                                     if patch.get("p") == "/message/content/parts/0" and patch.get("o") == "append" and isinstance(patch.get("v"), str):
+                                         content_to_add = patch["v"]
+
+                             # If we found content to add
+                             if content_to_add:
+                                 # Skip the first part if it's repeating the user's message
+                                 if not response_started and content_buffer == "" and any(msg["content"] in content_to_add for msg in messages if msg["role"] == "user"):
+                                     # This is likely the user's message being echoed back, skip it
+                                     continue
+
+                                 response_started = True
+                                 content_buffer += content_to_add
+
+                                 # Create and yield a chunk
+                                 delta = ChoiceDelta(content=content_to_add)
+                                 choice = Choice(index=0, delta=delta, finish_reason=None)
+                                 chunk = ChatCompletionChunk(
+                                     id=request_id,
+                                     choices=[choice],
+                                     created=created_time,
+                                     model=model
+                                 )
+
+                                 yield chunk
+                     except (ValueError, json.JSONDecodeError) as e:
+                         print(f"{RED}Error parsing streaming response: {e} - {part}{RESET}")
+                         pass
+
+             # Final chunk with finish_reason
+             delta = ChoiceDelta(content=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model
+             )
+
+             yield chunk
+
+         except Exception as e:
+             print(f"{RED}Error during OPKFC streaming request: {e}{RESET}")
+             raise IOError(f"OPKFC streaming request failed: {e}") from e
+
+     def _create_non_streaming(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> ChatCompletion:
+         """Implementation for non-streaming chat completions."""
+         try:
+             # Generate request ID and timestamp
+             request_id = str(uuid.uuid4())
+             created_time = int(time.time())
+
+             # Generate a random 6-digit auth token
+             auth_token = str(random.randint(0, 999999)).zfill(6)
+
+             # Prepare headers exactly as in the original script
+             headers = {
+                 "Accept": "text/event-stream",
+                 "Accept-Encoding": "gzip, deflate, br, zstd",
+                 "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+                 "Authorization": f"Bearer {auth_token}",
+                 "Cache-Control": "no-cache",
+                 "Content-Type": "application/json",
+                 "Cookie": self._client.cookie,
+                 "DNT": "1",
+                 "Origin": "https://www.opkfc.com",
+                 "Pragma": "no-cache",
+                 "Referer": "https://www.opkfc.com/",
+                 "Sec-CH-UA": "\"Microsoft Edge\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
+                 "Sec-CH-UA-Mobile": "?0",
+                 "Sec-CH-UA-Platform": "\"Windows\"",
+                 "Sec-Fetch-Dest": "empty",
+                 "Sec-Fetch-Mode": "cors",
+                 "Sec-Fetch-Site": "same-origin",
+                 "Sec-GPC": "1",
+                 "User-Agent": self._client.user_agent,
+                 "openai-sentinel-chat-requirements-token": "0cb55714-5810-47d4-a9c0-648406004279"
+             }
+
+             # Prepare payload with individual messages
+             payload = {
+                 "action": "next",
+                 "messages": [
+                     {
+                         "id": str(uuid.uuid4()),
+                         "author": {"role": msg["role"]},
+                         "content": {"content_type": "text", "parts": [msg["content"]]},
+                         "create_time": time.time()
+                     }
+                     for msg in messages
+                 ],
+                 "parent_message_id": str(uuid.uuid4()),
+                 "model": model,
+                 "timezone_offset_min": -330,
+                 "timezone": "Asia/Calcutta"
+             }
+
+             # Add optional parameters if provided
+             if max_tokens is not None:
+                 payload["max_tokens"] = max_tokens
+             if temperature is not None:
+                 payload["temperature"] = temperature
+             if top_p is not None:
+                 payload["top_p"] = top_p
+
+             # Make the non-streaming request but process it as streaming
+             # since the API only supports streaming responses
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=headers,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+             response.raise_for_status()
+
+             # Process the streaming response to collect the full content
+             full_content = ""
+             response_started = False
+             assistant_message_found = False
+
+             for line in response.iter_lines(decode_unicode=True):
+                 if not line:
+                     continue
+
+                 if line.startswith("data:"):
+                     part = line[len("data:"):].strip()
+
+                     if part == "[DONE]":
+                         break
+
+                     try:
+                         # Skip the delta_encoding event
+                         if part == '"v1"':
+                             continue
+
+                         obj = json.loads(part)
+                         if isinstance(obj, dict):
+                             # Check if this is an assistant message
+                             if isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("author", {}).get("role") == "assistant":
+                                 assistant_message_found = True
+                                 # Reset content buffer when we find a new assistant message
+                                 full_content = ""
+                                 response_started = False
+                                 continue
+
+                             # Skip until we find an assistant message
+                             if not assistant_message_found:
+                                 continue
+
+                             # Handle different response formats
+                             content_to_add = None
+
+                             # Format 1: Direct content in 'v' field
+                             if isinstance(obj.get("v"), str):
+                                 content_to_add = obj["v"]
+
+                             # Format 2: Path-based content with append operation
+                             elif obj.get("p") == "/message/content/parts/0" and obj.get("o") == "append" and isinstance(obj.get("v"), str):
+                                 content_to_add = obj["v"]
+
+                             # Format 3: Nested content in complex structure
+                             elif isinstance(obj.get("v"), dict) and obj.get("v", {}).get("message", {}).get("content", {}).get("parts"):
+                                 parts = obj["v"]["message"]["content"]["parts"]
+                                 if parts and isinstance(parts[0], str):
+                                     content_to_add = parts[0]
+
+                             # Format 4: Patch operation with append to content
+                             elif obj.get("o") == "patch" and isinstance(obj.get("v"), list):
+                                 for patch in obj["v"]:
+                                     if patch.get("p") == "/message/content/parts/0" and patch.get("o") == "append" and isinstance(patch.get("v"), str):
+                                         content_to_add = patch["v"]
+
+                             # If we found content to add
+                             if content_to_add:
+                                 # Skip the first part if it's repeating the user's message
+                                 if not response_started and full_content == "" and any(msg["content"] in content_to_add for msg in messages if msg["role"] == "user"):
+                                     # This is likely the user's message being echoed back, skip it
+                                     continue
+
+                                 response_started = True
+                                 full_content += content_to_add
+                     except (ValueError, json.JSONDecodeError) as e:
+                         print(f"{RED}Error parsing non-streaming response: {e} - {part}{RESET}")
+                         pass
+
+             # Create the completion message
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_content
+             )
+
+             # Create the choice
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Estimate token usage using count_tokens
+             prompt_tokens = count_tokens([msg.get("content", "") for msg in messages])
+             completion_tokens = count_tokens(full_content)
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=prompt_tokens + completion_tokens
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"{RED}Error during OPKFC non-stream request: {e}{RESET}")
+             raise IOError(f"OPKFC request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'OPKFC'):
+         self.completions = Completions(client)
+
+ class OPKFC(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for OPKFC API.
+
+     Usage:
+         client = OPKFC()
+         response = client.chat.completions.create(
+             model="auto",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         "auto",
+         "o4-mini",
+         "gpt-4o-mini",
+         "gpt-4o"
+     ]
+
+     def __init__(
+         self,
+         timeout: int = 30,
+         proxies: dict = {}
+     ):
+         """
+         Initialize the OPKFC client.
+
+         Args:
+             timeout: Request timeout in seconds
+             proxies: Optional proxy configuration
+         """
+         self.timeout = timeout
+         self.api_endpoint = "https://www.opkfc.com/backend-api/conversation"
+         self.proxies = proxies
+
+         # Initialize session
+         self.session = requests.Session()
+         if proxies:
+             self.session.proxies.update(proxies)
+
+         # Set the user agent to match the original script
+         self.user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0"
+
+         # Set the cookie from the original script
+         self.cookie = "__vtins__KUc0LhjVWFNXQv11=%7B%22sid%22%3A%20%228fab09e3-c23e-5f60-b369-9697fbb821ce%22%2C%20%22vd%22%3A%201%2C%20%22stt%22%3A%200%2C%20%22dr%22%3A%200%2C%20%22expires%22%3A%201744896723481%2C%20%22ct%22%3A%201744894923481%7D; __51uvsct__KUc0LhjVWFNXQv11=1; __51vcke__KUc0LhjVWFNXQv11=06da852c-bb56-547c-91a8-43a0d485ffed; __51vuft__KUc0LhjVWFNXQv11=1744894923504; gfsessionid=1ochrgv17vy4sbd98xmwt6crpmkxwlqf; oai-nav-state=1; p_uv_id=ad86646801bc60d6d95f6098e4ee7450; _dd_s=rum=0&expire=1744895920821&logs=1&id=a39221c9-e8ed-44e6-a2c8-03192699c71e&created=1744894970625"
+
+         # Initialize chat interface
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
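
For readers evaluating this change, the functional difference in the opkfc provider is twofold: 8.3 lets callers override `timeout` and `proxies` per request instead of only at client construction, and usage accounting switches from the old `len(text) // 4` heuristic to `count_tokens` from `.utils`, so `usage` figures will differ from 8.2.8 for identical prompts. The snippet below is a minimal, unverified usage sketch against the new `create()` signature; the import path is assumed from the file layout above and the proxy URL is a placeholder, not part of this release.

```python
# Minimal sketch against webscout 8.3's new create() signature.
# Assumptions: OPKFC is importable from this module path (inferred from
# the file list above); the proxy URL is a hypothetical placeholder.
from webscout.Provider.OPENAI.opkfc import OPKFC

client = OPKFC(timeout=30)

# Non-streaming: per-request `timeout` and `proxies` are new in 8.3 and
# take precedence over the values stored on the client.
response = client.chat.completions.create(
    model="auto",
    messages=[{"role": "user", "content": "Hello!"}],
    timeout=60,                                  # overrides client.timeout
    proxies={"https": "http://127.0.0.1:8080"},  # hypothetical proxy
)
print(response.choices[0].message.content)
print(response.usage)  # estimated via count_tokens, not len(text) // 4

# Streaming: incremental text arrives in choices[0].delta.content;
# the final chunk carries finish_reason="stop" and no content.
for chunk in client.chat.completions.create(
    model="auto",
    messages=[{"role": "user", "content": "Write a haiku."}],
    stream=True,
):
    text = chunk.choices[0].delta.content
    if text:
        print(text, end="", flush=True)
```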