webscout-8.3.5-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (159)
  1. webscout/AIutel.py +2 -0
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  5. webscout/Provider/AISEARCH/__init__.py +18 -11
  6. webscout/Provider/AISEARCH/scira_search.py +3 -1
  7. webscout/Provider/Aitopia.py +2 -3
  8. webscout/Provider/Andi.py +3 -3
  9. webscout/Provider/ChatGPTClone.py +1 -1
  10. webscout/Provider/ChatSandbox.py +1 -0
  11. webscout/Provider/Cloudflare.py +1 -1
  12. webscout/Provider/Cohere.py +1 -0
  13. webscout/Provider/Deepinfra.py +13 -10
  14. webscout/Provider/ExaAI.py +1 -1
  15. webscout/Provider/ExaChat.py +1 -80
  16. webscout/Provider/Flowith.py +6 -1
  17. webscout/Provider/Gemini.py +7 -5
  18. webscout/Provider/GeminiProxy.py +1 -0
  19. webscout/Provider/GithubChat.py +4 -1
  20. webscout/Provider/Groq.py +1 -1
  21. webscout/Provider/HeckAI.py +8 -4
  22. webscout/Provider/Jadve.py +23 -38
  23. webscout/Provider/K2Think.py +308 -0
  24. webscout/Provider/Koboldai.py +8 -186
  25. webscout/Provider/LambdaChat.py +2 -4
  26. webscout/Provider/Nemotron.py +3 -4
  27. webscout/Provider/Netwrck.py +6 -8
  28. webscout/Provider/OLLAMA.py +1 -0
  29. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  30. webscout/Provider/OPENAI/FalconH1.py +2 -7
  31. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  32. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  33. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  34. webscout/Provider/OPENAI/PI.py +5 -4
  35. webscout/Provider/OPENAI/Qwen3.py +2 -3
  36. webscout/Provider/OPENAI/README.md +2 -1
  37. webscout/Provider/OPENAI/TogetherAI.py +52 -57
  38. webscout/Provider/OPENAI/TwoAI.py +3 -4
  39. webscout/Provider/OPENAI/__init__.py +17 -56
  40. webscout/Provider/OPENAI/ai4chat.py +313 -303
  41. webscout/Provider/OPENAI/base.py +9 -29
  42. webscout/Provider/OPENAI/chatgpt.py +7 -2
  43. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  44. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  45. webscout/Provider/OPENAI/deepinfra.py +12 -6
  46. webscout/Provider/OPENAI/e2b.py +60 -8
  47. webscout/Provider/OPENAI/flowith.py +4 -3
  48. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  49. webscout/Provider/OPENAI/heckai.py +4 -1
  50. webscout/Provider/OPENAI/netwrck.py +9 -12
  51. webscout/Provider/OPENAI/refact.py +274 -0
  52. webscout/Provider/OPENAI/scirachat.py +6 -0
  53. webscout/Provider/OPENAI/textpollinations.py +3 -14
  54. webscout/Provider/OPENAI/toolbaz.py +14 -10
  55. webscout/Provider/OpenGPT.py +1 -1
  56. webscout/Provider/Openai.py +150 -402
  57. webscout/Provider/PI.py +1 -0
  58. webscout/Provider/Perplexitylabs.py +1 -2
  59. webscout/Provider/QwenLM.py +107 -89
  60. webscout/Provider/STT/__init__.py +17 -2
  61. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  62. webscout/Provider/StandardInput.py +1 -1
  63. webscout/Provider/TTI/__init__.py +18 -12
  64. webscout/Provider/TTI/bing.py +14 -2
  65. webscout/Provider/TTI/together.py +10 -9
  66. webscout/Provider/TTS/README.md +0 -1
  67. webscout/Provider/TTS/__init__.py +18 -11
  68. webscout/Provider/TTS/base.py +479 -159
  69. webscout/Provider/TTS/deepgram.py +409 -156
  70. webscout/Provider/TTS/elevenlabs.py +425 -111
  71. webscout/Provider/TTS/freetts.py +317 -140
  72. webscout/Provider/TTS/gesserit.py +192 -128
  73. webscout/Provider/TTS/murfai.py +248 -113
  74. webscout/Provider/TTS/openai_fm.py +347 -129
  75. webscout/Provider/TTS/speechma.py +620 -586
  76. webscout/Provider/TeachAnything.py +1 -0
  77. webscout/Provider/TextPollinationsAI.py +5 -15
  78. webscout/Provider/TogetherAI.py +136 -142
  79. webscout/Provider/TwoAI.py +53 -309
  80. webscout/Provider/TypliAI.py +2 -1
  81. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  82. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  83. webscout/Provider/Venice.py +2 -1
  84. webscout/Provider/VercelAI.py +1 -0
  85. webscout/Provider/WiseCat.py +2 -1
  86. webscout/Provider/WrDoChat.py +2 -1
  87. webscout/Provider/__init__.py +18 -174
  88. webscout/Provider/ai4chat.py +1 -1
  89. webscout/Provider/akashgpt.py +7 -10
  90. webscout/Provider/cerebras.py +194 -38
  91. webscout/Provider/chatglm.py +170 -83
  92. webscout/Provider/cleeai.py +1 -2
  93. webscout/Provider/deepseek_assistant.py +1 -1
  94. webscout/Provider/elmo.py +1 -1
  95. webscout/Provider/geminiapi.py +1 -1
  96. webscout/Provider/granite.py +1 -1
  97. webscout/Provider/hermes.py +1 -3
  98. webscout/Provider/julius.py +1 -0
  99. webscout/Provider/learnfastai.py +1 -1
  100. webscout/Provider/llama3mitril.py +1 -1
  101. webscout/Provider/llmchat.py +1 -1
  102. webscout/Provider/llmchatco.py +1 -1
  103. webscout/Provider/meta.py +3 -3
  104. webscout/Provider/oivscode.py +2 -2
  105. webscout/Provider/scira_chat.py +51 -124
  106. webscout/Provider/searchchat.py +1 -0
  107. webscout/Provider/sonus.py +1 -1
  108. webscout/Provider/toolbaz.py +15 -11
  109. webscout/Provider/turboseek.py +31 -22
  110. webscout/Provider/typefully.py +2 -1
  111. webscout/Provider/x0gpt.py +1 -0
  112. webscout/Provider/yep.py +2 -1
  113. webscout/conversation.py +22 -20
  114. webscout/sanitize.py +14 -10
  115. webscout/scout/README.md +20 -23
  116. webscout/scout/core/crawler.py +125 -38
  117. webscout/scout/core/scout.py +26 -5
  118. webscout/tempid.py +6 -0
  119. webscout/version.py +1 -1
  120. webscout/webscout_search.py +13 -6
  121. webscout/webscout_search_async.py +10 -8
  122. webscout/yep_search.py +13 -5
  123. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
  124. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
  125. webscout/Provider/AllenAI.py +0 -440
  126. webscout/Provider/Blackboxai.py +0 -793
  127. webscout/Provider/FreeGemini.py +0 -250
  128. webscout/Provider/Glider.py +0 -225
  129. webscout/Provider/Hunyuan.py +0 -283
  130. webscout/Provider/MCPCore.py +0 -322
  131. webscout/Provider/MiniMax.py +0 -207
  132. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  133. webscout/Provider/OPENAI/MiniMax.py +0 -298
  134. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  135. webscout/Provider/OPENAI/c4ai.py +0 -394
  136. webscout/Provider/OPENAI/copilot.py +0 -305
  137. webscout/Provider/OPENAI/glider.py +0 -330
  138. webscout/Provider/OPENAI/mcpcore.py +0 -431
  139. webscout/Provider/OPENAI/multichat.py +0 -378
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/TTS/sthir.py +0 -94
  142. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  143. webscout/Provider/asksteve.py +0 -220
  144. webscout/Provider/copilot.py +0 -422
  145. webscout/Provider/freeaichat.py +0 -294
  146. webscout/Provider/koala.py +0 -182
  147. webscout/Provider/lmarena.py +0 -198
  148. webscout/Provider/monochat.py +0 -275
  149. webscout/Provider/multichat.py +0 -375
  150. webscout/Provider/scnet.py +0 -244
  151. webscout/Provider/talkai.py +0 -194
  152. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  153. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  154. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  155. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  156. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  157. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  158. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  159. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/FreeGemini.py

@@ -251,20 +251,18 @@ class FreeGemini(OpenAICompatibleProvider):
 
     AVAILABLE_MODELS = ["gemini-2.0-flash"]
 
-    def __init__(
-        self,
-    ):
+    def __init__(self, proxies: Optional[dict] = None):
         """
         Initialize the FreeGemini client.
+
+        Args:
+            proxies: Optional proxy configuration dictionary
         """
+        super().__init__(proxies=proxies)
         self.timeout = 30
         # Update the API endpoint to match the working implementation
         self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse"
 
-        # Initialize session with curl_cffi for better Cloudflare handling
-        self.session = Session()
-        self.session.proxies = {}
-
         # Use LitAgent for fingerprinting
         self.agent = LitAgent()
 
@@ -296,4 +294,4 @@ if __name__ == "__main__":
         model="gemini-2.0-flash",
         messages=[{"role": "user", "parts": [{"text": conversation_prompt}]}]
     )
-    print(response.choices[0].message.content)
+    print(response.choices[0].message.content)
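A pattern that repeats across this release (FreeGemini above; NEMOTRON, PiAI, Qwen3, and K2Think below): per-provider requests.Session setup is replaced by a super().__init__(proxies=proxies) call so the shared base class owns the session and proxy wiring. A minimal sketch of that pattern, with a stand-in base class (the real one lives in webscout/Provider/OPENAI/base.py and may differ in detail):

import requests
from typing import Optional

class OpenAICompatibleProvider:
    # Stand-in for the real base class in webscout/Provider/OPENAI/base.py;
    # assumption: it creates the session and applies the proxy mapping.
    def __init__(self, proxies: Optional[dict] = None):
        self.session = requests.Session()
        self.session.proxies = proxies or {}

class FreeGemini(OpenAICompatibleProvider):
    def __init__(self, proxies: Optional[dict] = None):
        super().__init__(proxies=proxies)  # session + proxies now come from the base
        self.timeout = 30

client = FreeGemini(proxies={"https": "http://127.0.0.1:8080"})
print(client.session.proxies)  # {'https': 'http://127.0.0.1:8080'}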
webscout/Provider/OPENAI/monochat.py → webscout/Provider/OPENAI/K2Think.py

@@ -1,22 +1,27 @@
 import time
 import uuid
 import requests
-import json
 import re
+import json
 from typing import List, Dict, Optional, Union, Generator, Any
 
+# Import base classes and utility structures
 from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage, count_tokens
 )
 
+# Import LitAgent
 from webscout.litagent import LitAgent
 
-# --- MonoChat Client ---
+# Import logger
+from webscout.Litlogger import Logger, LogLevel
+
+logger = Logger(name="K2Think", level=LogLevel.INFO)
 
 class Completions(BaseCompletions):
-    def __init__(self, client: 'MonoChat'):
+    def __init__(self, client: 'K2Think'):
         self._client = client
 
     def create(
@@ -29,77 +34,111 @@ class Completions(BaseCompletions):
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
         timeout: Optional[int] = None,
-        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
         Creates a model response for the given chat conversation.
         Mimics openai.chat.completions.create
         """
-        # Prepare the payload for MonoChat API
+        # Prepare the payload for K2Think API
         payload = {
+            "stream": stream,
+            "model": model,
             "messages": messages,
-            "model": model
+            "params": {}
         }
+
+        # Add optional parameters if provided
         if max_tokens is not None and max_tokens > 0:
-            payload["max_tokens"] = max_tokens
+            payload["params"]["max_tokens"] = max_tokens
+
         if temperature is not None:
-            payload["temperature"] = temperature
+            payload["params"]["temperature"] = temperature
+
         if top_p is not None:
-            payload["top_p"] = top_p
+            payload["params"]["top_p"] = top_p
+
+        # Add any additional parameters
         payload.update(kwargs)
 
         request_id = f"chatcmpl-{uuid.uuid4()}"
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+            return self._create_stream(request_id, created_time, model, payload, timeout)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
-                self._client.api_endpoint,
+                self._client.base_url,
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=timeout or self._client.timeout,
-                proxies=proxies or getattr(self._client, "proxies", None)
+                timeout=timeout or self._client.timeout
             )
+
+            # Handle non-200 responses
             if not response.ok:
                 raise IOError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
+            # Use count_tokens for prompt tokens
             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
             completion_tokens = 0
             total_tokens = 0
+            seen_content = set()  # Track seen content to avoid duplicates
 
             for line in response.iter_lines():
                 if line:
                     decoded_line = line.decode('utf-8').strip()
-                    # MonoChat returns lines like: 0:"Hello" or 0:"!" etc.
-                    match = re.search(r'0:"(.*?)"', decoded_line)
-                    if match:
-                        content = match.group(1)
+
+                    # Extract content using regex patterns (similar to x0gpt)
+                    extract_regexes = [
+                        r'<answer>([\s\S]*?)<\/answer>',
+                    ]
+
+                    content = ""
+                    for regex in extract_regexes:
+                        match = re.search(regex, decoded_line)
+                        if match:
+                            content = match.group(1)
+                            break
+
+                    if content:
+                        # Format the content
                         content = self._client.format_text(content)
+
+                        # Skip if we've already seen this exact content
+                        if content in seen_content:
+                            continue
+
+                        seen_content.add(content)
+
+                        # Update token counts using count_tokens
                         completion_tokens += count_tokens(content)
                         total_tokens = prompt_tokens + completion_tokens
 
+                        # Create the delta object
                         delta = ChoiceDelta(
                             content=content,
                             role="assistant",
                             tool_calls=None
                         )
+
+                        # Create the choice object
                         choice = Choice(
                             index=0,
                             delta=delta,
                             finish_reason=None,
                             logprobs=None
                         )
+
+                        # Create the chunk object
                         chunk = ChatCompletionChunk(
                             id=request_id,
                             choices=[choice],
@@ -107,12 +146,16 @@ class Completions(BaseCompletions):
                             model=model,
                             system_fingerprint=None
                         )
+
+                        # Set usage directly on the chunk object
                         chunk.usage = {
                             "prompt_tokens": prompt_tokens,
                             "completion_tokens": completion_tokens,
                             "total_tokens": total_tokens,
                             "estimated_cost": None
                         }
+
+                        # Return the chunk object with usage information
                         yield chunk
 
             # Final chunk with finish_reason="stop"
@@ -121,12 +164,14 @@ class Completions(BaseCompletions):
                 role=None,
                 tool_calls=None
             )
+
             choice = Choice(
                 index=0,
                 delta=delta,
                 finish_reason="stop",
                 logprobs=None
             )
+
             chunk = ChatCompletionChunk(
                 id=request_id,
                 choices=[choice],
134
179
  model=model,
135
180
  system_fingerprint=None
136
181
  )
182
+
183
+ # Set usage directly on the chunk object
137
184
  chunk.usage = {
138
185
  "prompt_tokens": prompt_tokens,
139
186
  "completion_tokens": completion_tokens,
140
187
  "total_tokens": total_tokens,
141
188
  "estimated_cost": None
142
189
  }
190
+
143
191
  yield chunk
144
192
 
145
193
  except Exception as e:
146
- print(f"Error during MonoChat stream request: {e}")
147
- raise IOError(f"MonoChat request failed: {e}") from e
194
+ print(f"Error during K2Think stream request: {e}")
195
+ raise IOError(f"K2Think request failed: {e}") from e
148
196
 
149
197
  def _create_non_stream(
150
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
198
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None
151
199
  ) -> ChatCompletion:
152
200
  try:
153
201
  response = self._client.session.post(
154
- self._client.api_endpoint,
202
+ self._client.base_url,
155
203
  headers=self._client.headers,
156
204
  json=payload,
157
205
  stream=True,
158
- timeout=timeout or self._client.timeout,
159
- proxies=proxies or getattr(self._client, "proxies", None)
206
+ timeout=timeout or self._client.timeout
160
207
  )
208
+
209
+ # Handle non-200 responses
161
210
  if not response.ok:
162
211
  raise IOError(
163
212
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
164
213
  )
165
214
 
215
+ # Collect the full response
166
216
  full_text = ""
217
+ seen_content_parts = set() # Track seen content parts to avoid duplicates
218
+
167
219
  for line in response.iter_lines(decode_unicode=True):
168
220
  if line:
169
- match = re.search(r'0:"(.*?)"', line)
170
- if match:
171
- content = match.group(1)
172
- full_text += content
173
-
221
+ # Extract content using regex patterns
222
+ extract_regexes = [
223
+ r'<answer>([\s\S]*?)<\/answer>',
224
+ ]
225
+
226
+ for regex in extract_regexes:
227
+ match = re.search(regex, line)
228
+ if match:
229
+ content = match.group(1)
230
+ # Only add if we haven't seen this exact content before
231
+ if content not in seen_content_parts:
232
+ seen_content_parts.add(content)
233
+ full_text += content
234
+ break
235
+
236
+ # Format the text
174
237
  full_text = self._client.format_text(full_text)
175
238
 
239
+ # Use count_tokens for accurate token counts
176
240
  prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
177
241
  completion_tokens = count_tokens(full_text)
178
242
  total_tokens = prompt_tokens + completion_tokens
179
243
 
244
+ # Create the message object
180
245
  message = ChatCompletionMessage(
181
246
  role="assistant",
182
247
  content=full_text
183
248
  )
249
+
250
+ # Create the choice object
184
251
  choice = Choice(
185
252
  index=0,
186
253
  message=message,
187
254
  finish_reason="stop"
188
255
  )
256
+
257
+ # Create the usage object
189
258
  usage = CompletionUsage(
190
259
  prompt_tokens=prompt_tokens,
191
260
  completion_tokens=completion_tokens,
192
261
  total_tokens=total_tokens
193
262
  )
263
+
264
+ # Create the completion object
194
265
  completion = ChatCompletion(
195
266
  id=request_id,
196
267
  choices=[choice],
@@ -198,83 +269,97 @@ class Completions(BaseCompletions):
198
269
  model=model,
199
270
  usage=usage,
200
271
  )
272
+
201
273
  return completion
202
274
 
203
275
  except Exception as e:
204
- print(f"Error during MonoChat non-stream request: {e}")
205
- raise IOError(f"MonoChat request failed: {e}") from e
276
+ print(f"Error during K2Think non-stream request: {e}")
277
+ raise IOError(f"K2Think request failed: {e}") from e
206
278
 
207
279
  class Chat(BaseChat):
208
- def __init__(self, client: 'MonoChat'):
280
+ def __init__(self, client: 'K2Think'):
209
281
  self.completions = Completions(client)
210
282
 
211
- class MonoChat(OpenAICompatibleProvider):
283
+ class Models:
284
+ """Models class to mimic OpenAI models.list()"""
285
+ def __init__(self):
286
+ self.available_models = [
287
+ "MBZUAI-IFM/K2-Think",
288
+ ]
289
+
290
+ def list(self):
291
+ """Return list of available models"""
292
+ return [
293
+ {
294
+ "id": model,
295
+ "object": "model",
296
+ "created": 0,
297
+ "owned_by": "k2think"
298
+ }
299
+ for model in self.available_models
300
+ ]
301
+
302
+ class K2Think(OpenAICompatibleProvider):
212
303
  """
213
- OpenAI-compatible client for MonoChat API.
304
+ OpenAI-compatible client for K2Think API.
214
305
 
215
306
  Usage:
216
- client = MonoChat()
307
+ client = K2Think()
217
308
  response = client.chat.completions.create(
218
- model="gpt-4.1",
309
+ model="MBZUAI-IFM/K2-Think",
219
310
  messages=[{"role": "user", "content": "Hello!"}]
220
311
  )
221
312
  """
222
313
 
223
- AVAILABLE_MODELS = [
224
- "deepseek-r1",
225
- "deepseek-v3",
226
- "uncensored-r1-32b",
227
- "o3-pro",
228
- "o4-mini",
229
- "o3",
230
- "gpt-4.5-preview",
231
- "gpt-4.1",
232
- "gpt-4.1-mini",
233
- "gpt-4.1-nano",
234
- "gpt-4o",
235
- "gpt-4o-mini",
236
- "gpt-4o-search-preview",
237
- "gpt-4o-mini-search-preview",
238
- "gpt-4-turbo"
239
-
240
-
241
- ]
314
+ AVAILABLE_MODELS = ["MBZUAI-IFM/K2-Think"]
242
315
 
243
316
  def __init__(
244
317
  self,
245
- browser: str = "chrome"
318
+ browser: str = "chrome",
319
+ proxies: Optional[dict] = None
246
320
  ):
247
321
  """
248
- Initialize the MonoChat client.
322
+ Initialize the K2Think client.
249
323
 
250
324
  Args:
251
325
  browser: Browser to emulate in user agent
326
+ proxies: Optional proxy configuration dictionary
252
327
  """
253
- self.timeout = None
254
- self.api_endpoint = "https://gg.is-a-furry.dev/api/chat"
255
- self.session = requests.Session()
328
+ super().__init__(proxies=proxies)
329
+ self.timeout = 30
330
+ self.base_url = "https://www.k2think.ai/api/guest/chat/completions"
256
331
 
332
+ # Initialize LitAgent for user agent generation
257
333
  agent = LitAgent()
258
334
  self.fingerprint = agent.generate_fingerprint(browser)
259
335
 
260
336
  self.headers = {
261
- "accept": "*/*",
262
- "accept-encoding": "gzip, deflate, br, zstd",
263
- "accept-language": self.fingerprint["accept_language"],
264
- "content-type": "application/json",
265
- "origin": "https://gg.is-a-furry.dev",
266
- "referer": "https://gg.is-a-furry.dev/",
267
- "user-agent": self.fingerprint["user_agent"]
337
+ "Accept": "*/*",
338
+ "Accept-Encoding": "gzip, deflate, br, zstd",
339
+ "Accept-Language": self.fingerprint["accept_language"],
340
+ "Content-Type": "application/json",
341
+ "User-Agent": self.fingerprint["user_agent"],
342
+ "Origin": "https://www.k2think.ai",
343
+ "Referer": "https://www.k2think.ai/guest",
344
+ "Sec-Fetch-Dest": "empty",
345
+ "Sec-Fetch-Mode": "cors",
346
+ "Sec-Fetch-Site": "same-origin",
347
+ "Sec-Ch-Ua": '"Chromium";v="140", "Not=A?Brand";v="24", "Microsoft Edge";v="140"',
348
+ "Sec-Ch-Ua-Mobile": "?0",
349
+ "Sec-Ch-Ua-Platform": f'"{self.fingerprint["platform"]}"',
350
+ "Priority": "u=1, i"
268
351
  }
269
352
 
270
353
  self.session.headers.update(self.headers)
354
+
355
+ # Initialize the chat interface
271
356
  self.chat = Chat(self)
272
357
 
273
358
  @property
274
359
  def models(self):
275
360
  class _ModelList:
276
361
  def list(inner_self):
277
- return MonoChat.AVAILABLE_MODELS
362
+ return K2Think.AVAILABLE_MODELS
278
363
  return _ModelList()
279
364
 
280
365
  def format_text(self, text: str) -> str:
@@ -287,43 +372,61 @@ class MonoChat(OpenAICompatibleProvider):
287
372
  Returns:
288
373
  Formatted text
289
374
  """
375
+ # Use a more comprehensive approach to handle all escape sequences
290
376
  try:
377
+ # First handle double backslashes to avoid issues
291
378
  text = text.replace('\\\\', '\\')
379
+
380
+ # Handle common escape sequences
292
381
  text = text.replace('\\n', '\n')
293
382
  text = text.replace('\\r', '\r')
294
383
  text = text.replace('\\t', '\t')
295
384
  text = text.replace('\\"', '"')
296
385
  text = text.replace("\\'", "'")
386
+
387
+ # Handle any remaining escape sequences using JSON decoding
388
+ # This is a fallback in case there are other escape sequences
297
389
  try:
390
+ # Add quotes to make it a valid JSON string
298
391
  json_str = f'"{text}"'
392
+ # Use json module to decode all escape sequences
299
393
  decoded = json.loads(json_str)
300
394
  return decoded
301
395
  except json.JSONDecodeError:
396
+ # If JSON decoding fails, return the text with the replacements we've already done
302
397
  return text
303
398
  except Exception as e:
399
+ # If any error occurs, return the original text
304
400
  print(f"Warning: Error formatting text: {e}")
305
401
  return text
306
402
 
307
403
  def convert_model_name(self, model: str) -> str:
308
404
  """
309
- Convert model names to ones supported by MonoChat.
405
+ Convert model names to ones supported by K2Think.
310
406
 
311
407
  Args:
312
408
  model: Model name to convert
313
409
 
314
410
  Returns:
315
- MonoChat model name
411
+ K2Think model name
316
412
  """
413
+ # K2Think doesn't actually use model names, but we'll keep this for compatibility
317
414
  return model
318
415
 
416
+ # Convenience function for backward compatibility
417
+ def K2ThinkClient(**kwargs):
418
+ """Create a new K2Think client instance"""
419
+ return K2Think(**kwargs)
420
+
319
421
  if __name__ == "__main__":
320
- client = MonoChat()
422
+ from rich import print
423
+ client = K2Think()
321
424
  response = client.chat.completions.create(
322
- model="gpt-4.1",
323
- messages=[{"role": "user", "content": "tell me about humans"}],
324
- max_tokens=1000,
425
+ model="MBZUAI-IFM/K2-Think",
426
+ messages=[{"role": "user", "content": "Hello!"}],
325
427
  stream=True
326
428
  )
429
+
327
430
  for chunk in response:
328
- if chunk.choices and hasattr(chunk.choices[0], "delta") and getattr(chunk.choices[0].delta, "content", None):
329
- print(chunk.choices[0].delta.content, end="", flush=True)
431
+ if chunk.choices[0].delta.content:
432
+ print(chunk.choices[0].delta.content, end='', flush=True)
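The substantive behavioral change in this rename is the stream parser: where monochat matched 0:"..." fragments, K2Think extracts <answer>...</answer> spans from each line and skips exact repeats. A self-contained sketch of that logic (the sample stream lines are invented for illustration):

import re

# Regex list mirrors the diff; <answer> bodies are pulled out of each raw line.
EXTRACT_REGEXES = [r'<answer>([\s\S]*?)<\/answer>']

def extract_answer(line: str) -> str:
    for regex in EXTRACT_REGEXES:
        match = re.search(regex, line)
        if match:
            return match.group(1)
    return ""

seen = set()  # the diff de-duplicates repeated chunks the same way
for raw in ['<answer>Hello</answer>', '<answer>Hello</answer>', '<answer> world</answer>']:
    content = extract_answer(raw)
    if content and content not in seen:
        seen.add(content)
        print(content, end="")
# Prints "Hello world"; the duplicated middle line is skipped.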
webscout/Provider/OPENAI/NEMOTRON.py

@@ -120,12 +120,9 @@ class NEMOTRON(OpenAICompatibleProvider):
     ]
 
     API_BASE_URL = "https://nemotron.one/api/chat"
-    def __init__(
-        self
-    ):
-        self.session = requests.Session()
+    def __init__(self, proxies: Optional[dict] = None):
+        super().__init__(proxies=proxies)
         self.timeout = 30
-        self.session.proxies = {}
         agent = LitAgent()
         user_agent = agent.random()
         self.base_headers = {
@@ -241,4 +238,4 @@ class NEMOTRON(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
+        return _ModelList()
webscout/Provider/OPENAI/PI.py

@@ -289,12 +289,10 @@ class PiAI(OpenAICompatibleProvider):
             proxies: Proxy configuration
             **kwargs: Additional arguments
         """
+        super().__init__(proxies=proxies)
         self.timeout = timeout
         self.conversation_id = None
 
-        # Initialize curl_cffi Session
-        self.session = Session()
-
         # Setup URLs
         self.primary_url = 'https://pi.ai/api/chat'
         self.fallback_url = 'https://pi.ai/api/v2/chat'
@@ -320,6 +318,9 @@ class PiAI(OpenAICompatibleProvider):
             '__cf_bm': uuid4().hex
         }
 
+        # Replace the base session with curl_cffi Session
+        self.session = Session()
+
         # Configure session
         self.session.headers.update(self.headers)
         if proxies:
@@ -424,4 +425,4 @@ if __name__ == "__main__":
    )
 
    print(response.choices[0].message.content)
-    print(f"Usage: {response.usage}")
+    print(f"Usage: {response.usage}")
webscout/Provider/OPENAI/Qwen3.py

@@ -251,10 +251,9 @@ class Qwen3(OpenAICompatibleProvider):
         "qwen-3-0.6b": "qwen3-0.6b"
     }
 
-    def __init__(self):
+    def __init__(self, proxies: Optional[Dict[str, str]] = None):
+        super().__init__(proxies=proxies)
         self.timeout = 30
-        self.session = requests.Session()
-        self.session.proxies = {}
         self.headers = {
             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0',
             'Accept': '*/*',
webscout/Provider/OPENAI/README.md

@@ -73,7 +73,8 @@ Currently, the following providers are implemented with OpenAI-compatible interfaces:
 - Friendli
 - MiniMax
 - QodoAI
-
+- Kimi
+- GptOss
 ## 💻 Usage Examples
 
 Here are examples of how to use the OpenAI-compatible providers in your code.
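The README hunk cuts off just before its usage section; for reference, the OpenAI-compatible providers all follow the chat.completions.create shape seen in the K2Think diff above. A non-stream sketch (the import path is inferred from the file listing, webscout/Provider/OPENAI/K2Think.py):

from webscout.Provider.OPENAI.K2Think import K2Think

client = K2Think()
response = client.chat.completions.create(
    model="MBZUAI-IFM/K2-Think",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=False,
)
print(response.choices[0].message.content)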