webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the release's security advisory for details.

Files changed (150) hide show
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -239
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  5. webscout/Extra/YTToolkit/README.md +375 -0
  6. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  7. webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
  8. webscout/Extra/autocoder/autocoder.py +309 -114
  9. webscout/Extra/autocoder/autocoder_utiles.py +15 -15
  10. webscout/Extra/gguf.md +430 -0
  11. webscout/Extra/tempmail/README.md +488 -0
  12. webscout/Extra/weather.md +281 -0
  13. webscout/Litlogger/Readme.md +175 -0
  14. webscout/Provider/AISEARCH/DeepFind.py +41 -37
  15. webscout/Provider/AISEARCH/README.md +279 -0
  16. webscout/Provider/AISEARCH/__init__.py +0 -1
  17. webscout/Provider/AISEARCH/genspark_search.py +228 -86
  18. webscout/Provider/AISEARCH/hika_search.py +11 -11
  19. webscout/Provider/AISEARCH/scira_search.py +324 -322
  20. webscout/Provider/AllenAI.py +7 -14
  21. webscout/Provider/Blackboxai.py +518 -74
  22. webscout/Provider/Cloudflare.py +0 -1
  23. webscout/Provider/Deepinfra.py +23 -21
  24. webscout/Provider/Flowith.py +217 -0
  25. webscout/Provider/FreeGemini.py +250 -0
  26. webscout/Provider/GizAI.py +15 -5
  27. webscout/Provider/Glider.py +11 -8
  28. webscout/Provider/HeckAI.py +80 -52
  29. webscout/Provider/Koboldai.py +7 -4
  30. webscout/Provider/LambdaChat.py +2 -2
  31. webscout/Provider/Marcus.py +10 -18
  32. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  33. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  35. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  36. webscout/Provider/OPENAI/README.md +1253 -0
  37. webscout/Provider/OPENAI/__init__.py +8 -0
  38. webscout/Provider/OPENAI/ai4chat.py +293 -286
  39. webscout/Provider/OPENAI/api.py +810 -0
  40. webscout/Provider/OPENAI/base.py +217 -14
  41. webscout/Provider/OPENAI/c4ai.py +373 -367
  42. webscout/Provider/OPENAI/chatgpt.py +7 -0
  43. webscout/Provider/OPENAI/chatgptclone.py +7 -0
  44. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  45. webscout/Provider/OPENAI/deepinfra.py +30 -20
  46. webscout/Provider/OPENAI/e2b.py +6 -0
  47. webscout/Provider/OPENAI/exaai.py +7 -0
  48. webscout/Provider/OPENAI/exachat.py +6 -0
  49. webscout/Provider/OPENAI/flowith.py +162 -0
  50. webscout/Provider/OPENAI/freeaichat.py +359 -352
  51. webscout/Provider/OPENAI/glider.py +323 -316
  52. webscout/Provider/OPENAI/groq.py +361 -354
  53. webscout/Provider/OPENAI/heckai.py +30 -64
  54. webscout/Provider/OPENAI/llmchatco.py +8 -0
  55. webscout/Provider/OPENAI/mcpcore.py +7 -0
  56. webscout/Provider/OPENAI/multichat.py +8 -0
  57. webscout/Provider/OPENAI/netwrck.py +356 -350
  58. webscout/Provider/OPENAI/opkfc.py +8 -0
  59. webscout/Provider/OPENAI/scirachat.py +471 -462
  60. webscout/Provider/OPENAI/sonus.py +9 -0
  61. webscout/Provider/OPENAI/standardinput.py +9 -1
  62. webscout/Provider/OPENAI/textpollinations.py +339 -329
  63. webscout/Provider/OPENAI/toolbaz.py +7 -0
  64. webscout/Provider/OPENAI/typefully.py +355 -0
  65. webscout/Provider/OPENAI/typegpt.py +358 -346
  66. webscout/Provider/OPENAI/uncovrAI.py +7 -0
  67. webscout/Provider/OPENAI/utils.py +103 -7
  68. webscout/Provider/OPENAI/venice.py +12 -0
  69. webscout/Provider/OPENAI/wisecat.py +19 -19
  70. webscout/Provider/OPENAI/writecream.py +7 -0
  71. webscout/Provider/OPENAI/x0gpt.py +7 -0
  72. webscout/Provider/OPENAI/yep.py +50 -21
  73. webscout/Provider/OpenGPT.py +1 -1
  74. webscout/Provider/TTI/AiForce/README.md +159 -0
  75. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  76. webscout/Provider/TTI/ImgSys/README.md +174 -0
  77. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  78. webscout/Provider/TTI/Nexra/README.md +155 -0
  79. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  80. webscout/Provider/TTI/README.md +128 -0
  81. webscout/Provider/TTI/aiarta/README.md +134 -0
  82. webscout/Provider/TTI/artbit/README.md +100 -0
  83. webscout/Provider/TTI/fastflux/README.md +129 -0
  84. webscout/Provider/TTI/huggingface/README.md +114 -0
  85. webscout/Provider/TTI/piclumen/README.md +161 -0
  86. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  87. webscout/Provider/TTI/talkai/README.md +139 -0
  88. webscout/Provider/TTS/README.md +192 -0
  89. webscout/Provider/TTS/__init__.py +2 -1
  90. webscout/Provider/TTS/speechma.py +500 -100
  91. webscout/Provider/TTS/sthir.py +94 -0
  92. webscout/Provider/TeachAnything.py +3 -7
  93. webscout/Provider/TextPollinationsAI.py +4 -2
  94. webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
  95. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  96. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  97. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  98. webscout/Provider/Writecream.py +11 -2
  99. webscout/Provider/__init__.py +8 -14
  100. webscout/Provider/ai4chat.py +4 -58
  101. webscout/Provider/asksteve.py +17 -9
  102. webscout/Provider/cerebras.py +3 -1
  103. webscout/Provider/koala.py +170 -268
  104. webscout/Provider/llmchat.py +3 -0
  105. webscout/Provider/lmarena.py +198 -0
  106. webscout/Provider/meta.py +7 -4
  107. webscout/Provider/samurai.py +223 -0
  108. webscout/Provider/scira_chat.py +4 -2
  109. webscout/Provider/typefully.py +23 -151
  110. webscout/__init__.py +4 -2
  111. webscout/cli.py +3 -28
  112. webscout/conversation.py +35 -35
  113. webscout/litagent/Readme.md +276 -0
  114. webscout/scout/README.md +402 -0
  115. webscout/swiftcli/Readme.md +323 -0
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +2 -182
  118. webscout/webscout_search_async.py +1 -179
  119. webscout/zeroart/README.md +89 -0
  120. webscout/zeroart/__init__.py +134 -54
  121. webscout/zeroart/base.py +19 -13
  122. webscout/zeroart/effects.py +101 -99
  123. webscout/zeroart/fonts.py +1239 -816
  124. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
  125. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
  126. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  127. webscout-8.2.8.dist-info/entry_points.txt +3 -0
  128. webscout-8.2.8.dist-info/top_level.txt +1 -0
  129. webscout/Provider/AISEARCH/ISou.py +0 -256
  130. webscout/Provider/ElectronHub.py +0 -773
  131. webscout/Provider/Free2GPT.py +0 -241
  132. webscout/Provider/GPTWeb.py +0 -249
  133. webscout/Provider/bagoodex.py +0 -145
  134. webscout/Provider/geminiprorealtime.py +0 -160
  135. webscout/scout/core.py +0 -881
  136. webscout-8.2.6.dist-info/entry_points.txt +0 -3
  137. webscout-8.2.6.dist-info/top_level.txt +0 -2
  138. webstoken/__init__.py +0 -30
  139. webstoken/classifier.py +0 -189
  140. webstoken/keywords.py +0 -216
  141. webstoken/language.py +0 -128
  142. webstoken/ner.py +0 -164
  143. webstoken/normalizer.py +0 -35
  144. webstoken/processor.py +0 -77
  145. webstoken/sentiment.py +0 -206
  146. webstoken/stemmer.py +0 -73
  147. webstoken/tagger.py +0 -60
  148. webstoken/tokenizer.py +0 -158
  149. /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
  150. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
@@ -1,367 +1,373 @@
1
- import time
2
- import uuid
3
- import requests
4
- import json
5
- import re
6
- from typing import List, Dict, Optional, Union, Generator, Any
7
-
8
- # Import base classes and utility structures
9
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
10
- from .utils import (
11
- ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
12
- ChatCompletionMessage, CompletionUsage,
13
- get_system_prompt, get_last_user_message, format_prompt # Import format_prompt
14
- )
15
-
16
- # Attempt to import LitAgent, fallback if not available
17
- try:
18
- from webscout.litagent import LitAgent
19
- except ImportError:
20
- # Define a dummy LitAgent if webscout is not installed or accessible
21
- class LitAgent:
22
- def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
23
- # Return minimal default headers if LitAgent is unavailable
24
- print("Warning: LitAgent not found. Using default minimal headers.")
25
- return {
26
- "accept": "*/*",
27
- "accept_language": "en-US,en;q=0.9",
28
- "platform": "Windows",
29
- "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
30
- "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
31
- "browser_type": browser,
32
- }
33
- def random(self) -> str:
34
- # Return a default user agent if LitAgent is unavailable
35
- print("Warning: LitAgent not found. Using default user agent.")
36
- return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
37
-
38
-
39
- class Completions(BaseCompletions):
40
- def __init__(self, client: 'C4AI'):
41
- self._client = client
42
-
43
- def create(
44
- self,
45
- *,
46
- model: str,
47
- messages: List[Dict[str, str]],
48
- max_tokens: Optional[int] = 2000,
49
- stream: bool = False,
50
- temperature: Optional[float] = None,
51
- top_p: Optional[float] = None,
52
- **kwargs: Any
53
- ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
54
- """
55
- Creates a model response for the given chat conversation.
56
- Mimics openai.chat.completions.create
57
- """
58
- # Extract system prompt using utility function
59
- system_prompt = get_system_prompt(messages)
60
- if not system_prompt:
61
- system_prompt = "You are a helpful assistant."
62
-
63
- # Format the conversation history using format_prompt
64
- # Note: C4AI API might expect only the *last* user message here.
65
- # Sending the full history might cause issues.
66
- # We exclude the system prompt from format_prompt as it's sent separately.
67
- # We also set do_continue=True as C4AI adds its own assistant prompt implicitly.
68
- conversation_prompt = format_prompt(messages, include_system=False, do_continue=True)
69
-
70
- if not conversation_prompt:
71
- # Fallback to last user message if formatted prompt is empty
72
- last_user_message = get_last_user_message(messages)
73
- if not last_user_message:
74
- raise ValueError("No user message found or formatted prompt is empty.")
75
- conversation_prompt = last_user_message
76
-
77
- # Create or get conversation ID
78
- if model not in self._client._conversation_data:
79
- conversation_id = self._client.create_conversation(model, system_prompt)
80
- if not conversation_id:
81
- raise IOError(f"Failed to create conversation with model {model}")
82
- else:
83
- conversation_id = self._client._conversation_data[model]["conversationId"]
84
- self._client._conversation_data[model]["messageId"] = self._client.fetch_message_id(conversation_id)
85
-
86
- request_id = f"chatcmpl-{uuid.uuid4()}"
87
- created_time = int(time.time())
88
-
89
- # Pass the formatted conversation prompt
90
- if stream:
91
- return self._create_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt)
92
- else:
93
- return self._create_non_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt)
94
-
95
- def _create_stream(
96
- self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str
97
- ) -> Generator[ChatCompletionChunk, None, None]:
98
- try:
99
- message_id = self._client._conversation_data[model]["messageId"]
100
- url = f"{self._client.url}/api/chat/message"
101
- payload = {
102
- "conversationId": conversation_id,
103
- "messageId": message_id,
104
- "model": model,
105
- "prompt": prompt, # Use the formatted conversation history as prompt
106
- "preprompt": system_prompt,
107
- "temperature": 0.7,
108
- "top_p": 1,
109
- "top_k": 50,
110
- "max_tokens": self._client.max_tokens_to_sample,
111
- "stop": [],
112
- "stream": True
113
- }
114
-
115
- response = self._client.session.post(
116
- url,
117
- headers=self._client.headers,
118
- json=payload,
119
- stream=True,
120
- timeout=self._client.timeout
121
- )
122
- response.raise_for_status()
123
-
124
- full_text = ""
125
- for line in response.iter_lines():
126
- if line:
127
- line = line.decode('utf-8')
128
- if line.startswith('data: '):
129
- data = line[6:]
130
- if data == "[DONE]":
131
- break
132
-
133
- try:
134
- json_data = json.loads(data)
135
- delta_text = json_data.get('text', '')
136
- new_content = delta_text[len(full_text):]
137
- full_text = delta_text
138
- delta = ChoiceDelta(content=new_content)
139
- choice = Choice(index=0, delta=delta, finish_reason=None)
140
- chunk = ChatCompletionChunk(
141
- id=request_id,
142
- choices=[choice],
143
- created=created_time,
144
- model=model
145
- )
146
- yield chunk
147
- except json.JSONDecodeError:
148
- continue
149
-
150
- delta = ChoiceDelta(content=None)
151
- choice = Choice(index=0, delta=delta, finish_reason="stop")
152
- chunk = ChatCompletionChunk(
153
- id=request_id,
154
- choices=[choice],
155
- created=created_time,
156
- model=model
157
- )
158
- yield chunk
159
-
160
- except Exception as e:
161
- print(f"Error during C4AI stream request: {e}")
162
- raise IOError(f"C4AI request failed: {e}") from e
163
-
164
- def _create_non_stream(
165
- self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str
166
- ) -> ChatCompletion:
167
- try:
168
- message_id = self._client._conversation_data[model]["messageId"]
169
- url = f"{self._client.url}/api/chat/message"
170
- payload = {
171
- "conversationId": conversation_id,
172
- "messageId": message_id,
173
- "model": model,
174
- "prompt": prompt, # Use the formatted conversation history as prompt
175
- "preprompt": system_prompt,
176
- "temperature": 0.7,
177
- "top_p": 1,
178
- "top_k": 50,
179
- "max_tokens": self._client.max_tokens_to_sample,
180
- "stop": [],
181
- "stream": False
182
- }
183
-
184
- response = self._client.session.post(
185
- url,
186
- headers=self._client.headers,
187
- json=payload,
188
- timeout=self._client.timeout
189
- )
190
- response.raise_for_status()
191
-
192
- data = response.json()
193
- response_text = data.get('text', '')
194
- message = ChatCompletionMessage(role="assistant", content=response_text)
195
- choice = Choice(index=0, message=message, finish_reason="stop")
196
- # Estimate tokens based on the formatted prompt
197
- prompt_tokens = len(prompt) // 4
198
- completion_tokens = len(response_text) // 4
199
- usage = CompletionUsage(
200
- prompt_tokens=prompt_tokens,
201
- completion_tokens=completion_tokens,
202
- total_tokens=prompt_tokens + completion_tokens
203
- )
204
- completion = ChatCompletion(
205
- id=request_id,
206
- choices=[choice],
207
- created=created_time,
208
- model=model,
209
- usage=usage
210
- )
211
- return completion
212
-
213
- except Exception as e:
214
- print(f"Error during C4AI non-stream request: {e}")
215
- raise IOError(f"C4AI request failed: {e}") from e
216
-
217
- class Chat(BaseChat):
218
- def __init__(self, client: 'C4AI'):
219
- self.completions = Completions(client)
220
-
221
- class C4AI(OpenAICompatibleProvider):
222
- """
223
- OpenAI-compatible client for C4AI API.
224
-
225
- Usage:
226
- client = C4AI()
227
- response = client.chat.completions.create(
228
- model="command-a-03-2025",
229
- messages=[{"role": "user", "content": "Hello!"}]
230
- )
231
- """
232
-
233
- AVAILABLE_MODELS = [
234
- 'command-a-03-2025',
235
- 'command-r-plus-08-2024',
236
- 'command-r-08-2024',
237
- 'command-r-plus',
238
- 'command-r',
239
- 'command-r7b-12-2024',
240
- 'command-r7b-arabic-02-2025'
241
- ]
242
-
243
- def __init__(
244
- self,
245
- timeout: Optional[int] = None,
246
- browser: str = "chrome"
247
- ):
248
- """
249
- Initialize the C4AI client.
250
-
251
- Args:
252
- timeout: Request timeout in seconds.
253
- browser: Browser name for LitAgent to generate User-Agent.
254
- """
255
- self.timeout = timeout
256
- self.url = "https://cohereforai-c4ai-command.hf.space"
257
- self.session = requests.Session()
258
- self.max_tokens_to_sample = 2000
259
-
260
- agent = LitAgent()
261
- fingerprint = agent.generate_fingerprint(browser)
262
-
263
- self.headers = {
264
- "Content-Type": "application/json",
265
- "User-Agent": fingerprint["user_agent"],
266
- "Accept": "*/*",
267
- "Accept-Encoding": "gzip, deflate, br, zstd",
268
- "Accept-Language": fingerprint["accept_language"],
269
- "Origin": "https://cohereforai-c4ai-command.hf.space",
270
- "Referer": "https://cohereforai-c4ai-command.hf.space/",
271
- "Sec-Ch-Ua": fingerprint["sec_ch_ua"] or "\"Chromium\";v=\"120\"",
272
- "Sec-Ch-Ua-Mobile": "?0",
273
- "Sec-Ch-Ua-Platform": f"\"{fingerprint['platform']}\"",
274
- "Sec-Fetch-Dest": "empty",
275
- "Sec-Fetch-Mode": "cors",
276
- "Sec-Fetch-Site": "same-origin",
277
- "DNT": "1",
278
- "Priority": "u=1, i"
279
- }
280
-
281
- self._conversation_data = {}
282
- self.chat = Chat(self)
283
- self.update_available_models()
284
-
285
- def update_available_models(self):
286
- """Update the list of available models from the server."""
287
- try:
288
- response = requests.get("https://cohereforai-c4ai-command.hf.space/")
289
- text = response.text
290
- models_match = re.search(r'models:(\[.+?\]),oldModels:', text)
291
-
292
- if not models_match:
293
- return
294
-
295
- models_text = models_match.group(1)
296
- models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
297
- models_text = models_text.replace('void 0', 'null')
298
-
299
- def add_quotation_mark(match):
300
- return f'{match.group(1)}"{match.group(2)}":'
301
-
302
- models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)
303
-
304
- models_data = json.loads(models_text)
305
- self.AVAILABLE_MODELS = [model["id"] for model in models_data]
306
- except Exception:
307
- pass
308
-
309
- def create_conversation(self, model: str, system_prompt: str):
310
- """Create a new conversation with the specified model."""
311
- url = f"{self.url}/api/conversation"
312
- payload = {
313
- "model": model,
314
- "preprompt": system_prompt,
315
- }
316
-
317
- try:
318
- response = self.session.post(
319
- url,
320
- headers=self.headers,
321
- json=payload,
322
- timeout=self.timeout
323
- )
324
- response.raise_for_status()
325
-
326
- data = response.json()
327
- conversation_id = data.get("conversationId")
328
-
329
- if conversation_id:
330
- self._conversation_data[model] = {
331
- "conversationId": conversation_id,
332
- "messageId": self.fetch_message_id(conversation_id)
333
- }
334
- return conversation_id
335
-
336
- return None
337
-
338
- except Exception as e:
339
- print(f"Error creating conversation: {e}")
340
- return None
341
-
342
- def fetch_message_id(self, conversation_id: str):
343
- """Fetch the latest message ID for a conversation."""
344
- url = f"{self.url}/api/conversation/{conversation_id}"
345
-
346
- try:
347
- response = self.session.get(
348
- url,
349
- headers=self.headers,
350
- timeout=self.timeout
351
- )
352
- response.raise_for_status()
353
-
354
- json_data = response.json()
355
-
356
- if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
357
- return str(uuid.uuid4())
358
-
359
- data = json_data["nodes"][1]["data"]
360
- keys = data[data[0]["messages"]]
361
- message_keys = data[keys[-1]]
362
- message_id = data[message_keys["id"]]
363
-
364
- return message_id
365
-
366
- except Exception:
367
- return str(uuid.uuid4())
1
+ import time
2
+ import uuid
3
+ import requests
4
+ import json
5
+ import re
6
+ from typing import List, Dict, Optional, Union, Generator, Any
7
+
8
+ # Import base classes and utility structures
9
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
10
+ from .utils import (
11
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
12
+ ChatCompletionMessage, CompletionUsage,
13
+ get_system_prompt, get_last_user_message, format_prompt # Import format_prompt
14
+ )
15
+
16
# Attempt to import LitAgent, fallback if not available
try:
    from webscout.litagent import LitAgent
except ImportError:
    # Stub stand-in used when webscout's LitAgent is not importable; it
    # serves fixed, Chrome-like values so the client can still build headers.
    class LitAgent:
        _FALLBACK_UA = (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        )

        def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
            """Return a minimal hard-coded fingerprint dictionary."""
            print("Warning: LitAgent not found. Using default minimal headers.")
            fingerprint = {
                "accept": "*/*",
                "accept_language": "en-US,en;q=0.9",
                "platform": "Windows",
                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
                "user_agent": self._FALLBACK_UA,
                "browser_type": browser,
            }
            return fingerprint

        def random(self) -> str:
            """Return a fixed default user-agent string."""
            print("Warning: LitAgent not found. Using default user agent.")
            return self._FALLBACK_UA
37
+
38
+
39
class Completions(BaseCompletions):
    """OpenAI-style ``chat.completions`` endpoint backed by the C4AI HF space.

    Fixes relative to the previous revision: ``max_tokens``, ``temperature``
    and ``top_p`` passed to :meth:`create` are now forwarded to the API
    instead of being silently ignored, and the request payload shared by the
    streaming and non-streaming paths is built in a single helper.
    """

    def __init__(self, client: 'C4AI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2000,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create.

        Args:
            model: C4AI model id (see ``C4AI.AVAILABLE_MODELS``).
            messages: OpenAI-style message dicts with ``role``/``content``.
            max_tokens: Token budget for the reply; falls back to the client
                default when ``None``.
            stream: When True, return a generator of ``ChatCompletionChunk``.
            temperature: Sampling temperature (API default 0.7 when ``None``).
            top_p: Nucleus sampling value (API default 1 when ``None``).

        Returns:
            A ``ChatCompletion``, or a generator of chunks when streaming.

        Raises:
            ValueError: If no user message or formatted prompt can be derived.
            IOError: If conversation creation or the chat request fails.
        """
        # C4AI receives the system prompt separately as "preprompt".
        system_prompt = get_system_prompt(messages) or "You are a helpful assistant."

        # Format the conversation history; the system prompt is excluded since
        # it is sent separately, and do_continue=True because C4AI appends its
        # own assistant prompt implicitly.
        conversation_prompt = format_prompt(messages, include_system=False, do_continue=True)

        if not conversation_prompt:
            # Fallback to the last user message if the formatted prompt is empty.
            last_user_message = get_last_user_message(messages)
            if not last_user_message:
                raise ValueError("No user message found or formatted prompt is empty.")
            conversation_prompt = last_user_message

        # Create a conversation on first use of a model; otherwise refresh the
        # message id for the cached conversation.
        if model not in self._client._conversation_data:
            conversation_id = self._client.create_conversation(model, system_prompt)
            if not conversation_id:
                raise IOError(f"Failed to create conversation with model {model}")
        else:
            conversation_id = self._client._conversation_data[model]["conversationId"]
            self._client._conversation_data[model]["messageId"] = self._client.fetch_message_id(conversation_id)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        # Honour caller-supplied sampling parameters (previously these were
        # accepted but ignored in favour of hard-coded values).
        sampling = {
            "temperature": 0.7 if temperature is None else temperature,
            "top_p": 1 if top_p is None else top_p,
            "max_tokens": max_tokens if max_tokens is not None else self._client.max_tokens_to_sample,
        }

        if stream:
            return self._create_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt, sampling)
        else:
            return self._create_non_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt, sampling)

    def _build_payload(
        self, model: str, conversation_id: str, prompt: str,
        system_prompt: str, sampling: Dict[str, Any], stream: bool
    ) -> Dict[str, Any]:
        """Assemble the JSON body shared by streaming and non-streaming requests."""
        return {
            "conversationId": conversation_id,
            "messageId": self._client._conversation_data[model]["messageId"],
            "model": model,
            "prompt": prompt,  # Formatted conversation history.
            "preprompt": system_prompt,
            "temperature": sampling["temperature"],
            "top_p": sampling["top_p"],
            "top_k": 50,
            "max_tokens": sampling["max_tokens"],
            "stop": [],
            "stream": stream,
        }

    def _create_stream(
        self, request_id: str, created_time: int, model: str, conversation_id: str,
        prompt: str, system_prompt: str, sampling: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield completion chunks as the API streams the reply via SSE."""
        try:
            url = f"{self._client.url}/api/chat/message"
            payload = self._build_payload(model, conversation_id, prompt, system_prompt, sampling, stream=True)

            response = self._client.session.post(
                url,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            )
            response.raise_for_status()

            # The API streams the *cumulative* text; emit only the new suffix
            # of each event so consumers get true deltas.
            full_text = ""
            for line in response.iter_lines():
                if not line:
                    continue
                line = line.decode('utf-8')
                if not line.startswith('data: '):
                    continue
                data = line[6:]
                if data == "[DONE]":
                    break
                try:
                    json_data = json.loads(data)
                except json.JSONDecodeError:
                    # Skip malformed/partial SSE payloads.
                    continue
                delta_text = json_data.get('text', '')
                new_content = delta_text[len(full_text):]
                full_text = delta_text
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[Choice(index=0, delta=ChoiceDelta(content=new_content), finish_reason=None)],
                    created=created_time,
                    model=model
                )

            # Terminal chunk signalling completion, mirroring OpenAI's format.
            yield ChatCompletionChunk(
                id=request_id,
                choices=[Choice(index=0, delta=ChoiceDelta(content=None), finish_reason="stop")],
                created=created_time,
                model=model
            )

        except Exception as e:
            print(f"Error during C4AI stream request: {e}")
            raise IOError(f"C4AI request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, conversation_id: str,
        prompt: str, system_prompt: str, sampling: Dict[str, Any]
    ) -> ChatCompletion:
        """Perform a blocking request and return a fully-formed completion."""
        try:
            url = f"{self._client.url}/api/chat/message"
            payload = self._build_payload(model, conversation_id, prompt, system_prompt, sampling, stream=False)

            response = self._client.session.post(
                url,
                headers=self._client.headers,
                json=payload,
                timeout=self._client.timeout
            )
            response.raise_for_status()

            data = response.json()
            response_text = data.get('text', '')
            message = ChatCompletionMessage(role="assistant", content=response_text)
            choice = Choice(index=0, message=message, finish_reason="stop")
            # Rough token estimate (~4 chars/token); the API reports no usage.
            prompt_tokens = len(prompt) // 4
            completion_tokens = len(response_text) // 4
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )
            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage
            )

        except Exception as e:
            print(f"Error during C4AI non-stream request: {e}")
            raise IOError(f"C4AI request failed: {e}") from e
216
+
217
class Chat(BaseChat):
    """Namespace object mirroring ``openai``'s ``client.chat`` attribute."""

    def __init__(self, client: 'C4AI'):
        # Expose the completions endpoint bound to the owning client.
        self.completions = Completions(client)
220
+
221
class C4AI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for the C4AI (CohereForAI) HuggingFace space.

    Usage:
        client = C4AI()
        response = client.chat.completions.create(
            model="command-a-03-2025",
            messages=[{"role": "user", "content": "Hello!"}]
        )

    Fix relative to the previous revision: ``update_available_models`` now
    goes through the client session with headers and a bounded timeout
    instead of a bare ``requests.get`` that could hang ``__init__`` forever.
    """

    # Static fallback list; refreshed from the server in __init__.
    AVAILABLE_MODELS = [
        'command-a-03-2025',
        'command-r-plus-08-2024',
        'command-r-08-2024',
        'command-r-plus',
        'command-r',
        'command-r7b-12-2024',
        'command-r7b-arabic-02-2025'
    ]

    def __init__(
        self,
        timeout: Optional[int] = None,
        browser: str = "chrome"
    ):
        """
        Initialize the C4AI client.

        Args:
            timeout: Request timeout in seconds (None means no explicit limit
                for chat requests).
            browser: Browser name for LitAgent to generate a User-Agent.
        """
        self.timeout = timeout
        self.url = "https://cohereforai-c4ai-command.hf.space"
        self.session = requests.Session()
        self.max_tokens_to_sample = 2000  # Default per-reply token budget.

        # Browser-like headers reduce the chance of the space rejecting us.
        agent = LitAgent()
        fingerprint = agent.generate_fingerprint(browser)

        self.headers = {
            "Content-Type": "application/json",
            "User-Agent": fingerprint["user_agent"],
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": fingerprint["accept_language"],
            "Origin": "https://cohereforai-c4ai-command.hf.space",
            "Referer": "https://cohereforai-c4ai-command.hf.space/",
            "Sec-Ch-Ua": fingerprint["sec_ch_ua"] or "\"Chromium\";v=\"120\"",
            "Sec-Ch-Ua-Mobile": "?0",
            "Sec-Ch-Ua-Platform": f"\"{fingerprint['platform']}\"",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "DNT": "1",
            "Priority": "u=1, i"
        }

        # Per-model cache: {"conversationId": ..., "messageId": ...}.
        self._conversation_data = {}
        self.chat = Chat(self)
        self.update_available_models()

    def update_available_models(self):
        """Refresh ``AVAILABLE_MODELS`` by scraping the space's landing page.

        Best-effort: any failure (network error, markup change, parse error)
        leaves the hard-coded fallback list untouched.
        """
        try:
            # Fix: use the session with our headers and an explicit timeout
            # instead of a bare requests.get with no timeout, which could
            # block construction of the client indefinitely.
            response = self.session.get(
                "https://cohereforai-c4ai-command.hf.space/",
                headers=self.headers,
                timeout=self.timeout if self.timeout is not None else 30,
            )
            text = response.text
            models_match = re.search(r'models:(\[.+?\]),oldModels:', text)

            if not models_match:
                return

            # The embedded JS object is not valid JSON: drop nested
            # "parameters" objects, normalise `void 0`, then quote bare keys
            # so json.loads can parse it.
            models_text = models_match.group(1)
            models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
            models_text = models_text.replace('void 0', 'null')

            def add_quotation_mark(match):
                return f'{match.group(1)}"{match.group(2)}":'

            models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)

            models_data = json.loads(models_text)
            self.AVAILABLE_MODELS = [model["id"] for model in models_data]
        except Exception:
            # Scraping is opportunistic; keep the static list on any error.
            pass

    def create_conversation(self, model: str, system_prompt: str):
        """Create a new conversation with the specified model.

        Args:
            model: Model id to start the conversation with.
            system_prompt: Sent to the API as ``preprompt``.

        Returns:
            The conversation id string, or None on failure.
        """
        url = f"{self.url}/api/conversation"
        payload = {
            "model": model,
            "preprompt": system_prompt,
        }

        try:
            response = self.session.post(
                url,
                headers=self.headers,
                json=payload,
                timeout=self.timeout
            )
            response.raise_for_status()

            data = response.json()
            conversation_id = data.get("conversationId")

            if conversation_id:
                # Cache both ids so subsequent calls can reuse the conversation.
                self._conversation_data[model] = {
                    "conversationId": conversation_id,
                    "messageId": self.fetch_message_id(conversation_id)
                }
                return conversation_id

            return None

        except Exception as e:
            print(f"Error creating conversation: {e}")
            return None

    def fetch_message_id(self, conversation_id: str):
        """Fetch the latest message ID for a conversation.

        Falls back to a fresh UUID string whenever the response cannot be
        parsed (error node, unexpected structure, network failure).
        """
        url = f"{self.url}/api/conversation/{conversation_id}"

        try:
            response = self.session.get(
                url,
                headers=self.headers,
                timeout=self.timeout
            )
            response.raise_for_status()

            json_data = response.json()

            # An error node means the conversation state is unusable; a random
            # id lets the next request proceed.
            if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
                return str(uuid.uuid4())

            # NOTE(review): the index-chasing below mirrors the space's packed
            # data format — data[0]["messages"] points at a key list whose
            # last entry references the newest message record; verify against
            # the live API if this breaks.
            data = json_data["nodes"][1]["data"]
            keys = data[data[0]["messages"]]
            message_keys = data[keys[-1]]
            message_id = data[message_keys["id"]]

            return message_id

        except Exception:
            return str(uuid.uuid4())

    @property
    def models(self):
        """OpenAI-style accessor: ``client.models.list()`` returns model ids."""
        class _ModelList:
            def list(inner_self):
                # Closes over `self`; type(self) keeps subclass overrides visible.
                return type(self).AVAILABLE_MODELS
        return _ModelList()