webscout-8.0-py3-none-any.whl → webscout-8.2-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (80)
  1. inferno/__init__.py +6 -0
  2. inferno/__main__.py +9 -0
  3. inferno/cli.py +6 -0
  4. webscout/Local/__init__.py +6 -0
  5. webscout/Local/__main__.py +9 -0
  6. webscout/Local/api.py +576 -0
  7. webscout/Local/cli.py +338 -0
  8. webscout/Local/config.py +75 -0
  9. webscout/Local/llm.py +188 -0
  10. webscout/Local/model_manager.py +205 -0
  11. webscout/Local/server.py +187 -0
  12. webscout/Local/utils.py +93 -0
  13. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  14. webscout/Provider/AISEARCH/ISou.py +1 -1
  15. webscout/Provider/AISEARCH/Perplexity.py +359 -0
  16. webscout/Provider/AISEARCH/__init__.py +3 -1
  17. webscout/Provider/AISEARCH/felo_search.py +1 -1
  18. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  19. webscout/Provider/AISEARCH/hika_search.py +1 -1
  20. webscout/Provider/AISEARCH/iask_search.py +436 -0
  21. webscout/Provider/AISEARCH/scira_search.py +9 -5
  22. webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
  23. webscout/Provider/ExaAI.py +1 -1
  24. webscout/Provider/ExaChat.py +18 -8
  25. webscout/Provider/GithubChat.py +5 -1
  26. webscout/Provider/Glider.py +4 -2
  27. webscout/Provider/Jadve.py +2 -2
  28. webscout/Provider/OPENAI/__init__.py +24 -0
  29. webscout/Provider/OPENAI/base.py +46 -0
  30. webscout/Provider/OPENAI/c4ai.py +347 -0
  31. webscout/Provider/OPENAI/chatgpt.py +549 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  33. webscout/Provider/OPENAI/deepinfra.py +284 -0
  34. webscout/Provider/OPENAI/exaai.py +419 -0
  35. webscout/Provider/OPENAI/exachat.py +433 -0
  36. webscout/Provider/OPENAI/freeaichat.py +355 -0
  37. webscout/Provider/OPENAI/glider.py +316 -0
  38. webscout/Provider/OPENAI/heckai.py +337 -0
  39. webscout/Provider/OPENAI/llmchatco.py +327 -0
  40. webscout/Provider/OPENAI/netwrck.py +348 -0
  41. webscout/Provider/OPENAI/opkfc.py +488 -0
  42. webscout/Provider/OPENAI/scirachat.py +463 -0
  43. webscout/Provider/OPENAI/sonus.py +294 -0
  44. webscout/Provider/OPENAI/standardinput.py +425 -0
  45. webscout/Provider/OPENAI/textpollinations.py +285 -0
  46. webscout/Provider/OPENAI/toolbaz.py +405 -0
  47. webscout/Provider/OPENAI/typegpt.py +361 -0
  48. webscout/Provider/OPENAI/uncovrAI.py +455 -0
  49. webscout/Provider/OPENAI/utils.py +211 -0
  50. webscout/Provider/OPENAI/venice.py +428 -0
  51. webscout/Provider/OPENAI/wisecat.py +381 -0
  52. webscout/Provider/OPENAI/writecream.py +158 -0
  53. webscout/Provider/OPENAI/x0gpt.py +389 -0
  54. webscout/Provider/OPENAI/yep.py +329 -0
  55. webscout/Provider/StandardInput.py +278 -0
  56. webscout/Provider/TextPollinationsAI.py +27 -28
  57. webscout/Provider/Venice.py +1 -1
  58. webscout/Provider/Writecream.py +211 -0
  59. webscout/Provider/WritingMate.py +197 -0
  60. webscout/Provider/Youchat.py +30 -26
  61. webscout/Provider/__init__.py +14 -6
  62. webscout/Provider/koala.py +2 -2
  63. webscout/Provider/llmchatco.py +5 -0
  64. webscout/Provider/scira_chat.py +18 -12
  65. webscout/Provider/scnet.py +187 -0
  66. webscout/Provider/toolbaz.py +320 -0
  67. webscout/Provider/typegpt.py +3 -184
  68. webscout/Provider/uncovr.py +3 -3
  69. webscout/conversation.py +32 -32
  70. webscout/prompt_manager.py +2 -1
  71. webscout/version.py +1 -1
  72. webscout-8.2.dist-info/METADATA +734 -0
  73. {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
  74. webscout-8.2.dist-info/entry_points.txt +5 -0
  75. {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
  76. webscout/Provider/flowith.py +0 -207
  77. webscout-8.0.dist-info/METADATA +0 -995
  78. webscout-8.0.dist-info/entry_points.txt +0 -3
  79. {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
  80. {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0

webscout/Provider/OPENAI/uncovrAI.py (new file)
@@ -0,0 +1,455 @@
+import time
+import uuid
+import re
+import json
+import cloudscraper
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.litagent import LitAgent
+from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+from .utils import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    Choice,
+    ChatCompletionMessage,
+    ChoiceDelta,
+    CompletionUsage,
+    format_prompt,
+    get_system_prompt,
+    get_last_user_message
+)
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'UncovrAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Create a chat completion using the UncovrAI API.
+
+        Args:
+            model: The model to use for completion
+            messages: A list of messages in the conversation
+            max_tokens: Maximum number of tokens to generate
+            stream: Whether to stream the response
+            temperature: Controls randomness (mapped to UncovrAI's temperature)
+            top_p: Controls diversity (not directly used by UncovrAI)
+            **kwargs: Additional parameters
+
+        Returns:
+            A ChatCompletion object or a generator of ChatCompletionChunk objects
+        """
+        # Validate model
+        if model not in self._client.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}")
+
+        # Map temperature to UncovrAI's scale (0-100)
+        # Default to 32 (medium) if not provided
+        uncovr_temperature = 32
+        if temperature is not None:
+            # Map from 0-1 scale to 0-100 scale
+            uncovr_temperature = int(temperature * 100)
+            # Ensure it's within bounds
+            uncovr_temperature = max(0, min(100, uncovr_temperature))
+
+        # Map creativity from kwargs or use default
+        creativity = kwargs.get("creativity", "medium")
+
+        # Get focus and tools from kwargs or use defaults
+        selected_focus = kwargs.get("selected_focus", ["web"])
+        selected_tools = kwargs.get("selected_tools", ["quick-cards"])
+
+        # Generate request ID and timestamp
+        request_id = str(uuid.uuid4())
+        created_time = int(time.time())
+
+        # Format the conversation using utility functions
+        conversation_prompt = format_prompt(messages, add_special_tokens=False, do_continue=True)
+
+        # Prepare the request payload
+        payload = {
+            "content": conversation_prompt,
+            "chatId": self._client.chat_id,
+            "userMessageId": str(uuid.uuid4()),
+            "ai_config": {
+                "selectedFocus": selected_focus,
+                "selectedTools": selected_tools,
+                "agentId": "chat",
+                "modelId": model,
+                "temperature": uncovr_temperature,
+                "creativity": creativity
+            }
+        }
+
+        # Handle streaming response
+        if stream:
+            return self._handle_streaming_response(
+                payload=payload,
+                model=model,
+                request_id=request_id,
+                created_time=created_time
+            )
+
+        # Handle non-streaming response
+        return self._handle_non_streaming_response(
+            payload=payload,
+            model=model,
+            request_id=request_id,
+            created_time=created_time
+        )
+
+    def _handle_streaming_response(
+        self,
+        *,
+        payload: Dict[str, Any],
+        model: str,
+        request_id: str,
+        created_time: int
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        """Handle streaming response from UncovrAI API."""
+        try:
+            with self._client.session.post(
+                self._client.url,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout
+            ) as response:
+                if response.status_code != 200:
+                    # If we get a non-200 response, try refreshing our identity once
+                    if response.status_code in [403, 429]:
+                        self._client.refresh_identity()
+                        # Retry with new identity
+                        with self._client.session.post(
+                            self._client.url,
+                            json=payload,
+                            stream=True,
+                            timeout=self._client.timeout
+                        ) as retry_response:
+                            if not retry_response.ok:
+                                raise IOError(
+                                    f"Failed to generate response after identity refresh - "
+                                    f"({retry_response.status_code}, {retry_response.reason}) - "
+                                    f"{retry_response.text}"
+                                )
+                            response = retry_response
+                    else:
+                        raise IOError(f"Request failed with status code {response.status_code}")
+
+                # Process the streaming response
+                streaming_text = ""
+                for line in response.iter_lines():
+                    if line:
+                        try:
+                            line = line.decode('utf-8')
+
+                            # Use regex to match content messages
+                            content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
+                            if content_match:  # Content message
+                                content = content_match.group(1)
+                                # Format the content to handle escape sequences
+                                content = self._client.format_text(content)
+                                streaming_text += content
+
+                                # Create a chunk for this part of the response
+                                delta = ChoiceDelta(content=content)
+                                choice = Choice(
+                                    index=0,
+                                    delta=delta,
+                                    finish_reason=None
+                                )
+                                chunk = ChatCompletionChunk(
+                                    id=request_id,
+                                    choices=[choice],
+                                    created=created_time,
+                                    model=model
+                                )
+
+                                yield chunk
+
+                            # Check for error messages
+                            error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
+                            if error_match:
+                                error_msg = error_match.group(1)
+                                raise IOError(f"API Error: {error_msg}")
+
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue
+
+                # Yield a final chunk with finish_reason="stop"
+                delta = ChoiceDelta()
+                choice = Choice(
+                    index=0,
+                    delta=delta,
+                    finish_reason="stop"
+                )
+                chunk = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model
+                )
+                yield chunk
+
+        except Exception as e:
+            print(f"{RED}Error during UncovrAI streaming request: {e}{RESET}")
+            raise IOError(f"UncovrAI streaming request failed: {e}") from e
+
+    def _handle_non_streaming_response(
+        self,
+        *,
+        payload: Dict[str, Any],
+        model: str,
+        request_id: str,
+        created_time: int
+    ) -> ChatCompletion:
+        """Handle non-streaming response from UncovrAI API."""
+        try:
+            response = self._client.session.post(
+                self._client.url,
+                json=payload,
+                timeout=self._client.timeout
+            )
+
+            if response.status_code != 200:
+                if response.status_code in [403, 429]:
+                    self._client.refresh_identity()
+                    response = self._client.session.post(
+                        self._client.url,
+                        json=payload,
+                        timeout=self._client.timeout
+                    )
+                    if not response.ok:
+                        raise IOError(
+                            f"Failed to generate response after identity refresh - "
+                            f"({response.status_code}, {response.reason}) - "
+                            f"{response.text}"
+                        )
+                else:
+                    raise IOError(f"Request failed with status code {response.status_code}")
+
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    try:
+                        line = line.decode('utf-8')
+                        content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
+                        if content_match:
+                            content = content_match.group(1)
+                            full_response += content
+
+                        # Check for error messages
+                        error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
+                        if error_match:
+                            error_msg = error_match.group(1)
+                            raise IOError(f"API Error: {error_msg}")
+
+                    except (json.JSONDecodeError, UnicodeDecodeError):
+                        continue
+
+            # Format the full response to handle escape sequences
+            full_response = self._client.format_text(full_response)
+
+            # Create message, choice, and usage objects
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_response
+            )
+
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Estimate token usage (this is approximate)
+            prompt_tokens = len(payload["content"]) // 4
+            completion_tokens = len(full_response) // 4
+            total_tokens = prompt_tokens + completion_tokens
+
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"{RED}Error during UncovrAI non-stream request: {e}{RESET}")
+            raise IOError(f"UncovrAI request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'UncovrAI'):
+        self.completions = Completions(client)
+
+class UncovrAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for Uncovr AI API.
+
+    Usage:
+        client = UncovrAI()
+        response = client.chat.completions.create(
+            model="default",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = [
+        "default",
+        "gpt-4o-mini",
+        "gemini-2-flash",
+        "gemini-2-flash-lite",
+        "groq-llama-3-1-8b",
+        "o3-mini",
+        "deepseek-r1-distill-qwen-32b",
+        # The following models are not available in the free plan:
+        # "claude-3-7-sonnet",
+        # "gpt-4o",
+        # "claude-3-5-sonnet-v2",
+        # "deepseek-r1-distill-llama-70b",
+        # "gemini-2-flash-lite-preview",
+        # "qwen-qwq-32b"
+    ]
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        browser: str = "chrome",
+        chat_id: Optional[str] = None,
+        user_id: Optional[str] = None,
+        proxies: dict = {}
+    ):
+        """
+        Initialize the UncovrAI client.
+
+        Args:
+            timeout: Request timeout in seconds
+            browser: Browser name for LitAgent to generate fingerprint
+            chat_id: Optional chat ID (will generate one if not provided)
+            user_id: Optional user ID (will generate one if not provided)
+            proxies: Optional proxy configuration
+        """
+        self.url = "https://uncovr.app/api/workflows/chat"
+        self.timeout = timeout
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": self.fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "Origin": "https://uncovr.app",
+            "Referer": "https://uncovr.app/",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin"
+        }
+
+        # Use cloudscraper to bypass Cloudflare protection
+        self.session = cloudscraper.create_scraper()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        # Set chat and user IDs
+        self.chat_id = chat_id or str(uuid.uuid4())
+        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
+
+        # Initialize chat interface
+        self.chat = Chat(self)
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
+    def format_text(self, text: str) -> str:
+        """
+        Format text by replacing escaped newlines with actual newlines.
+
+        Args:
+            text: Text to format
+
+        Returns:
+            Formatted text
+        """
+        # Use a more comprehensive approach to handle all escape sequences
+        try:
+            # First handle double backslashes to avoid issues
+            text = text.replace('\\\\', '\\')
+
+            # Handle common escape sequences
+            text = text.replace('\\n', '\n')
+            text = text.replace('\\r', '\r')
+            text = text.replace('\\t', '\t')
+            text = text.replace('\\"', '"')
+            text = text.replace("\\'", "'")
+
+            # Handle any remaining escape sequences using JSON decoding
+            try:
+                # Add quotes to make it a valid JSON string
+                json_str = f'"{text}"'
+                # Use json module to decode all escape sequences
+                decoded = json.loads(json_str)
+                return decoded
+            except json.JSONDecodeError:
+                # If JSON decoding fails, return the text with the replacements we've already done
+                return text
+        except Exception as e:
+            # If any error occurs, return the original text
+            print(f"{RED}Warning: Error formatting text: {e}{RESET}")
+            return text
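
Taken together, the hunk above adds a self-contained OpenAI-style client for uncovr.app. A minimal usage sketch, assuming webscout 8.2 is installed and the endpoint is reachable; the import path follows the new file's location in the list above, and nothing here is verified against the published wheel:

    from webscout.Provider.OPENAI.uncovrAI import UncovrAI

    client = UncovrAI(timeout=30)

    # Non-streaming: returns a ChatCompletion with approximate token usage
    response = client.chat.completions.create(
        model="default",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)

    # Streaming: yields ChatCompletionChunk objects, ending with finish_reason="stop"
    for chunk in client.chat.completions.create(
        model="default",
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,
    ):
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")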

webscout/Provider/OPENAI/utils.py (new file)
@@ -0,0 +1,211 @@
+from typing import List, Dict, Optional, Any, Union
+from dataclasses import dataclass, asdict, is_dataclass
+from enum import Enum
+
+# --- OpenAI Response Structure Mimics ---
+# Moved here for reusability across different OpenAI-compatible providers
+
+class ToolCallType(str, Enum):
+    """Type of tool call."""
+    FUNCTION = "function"
+
+@dataclass
+class BaseModel:
+    """Base class for all models."""
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert the model to a dictionary."""
+        def _convert(obj: Any) -> Any:
+            if is_dataclass(obj):
+                return {k: _convert(v) for k, v in asdict(obj).items() if v is not None}
+            elif isinstance(obj, list):
+                return [_convert(item) for item in obj]
+            elif isinstance(obj, dict):
+                return {k: _convert(v) for k, v in obj.items() if v is not None}
+            elif isinstance(obj, Enum):
+                return obj.value
+            return obj
+        return _convert(self)
+
+    def __getitem__(self, key):
+        """Support dictionary-style access."""
+        if hasattr(self, key):
+            return getattr(self, key)
+        raise KeyError(f"{key} not found in {self.__class__.__name__}")
+
+    def get(self, key, default=None):
+        """Dictionary-style get method with default value."""
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def __contains__(self, key):
+        """Support 'in' operator."""
+        return hasattr(self, key)
+
+@dataclass
+class FunctionCall(BaseModel):
+    """Function call specification."""
+    name: str
+    arguments: str
+
+@dataclass
+class ToolFunction(BaseModel):
+    """Function specification in a tool."""
+    name: str
+    arguments: str
+
+@dataclass
+class ToolCall(BaseModel):
+    """Tool call specification."""
+    id: str
+    type: str
+    function: ToolFunction
+
+@dataclass
+class CompletionUsage(BaseModel):
+    """Token usage information."""
+    prompt_tokens: int
+    completion_tokens: int
+    total_tokens: int
+    prompt_tokens_details: Optional[Dict[str, Any]] = None
+
+@dataclass
+class ChoiceDelta(BaseModel):
+    """Delta content in streaming response."""
+    content: Optional[str] = None
+    function_call: Optional[FunctionCall] = None
+    role: Optional[str] = None
+    tool_calls: Optional[List[ToolCall]] = None
+
+@dataclass
+class ChatCompletionMessage(BaseModel):
+    """Chat message in completion response."""
+    role: str
+    content: Optional[str] = None
+    function_call: Optional[FunctionCall] = None
+    tool_calls: Optional[List[ToolCall]] = None
+
+@dataclass
+class Choice(BaseModel):
+    """Choice in completion response."""
+    index: int
+    message: Optional[ChatCompletionMessage] = None
+    delta: Optional[ChoiceDelta] = None
+    finish_reason: Optional[str] = None
+    logprobs: Optional[Dict[str, Any]] = None
+
+@dataclass
+class ChatCompletion(BaseModel):
+    """Chat completion response."""
+    id: str
+    created: int
+    model: str
+    choices: List[Choice]
+    object: str = "chat.completion"
+    system_fingerprint: Optional[str] = None
+    usage: Optional[CompletionUsage] = None
+
+@dataclass
+class ChatCompletionChunk(BaseModel):
+    """Streaming chat completion response chunk."""
+    id: str
+    created: int
+    model: str
+    choices: List[Choice]
+    object: str = "chat.completion.chunk"
+    system_fingerprint: Optional[str] = None
+
+
+# --- Helper Functions ---
+
+def format_prompt(messages: List[Dict[str, Any]], add_special_tokens: bool = False,
+                  do_continue: bool = False, include_system: bool = True) -> str:
+    """
+    Format a series of messages into a single string, optionally adding special tokens.
+
+    Args:
+        messages: A list of message dictionaries, each containing 'role' and 'content'.
+        add_special_tokens: Whether to add special formatting tokens.
+        do_continue: If True, don't add the final "Assistant:" prompt.
+        include_system: Whether to include system messages in the formatted output.
+
+    Returns:
+        A formatted string containing all messages.
+    """
+    # Helper function to convert content to string
+    def to_string(value) -> str:
+        if isinstance(value, str):
+            return value
+        elif isinstance(value, dict):
+            if "text" in value:
+                return value.get("text", "")
+            return ""
+        elif isinstance(value, list):
+            return "".join([to_string(v) for v in value])
+        return str(value)
+
+    # If there's only one message and no special tokens needed, just return its content
+    if not add_special_tokens and len(messages) <= 1:
+        return to_string(messages[0]["content"])
+
+    # Filter and process messages
+    processed_messages = [
+        (message["role"], to_string(message["content"]))
+        for message in messages
+        if include_system or message.get("role") != "system"
+    ]
+
+    # Format each message as "Role: Content"
+    formatted = "\n".join([
+        f'{role.capitalize()}: {content}'
+        for role, content in processed_messages
+        if content.strip()
+    ])
+
+    # Add final prompt for assistant if needed
+    if do_continue:
+        return formatted
+
+    return f"{formatted}\nAssistant:"
+
+
+def get_system_prompt(messages: List[Dict[str, Any]]) -> str:
+    """
+    Extract and concatenate all system messages.
+
+    Args:
+        messages: A list of message dictionaries.
+
+    Returns:
+        A string containing all system messages concatenated with newlines.
+    """
+    return "\n".join([m["content"] for m in messages if m["role"] == "system"])
+
+
+def get_last_user_message(messages: List[Dict[str, Any]]) -> str:
+    """
+    Get the content of the last user message in the conversation.
+
+    Args:
+        messages: A list of message dictionaries.
+
+    Returns:
+        The content of the last user message as a string.
+    """
+    for message in reversed(messages):
+        if message["role"] == "user":
+            if isinstance(message["content"], str):
+                return message["content"]
+            # Handle complex content structures
+            if isinstance(message["content"], dict) and "text" in message["content"]:
+                return message["content"]["text"]
+            if isinstance(message["content"], list):
+                text_parts = []
+                for part in message["content"]:
+                    if isinstance(part, dict) and part.get("type") == "text":
+                        text_parts.append(part.get("text", ""))
+                    elif isinstance(part, str):
+                        text_parts.append(part)
+                return "".join(text_parts)
+    return ""