lollms-client 0.20.3__py3-none-any.whl → 0.20.6__py3-none-any.whl
This diff compares the contents of publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of lollms-client might be problematic.
- examples/gradio_chat_app.py +228 -0
- examples/internet_search_with_rag.py +1 -2
- examples/run_remote_mcp_example copy.py +226 -0
- lollms_client/__init__.py +2 -2
- lollms_client/llm_bindings/llamacpp/__init__.py +104 -0
- lollms_client/llm_bindings/lollms/__init__.py +102 -1
- lollms_client/llm_bindings/ollama/__init__.py +99 -0
- lollms_client/llm_bindings/openai/__init__.py +109 -0
- lollms_client/lollms_core.py +60 -0
- lollms_client/lollms_discussion.py +478 -33
- lollms_client/lollms_llm_binding.py +43 -0
- lollms_client/mcp_bindings/remote_mcp/__init__.py +233 -132
- {lollms_client-0.20.3.dist-info → lollms_client-0.20.6.dist-info}/METADATA +1 -1
- {lollms_client-0.20.3.dist-info → lollms_client-0.20.6.dist-info}/RECORD +17 -15
- {lollms_client-0.20.3.dist-info → lollms_client-0.20.6.dist-info}/WHEEL +0 -0
- {lollms_client-0.20.3.dist-info → lollms_client-0.20.6.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.20.3.dist-info → lollms_client-0.20.6.dist-info}/top_level.txt +0 -0
lollms_client/lollms_discussion.py

```diff
@@ -1,54 +1,499 @@
 import yaml
-from lollms_client.lollms_core import LollmsClient
 from dataclasses import dataclass, field
-from typing import List, Dict
+from typing import List, Dict, Optional, Union
 import uuid
 import os
+from collections import defaultdict
 
-# LollmsMessage Class
+# LollmsMessage Class with parent_id support
 @dataclass
 class LollmsMessage:
     sender: str
     content: str
     id: str = field(default_factory=lambda: str(uuid.uuid4()))
+    parent_id: Optional[str] = None
     metadata: str = "{}"
+    images: List[Dict[str, str]] = field(default_factory=list)
+
     def to_dict(self):
-        return {
+        return {
+            'sender': self.sender,
+            'content': self.content,
+            'id': self.id,
+            'parent_id': self.parent_id,
+            'metadata': self.metadata,
+            'images': self.images
+        }
+
+
 
-# LollmsDiscussion Class
+# Enhanced LollmsDiscussion Class with branching support
 class LollmsDiscussion:
-    def __init__(self, lollmsClient:LollmsClient):
-        self.messages:List[LollmsMessage] = []
+    def __init__(self, lollmsClient: 'LollmsClient'):
+        self.messages: List[LollmsMessage] = []
         self.lollmsClient = lollmsClient
+        self.active_branch_id: Optional[str] = None
+        self.message_index: Dict[str, LollmsMessage] = {}
+        self.children_index: Dict[Optional[str], List[str]] = defaultdict(list)
+        self.version: int = 2  # Current version of the format
+        self.participants: Dict[str, str] = {}  # name -> type ("user" or "assistant")
+        self.system_prompt: Optional[str] = None
+
+    def set_system_prompt(self, prompt: str):
+        self.system_prompt = prompt
+
+    def set_participants(self, participants: Dict[str, str]):
+        for name, role in participants.items():
+            if role not in ["user", "assistant"]:
+                raise ValueError(f"Invalid role '{role}' for participant '{name}'")
+        self.participants = participants
+
+    def add_message(
+        self,
+        sender: str,
+        content: str,
+        metadata: Dict = {},
+        parent_id: Optional[str] = None,
+        images: Optional[List[Dict[str, str]]] = None,
+        override_id: Optional[str] = None
+    ) -> str:
+        if parent_id is None:
+            parent_id = self.active_branch_id
+        if parent_id is None:
+            parent_id = "main"
+
+        message = LollmsMessage(
+            sender=sender,
+            content=content,
+            parent_id=parent_id,
+            metadata=str(metadata),
+            images=images or []
+        )
+        if override_id:
+            message.id = override_id
 
-    def add_message(self, sender, content, metadata={}):
-        message = LollmsMessage(sender, content, str(metadata))
         self.messages.append(message)
+        self.message_index[message.id] = message
+        self.children_index[parent_id].append(message.id)
+
+        self.active_branch_id = message.id
+        return message.id
+
+
+    def get_branch(self, leaf_id: str) -> List[LollmsMessage]:
+        """Get full branch from root to specified leaf"""
+        branch = []
+        current_id = leaf_id
+
+        while current_id in self.message_index:
+            msg = self.message_index[current_id]
+            branch.append(msg)
+            current_id = msg.parent_id
+
+        # Return from root to leaf
+        return list(reversed(branch))
+
+    def set_active_branch(self, message_id: str):
+        if message_id not in self.message_index:
+            raise ValueError(f"Message ID {message_id} not found")
+        self.active_branch_id = message_id
+
+    def remove_message(self, message_id: str):
+        if message_id not in self.message_index:
+            return
+
+        msg = self.message_index[message_id]
+        parent_id = msg.parent_id
+
+        # Reassign children to parent
+        for child_id in self.children_index[message_id]:
+            child = self.message_index[child_id]
+            child.parent_id = parent_id
+            self.children_index[parent_id].append(child_id)
+
+        # Clean up indexes
+        del self.message_index[message_id]
+        del self.children_index[message_id]
+
+        # Remove from parent's children list
+        if parent_id in self.children_index and message_id in self.children_index[parent_id]:
+            self.children_index[parent_id].remove(message_id)
+
+        # Remove from main messages list
+        self.messages = [m for m in self.messages if m.id != message_id]
+
+        # Update active branch if needed
+        if self.active_branch_id == message_id:
+            self.active_branch_id = parent_id
+
+    def save_to_disk(self, file_path: str):
+        data = {
+            'version': self.version,
+            'active_branch_id': self.active_branch_id,
+            'system_prompt': self.system_prompt,
+            'participants': self.participants,
+            'messages': [m.to_dict() for m in self.messages]
+        }
+        with open(file_path, 'w', encoding='utf-8') as file:
+            yaml.dump(data, file, allow_unicode=True)
+
 
-    def 
-        with open(file_path, '
-
-        yaml.dump(yaml_data, file)
+    def load_from_disk(self, file_path: str):
+        with open(file_path, 'r', encoding='utf-8') as file:
+            data = yaml.safe_load(file)
 
+        # Reset
+        self.messages = []
+        self.message_index = {}
+        self.children_index = defaultdict(list)
 
-
+        if isinstance(data, list):
+            # Legacy v1 format
+            prev_id = None
+            for msg_data in data:
+                msg = LollmsMessage(
+                    sender=msg_data['sender'],
+                    content=msg_data['content'],
+                    parent_id=prev_id,
+                    id=msg_data.get('id', str(uuid.uuid4())),
+                    metadata=msg_data.get('metadata', '{}')
+                )
+                self.messages.append(msg)
+                self.message_index[msg.id] = msg
+                self.children_index[prev_id].append(msg.id)
+                prev_id = msg.id
+            self.active_branch_id = prev_id if self.messages else None
+            self.system_prompt = None
+            self.participants = {}
+            self.save_to_disk(file_path)  # Upgrade
+            return
+
+        # v2 format
+        version = data.get("version", 1)
+        if version != self.version:
+            raise ValueError(f"Unsupported version: {version}")
+
+        self.active_branch_id = data.get('active_branch_id')
+        self.system_prompt = data.get('system_prompt', None)
+        self.participants = data.get('participants', {})
+
+        for msg_data in data.get('messages', []):
+            # FIXED: Added `images=msg_data.get('images', [])` to correctly load images from the file.
+            msg = LollmsMessage(
+                sender=msg_data['sender'],
+                content=msg_data['content'],
+                parent_id=msg_data.get('parent_id'),
+                id=msg_data.get('id'),
+                metadata=msg_data.get('metadata', '{}'),
+                images=msg_data.get('images', [])
+            )
+            self.messages.append(msg)
+            self.message_index[msg.id] = msg
+            self.children_index[msg.parent_id].append(msg.id)
+
+
+    def format_discussion(self, max_allowed_tokens: int, splitter_text: str = "!@>", branch_tip_id: Optional[str] = None) -> str:
+        if branch_tip_id is None:
+            branch_tip_id = self.active_branch_id
+
+        branch_msgs = self.get_branch(branch_tip_id) if branch_tip_id else []
         formatted_text = ""
-[old lines 36-54: content not captured in the diff view]
+        current_tokens = 0
+
+        # Start with system prompt if defined
+        if self.system_prompt:
+            sys_msg = f"!>system:\n{self.system_prompt.strip()}\n"
+            sys_tokens = len(self.lollmsClient.tokenize(sys_msg))
+            if max_allowed_tokens and current_tokens + sys_tokens <= max_allowed_tokens:
+                formatted_text += sys_msg
+                current_tokens += sys_tokens
+
+        for msg in reversed(branch_msgs):
+            content = msg.content.strip()
+            # FIXED: Add a placeholder for images to represent them in text-only formats.
+            if msg.images:
+                content += f"\n({len(msg.images)} image(s) attached)"
+
+            msg_text = f"{splitter_text}{msg.sender.replace(':', '').replace('!@>', '')}:\n{content}\n"
+            msg_tokens = len(self.lollmsClient.tokenize(msg_text))
+            if current_tokens + msg_tokens > max_allowed_tokens:
+                break
+            formatted_text = msg_text + formatted_text
+            current_tokens += msg_tokens
+
+        return formatted_text.strip()
+
+    # gradio helpers -------------------------
+    def get_branch_as_chatbot_history(self, branch_tip_id: Optional[str] = None) -> List[List[str]]:
+        """
+        Converts a discussion branch into Gradio's chatbot list format.
+        [[user_msg, ai_reply], [user_msg, ai_reply], ...]
+        """
+        if branch_tip_id is None:
+            branch_tip_id = self.active_branch_id
+        if not branch_tip_id:
+            return []
+
+        branch = self.get_branch(branch_tip_id)
+        history = []
+        for msg in branch:
+            # Determine the role from participants, default to 'user'
+            role = self.participants.get(msg.sender, "user")
+
+            if role == "user":
+                history.append([msg.content, None])
+            else:  # assistant
+                # If the last user message has no reply yet, append to it
+                if history and history[-1][1] is None:
+                    history[-1][1] = msg.content
+                else:  # Standalone assistant message (e.g., the first message)
+                    history.append([None, msg.content])
+        return history
+
+    def render_discussion_tree(self, active_branch_highlight: bool = True) -> str:
+        """
+        Renders the entire discussion tree as formatted Markdown for display.
+        """
+        if not self.messages:
+            return "No messages yet."
+
+        tree_markdown = "### Discussion Tree\n\n"
+        tree_markdown += "Click a message in the dropdown to switch branches.\n\n"
+
+        # Find root nodes (messages with no parent)
+        root_ids = [msg.id for msg in self.messages if msg.parent_id is None]
+
+        # Recursive function to render a node and its children
+        def _render_node(node_id: str, depth: int) -> str:
+            node = self.message_index.get(node_id)
+            if not node:
+                return ""
+
+            indent = " " * depth
+            # Highlight the active message
+            is_active = ""
+            if active_branch_highlight and node.id == self.active_branch_id:
+                is_active = " <span class='activ'>[ACTIVE]</span>"
+
+            # Format the message line
+            prefix = f"{indent}- **{node.sender}**: "
+            content_preview = node.content.replace('\n', ' ').strip()[:80]
+            line = f"{prefix} _{content_preview}..._{is_active}\n"
+
+            # Recursively render children
+            children_ids = self.children_index.get(node.id, [])
+            for child_id in children_ids:
+                line += _render_node(child_id, depth + 1)
+
+            return line
+
+        for root_id in root_ids:
+            tree_markdown += _render_node(root_id, 0)
+
+        return tree_markdown
+
+    def get_message_choices(self) -> List[tuple]:
+        """
+        Creates a list of (label, id) tuples for a Gradio Dropdown component.
+        """
+        choices = [(f"{msg.sender}: {msg.content[:40]}... (ID: ...{msg.id[-4:]})", msg.id) for msg in self.messages]
+        # Sort by message creation order (assuming self.messages is ordered)
+        return choices
+
+
+    def export(self, format_type: str, branch_tip_id: Optional[str] = None) -> Union[List[Dict], str]:
+        """
+        Exports the discussion history in a specific format suitable for different model APIs.
+
+        Args:
+            format_type (str): The target format. Supported values are:
+                - "openai_chat": For OpenAI, llama.cpp, and other compatible chat APIs.
+                - "ollama_chat": For Ollama's chat API.
+                - "lollms_text": For the native lollms-webui text/image endpoints.
+                - "openai_completion": For legacy text completion APIs.
+            branch_tip_id (Optional[str]): The ID of the message to use as the
+                tip of the conversation branch. Defaults to the active branch.
+
+        Returns:
+            Union[List[Dict], str]: The formatted conversation history, either as a
+                list of dictionaries (for chat formats) or a single string.
+        """
+        if branch_tip_id is None:
+            branch_tip_id = self.active_branch_id
+
+        # Handle case of an empty or uninitialized discussion
+        if branch_tip_id is None:
+            return "" if format_type in ["lollms_text", "openai_completion"] else []
+
+        branch = self.get_branch(branch_tip_id)
+
+        # --------------------- OpenAI Chat Format ---------------------
+        # Used by: OpenAI API, llama.cpp server, and many other compatible services.
+        # Structure: List of dictionaries with 'role' and 'content'.
+        # Images are handled via multi-part 'content'.
+        # --------------------------------------------------------------
+        if format_type == "openai_chat":
+            messages = []
+            if self.system_prompt:
+                messages.append({"role": "system", "content": self.system_prompt.strip()})
+
+            def openai_image_block(image: Dict[str, str]) -> Dict:
+                """Creates a dict for an image URL, either from a URL or base64 data."""
+                image_url = image['data'] if image['type'] == 'url' else f"data:image/jpeg;base64,{image['data']}"
+                return {"type": "image_url", "image_url": {"url": image_url, "detail": "auto"}}
+
+            for msg in branch:
+                role = self.participants.get(msg.sender, "user")
+                if msg.images:
+                    content_parts = []
+                    if msg.content.strip():  # Add text part only if content exists
+                        content_parts.append({"type": "text", "text": msg.content.strip()})
+                    content_parts.extend(openai_image_block(img) for img in msg.images)
+                    messages.append({"role": role, "content": content_parts})
+                else:
+                    messages.append({"role": role, "content": msg.content.strip()})
+            return messages
+
+        # --------------------- Ollama Chat Format ---------------------
+        # Used by: Ollama's '/api/chat' endpoint.
+        # Structure: List of dictionaries with 'role', 'content', and an optional 'images' key.
+        # Images must be a list of base64-encoded strings. URLs are ignored.
+        # --------------------------------------------------------------
+        elif format_type == "ollama_chat":
+            messages = []
+            if self.system_prompt:
+                messages.append({"role": "system", "content": self.system_prompt.strip()})
+
+            for msg in branch:
+                role = self.participants.get(msg.sender, "user")
+                message_dict = {"role": role, "content": msg.content.strip()}
+
+                # Filter for and add base64 images, as required by Ollama
+                ollama_images = [img['data'] for img in msg.images if img['type'] == 'base64']
+                if ollama_images:
+                    message_dict["images"] = ollama_images
+
+                messages.append(message_dict)
+            return messages
+
+        # --------------------- LoLLMs Native Text Format ---------------------
+        # Used by: lollms-webui's '/lollms_generate' and '/lollms_generate_with_images' endpoints.
+        # Structure: A single string with messages separated by special tokens like '!@>user:'.
+        # Images are not part of the string but are sent separately by the binding.
+        # --------------------------------------------------------------------
+        elif format_type == "lollms_text":
+            full_prompt_parts = []
+            if self.system_prompt:
+                full_prompt_parts.append(f"!@>system:\n{self.system_prompt.strip()}")
+
+            for msg in branch:
+                sender_str = msg.sender.replace(':', '').replace('!@>', '')
+                content = msg.content.strip()
+                # Images are handled separately by the binding, but a placeholder can be useful for context
+                if msg.images:
+                    content += f"\n({len(msg.images)} image(s) attached)"
+                full_prompt_parts.append(f"!@>{sender_str}:\n{content}")
+
+            return "\n".join(full_prompt_parts)
+
+        # ------------------ Legacy OpenAI Completion Format ------------------
+        # Used by: Older text-completion models.
+        # Structure: A single string with human-readable roles (e.g., "User:", "Assistant:").
+        # Images are represented by a text placeholder.
+        # ----------------------------------------------------------------------
+        elif format_type == "openai_completion":
+            full_prompt_parts = []
+            if self.system_prompt:
+                full_prompt_parts.append(f"System:\n{self.system_prompt.strip()}")
+
+            for msg in branch:
+                role_label = self.participants.get(msg.sender, "user").capitalize()
+                content = msg.content.strip()
+                if msg.images:
+                    content += f"\n({len(msg.images)} image(s) attached)"
+                full_prompt_parts.append(f"{role_label}:\n{content}")
+
+            return "\n\n".join(full_prompt_parts)
+
+        else:
+            raise ValueError(f"Unsupported export format_type: {format_type}")
+# Example usage
+if __name__ == "__main__":
+    import base64
+
+    # 🔧 Mock client for token counting
+    from lollms_client import LollmsClient
+    client = LollmsClient(binding_name="ollama", model_name="mistral:latest")
+    discussion = LollmsDiscussion(client)
+
+    # 👥 Set participants
+    discussion.set_participants({
+        "Alice": "user",
+        "Bob": "assistant"
+    })
+
+    # 📝 Set a system prompt
+    discussion.set_system_prompt("You are a helpful and friendly assistant.")
+
+    # 📩 Add root message
+    msg1 = discussion.add_message('Alice', 'Hello!')
+
+    # 📩 Add reply
+    msg2 = discussion.add_message('Bob', 'Hi there!')
+
+    # 🌿 Branch from msg1 with an image
+    msg3 = discussion.add_message(
+        'Alice',
+        'Here is an image of my dog.',
+        parent_id=msg1,
+        images=[{"type": "url", "data": "https://example.com/alices_dog.jpg"}]
+    )
+
+    # 🖼️ FIXED: Add another message with images using the 'images' parameter directly.
+    sample_base64 = base64.b64encode(b'This is a test image of a cat').decode('utf-8')
+    msg4 = discussion.add_message(
+        'Bob',
+        "Nice! Here's my cat.",
+        parent_id=msg3,
+        images=[
+            {"type": "url", "data": "https://example.com/bobs_cat.jpg"},
+            {"type": "base64", "data": sample_base64}
+        ]
+    )
+
+    # 🌿 Switch to the new branch
+    discussion.set_active_branch(msg4)
+
+    # 📁 Save and load discussion
+    discussion.save_to_disk("test_discussion.yaml")
+
+    print("\n💾 Discussion saved to test_discussion.yaml")
+
+    new_discussion = LollmsDiscussion(client)
+    new_discussion.load_from_disk("test_discussion.yaml")
+    # Participants must be set again as they are part of the runtime configuration
+    # but the loader now correctly loads them from the file.
+    print("📂 Discussion loaded from test_discussion.yaml")
+
+
+    # 🧾 Format the discussion
+    formatted = new_discussion.format_discussion(1000)
+    print("\n📜 Formatted discussion (text-only with placeholders):\n", formatted)
+
+    # 🔁 Export to OpenAI Chat format
+    openai_chat = new_discussion.export("openai_chat")
+    print("\n📦 OpenAI Chat format:\n", yaml.dump(openai_chat, allow_unicode=True, sort_keys=False))
+
+    # 🔁 Export to OpenAI Completion format
+    openai_completion = new_discussion.export("openai_completion")
+    print("\n📜 OpenAI Completion format:\n", openai_completion)
+
+    # 🔁 Export to Ollama Chat format
+    ollama_export = new_discussion.export("ollama_chat")
+    print("\n🤖 Ollama Chat format:\n", yaml.dump(ollama_export, allow_unicode=True, sort_keys=False))
+
+    # Test that images were loaded correctly
+    final_message = new_discussion.message_index[new_discussion.active_branch_id]
+    assert len(final_message.images) == 2
+    assert final_message.images[1]['type'] == 'base64'
+    print("\n✅ Verification successful: Images were loaded correctly from the file.")
```
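The new module's own `__main__` demo (above) covers saving, loading, and the export formats, but the branching mechanics deserve a closer look. The sketch below is not part of the package: `_StubClient` is an invented stand-in for `LollmsClient` (per the code above, only `format_discussion` ever calls `tokenize`), and it shows how an explicit `parent_id` forks the tree while `set_active_branch` selects which fork the helpers read.

```python
# Minimal sketch against lollms-client 0.20.6; _StubClient is a made-up
# stand-in for LollmsClient (only format_discussion needs tokenize()).
from lollms_client.lollms_discussion import LollmsDiscussion

class _StubClient:
    def tokenize(self, text: str):
        return text.split()  # crude token count, enough for this demo

discussion = LollmsDiscussion(_StubClient())
discussion.set_participants({"Alice": "user", "Bob": "assistant"})

root = discussion.add_message("Alice", "What is a binary tree?")
first = discussion.add_message("Bob", "A tree where each node has at most two children.")

# Fork: a second answer attached to the same user message.
retry = discussion.add_message("Bob", "A hierarchy of nodes with left/right children.",
                               parent_id=root)

# add_message always moves the active branch to the newest message...
assert discussion.active_branch_id == retry
# ...while get_branch walks parent_id links from any leaf back to the root.
assert [m.id for m in discussion.get_branch(first)] == [root, first]

# The Gradio helper pairs user/assistant turns along the active branch only.
print(discussion.get_branch_as_chatbot_history())

# Switching branches changes what the helpers (and exports) see.
discussion.set_active_branch(first)
print(discussion.get_branch_as_chatbot_history())
```

One detail worth noting from the diff itself: `add_message` gives root messages the literal parent id `"main"`, while `render_discussion_tree` looks for `parent_id is None` to find roots, so the sketch sticks to the branch-walking helpers, which handle the `"main"` sentinel correctly.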
lollms_client/lollms_llm_binding.py

```diff
@@ -9,6 +9,7 @@ from pathlib import Path
 from typing import Optional
 from ascii_colors import trace_exception
 from lollms_client.lollms_types import MSG_TYPE
+from lollms_client.lollms_discussion import LollmsDiscussion
 import re
 class LollmsLLMBinding(ABC):
     """Abstract base class for all LOLLMS LLM bindings"""
@@ -73,6 +74,48 @@ class LollmsLLMBinding(ABC):
         """
         pass
 
+    @abstractmethod
+    def chat(self,
+             discussion: LollmsDiscussion,
+             branch_tip_id: Optional[str] = None,
+             n_predict: Optional[int] = None,
+             stream: Optional[bool] = None,
+             temperature: Optional[float] = None,
+             top_k: Optional[int] = None,
+             top_p: Optional[float] = None,
+             repeat_penalty: Optional[float] = None,
+             repeat_last_n: Optional[int] = None,
+             seed: Optional[int] = None,
+             n_threads: Optional[int] = None,
+             ctx_size: Optional[int] = None,
+             streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None
+             ) -> Union[str, dict]:
+        """
+        A method to conduct a chat session with the model using a LollmsDiscussion object.
+        This method is responsible for formatting the discussion into the specific
+        format required by the model's API and then calling the generation endpoint.
+
+        Args:
+            discussion (LollmsDiscussion): The discussion object containing the conversation history.
+            branch_tip_id (Optional[str]): The ID of the message to use as the tip of the conversation branch. Defaults to the active branch.
+            n_predict (Optional[int]): Maximum number of tokens to generate.
+            stream (Optional[bool]): Whether to stream the output.
+            temperature (Optional[float]): Sampling temperature.
+            top_k (Optional[int]): Top-k sampling parameter.
+            top_p (Optional[float]): Top-p sampling parameter.
+            repeat_penalty (Optional[float]): Penalty for repeated tokens.
+            repeat_last_n (Optional[int]): Number of previous tokens to consider for repeat penalty.
+            seed (Optional[int]): Random seed for generation.
+            n_threads (Optional[int]): Number of threads to use.
+            ctx_size (Optional[int]): Context size override for this generation.
+            streaming_callback (Optional[Callable[[str, MSG_TYPE], None]]): Callback for streaming output.
+
+        Returns:
+            Union[str, dict]: The generated text or an error dictionary.
+        """
+        pass
+
+
 @abstractmethod
 def tokenize(self, text: str) -> list:
     """
```
|