lollms-client 0.20.3__py3-none-any.whl → 0.20.4__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.

This version of lollms-client has been flagged as potentially problematic.

lollms_client/lollms_discussion.py

@@ -1,54 +1,494 @@
  import yaml
- from lollms_client.lollms_core import LollmsClient
  from dataclasses import dataclass, field
- from typing import List, Dict
+ from typing import List, Dict, Optional, Union
  import uuid
  import os
+ from collections import defaultdict

- # LollmsMessage Class
+ # LollmsMessage Class with parent_id support
  @dataclass
  class LollmsMessage:
      sender: str
      content: str
      id: str = field(default_factory=lambda: str(uuid.uuid4()))
+     parent_id: Optional[str] = None
      metadata: str = "{}"
+     images: List[Dict[str, str]] = field(default_factory=list)
+
      def to_dict(self):
-         return {'sender': self.sender, 'content': self.content, 'metadata': self.metadata, 'id': self.id}
+         return {
+             'sender': self.sender,
+             'content': self.content,
+             'id': self.id,
+             'parent_id': self.parent_id,
+             'metadata': self.metadata,
+             'images': self.images
+         }
+
+

- # LollmsDiscussion Class
+ # Enhanced LollmsDiscussion Class with branching support
  class LollmsDiscussion:
-     def __init__(self, lollmsClient:LollmsClient):
-         self.messages:List[LollmsMessage] = []
+     def __init__(self, lollmsClient: 'LollmsClient'):
+         self.messages: List[LollmsMessage] = []
          self.lollmsClient = lollmsClient
+         self.active_branch_id: Optional[str] = None
+         self.message_index: Dict[str, LollmsMessage] = {}
+         self.children_index: Dict[Optional[str], List[str]] = defaultdict(list)
+         self.version: int = 2  # Current version of the format
+         self.participants: Dict[str, str] = {}  # name -> type ("user" or "assistant")
+         self.system_prompt: Optional[str] = None
+
+     def set_system_prompt(self, prompt: str):
+         self.system_prompt = prompt
+
+     def set_participants(self, participants: Dict[str, str]):
+         for name, role in participants.items():
+             if role not in ["user", "assistant"]:
+                 raise ValueError(f"Invalid role '{role}' for participant '{name}'")
+         self.participants = participants
+
+     def add_message(
+         self,
+         sender: str,
+         content: str,
+         metadata: Dict = {},
+         parent_id: Optional[str] = None,
+         images: Optional[List[Dict[str, str]]] = None
+     ) -> str:
+         if parent_id is None:
+             parent_id = self.active_branch_id
+
+         message = LollmsMessage(
+             sender=sender,
+             content=content,
+             parent_id=parent_id,
+             metadata=str(metadata),
+             images=images or []
+         )

-     def add_message(self, sender, content, metadata={}):
-         message = LollmsMessage(sender, content, str(metadata))
          self.messages.append(message)
+         self.message_index[message.id] = message
+         self.children_index[parent_id].append(message.id)
+
+         self.active_branch_id = message.id
+         return message.id
+
+
+     def get_branch(self, leaf_id: str) -> List[LollmsMessage]:
+         """Get full branch from root to specified leaf"""
+         branch = []
+         current_id = leaf_id
+
+         while current_id in self.message_index:
+             msg = self.message_index[current_id]
+             branch.append(msg)
+             current_id = msg.parent_id
+
+         # Return from root to leaf
+         return list(reversed(branch))
+
+     def set_active_branch(self, message_id: str):
+         if message_id not in self.message_index:
+             raise ValueError(f"Message ID {message_id} not found")
+         self.active_branch_id = message_id
+
+     def remove_message(self, message_id: str):
+         if message_id not in self.message_index:
+             return
+
+         msg = self.message_index[message_id]
+         parent_id = msg.parent_id
+
+         # Reassign children to parent
+         for child_id in self.children_index[message_id]:
+             child = self.message_index[child_id]
+             child.parent_id = parent_id
+             self.children_index[parent_id].append(child_id)
+
+         # Clean up indexes
+         del self.message_index[message_id]
+         del self.children_index[message_id]
+
+         # Remove from parent's children list
+         if parent_id in self.children_index and message_id in self.children_index[parent_id]:
+             self.children_index[parent_id].remove(message_id)
+
+         # Remove from main messages list
+         self.messages = [m for m in self.messages if m.id != message_id]
+
+         # Update active branch if needed
+         if self.active_branch_id == message_id:
+             self.active_branch_id = parent_id
+
+     def save_to_disk(self, file_path: str):
+         data = {
+             'version': self.version,
+             'active_branch_id': self.active_branch_id,
+             'system_prompt': self.system_prompt,
+             'participants': self.participants,
+             'messages': [m.to_dict() for m in self.messages]
+         }
+         with open(file_path, 'w', encoding='utf-8') as file:
+             yaml.dump(data, file, allow_unicode=True)
+

-     def save_to_disk(self, file_path):
-         with open(file_path, 'w') as file:
-             yaml_data = [message.to_dict() for message in self.messages]
-             yaml.dump(yaml_data, file)
+     def load_from_disk(self, file_path: str):
+         with open(file_path, 'r', encoding='utf-8') as file:
+             data = yaml.safe_load(file)

+         # Reset
+         self.messages = []
+         self.message_index = {}
+         self.children_index = defaultdict(list)

-     def format_discussion(self, max_allowed_tokens, splitter_text="!@>"):
+         if isinstance(data, list):
+             # Legacy v1 format
+             prev_id = None
+             for msg_data in data:
+                 msg = LollmsMessage(
+                     sender=msg_data['sender'],
+                     content=msg_data['content'],
+                     parent_id=prev_id,
+                     id=msg_data.get('id', str(uuid.uuid4())),
+                     metadata=msg_data.get('metadata', '{}')
+                 )
+                 self.messages.append(msg)
+                 self.message_index[msg.id] = msg
+                 self.children_index[prev_id].append(msg.id)
+                 prev_id = msg.id
+             self.active_branch_id = prev_id if self.messages else None
+             self.system_prompt = None
+             self.participants = {}
+             self.save_to_disk(file_path)  # Upgrade
+             return
+
+         # v2 format
+         version = data.get("version", 1)
+         if version != self.version:
+             raise ValueError(f"Unsupported version: {version}")
+
+         self.active_branch_id = data.get('active_branch_id')
+         self.system_prompt = data.get('system_prompt', None)
+         self.participants = data.get('participants', {})
+
+         for msg_data in data.get('messages', []):
+             # FIXED: Added `images=msg_data.get('images', [])` to correctly load images from the file.
+             msg = LollmsMessage(
+                 sender=msg_data['sender'],
+                 content=msg_data['content'],
+                 parent_id=msg_data.get('parent_id'),
+                 id=msg_data.get('id'),
+                 metadata=msg_data.get('metadata', '{}'),
+                 images=msg_data.get('images', [])
+             )
+             self.messages.append(msg)
+             self.message_index[msg.id] = msg
+             self.children_index[msg.parent_id].append(msg.id)
+
+
+     def format_discussion(self, max_allowed_tokens: int, splitter_text: str = "!@>", branch_tip_id: Optional[str] = None) -> str:
+         if branch_tip_id is None:
+             branch_tip_id = self.active_branch_id
+
+         branch_msgs = self.get_branch(branch_tip_id) if branch_tip_id else []
          formatted_text = ""
-         for message in reversed(self.messages):  # Start from the newest message
-             formatted_message = f"{splitter_text}{message.sender.replace(':','').replace('!@>','')}:\n{message.content}\n"
-             tokenized_message = self.lollmsClient.tokenize(formatted_message)
-             if len(tokenized_message) + len(self.lollmsClient.tokenize(formatted_text)) <= max_allowed_tokens:
-                 formatted_text = formatted_message + formatted_text
-             else:
-                 break  # Stop if adding the next message would exceed the limit
-         return formatted_text
-
- if __name__=="__main__":
-     # Usage
-     discussion = LollmsDiscussion()
-     discussion.add_message(sender='Alice', content='Hi there, welcome to Lollms!')
-     discussion.add_message(sender='Bob', content='See ya, thanks for using Lollms!')
-     discussion.save_to_disk('lollms_discussion.yaml')
-
- # Dependency Installation
- # Ensure to install the PyYAML library using pip:
- # pip install PyYAML
+         current_tokens = 0
+
+         # Start with system prompt if defined
+         if self.system_prompt:
+             sys_msg = f"!@>system:\n{self.system_prompt.strip()}\n"
+             sys_tokens = len(self.lollmsClient.tokenize(sys_msg))
+             if max_allowed_tokens and current_tokens + sys_tokens <= max_allowed_tokens:
+                 formatted_text += sys_msg
+                 current_tokens += sys_tokens
+
+         for msg in reversed(branch_msgs):
+             content = msg.content.strip()
+             # FIXED: Add a placeholder for images to represent them in text-only formats.
+             if msg.images:
+                 content += f"\n({len(msg.images)} image(s) attached)"
+
+             msg_text = f"{splitter_text}{msg.sender.replace(':', '').replace('!@>', '')}:\n{content}\n"
+             msg_tokens = len(self.lollmsClient.tokenize(msg_text))
+             if current_tokens + msg_tokens > max_allowed_tokens:
+                 break
+             formatted_text = msg_text + formatted_text
+             current_tokens += msg_tokens
+
+         return formatted_text.strip()
+
+     # gradio helpers -------------------------
+     def get_branch_as_chatbot_history(self, branch_tip_id: Optional[str] = None) -> List[List[str]]:
+         """
+         Converts a discussion branch into Gradio's chatbot list format.
+         [[user_msg, ai_reply], [user_msg, ai_reply], ...]
+         """
+         if branch_tip_id is None:
+             branch_tip_id = self.active_branch_id
+         if not branch_tip_id:
+             return []
+
+         branch = self.get_branch(branch_tip_id)
+         history = []
+         for msg in branch:
+             # Determine the role from participants, default to 'user'
+             role = self.participants.get(msg.sender, "user")
+
+             if role == "user":
+                 history.append([msg.content, None])
+             else:  # assistant
+                 # If the last user message has no reply yet, append to it
+                 if history and history[-1][1] is None:
+                     history[-1][1] = msg.content
+                 else:  # Standalone assistant message (e.g., the first message)
+                     history.append([None, msg.content])
+         return history
+
+     def render_discussion_tree(self, active_branch_highlight: bool = True) -> str:
+         """
+         Renders the entire discussion tree as formatted Markdown for display.
+         """
+         if not self.messages:
+             return "No messages yet."
+
+         tree_markdown = "### Discussion Tree\n\n"
+         tree_markdown += "Click a message in the dropdown to switch branches.\n\n"
+
+         # Find root nodes (messages with no parent)
+         root_ids = [msg.id for msg in self.messages if msg.parent_id is None]
+
+         # Recursive function to render a node and its children
+         def _render_node(node_id: str, depth: int) -> str:
+             node = self.message_index.get(node_id)
+             if not node:
+                 return ""
+
+             indent = "  " * depth
+             # Highlight the active message
+             is_active = ""
+             if active_branch_highlight and node.id == self.active_branch_id:
+                 is_active = " <span class='activ'>[ACTIVE]</span>"
+
+             # Format the message line
+             prefix = f"{indent}- **{node.sender}**: "
+             content_preview = node.content.replace('\n', ' ').strip()[:80]
+             line = f"{prefix} _{content_preview}..._{is_active}\n"
+
+             # Recursively render children
+             children_ids = self.children_index.get(node.id, [])
+             for child_id in children_ids:
+                 line += _render_node(child_id, depth + 1)
+
+             return line
+
+         for root_id in root_ids:
+             tree_markdown += _render_node(root_id, 0)
+
+         return tree_markdown
+
+     def get_message_choices(self) -> List[tuple]:
+         """
+         Creates a list of (label, id) tuples for a Gradio Dropdown component.
+         """
+         choices = [(f"{msg.sender}: {msg.content[:40]}... (ID: ...{msg.id[-4:]})", msg.id) for msg in self.messages]
+         # Sort by message creation order (assuming self.messages is ordered)
+         return choices
+
+
+     def export(self, format_type: str, branch_tip_id: Optional[str] = None) -> Union[List[Dict], str]:
+         """
+         Exports the discussion history in a specific format suitable for different model APIs.
+
+         Args:
+             format_type (str): The target format. Supported values are:
+                 - "openai_chat": For OpenAI, llama.cpp, and other compatible chat APIs.
+                 - "ollama_chat": For Ollama's chat API.
+                 - "lollms_text": For the native lollms-webui text/image endpoints.
+                 - "openai_completion": For legacy text completion APIs.
+             branch_tip_id (Optional[str]): The ID of the message to use as the
+                 tip of the conversation branch. Defaults to the active branch.
+
+         Returns:
+             Union[List[Dict], str]: The formatted conversation history, either as a
+                 list of dictionaries (for chat formats) or a single string.
+         """
+         if branch_tip_id is None:
+             branch_tip_id = self.active_branch_id
+
+         # Handle case of an empty or uninitialized discussion
+         if branch_tip_id is None:
+             return "" if format_type in ["lollms_text", "openai_completion"] else []
+
+         branch = self.get_branch(branch_tip_id)
+
+         # --------------------- OpenAI Chat Format ---------------------
+         # Used by: OpenAI API, llama.cpp server, and many other compatible services.
+         # Structure: List of dictionaries with 'role' and 'content'.
+         # Images are handled via multi-part 'content'.
+         # --------------------------------------------------------------
+         if format_type == "openai_chat":
+             messages = []
+             if self.system_prompt:
+                 messages.append({"role": "system", "content": self.system_prompt.strip()})
+
+             def openai_image_block(image: Dict[str, str]) -> Dict:
+                 """Creates a dict for an image URL, either from a URL or base64 data."""
+                 image_url = image['data'] if image['type'] == 'url' else f"data:image/jpeg;base64,{image['data']}"
+                 return {"type": "image_url", "image_url": {"url": image_url, "detail": "auto"}}
+
+             for msg in branch:
+                 role = self.participants.get(msg.sender, "user")
+                 if msg.images:
+                     content_parts = []
+                     if msg.content.strip():  # Add text part only if content exists
+                         content_parts.append({"type": "text", "text": msg.content.strip()})
+                     content_parts.extend(openai_image_block(img) for img in msg.images)
+                     messages.append({"role": role, "content": content_parts})
+                 else:
+                     messages.append({"role": role, "content": msg.content.strip()})
+             return messages
+
+         # --------------------- Ollama Chat Format ---------------------
+         # Used by: Ollama's '/api/chat' endpoint.
+         # Structure: List of dictionaries with 'role', 'content', and an optional 'images' key.
+         # Images must be a list of base64-encoded strings. URLs are ignored.
+         # --------------------------------------------------------------
+         elif format_type == "ollama_chat":
+             messages = []
+             if self.system_prompt:
+                 messages.append({"role": "system", "content": self.system_prompt.strip()})
+
+             for msg in branch:
+                 role = self.participants.get(msg.sender, "user")
+                 message_dict = {"role": role, "content": msg.content.strip()}
+
+                 # Filter for and add base64 images, as required by Ollama
+                 ollama_images = [img['data'] for img in msg.images if img['type'] == 'base64']
+                 if ollama_images:
+                     message_dict["images"] = ollama_images
+
+                 messages.append(message_dict)
+             return messages
+
+         # --------------------- LoLLMs Native Text Format ---------------------
+         # Used by: lollms-webui's '/lollms_generate' and '/lollms_generate_with_images' endpoints.
+         # Structure: A single string with messages separated by special tokens like '!@>user:'.
+         # Images are not part of the string but are sent separately by the binding.
+         # --------------------------------------------------------------------
+         elif format_type == "lollms_text":
+             full_prompt_parts = []
+             if self.system_prompt:
+                 full_prompt_parts.append(f"!@>system:\n{self.system_prompt.strip()}")
+
+             for msg in branch:
+                 sender_str = msg.sender.replace(':', '').replace('!@>', '')
+                 content = msg.content.strip()
+                 # Images are handled separately by the binding, but a placeholder can be useful for context
+                 if msg.images:
+                     content += f"\n({len(msg.images)} image(s) attached)"
+                 full_prompt_parts.append(f"!@>{sender_str}:\n{content}")
+
+             return "\n".join(full_prompt_parts)
+
+         # ------------------ Legacy OpenAI Completion Format ------------------
+         # Used by: Older text-completion models.
+         # Structure: A single string with human-readable roles (e.g., "User:", "Assistant:").
+         # Images are represented by a text placeholder.
+         # ----------------------------------------------------------------------
+         elif format_type == "openai_completion":
+             full_prompt_parts = []
+             if self.system_prompt:
+                 full_prompt_parts.append(f"System:\n{self.system_prompt.strip()}")
+
+             for msg in branch:
+                 role_label = self.participants.get(msg.sender, "user").capitalize()
+                 content = msg.content.strip()
+                 if msg.images:
+                     content += f"\n({len(msg.images)} image(s) attached)"
+                 full_prompt_parts.append(f"{role_label}:\n{content}")
+
+             return "\n\n".join(full_prompt_parts)
+
+         else:
+             raise ValueError(f"Unsupported export format_type: {format_type}")
+ # Example usage
+ if __name__ == "__main__":
+     import base64
+
+     # 🔧 Mock client for token counting
+     from lollms_client import LollmsClient
+     client = LollmsClient(binding_name="ollama", model_name="mistral:latest")
+     discussion = LollmsDiscussion(client)
+
+     # 👥 Set participants
+     discussion.set_participants({
+         "Alice": "user",
+         "Bob": "assistant"
+     })
+
+     # 📝 Set a system prompt
+     discussion.set_system_prompt("You are a helpful and friendly assistant.")
+
+     # 📩 Add root message
+     msg1 = discussion.add_message('Alice', 'Hello!')
+
+     # 📩 Add reply
+     msg2 = discussion.add_message('Bob', 'Hi there!')
+
+     # 🌿 Branch from msg1 with an image
+     msg3 = discussion.add_message(
+         'Alice',
+         'Here is an image of my dog.',
+         parent_id=msg1,
+         images=[{"type": "url", "data": "https://example.com/alices_dog.jpg"}]
+     )
+
+     # 🖼️ FIXED: Add another message with images using the 'images' parameter directly.
+     sample_base64 = base64.b64encode(b'This is a test image of a cat').decode('utf-8')
+     msg4 = discussion.add_message(
+         'Bob',
+         "Nice! Here's my cat.",
+         parent_id=msg3,
+         images=[
+             {"type": "url", "data": "https://example.com/bobs_cat.jpg"},
+             {"type": "base64", "data": sample_base64}
+         ]
+     )
+
+     # 🌿 Switch to the new branch
+     discussion.set_active_branch(msg4)
+
+     # 📁 Save and load discussion
+     discussion.save_to_disk("test_discussion.yaml")
+
+     print("\n💾 Discussion saved to test_discussion.yaml")
+
+     new_discussion = LollmsDiscussion(client)
+     new_discussion.load_from_disk("test_discussion.yaml")
+     # Participants must be set again as they are part of the runtime configuration,
+     # but the loader now correctly loads them from the file.
+     print("📂 Discussion loaded from test_discussion.yaml")
+
+
+     # 🧾 Format the discussion
+     formatted = new_discussion.format_discussion(1000)
+     print("\n📜 Formatted discussion (text-only with placeholders):\n", formatted)
+
+     # 🔁 Export to OpenAI Chat format
+     openai_chat = new_discussion.export("openai_chat")
+     print("\n📦 OpenAI Chat format:\n", yaml.dump(openai_chat, allow_unicode=True, sort_keys=False))
+
+     # 🔁 Export to OpenAI Completion format
+     openai_completion = new_discussion.export("openai_completion")
+     print("\n📜 OpenAI Completion format:\n", openai_completion)
+
+     # 🔁 Export to Ollama Chat format
+     ollama_export = new_discussion.export("ollama_chat")
+     print("\n🤖 Ollama Chat format:\n", yaml.dump(ollama_export, allow_unicode=True, sort_keys=False))
+
+     # Test that images were loaded correctly
+     final_message = new_discussion.message_index[new_discussion.active_branch_id]
+     assert len(final_message.images) == 2
+     assert final_message.images[1]['type'] == 'base64'
+     print("\n✅ Verification successful: Images were loaded correctly from the file.")
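The branching model introduced above is easiest to see in isolation: every message records its parent_id, children_index maps a message to its alternative replies, and get_branch walks from a leaf back to the root. A minimal sketch under stated assumptions (StubClient is a hypothetical stand-in, since only format_discussion needs the client's tokenize method):

from lollms_client.lollms_discussion import LollmsDiscussion

class StubClient:
    """Hypothetical stand-in for LollmsClient; only tokenize() is used here."""
    def tokenize(self, text: str):
        return text.split()

discussion = LollmsDiscussion(StubClient())
discussion.set_participants({"Alice": "user", "Bob": "assistant"})

root = discussion.add_message("Alice", "Tell me a joke.")
reply_a = discussion.add_message("Bob", "Why did the chicken cross the road?")
# An explicit parent_id creates a sibling reply, i.e. a second branch:
reply_b = discussion.add_message("Bob", "Knock, knock.", parent_id=root)

assert discussion.children_index[root] == [reply_a, reply_b]  # two siblings under the root
assert [m.id for m in discussion.get_branch(reply_b)] == [root, reply_b]  # root -> leaf

# Only the active branch is rendered or exported; switching branches loses nothing.
discussion.set_active_branch(reply_a)
print(discussion.format_discussion(max_allowed_tokens=512))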
lollms_client/lollms_llm_binding.py

@@ -9,6 +9,7 @@ from pathlib import Path
  from typing import Optional
  from ascii_colors import trace_exception
  from lollms_client.lollms_types import MSG_TYPE
+ from lollms_client.lollms_discussion import LollmsDiscussion
  import re
  class LollmsLLMBinding(ABC):
      """Abstract base class for all LOLLMS LLM bindings"""
@@ -73,6 +74,48 @@ class LollmsLLMBinding(ABC):
          """
          pass

+     @abstractmethod
+     def chat(self,
+              discussion: LollmsDiscussion,
+              branch_tip_id: Optional[str] = None,
+              n_predict: Optional[int] = None,
+              stream: Optional[bool] = None,
+              temperature: Optional[float] = None,
+              top_k: Optional[int] = None,
+              top_p: Optional[float] = None,
+              repeat_penalty: Optional[float] = None,
+              repeat_last_n: Optional[int] = None,
+              seed: Optional[int] = None,
+              n_threads: Optional[int] = None,
+              ctx_size: Optional[int] = None,
+              streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None
+              ) -> Union[str, dict]:
+         """
+         Conduct a chat session with the model using a LollmsDiscussion object.
+         This method is responsible for formatting the discussion into the specific
+         format required by the model's API and then calling the generation endpoint.
+
+         Args:
+             discussion (LollmsDiscussion): The discussion object containing the conversation history.
+             branch_tip_id (Optional[str]): The ID of the message to use as the tip of the conversation branch. Defaults to the active branch.
+             n_predict (Optional[int]): Maximum number of tokens to generate.
+             stream (Optional[bool]): Whether to stream the output.
+             temperature (Optional[float]): Sampling temperature.
+             top_k (Optional[int]): Top-k sampling parameter.
+             top_p (Optional[float]): Top-p sampling parameter.
+             repeat_penalty (Optional[float]): Penalty for repeated tokens.
+             repeat_last_n (Optional[int]): Number of previous tokens to consider for the repeat penalty.
+             seed (Optional[int]): Random seed for generation.
+             n_threads (Optional[int]): Number of threads to use.
+             ctx_size (Optional[int]): Context size override for this generation.
+             streaming_callback (Optional[Callable[[str, MSG_TYPE], None]]): Callback for streaming output.
+
+         Returns:
+             Union[str, dict]: The generated text or an error dictionary.
+         """
+         pass
+
+
      @abstractmethod
      def tokenize(self, text: str) -> list:
          """
lollms_client-0.20.4.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.20.3
+ Version: 0.20.4
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
lollms_client-0.20.4.dist-info/RECORD

@@ -2,7 +2,8 @@ examples/external_mcp.py,sha256=swx1KCOz6jk8jGTAycq-xu7GXPAhRMDe1x--SKocugE,1337
  examples/function_calling_with_local_custom_mcp.py,sha256=g6wOFRB8-p9Cv7hKmQaGzPvtMX3H77gas01QVNEOduM,12407
  examples/generate_a_benchmark_for_safe_store.py,sha256=bkSt0mrpNsN0krZAUShm0jgVM1ukrPpjI7VwSgcNdSA,3974
  examples/generate_text_with_multihop_rag_example.py,sha256=riEyVYo97r6ZYdySL-NJkRhE4MnpwbZku1sN8RNvbvs,11519
- examples/internet_search_with_rag.py,sha256=cbUoGgY3rxZpQ5INoaA0Nhm0cutii-2AQ9WCz71Ch3o,12369
+ examples/gradio_chat_app.py,sha256=ZZ_D1U0wvvwE9THmAPXUvNKkFG2gi7tQq1f2pQx_2ug,15315
+ examples/internet_search_with_rag.py,sha256=ioTb_WI2M6kFeh1Dg-EGcKjccphnCsIGD_e9PZgZshw,12314
  examples/local_mcp.py,sha256=w40dgayvHYe01yvekEE0LjcbkpwKjWwJ-9v4_wGYsUk,9113
  examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE-pGeFY,11060
  examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
@@ -23,12 +24,12 @@ examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5D
  examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
  examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
  examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
- lollms_client/__init__.py,sha256=wRi23qidXTyMK09HNTzeoYEMKhB_7ZImWScltfYgntE,910
+ lollms_client/__init__.py,sha256=bBGPNcYNlEP-IxspmHZvJ3M-GLhfR9DDR1H2xqA3otg,910
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
- lollms_client/lollms_core.py,sha256=iKvH20tckzdYLlIpp-srWswdI4lb0vo5po7xo3Ogsgk,114865
- lollms_client/lollms_discussion.py,sha256=EV90dIgw8a-f-82vB2GspR60RniYz7WnBmAWSIg5mW0,2158
+ lollms_client/lollms_core.py,sha256=Jr9VQCvyxtsdy3VstNjOOoMCx4uS50VHSzaFuyu754o,118714
+ lollms_client/lollms_discussion.py,sha256=fUtae7R-PcS2-rwCdd7BPX2FRdgXD8xwBgYs_17SCyA,20802
  lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
- lollms_client/lollms_llm_binding.py,sha256=bdElz_IBx0zZ-85YTT1fyY_mSoHo46tKIMiHYJlKCkM,9809
+ lollms_client/lollms_llm_binding.py,sha256=E81g4yBlQn76WTSLicnTETJuQhf_WZUMZaxotgRnOcA,12096
  lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933
  lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
  lollms_client/lollms_stt_binding.py,sha256=jAUhLouEhh2hmm1bK76ianfw_6B59EHfY3FmLv6DU-g,5111
@@ -39,10 +40,10 @@ lollms_client/lollms_ttv_binding.py,sha256=KkTaHLBhEEdt4sSVBlbwr5i_g_TlhcrwrT-7D
  lollms_client/lollms_types.py,sha256=CLiodudFgTbuXTGgupDt6IgMvJkrfiOHdw1clx_5UjA,2863
  lollms_client/lollms_utilities.py,sha256=WiG-HHMdo86j3LBndcBQ-PbMqQ8kGKLp1e9WuLDzRVU,7048
  lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
- lollms_client/llm_bindings/llamacpp/__init__.py,sha256=tUdCh00Tcg2VtavM5uRNsAoEkdeHI4p3nFsF9YUcYuk,58402
- lollms_client/llm_bindings/lollms/__init__.py,sha256=poGr9H3UshRUqmiAiiRW8_1Q8rBj3Q-mhBacdnp7C7Y,13157
- lollms_client/llm_bindings/ollama/__init__.py,sha256=8Kn8OI0PcT8QWVv5w7NDcsP99AvQ9TKMj-1VL3DZLfU,27770
- lollms_client/llm_bindings/openai/__init__.py,sha256=IGvsWbI0uw6x04IQ7u4GAM1AaVFSendLjkbKvjQ6-AM,13993
+ lollms_client/llm_bindings/llamacpp/__init__.py,sha256=Qj5RvsgPeHGNfb5AEwZSzFwAp4BOWjyxmm9qBNtstrc,63716
+ lollms_client/llm_bindings/lollms/__init__.py,sha256=17TwGMDJMxRPjZjZZSysR8AwjMXZeRfDBy8RqWWuaIY,17769
+ lollms_client/llm_bindings/ollama/__init__.py,sha256=fno1UEcXIy37Z3-bZAwOMuCfbAYOzztcO4oBfLqK_JA,32538
+ lollms_client/llm_bindings/openai/__init__.py,sha256=ay_2JJi4La258Eg3alUhnh6Y5IRyOWnHaFLXqvN_4ao,19144
  lollms_client/llm_bindings/openllm/__init__.py,sha256=xv2XDhJNCYe6NPnWBboDs24AQ1VJBOzsTuMcmuQ6xYY,29864
  lollms_client/llm_bindings/pythonllamacpp/__init__.py,sha256=7dM42TCGKh0eV0njNL1tc9cInhyvBRIXzN3dcy12Gl0,33551
  lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=nPaNhGRd-bsG0UlYwcEqjd_UagCMEf5VEbBUW-GWu6A,32203
@@ -75,8 +76,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
  lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lollms_client-0.20.3.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-0.20.3.dist-info/METADATA,sha256=ShF3nPm1Y3KT9UkJ_3zkA8BudOb8O-zNn9iuo3KxCe4,13374
- lollms_client-0.20.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lollms_client-0.20.3.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
- lollms_client-0.20.3.dist-info/RECORD,,
+ lollms_client-0.20.4.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-0.20.4.dist-info/METADATA,sha256=L7jL_v8lKQEjpFLmOVAv97A_Gz_jAqVJMNuO9IimHx4,13374
+ lollms_client-0.20.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lollms_client-0.20.4.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+ lollms_client-0.20.4.dist-info/RECORD,,