lollms-client 0.22.0__py3-none-any.whl → 0.24.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/console_discussion/console_app.py +266 -0
- examples/{run_remote_mcp_example copy.py → mcp_examples/run_remote_mcp_example_v2.py} +65 -1
- lollms_client/__init__.py +1 -1
- lollms_client/lollms_core.py +408 -274
- lollms_client/lollms_discussion.py +599 -294
- lollms_client/lollms_llm_binding.py +3 -0
- lollms_client/lollms_types.py +1 -1
- {lollms_client-0.22.0.dist-info → lollms_client-0.24.0.dist-info}/METADATA +2 -1
- {lollms_client-0.22.0.dist-info → lollms_client-0.24.0.dist-info}/RECORD +16 -16
- {lollms_client-0.22.0.dist-info → lollms_client-0.24.0.dist-info}/top_level.txt +0 -1
- personalities/parrot.py +0 -10
- /examples/{external_mcp.py → mcp_examples/external_mcp.py} +0 -0
- /examples/{local_mcp.py → mcp_examples/local_mcp.py} +0 -0
- /examples/{openai_mcp.py → mcp_examples/openai_mcp.py} +0 -0
- /examples/{run_standard_mcp_example.py → mcp_examples/run_standard_mcp_example.py} +0 -0
- {lollms_client-0.22.0.dist-info → lollms_client-0.24.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.22.0.dist-info → lollms_client-0.24.0.dist-info}/licenses/LICENSE +0 -0
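
Most of this release's churn is in `lollms_client/lollms_discussion.py`, whose annotated diff follows: discussions gain non-destructive context pruning (new `pruning_summary`/`pruning_point_id` columns plus a simple schema migration), `chat()` becomes the single entry point for both plain and agentic (MCP/RAG) turns, and nearly every public method gains a docstring. As orientation before the diff, here is a minimal sketch of the 0.24.0 discussion API; the bare `LollmsClient()` construction is an assumption for illustration, not something this diff shows.

```python
# Sketch only: assumes lollms-client 0.24.0 with a working binding configured.
from lollms_client import LollmsClient
from lollms_client.lollms_discussion import LollmsDataManager, LollmsDiscussion

lc = LollmsClient()  # hypothetical default construction; configure a binding as needed
dm = LollmsDataManager(db_path="sqlite:///discussions.db")

discussion = LollmsDiscussion.create_new(lollms_client=lc, db_manager=dm, autosave=True)
result = discussion.chat(user_message="Hello!")  # {'user_message': ..., 'ai_message': ...}
print(result["ai_message"].content)
discussion.close()
```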
@@ -1,60 +1,94 @@
-import yaml
-import json
 import base64
-import
-import uuid
-import shutil
+import json
 import re
-
+import uuid
 from datetime import datetime
-from typing import List, Dict, Optional, Union, Any, Type, Callable
 from pathlib import Path
 from types import SimpleNamespace
-
-from
-
-from sqlalchemy
-
+from typing import Any, Callable, Dict, List, Optional, Type, Union
+from ascii_colors import trace_exception
+import yaml
+from sqlalchemy import (Column, DateTime, Float, ForeignKey, Integer, JSON,
+                        LargeBinary, String, Text, create_engine)
+from sqlalchemy.orm import (Session, declarative_base, declared_attr,
+                            relationship, sessionmaker)
 from sqlalchemy.orm.exc import NoResultFound
-
+from sqlalchemy.types import TypeDecorator
+from sqlalchemy import text
 try:
     from cryptography.fernet import Fernet, InvalidToken
+    from cryptography.hazmat.backends import default_backend
     from cryptography.hazmat.primitives import hashes
     from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
-    from cryptography.hazmat.backends import default_backend
     ENCRYPTION_AVAILABLE = True
 except ImportError:
     ENCRYPTION_AVAILABLE = False

 # Type hint placeholders for classes defined externally
-if False:
+if False:
     from lollms_client import LollmsClient
-    from lollms_client.lollms_types import MSG_TYPE
     from lollms_personality import LollmsPersonality

+
 class EncryptedString(TypeDecorator):
-    """A SQLAlchemy TypeDecorator for field-level database encryption.
+    """A SQLAlchemy TypeDecorator for field-level database encryption.
+
+    This class provides transparent encryption and decryption for string-based
+    database columns. It derives a stable encryption key from a user-provided
+    password and a fixed salt using PBKDF2HMAC, then uses Fernet for
+    symmetric encryption.
+
+    Requires the 'cryptography' library to be installed.
+    """
     impl = LargeBinary
     cache_ok = True

     def __init__(self, key: str, *args, **kwargs):
+        """Initializes the encryption engine.
+
+        Args:
+            key: The secret key (password) to use for encryption.
+        """
         super().__init__(*args, **kwargs)
         if not ENCRYPTION_AVAILABLE:
             raise ImportError("'cryptography' is required for DB encryption.")
+
         self.salt = b'lollms-fixed-salt-for-db-encryption'
         kdf = PBKDF2HMAC(
-            algorithm=hashes.SHA256(),
-
+            algorithm=hashes.SHA256(),
+            length=32,
+            salt=self.salt,
+            iterations=480000,
+            backend=default_backend()
         )
         derived_key = base64.urlsafe_b64encode(kdf.derive(key.encode()))
         self.fernet = Fernet(derived_key)

     def process_bind_param(self, value: Optional[str], dialect) -> Optional[bytes]:
+        """Encrypts the string value before writing it to the database.
+
+        Args:
+            value: The plaintext string to encrypt.
+            dialect: The database dialect in use.
+
+        Returns:
+            The encrypted value as bytes, or None if the input was None.
+        """
         if value is None:
             return None
         return self.fernet.encrypt(value.encode('utf-8'))

     def process_result_value(self, value: Optional[bytes], dialect) -> Optional[str]:
+        """Decrypts the byte value from the database into a string.
+
+        Args:
+            value: The encrypted bytes from the database.
+            dialect: The database dialect in use.
+
+        Returns:
+            The decrypted plaintext string, a special error message if decryption
+            fails, or None if the input was None.
+        """
         if value is None:
             return None
         try:
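
The hunk above also pins down the encryption scheme: `EncryptedString` derives its Fernet key via PBKDF2-SHA256 over a fixed salt at 480,000 iterations. That derivation can be reproduced standalone with the `cryptography` package; this runnable snippet mirrors the constructor shown in the diff:

```python
import base64
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

def derive_fernet(password: str, salt: bytes = b'lollms-fixed-salt-for-db-encryption') -> Fernet:
    # Same parameters as EncryptedString.__init__: SHA-256, 32-byte key, 480k iterations
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt,
                     iterations=480000, backend=default_backend())
    return Fernet(base64.urlsafe_b64encode(kdf.derive(password.encode())))

f = derive_fernet("my-db-password")
token = f.encrypt("secret message".encode("utf-8"))
assert f.decrypt(token).decode("utf-8") == "secret message"
```

Note the fixed salt: the same password always derives the same key, which is what lets a reopened database decrypt old rows, but it also means the password is the only secret.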
@@ -62,12 +96,32 @@ class EncryptedString(TypeDecorator):
         except InvalidToken:
             return "<DECRYPTION_FAILED: Invalid Key or Corrupt Data>"

-
-
+
+def create_dynamic_models(
+    discussion_mixin: Optional[Type] = None,
+    message_mixin: Optional[Type] = None,
+    encryption_key: Optional[str] = None
+) -> tuple[Type, Type, Type]:
+    """Factory to dynamically create SQLAlchemy ORM models.
+
+    This function builds the `Discussion` and `Message` SQLAlchemy models,
+    optionally including custom mixin classes for extending functionality and
+    applying encryption to text fields if a key is provided.
+
+    Args:
+        discussion_mixin: An optional class to mix into the Discussion model.
+        message_mixin: An optional class to mix into the Message model.
+        encryption_key: An optional key to enable database field encryption.
+
+    Returns:
+        A tuple containing the declarative Base, the created Discussion model,
+        and the created Message model.
+    """
     Base = declarative_base()
     EncryptedText = EncryptedString(encryption_key) if encryption_key else Text

     class DiscussionBase:
+        """Abstract base for the Discussion ORM model."""
         __abstract__ = True
         id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
         system_prompt = Column(EncryptedText, nullable=True)
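
The mixin hooks above accept plain classes whose SQLAlchemy columns are merged into the generated models. A minimal sketch, assuming the import path `lollms_client.lollms_discussion` (the module this diff touches); the `ProjectMixin` class and its column are hypothetical:

```python
from sqlalchemy import Column, String
from lollms_client.lollms_discussion import create_dynamic_models

class ProjectMixin:
    # Hypothetical extra column, merged into the generated Discussion model
    project_name = Column(String, nullable=True)

Base, Discussion, Message = create_dynamic_models(
    discussion_mixin=ProjectMixin,
    encryption_key=None,  # pass a key here to switch text columns to EncryptedString
)
print(Discussion.__tablename__)  # "discussions"
```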
@@ -76,78 +130,152 @@ def create_dynamic_models(discussion_mixin: Optional[Type] = None, message_mixin
         discussion_metadata = Column(JSON, nullable=True, default=dict)
         created_at = Column(DateTime, default=datetime.utcnow)
         updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
-
+
+        # Fields for non-destructive context pruning
+        pruning_summary = Column(EncryptedText, nullable=True)
+        pruning_point_id = Column(String, nullable=True)
+
         @declared_attr
         def messages(cls):
             return relationship("Message", back_populates="discussion", cascade="all, delete-orphan", lazy="joined")

     class MessageBase:
+        """Abstract base for the Message ORM model."""
         __abstract__ = True
         id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
         discussion_id = Column(String, ForeignKey('discussions.id'), nullable=False, index=True)
         parent_id = Column(String, ForeignKey('messages.id'), nullable=True, index=True)
         sender = Column(String, nullable=False)
         sender_type = Column(String, nullable=False)
-
+
         raw_content = Column(EncryptedText, nullable=True)
         thoughts = Column(EncryptedText, nullable=True)
         content = Column(EncryptedText, nullable=False)
         scratchpad = Column(EncryptedText, nullable=True)
-
+
         tokens = Column(Integer, nullable=True)
         binding_name = Column(String, nullable=True)
         model_name = Column(String, nullable=True)
         generation_speed = Column(Float, nullable=True)
-
+
         message_metadata = Column(JSON, nullable=True, default=dict)
         images = Column(JSON, nullable=True, default=list)
         created_at = Column(DateTime, default=datetime.utcnow)
-
+
         @declared_attr
         def discussion(cls):
             return relationship("Discussion", back_populates="messages")
-
+
     discussion_bases = (discussion_mixin, DiscussionBase, Base) if discussion_mixin else (DiscussionBase, Base)
     DynamicDiscussion = type('Discussion', discussion_bases, {'__tablename__': 'discussions'})

     message_bases = (message_mixin, MessageBase, Base) if message_mixin else (MessageBase, Base)
     DynamicMessage = type('Message', message_bases, {'__tablename__': 'messages'})
-
+
     return Base, DynamicDiscussion, DynamicMessage

+
 class LollmsDataManager:
-    """Manages database connection, session, and table creation.
+    """Manages database connection, session, and table creation.
+
+    This class serves as the central point of contact for all database
+    operations, abstracting away the SQLAlchemy engine and session management.
+    """
+
     def __init__(self, db_path: str, discussion_mixin: Optional[Type] = None, message_mixin: Optional[Type] = None, encryption_key: Optional[str] = None):
+        """Initializes the data manager.
+
+        Args:
+            db_path: The connection string for the SQLAlchemy database
+                (e.g., 'sqlite:///mydatabase.db').
+            discussion_mixin: Optional mixin class for the Discussion model.
+            message_mixin: Optional mixin class for the Message model.
+            encryption_key: Optional key to enable database encryption.
+        """
         if not db_path:
             raise ValueError("Database path cannot be empty.")
+
         self.Base, self.DiscussionModel, self.MessageModel = create_dynamic_models(
             discussion_mixin, message_mixin, encryption_key
         )
         self.engine = create_engine(db_path)
         self.SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=self.engine)
-        self.
+        self.create_and_migrate_tables()

-    def
+    def create_and_migrate_tables(self):
+        """Creates all tables if they don't exist and performs simple schema migrations."""
         self.Base.metadata.create_all(bind=self.engine)
+        try:
+            with self.engine.connect() as connection:
+                print("Checking for database schema upgrades...")
+
+                # --- THIS IS THE FIX ---
+                # We must wrap raw SQL strings in the `text()` function for direct execution.
+                cursor = connection.execute(text("PRAGMA table_info(discussions)"))
+                columns = [row[1] for row in cursor.fetchall()]
+
+                if 'pruning_summary' not in columns:
+                    print(" -> Upgrading 'discussions' table: Adding 'pruning_summary' column.")
+                    connection.execute(text("ALTER TABLE discussions ADD COLUMN pruning_summary TEXT"))
+
+                if 'pruning_point_id' not in columns:
+                    print(" -> Upgrading 'discussions' table: Adding 'pruning_point_id' column.")
+                    connection.execute(text("ALTER TABLE discussions ADD COLUMN pruning_point_id VARCHAR"))
+
+                print("Database schema is up to date.")
+                # This is important to apply the ALTER TABLE statements
+                connection.commit()
+
+        except Exception as e:
+            print(f"\n--- DATABASE MIGRATION WARNING ---")
+            print(f"An error occurred during database schema migration: {e}")
+            print("The application might not function correctly if the schema is outdated.")
+            print("If problems persist, consider backing up and deleting the database file.")
+            print("---")

     def get_session(self) -> Session:
+        """Returns a new SQLAlchemy session."""
         return self.SessionLocal()

     def list_discussions(self) -> List[Dict]:
+        """Retrieves a list of all discussions from the database.
+
+        Returns:
+            A list of dictionaries, where each dictionary represents a discussion.
+        """
         with self.get_session() as session:
             discussions = session.query(self.DiscussionModel).all()
             return [{c.name: getattr(disc, c.name) for c in disc.__table__.columns} for disc in discussions]

     def get_discussion(self, lollms_client: 'LollmsClient', discussion_id: str, **kwargs) -> Optional['LollmsDiscussion']:
+        """Retrieves a single discussion by its ID and wraps it.
+
+        Args:
+            lollms_client: The LollmsClient instance for the discussion to use.
+            discussion_id: The unique ID of the discussion to retrieve.
+            **kwargs: Additional arguments to pass to the LollmsDiscussion constructor.
+
+        Returns:
+            A LollmsDiscussion instance if found, otherwise None.
+        """
         with self.get_session() as session:
             try:
                 db_disc = session.query(self.DiscussionModel).filter_by(id=discussion_id).one()
-                session.expunge(db_disc)
+                session.expunge(db_disc) # Detach from session before returning
                 return LollmsDiscussion(lollmsClient=lollms_client, db_manager=self, db_discussion_obj=db_disc, **kwargs)
             except NoResultFound:
                 return None

     def search_discussions(self, **criteria) -> List[Dict]:
+        """Searches for discussions based on provided criteria.
+
+        Args:
+            **criteria: Keyword arguments where the key is a column name and
+                the value is the string to search for.
+
+        Returns:
+            A list of dictionaries representing the matching discussions.
+        """
         with self.get_session() as session:
             query = session.query(self.DiscussionModel)
             for key, value in criteria.items():
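
The `# THIS IS THE FIX` comment above refers to SQLAlchemy's requirement (with 2.0-style connections) that raw SQL be wrapped in `text()` before `Connection.execute()`. The idempotent PRAGMA-based column check can be exercised in isolation; a runnable demonstration, assuming SQLAlchemy 1.4+ in 2.0 mode:

```python
from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///:memory:")
with engine.connect() as conn:
    conn.execute(text("CREATE TABLE discussions (id VARCHAR PRIMARY KEY)"))
    # PRAGMA table_info returns (cid, name, type, notnull, dflt_value, pk) per column
    cols = [row[1] for row in conn.execute(text("PRAGMA table_info(discussions)")).fetchall()]
    if "pruning_summary" not in cols:
        conn.execute(text("ALTER TABLE discussions ADD COLUMN pruning_summary TEXT"))
    conn.commit()  # "commit as you go" applies the DDL
```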
@@ -157,24 +285,43 @@ class LollmsDataManager:
             return [{c.name: getattr(disc, c.name) for c in disc.__table__.columns} for disc in discussions]

     def delete_discussion(self, discussion_id: str):
+        """Deletes a discussion and all its associated messages from the database.
+
+        Args:
+            discussion_id: The ID of the discussion to delete.
+        """
         with self.get_session() as session:
             db_disc = session.query(self.DiscussionModel).filter_by(id=discussion_id).first()
             if db_disc:
                 session.delete(db_disc)
                 session.commit()

+
 class LollmsMessage:
-    """A wrapper for a message ORM object
+    """A lightweight proxy wrapper for a message ORM object.
+
+    This class provides a more direct and convenient API for interacting with a
+    message's data, proxying attribute access to the underlying database object.
+    """
+
     def __init__(self, discussion: 'LollmsDiscussion', db_message: Any):
+        """Initializes the message proxy.
+
+        Args:
+            discussion: The parent LollmsDiscussion instance.
+            db_message: The underlying SQLAlchemy ORM message object or a SimpleNamespace.
+        """
         object.__setattr__(self, '_discussion', discussion)
         object.__setattr__(self, '_db_message', db_message)

     def __getattr__(self, name: str) -> Any:
+        """Proxies attribute getting to the underlying DB object."""
         if name == 'metadata':
             return getattr(self._db_message, 'message_metadata', None)
         return getattr(self._db_message, name)

     def __setattr__(self, name: str, value: Any):
+        """Proxies attribute setting to the underlying DB object and marks discussion as dirty."""
         if name == 'metadata':
             setattr(self._db_message, 'message_metadata', value)
         else:
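
`LollmsMessage.__getattr__`/`__setattr__` implement a small attribute-proxy pattern, aliasing the public name `metadata` to the column `message_metadata` (which sidesteps SQLAlchemy's reserved `metadata` attribute). The mechanics in isolation, as a runnable sketch:

```python
from types import SimpleNamespace

class MessageProxy:
    def __init__(self, row):
        object.__setattr__(self, "_row", row)  # bypass our own __setattr__
    def __getattr__(self, name):
        return getattr(self._row, "message_metadata" if name == "metadata" else name)
    def __setattr__(self, name, value):
        setattr(self._row, "message_metadata" if name == "metadata" else name, value)

row = SimpleNamespace(message_metadata={"tool_calls": []}, content="hi")
proxy = MessageProxy(row)
assert proxy.metadata == {"tool_calls": []}
proxy.metadata = {"sources": [1]}
assert row.message_metadata == {"sources": [1]}
```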
@@ -182,23 +329,44 @@ class LollmsMessage:
         self._discussion.touch()

     def __repr__(self) -> str:
+        """Provides a developer-friendly representation of the message."""
         return f"<LollmsMessage id={self.id} sender='{self.sender}'>"

+
 class LollmsDiscussion:
-    """Represents and manages a single discussion
-
-
-
-
+    """Represents and manages a single discussion.
+
+    This class is the primary user-facing interface for interacting with a
+    conversation. It can be database-backed or entirely in-memory. It handles
+    message management, branching, context formatting, and automatic,
+    non-destructive context pruning.
+    """
+
+    def __init__(self,
+                 lollmsClient: 'LollmsClient',
+                 db_manager: Optional[LollmsDataManager] = None,
+                 discussion_id: Optional[str] = None,
+                 db_discussion_obj: Optional[Any] = None,
+                 autosave: bool = False,
+                 max_context_size: Optional[int] = None):
+        """Initializes a discussion instance.
+
+        Args:
+            lollmsClient: The LollmsClient instance used for generation and token counting.
+            db_manager: An optional LollmsDataManager for database persistence.
+            discussion_id: The ID of the discussion to load (if db_manager is provided).
+            db_discussion_obj: A pre-loaded ORM object to wrap.
+            autosave: If True, commits changes to the DB automatically after modifications.
+            max_context_size: The maximum number of tokens to allow in the context
+                before triggering automatic pruning.
+        """
         object.__setattr__(self, 'lollmsClient', lollmsClient)
         object.__setattr__(self, 'db_manager', db_manager)
         object.__setattr__(self, 'autosave', autosave)
         object.__setattr__(self, 'max_context_size', max_context_size)
         object.__setattr__(self, 'scratchpad', "")
-        object.__setattr__(self, 'show_thoughts', False)
-        object.__setattr__(self, 'include_thoughts_in_context', False)
-        object.__setattr__(self, 'thought_placeholder', "<thought process hidden>")

+        # Internal state
         object.__setattr__(self, '_session', None)
         object.__setattr__(self, '_db_discussion', None)
         object.__setattr__(self, '_message_index', None)
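
Because `LollmsDiscussion` overrides `__setattr__` to forward writes to the wrapped ORM object, its constructor must use `object.__setattr__` for internal state, exactly as the lines above do; a plain `self.x = ...` would be proxied to the wrapped object instead of landing on the wrapper itself. The idiom in miniature:

```python
class Wrapper:
    def __init__(self, inner):
        object.__setattr__(self, "_inner", inner)  # direct write, skips our override
    def __setattr__(self, name, value):
        setattr(self._inner, name, value)  # every other write lands on the target

class Inner:
    pass

w = Wrapper(Inner())
w.title = "hello"                 # forwarded to the inner object
assert w._inner.title == "hello"
assert list(w.__dict__) == ["_inner"]  # only the internal slot lives on the wrapper
```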
@@ -220,21 +388,23 @@ class LollmsDiscussion:
                 raise ValueError(f"No discussion found with ID: {discussion_id}")
         else:
             self._create_in_memory_proxy(id=discussion_id)
+
         self._rebuild_message_index()
-
-    @property
-    def remaining_tokens(self) -> Optional[int]:
-        """Calculates the remaining tokens available in the context window."""
-        binding = self.lollmsClient.binding
-        if not binding or not hasattr(binding, 'ctx_size') or not binding.ctx_size:
-            return None
-        max_ctx = binding.ctx_size
-        current_prompt = self.format_discussion(max_ctx)
-        current_tokens = self.lollmsClient.count_tokens(current_prompt)
-        return max_ctx - current_tokens

     @classmethod
     def create_new(cls, lollms_client: 'LollmsClient', db_manager: Optional[LollmsDataManager] = None, **kwargs) -> 'LollmsDiscussion':
+        """Creates a new discussion and persists it if a db_manager is provided.
+
+        This is the recommended factory method for creating new discussions.
+
+        Args:
+            lollms_client: The LollmsClient instance to associate with the discussion.
+            db_manager: An optional LollmsDataManager to make the discussion persistent.
+            **kwargs: Attributes for the new discussion (e.g., id, title).
+
+        Returns:
+            A new LollmsDiscussion instance.
+        """
         init_args = {
             'autosave': kwargs.pop('autosave', False),
             'max_context_size': kwargs.pop('max_context_size', None)
@@ -252,6 +422,7 @@ class LollmsDiscussion:
         return cls(lollmsClient=lollms_client, discussion_id=kwargs.get('id'), **init_args)

     def __getattr__(self, name: str) -> Any:
+        """Proxies attribute getting to the underlying discussion object."""
         if name == 'metadata':
             return getattr(self._db_discussion, 'discussion_metadata', None)
         if name == 'messages':
@@ -259,10 +430,10 @@ class LollmsDiscussion:
         return getattr(self._db_discussion, name)

     def __setattr__(self, name: str, value: Any):
+        """Proxies attribute setting to the underlying discussion object."""
         internal_attrs = [
-            'lollmsClient','db_manager','autosave','max_context_size','scratchpad',
-            '
-            '_session','_db_discussion','_message_index','_messages_to_delete_from_db', '_is_db_backed'
+            'lollmsClient', 'db_manager', 'autosave', 'max_context_size', 'scratchpad',
+            '_session', '_db_discussion', '_message_index', '_messages_to_delete_from_db', '_is_db_backed'
         ]
         if name in internal_attrs:
             object.__setattr__(self, name, value)
@@ -274,32 +445,47 @@ class LollmsDiscussion:
             self.touch()

     def _create_in_memory_proxy(self, id: Optional[str] = None):
+        """Creates a SimpleNamespace object to mimic a DB record for in-memory discussions."""
         proxy = SimpleNamespace()
-        proxy.id
-        proxy.
-        proxy.
+        proxy.id = id or str(uuid.uuid4())
+        proxy.system_prompt = None
+        proxy.participants = {}
+        proxy.active_branch_id = None
+        proxy.discussion_metadata = {}
+        proxy.created_at = datetime.utcnow()
+        proxy.updated_at = datetime.utcnow()
         proxy.messages = []
+        proxy.pruning_summary = None
+        proxy.pruning_point_id = None
         object.__setattr__(self, '_db_discussion', proxy)

     def _rebuild_message_index(self):
+        """Rebuilds the internal dictionary mapping message IDs to message objects."""
         if self._is_db_backed and self._session.is_active and self._db_discussion in self._session:
             self._session.refresh(self._db_discussion, ['messages'])
         self._message_index = {msg.id: msg for msg in self._db_discussion.messages}

     def touch(self):
+        """Marks the discussion as updated and saves it if autosave is enabled."""
         setattr(self._db_discussion, 'updated_at', datetime.utcnow())
         if self._is_db_backed and self.autosave:
             self.commit()

     def commit(self):
+        """Commits all pending changes to the database.
+
+        This includes new/modified discussion attributes and any pending message deletions.
+        """
         if not self._is_db_backed or not self._session:
             return
+
         if self._messages_to_delete_from_db:
             for msg_id in self._messages_to_delete_from_db:
                 msg_to_del = self._session.get(self.db_manager.MessageModel, msg_id)
                 if msg_to_del:
                     self._session.delete(msg_to_del)
             self._messages_to_delete_from_db.clear()
+
         try:
             self._session.commit()
             self._rebuild_message_index()
@@ -308,15 +494,34 @@ class LollmsDiscussion:
             raise e

     def close(self):
+        """Commits any final changes and closes the database session."""
         if self._session:
             self.commit()
             self._session.close()

     def add_message(self, **kwargs) -> LollmsMessage:
-
-
+        """Adds a new message to the discussion.
+
+        Args:
+            **kwargs: Attributes for the new message (e.g., sender, content, parent_id).
+
+        Returns:
+            The newly created LollmsMessage instance.
+        """
+        msg_id = kwargs.get('id', str(uuid.uuid4()))
+        parent_id = kwargs.get('parent_id', self.active_branch_id)
+
+        message_data = {
+            'id': msg_id,
+            'parent_id': parent_id,
+            'discussion_id': self.id,
+            'created_at': datetime.utcnow(),
+            **kwargs
+        }
+
         if 'metadata' in message_data:
             message_data['message_metadata'] = message_data.pop('metadata')
+
         if self._is_db_backed:
             valid_keys = {c.name for c in self.db_manager.MessageModel.__table__.columns}
             filtered_data = {k: v for k, v in message_data.items() if k in valid_keys}
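
Note how `add_message` defaults `parent_id` to the active branch tip (and, as the next hunk shows, moves the tip to the new message): that is what makes tree-shaped branching work. A sketch of pure message bookkeeping with an in-memory discussion; passing `lollms_client=None` is an assumption that holds here only because no generation or token counting is invoked:

```python
# Sketch: in-memory discussion (no db_manager); real chat() calls need a client.
from lollms_client.lollms_discussion import LollmsDiscussion

d = LollmsDiscussion.create_new(lollms_client=None)
m1 = d.add_message(sender="user", sender_type="user", content="Hi")
m2 = d.add_message(sender="assistant", sender_type="assistant", content="Hello!")
assert m2.parent_id == m1.id            # parent defaulted to the branch tip
branch = d.get_branch(d.active_branch_id)
print([m.content for m in branch])      # ['Hi', 'Hello!']
```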
@@ -327,338 +532,438 @@ class LollmsDiscussion:
         else:
             new_msg_orm = SimpleNamespace(**message_data)
             self._db_discussion.messages.append(new_msg_orm)
-
+
+        self._message_index[msg_id] = new_msg_orm
+        self.active_branch_id = msg_id
         self.touch()
         return LollmsMessage(self, new_msg_orm)

     def get_branch(self, leaf_id: Optional[str]) -> List[LollmsMessage]:
+        """Traces a branch of the conversation from a leaf message back to the root.
+
+        Args:
+            leaf_id: The ID of the message at the end of the branch.
+
+        Returns:
+            A list of LollmsMessage objects, ordered from the root to the leaf.
+        """
         if not leaf_id:
             return []
-
+
+        branch_orms = []
+        current_id = leaf_id
         while current_id and current_id in self._message_index:
             msg_orm = self._message_index[current_id]
             branch_orms.append(msg_orm)
             current_id = msg_orm.parent_id
+
         return [LollmsMessage(self, orm) for orm in reversed(branch_orms)]

-
+
+    def chat(
+        self,
+        user_message: str,
+        personality: Optional['LollmsPersonality'] = None,
+        use_mcps: Union[None, bool, List[str]] = None,
+        use_data_store: Union[None, Dict[str, Callable]] = None,
+        add_user_message: bool = True,
+        max_reasoning_steps: int = 10,
+        images: Optional[List[str]] = None,
+        **kwargs
+    ) -> Dict[str, 'LollmsMessage']:
+        """Main interaction method that can invoke the dynamic, multi-modal agent.
+
+        This method orchestrates the entire response generation process. It can
+        trigger a simple, direct chat with the language model, or it can invoke
+        the powerful `generate_with_mcp_rag` agent.
+
+        When an agentic turn is used, the agent's full reasoning process (the
+        `final_scratchpad`), tool calls, and any retrieved RAG sources are
+        automatically stored in the resulting AI message object for full persistence
+        and auditability. It also handles clarification requests from the agent.
+
+        Args:
+            user_message: The new message from the user.
+            personality: An optional LollmsPersonality to use for the response,
+                which can influence system prompts and other behaviors.
+            use_mcps: Controls MCP tool usage for the agent. Can be None (disabled),
+                True (all tools), or a list of specific tool names.
+            use_data_store: Controls RAG usage for the agent. A dictionary mapping
+                store names to their query callables.
+            add_user_message: If True, a new user message is created from the prompt.
+                If False, it assumes regeneration on the current active
+                user message.
+            max_reasoning_steps: The maximum number of reasoning cycles for the agent
+                before it must provide a final answer.
+            images: A list of base64-encoded images provided by the user, which will
+                be passed to the agent or a multi-modal LLM.
+            **kwargs: Additional keyword arguments passed to the underlying generation
+                methods, such as 'streaming_callback'.
+
+        Returns:
+            A dictionary with 'user_message' and 'ai_message' LollmsMessage objects,
+            where the 'ai_message' will contain rich metadata if an agentic turn was used.
+        """
         if self.max_context_size is not None:
             self.summarize_and_prune(self.max_context_size)

-
-
-
-
-
-
-
-
-
+        # Step 1: Add user message, now including any images.
+        if add_user_message:
+            # Pass kwargs through to capture images and other potential message attributes
+            user_msg = self.add_message(
+                sender="user",
+                sender_type="user",
+                content=user_message,
+                images=images,
+                **kwargs # Use kwargs to allow other fields to be set from the caller
+            )
+        else: # Regeneration logic
+            if self.active_branch_id not in self._message_index:
+                raise ValueError("Regeneration failed: active branch tip not found or is invalid.")
+            user_msg_orm = self._message_index[self.active_branch_id]
+            if user_msg_orm.sender_type != 'user':
+                raise ValueError(f"Regeneration failed: active branch tip is a '{user_msg_orm.sender_type}' message, not 'user'.")
+            user_msg = LollmsMessage(self, user_msg_orm)
+            # For regeneration, we use the images from the original user message
+            images = user_msg.images
+
+        # Step 2: Determine if this is a simple chat or a complex agentic turn.
+        is_agentic_turn = (use_mcps is not None and use_mcps) or (use_data_store is not None and use_data_store)

-
-        self.system_prompt = f"{original_system_prompt or ''}\n\n--- Relevant Information ---\n{rag_context}\n---"
-
-        from lollms_client.lollms_types import MSG_TYPE
-        is_streaming = "streaming_callback" in kwargs and kwargs.get("streaming_callback") is not None
+        start_time = datetime.now()

+        agent_result = None
+        final_scratchpad = None
         final_raw_response = ""
-
+        final_content = ""
+
+        # Step 3: Execute the appropriate generation logic.
+        if is_agentic_turn:
+            # --- AGENTIC TURN ---
+            agent_result = self.lollmsClient.generate_with_mcp_rag(
+                prompt=user_message,
+                use_mcps=use_mcps,
+                use_data_store=use_data_store,
+                max_reasoning_steps=max_reasoning_steps,
+                images=images,
+                **kwargs
+            )
+            final_content = agent_result.get("final_answer", "The agent did not produce a final answer.")
+            final_scratchpad = agent_result.get("final_scratchpad", "")
+            final_raw_response = json.dumps(agent_result, indent=2)

-        if personality and personality.script_module and hasattr(personality.script_module, 'run'):
-            try:
-                print(f"[{personality.name}] Running custom script...")
-                final_raw_response = personality.script_module.run(self, kwargs.get("streaming_callback"))
-            except Exception as e:
-                print(f"[{personality.name}] Error in custom script: {e}")
-                final_raw_response = f"Error executing personality script: {e}"
         else:
-
-            if
-
-
-
-                nonlocal token_buffer, in_thought_block
-                raw_response_accumulator.append(token)
-                continue_streaming = True
-                if token: token_buffer += token
-                while True:
-                    if in_thought_block:
-                        end_tag_pos = token_buffer.find("</think>")
-                        if end_tag_pos != -1:
-                            thought_chunk = token_buffer[:end_tag_pos]
-                            if self.show_thoughts and original_callback and thought_chunk:
-                                if not original_callback(thought_chunk, MSG_TYPE.MSG_TYPE_THOUGHT_CHUNK): continue_streaming = False
-                            in_thought_block, token_buffer = False, token_buffer[end_tag_pos + len("</think>"):]
-                        else:
-                            if self.show_thoughts and original_callback and token_buffer:
-                                if not original_callback(token_buffer, MSG_TYPE.MSG_TYPE_THOUGHT_CHUNK): continue_streaming = False
-                            token_buffer = ""; break
-                    else:
-                        start_tag_pos = token_buffer.find("<think>")
-                        if start_tag_pos != -1:
-                            response_chunk = token_buffer[:start_tag_pos]
-                            if response_chunk:
-                                full_response_parts.append(response_chunk)
-                                if original_callback:
-                                    if not original_callback(response_chunk, MSG_TYPE.MSG_TYPE_CHUNK): continue_streaming = False
-                            in_thought_block, token_buffer = True, token_buffer[start_tag_pos + len("<think>"):]
-                        else:
-                            if token_buffer:
-                                full_response_parts.append(token_buffer)
-                                if original_callback:
-                                    if not original_callback(token_buffer, MSG_TYPE.MSG_TYPE_CHUNK): continue_streaming = False
-                            token_buffer = ""; break
-                return continue_streaming
-            kwargs["streaming_callback"], kwargs["stream"] = accumulating_callback, True
-            self.lollmsClient.chat(self, **kwargs)
-            final_raw_response = "".join(raw_response_accumulator)
-        else:
-            kwargs["stream"] = False
-            final_raw_response = self.lollmsClient.chat(self, **kwargs) or ""
+            # --- SIMPLE CHAT TURN ---
+            # For simple chat, we also need to consider images if the model is multi-modal
+            final_raw_response = self.lollmsClient.chat(self, images=images, **kwargs) or ""
+            final_content = self.lollmsClient.remove_thinking_blocks(final_raw_response)
+            final_scratchpad = None # No agentic scratchpad in a simple turn

+        # Step 4: Post-generation processing and statistics.
         end_time = datetime.now()
-        if rag_context:
-            self.system_prompt = original_system_prompt
-
         duration = (end_time - start_time).total_seconds()
-        thoughts_match = re.search(r"<think>(.*?)</think>", final_raw_response, re.DOTALL)
-        thoughts_text = thoughts_match.group(1).strip() if thoughts_match else None
-        final_content = self.lollmsClient.remove_thinking_blocks(final_raw_response)
         token_count = self.lollmsClient.count_tokens(final_content)
         tok_per_sec = (token_count / duration) if duration > 0 else 0
-
-        ai_message_obj = self.add_message(
-            sender="assistant", sender_type="assistant", content=final_content,
-            raw_content=final_raw_response, thoughts=thoughts_text, tokens=token_count,
-            binding_name=self.lollmsClient.binding.binding_name, model_name=self.lollmsClient.binding.model_name,
-            generation_speed=tok_per_sec
-        )

-
-
-
-
-
-
-
-
-
-
-
-                prompt = f"""The user wants to know: "{user_prompt}"\nHere is the first part of the document (chunk 1 of {total_chunks}). \nRead it and create a detailed summary of all information relevant to the user's prompt.\n\nDOCUMENT CHUNK:\n---\n{chunk}\n---\nSUMMARY:"""
-            else:
-                prompt = f"""The user wants to know: "{user_prompt}"\nYou are processing a large document sequentially. Here is the summary of the previous chunks and the content of the next chunk ({i+1} of {total_chunks}).\nUpdate your summary by integrating new relevant information from the new chunk. Do not repeat information you already have. Output ONLY the new, updated, complete summary.\n\nPREVIOUS SUMMARY:\n---\n{current_summary}\n---\n\nNEW DOCUMENT CHUNK:\n---\n{chunk}\n---\nUPDATED SUMMARY:"""
-            current_summary = self.lollmsClient.generate_text(prompt, **kwargs).strip()
-        final_prompt = f"""Based on the following comprehensive summary of a document, provide a final answer to the user's original prompt.\nUser's prompt: "{user_prompt}"\n\nCOMPREHENSIVE SUMMARY:\n---\n{current_summary}\n---\nFINAL ANSWER:"""
-        final_answer = self.lollmsClient.generate_text(final_prompt, **kwargs).strip()
+        # Step 5: Collect metadata from the agentic turn for storage.
+        message_meta = {}
+        if is_agentic_turn and isinstance(agent_result, dict):
+            if "tool_calls" in agent_result:
+                message_meta["tool_calls"] = agent_result["tool_calls"]
+            if "sources" in agent_result:
+                message_meta["sources"] = agent_result["sources"]
+            if agent_result.get("clarification_required", False):
+                message_meta["clarification_required"] = True
+
+        # Step 6: Add the final AI message to the discussion.
         ai_message_obj = self.add_message(
-            sender=
-
+            sender=personality.name if personality else "assistant",
+            sender_type="assistant",
+            content=final_content,
+            raw_content=final_raw_response,
+            # Store the agent's full reasoning log in the message's dedicated scratchpad field
+            scratchpad=final_scratchpad,
+            tokens=token_count,
+            generation_speed=tok_per_sec,
+            parent_id=user_msg.id,
+            metadata=message_meta
         )
-
+
+        if self._is_db_backed and self.autosave:
             self.commit()
-
+
+        return {"user_message": user_msg, "ai_message": ai_message_obj}

-    def regenerate_branch(self, **kwargs) -> LollmsMessage:
+    def regenerate_branch(self, **kwargs) -> Dict[str, 'LollmsMessage']:
+        """Regenerates the last AI response in the active branch.
+
+        It deletes the previous AI response and calls chat() again with the
+        same user prompt.
+
+        Args:
+            **kwargs: Additional arguments for the chat() method.
+
+        Returns:
+            A dictionary with the user and the newly generated AI message.
+        """
         if not self.active_branch_id or self.active_branch_id not in self._message_index:
             raise ValueError("No active message to regenerate from.")
+
         last_message_orm = self._message_index[self.active_branch_id]
-
-
-
-
-
-
-
-
-
-
+
+        if last_message_orm.sender_type == 'assistant':
+            parent_id = last_message_orm.parent_id
+            if not parent_id:
+                raise ValueError("Cannot regenerate from an assistant message with no parent.")
+
+            last_message_id = last_message_orm.id
+            self._db_discussion.messages.remove(last_message_orm)
+            del self._message_index[last_message_id]
+            if self._is_db_backed:
+                self._messages_to_delete_from_db.add(last_message_id)
+
+            self.active_branch_id = parent_id
+            self.touch()

+        prompt_to_regenerate = self._message_index[self.active_branch_id].content
+        return self.chat(user_message=prompt_to_regenerate, add_user_message=False, **kwargs)
     def delete_branch(self, message_id: str):
+        """Deletes a message and its entire descendant branch.
+
+        This method removes the specified message and any messages that have it
+        as a parent or an ancestor. After deletion, the active branch is moved
+        to the parent of the deleted message.
+
+        This operation is only supported for database-backed discussions.
+
+        Args:
+            message_id: The ID of the message at the root of the branch to be deleted.
+
+        Raises:
+            NotImplementedError: If the discussion is not database-backed.
+            ValueError: If the message ID is not found in the discussion.
+        """
         if not self._is_db_backed:
             raise NotImplementedError("Branch deletion is only supported for database-backed discussions.")
+
         if message_id not in self._message_index:
-            raise ValueError("Message not found.")
-        msg_to_delete = self._session.query(self.db_manager.MessageModel).filter_by(id=message_id).first()
-        if msg_to_delete:
-            self.active_branch_id = msg_to_delete.parent_id
-            self._session.delete(msg_to_delete)
-            self.commit()
+            raise ValueError(f"Message with ID '{message_id}' not found in the discussion.")

-
-
-
-
-
+        # --- 1. Identify all messages to delete ---
+        # We start with the target message and find all of its descendants.
+        messages_to_delete_ids = set()
+        queue = [message_id] # A queue for breadth-first search of descendants
+
+        while queue:
+            current_id = queue.pop(0)
+            if current_id in messages_to_delete_ids:
+                continue # Already processed
+
+            messages_to_delete_ids.add(current_id)

-
-
+            # Find all direct children of the current message
+            children = [msg.id for msg in self._db_discussion.messages if msg.parent_id == current_id]
+            queue.extend(children)
+
+        # --- 2. Get the parent of the starting message to reset the active branch ---
+        original_message_orm = self._message_index[message_id]
+        new_active_branch_id = original_message_orm.parent_id
+
+        # --- 3. Perform the deletion ---
+        # Remove from the ORM object's list
+        self._db_discussion.messages = [
+            msg for msg in self._db_discussion.messages if msg.id not in messages_to_delete_ids
+        ]
+
+        # Remove from the quick-access index
+        for mid in messages_to_delete_ids:
+            if mid in self._message_index:
+                del self._message_index[mid]
+
+        # Add to the set of messages to be deleted from the DB on next commit
+        self._messages_to_delete_from_db.update(messages_to_delete_ids)

-
-
-        if self.
-
-
-
-        return "\n\n".join(parts) if parts else None
+        # --- 4. Update the active branch ---
+        # If we deleted the branch that was active, move to its parent.
+        if self.active_branch_id in messages_to_delete_ids:
+            self.active_branch_id = new_active_branch_id
+
+        self.touch() # Mark discussion as updated and save if autosave is on

+        print(f"Marked branch starting at {message_id} ({len(messages_to_delete_ids)} messages) for deletion.")
+
     def export(self, format_type: str, branch_tip_id: Optional[str] = None, max_allowed_tokens: Optional[int] = None) -> Union[List[Dict], str]:
+        """Exports the discussion history into a specified format.
+
+        This method can format the conversation for different backends like OpenAI,
+        Ollama, or the native `lollms_text` format. It intelligently handles
+        context limits and non-destructive pruning summaries.
+
+        Args:
+            format_type: The target format. Can be "lollms_text", "openai_chat",
+                or "ollama_chat".
+            branch_tip_id: The ID of the message to use as the end of the context.
+                Defaults to the active branch ID.
+            max_allowed_tokens: The maximum number of tokens the final prompt can contain.
+                This is primarily used by "lollms_text".
+
+        Returns:
+            A string for "lollms_text" or a list of dictionaries for "openai_chat"
+            and "ollama_chat".
+
+        Raises:
+            ValueError: If an unsupported format_type is provided.
+        """
         branch_tip_id = branch_tip_id or self.active_branch_id
         if not branch_tip_id and format_type in ["lollms_text", "openai_chat", "ollama_chat"]:
             return "" if format_type == "lollms_text" else []
-
+
+        branch = self.get_branch(branch_tip_id)
+        full_system_prompt = self.system_prompt # Simplified for clarity
+        participants = self.participants or {}

-        def get_full_content(msg: LollmsMessage) -> str:
+        def get_full_content(msg: 'LollmsMessage') -> str:
             content_to_use = msg.content
-
-
-                content_to_use = re.sub(r"<think>.*?</think>", f"<think>{self.thought_placeholder}</think>", msg.raw_content, flags=re.DOTALL)
-            else:
-                content_to_use = msg.raw_content
-
-            parts = [f"--- Internal Scratchpad ---\n{msg.scratchpad.strip()}\n---"] if msg.scratchpad and msg.scratchpad.strip() else []
-            parts.append(content_to_use.strip())
-            return "\n".join(parts)
+            # You can expand this logic to include thoughts, scratchpads etc. based on settings
+            return content_to_use.strip()

+        # --- NATIVE LOLLMS_TEXT FORMAT ---
         if format_type == "lollms_text":
-
+            # --- FIX STARTS HERE ---
+            final_prompt_parts = []
+            message_parts = [] # Temporary list for correctly ordered messages
+
+            current_tokens = 0
+            messages_to_render = branch
+
+            # 1. Handle non-destructive pruning summary
+            summary_text = ""
+            if self.pruning_summary and self.pruning_point_id:
+                pruning_index = -1
+                for i, msg in enumerate(branch):
+                    if msg.id == self.pruning_point_id:
+                        pruning_index = i
+                        break
+                if pruning_index != -1:
+                    messages_to_render = branch[pruning_index:]
+                    summary_text = f"!@>system:\n--- Conversation Summary ---\n{self.pruning_summary.strip()}\n"
+
+            # 2. Add main system prompt to the final list
+            sys_msg_text = ""
             if full_system_prompt:
-                sys_msg_text = f"!@>system:\n{full_system_prompt}\n"
+                sys_msg_text = f"!@>system:\n{full_system_prompt.strip()}\n"
                 sys_tokens = self.lollmsClient.count_tokens(sys_msg_text)
                 if max_allowed_tokens is None or sys_tokens <= max_allowed_tokens:
-
+                    final_prompt_parts.append(sys_msg_text)
                     current_tokens += sys_tokens
-
+
+            # 3. Add pruning summary (if it exists) to the final list
+            if summary_text:
+                summary_tokens = self.lollmsClient.count_tokens(summary_text)
+                if max_allowed_tokens is None or current_tokens + summary_tokens <= max_allowed_tokens:
+                    final_prompt_parts.append(summary_text)
+                    current_tokens += summary_tokens
+
+            # 4. Build the message list in correct order, respecting token limits
+            for msg in reversed(messages_to_render):
                 sender_str = msg.sender.replace(':', '').replace('!@>', '')
                 content = get_full_content(msg)
                 if msg.images:
                     content += f"\n({len(msg.images)} image(s) attached)"
                 msg_text = f"!@>{sender_str}:\n{content}\n"
                 msg_tokens = self.lollmsClient.count_tokens(msg_text)
+
                 if max_allowed_tokens is not None and current_tokens + msg_tokens > max_allowed_tokens:
                     break
-
+
+                # Always insert at the beginning of the temporary list
+                message_parts.insert(0, msg_text)
                 current_tokens += msg_tokens
-
+
+            # 5. Combine system/summary prompts with the message parts
+            final_prompt_parts.extend(message_parts)
+            return "".join(final_prompt_parts).strip()
+            # --- FIX ENDS HERE ---

+        # --- OPENAI & OLLAMA CHAT FORMATS (remains the same and is correct) ---
         messages = []
         if full_system_prompt:
             messages.append({"role": "system", "content": full_system_prompt})
+
         for msg in branch:
-
+            if msg.sender_type == 'user':
+                role = participants.get(msg.sender, "user")
+            else:
+                role = participants.get(msg.sender, "assistant")
+
+            content, images = get_full_content(msg), msg.images or []
+
             if format_type == "openai_chat":
                 if images:
                     content_parts = [{"type": "text", "text": content}] if content else []
                     for img in images:
-
+                        img_data = img['data']
+                        url = f"data:image/jpeg;base64,{img_data}" if img['type'] == 'base64' else img_data
+                        content_parts.append({"type": "image_url", "image_url": {"url": url, "detail": "auto"}})
                     messages.append({"role": role, "content": content_parts})
                 else:
                     messages.append({"role": role, "content": content})
+
             elif format_type == "ollama_chat":
                 message_dict = {"role": role, "content": content}
                 base64_images = [img['data'] for img in images if img['type'] == 'base64']
                 if base64_images:
                     message_dict["images"] = base64_images
                 messages.append(message_dict)
+
             else:
                 raise ValueError(f"Unsupported export format_type: {format_type}")
+
         return messages
+

     def summarize_and_prune(self, max_tokens: int, preserve_last_n: int = 4):
+        """Non-destructively prunes the discussion by summarizing older messages.
+
+        This method does NOT delete messages. Instead, it generates a summary of
+        the older parts of the conversation and bookmarks the point from which
+        the full conversation should resume. The `export()` method then uses this
+        information to build a context-window-friendly prompt.
+
+        Args:
+            max_tokens: The token limit that triggers the pruning process.
+            preserve_last_n: The number of recent messages to keep in full detail.
+        """
         branch_tip_id = self.active_branch_id
         if not branch_tip_id:
             return
-
+
+        current_formatted_text = self.export("lollms_text", branch_tip_id, 999999)
+        current_tokens = self.lollmsClient.count_tokens(current_formatted_text)
+
         if current_tokens <= max_tokens:
             return
+
         branch = self.get_branch(branch_tip_id)
         if len(branch) <= preserve_last_n:
             return
+
         messages_to_prune = branch[:-preserve_last_n]
+        pruning_point_message = branch[-preserve_last_n]
+
         text_to_summarize = "\n\n".join([f"{m.sender}: {m.content}" for m in messages_to_prune])
-        summary_prompt = f"Concisely summarize this conversation excerpt:\n---\n{text_to_summarize}\n---\nSUMMARY:"
+        summary_prompt = f"Concisely summarize this conversation excerpt, capturing all key facts, questions, and decisions:\n---\n{text_to_summarize}\n---\nSUMMARY:"
+
         try:
-
+            print("\n[INFO] Context window is full. Summarizing older messages...")
+            summary = self.lollmsClient.generate_text(summary_prompt, n_predict=512, temperature=0.1)
         except Exception as e:
             print(f"\n[WARNING] Pruning failed, couldn't generate summary: {e}")
             return
-        self.scratchpad = f"{self.scratchpad}\n\n--- Summary of earlier conversation ---\n{summary.strip()}".strip()
-        pruned_ids = {msg.id for msg in messages_to_prune}
-        if self._is_db_backed:
-            self._messages_to_delete_from_db.update(pruned_ids)
-            self._db_discussion.messages = [m for m in self._db_discussion.messages if m.id not in pruned_ids]
-        else:
-            self._db_discussion.messages = [m for m in self._db_discussion.messages if m.id not in pruned_ids]
-        self._rebuild_message_index()
-        self.touch()
-        print(f"\n[INFO] Discussion auto-pruned. {len(messages_to_prune)} messages summarized.")
-
-    def to_dict(self):
-        return {
-            "id": self.id, "system_prompt": self.system_prompt, "participants": self.participants,
-            "active_branch_id": self.active_branch_id, "metadata": self.metadata, "scratchpad": self.scratchpad,
-            "messages": [{ 'id': m.id, 'parent_id': m.parent_id, 'discussion_id': m.discussion_id, 'sender': m.sender,
-                           'sender_type': m.sender_type, 'content': m.content, 'scratchpad': m.scratchpad, 'images': m.images,
-                           'created_at': m.created_at.isoformat(), 'metadata': m.metadata } for m in self.messages],
-            "created_at": self.created_at.isoformat() if self.created_at else None,
-            "updated_at": self.updated_at.isoformat() if self.updated_at else None
-        }

-
-        self.
-        self.
-
-        self.
-
-        if 'created_at' in msg_data and isinstance(msg_data['created_at'], str):
-            try:
-                msg_data['created_at'] = datetime.fromisoformat(msg_data['created_at'])
-            except ValueError:
-                msg_data['created_at'] = datetime.utcnow()
-        self.add_message(**msg_data)
-        self.created_at = datetime.fromisoformat(data['created_at']) if data.get('created_at') else datetime.utcnow()
-        self.updated_at = datetime.fromisoformat(data['updated_at']) if data.get('updated_at') else self.created_at
-
-    @staticmethod
-    def migrate(lollms_client: 'LollmsClient', db_manager: LollmsDataManager, folder_path: Union[str, Path]):
-        folder = Path(folder_path)
-        if not folder.is_dir():
-            print(f"Error: Path '{folder}' is not a valid directory.")
-            return
-        print(f"\n--- Starting Migration from '{folder}' ---")
-        files = list(folder.glob("*.json")) + list(folder.glob("*.yaml"))
-        with db_manager.get_session() as session:
-            valid_disc_keys = {c.name for c in db_manager.DiscussionModel.__table__.columns}
-            valid_msg_keys = {c.name for c in db_manager.MessageModel.__table__.columns}
-            for i, file_path in enumerate(files):
-                print(f"Migrating file {i+1}/{len(files)}: {file_path.name} ... ", end="")
-                try:
-                    data = yaml.safe_load(file_path.read_text(encoding='utf-8'))
-                    discussion_id = data.get("id", str(uuid.uuid4()))
-                    if session.query(db_manager.DiscussionModel).filter_by(id=discussion_id).first():
-                        print("SKIPPED (already exists)")
-                        continue
-                    discussion_data = data.copy()
-                    if 'metadata' in discussion_data:
-                        discussion_data['discussion_metadata'] = discussion_data.pop('metadata')
-                    for key in ['created_at', 'updated_at']:
-                        if key in discussion_data and isinstance(discussion_data[key], str):
-                            discussion_data[key] = datetime.fromisoformat(discussion_data[key])
-                    db_discussion = db_manager.DiscussionModel(**{k: v for k, v in discussion_data.items() if k in valid_disc_keys})
-                    session.add(db_discussion)
-                    for msg_data in data.get("messages", []):
-                        msg_data['discussion_id'] = db_discussion.id
-                        if 'metadata' in msg_data:
-                            msg_data['message_metadata'] = msg_data.pop('metadata')
-                        if 'created_at' in msg_data and isinstance(msg_data['created_at'], str):
-                            msg_data['created_at'] = datetime.fromisoformat(msg_data['created_at'])
-                        msg_orm = db_manager.MessageModel(**{k: v for k, v in msg_data.items() if k in valid_msg_keys})
-                        session.add(msg_orm)
-                    session.flush()
-                    print("OK")
-                except Exception as e:
-                    print(f"FAILED. Error: {e}")
-                    session.rollback()
-                    continue
-            session.commit()
-        print("--- Migration Finished ---")
+        current_summary = self.pruning_summary or ""
+        self.pruning_summary = f"{current_summary}\n\n--- Summary of earlier conversation ---\n{summary.strip()}".strip()
+        self.pruning_point_id = pruning_point_message.id
+
+        self.touch()
+        print(f"[INFO] Discussion auto-pruned. {len(messages_to_prune)} messages summarized. History preserved.")