lollms-client 0.29.1__py3-none-any.whl → 0.29.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/llamacpp/__init__.py +5 -2
- lollms_client/lollms_core.py +35 -10
- lollms_client/lollms_discussion.py +86 -65
- {lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/METADATA +86 -34
- {lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/RECORD +9 -9
- {lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/WHEEL +0 -0
- {lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/top_level.txt +0 -0
lollms_client/__init__.py
CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager

-__version__ = "0.29.1"
+__version__ = "0.29.3" # Updated version

 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
lollms_client/llm_bindings/llamacpp/__init__.py
CHANGED
@@ -352,8 +352,11 @@ class LlamaCppServerBinding(LollmsLLMBinding):


     def load_model(self, model_name_or_path: str) -> bool:
-
-
+        try:
+            resolved_model_path = self._resolve_model_path(model_name_or_path)
+        except Exception as ex:
+            trace_exception(ex)
+            return False
         # Determine the clip_model_path for this server instance
         # Priority: 1. Explicit `clip_model_path` from init (if exists) 2. Auto-detection
         final_clip_model_path: Optional[Path] = None
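With this change, a failure to resolve the model path no longer escapes `load_model` as an exception; it is traced via `trace_exception` and reported as a `False` return value. A minimal caller-side sketch (not from the package) of reacting to that boolean result; `binding` and the model name are placeholder assumptions:

```python
# Hypothetical usage; `binding` stands for an already-constructed LlamaCppServerBinding
# and "my-model.gguf" is a placeholder model name.
model_name = "my-model.gguf"
if not binding.load_model(model_name):
    # Resolution errors are already traced inside load_model in 0.29.3,
    # so the caller only needs to handle the boolean outcome.
    raise RuntimeError(f"Could not load model: {model_name}")
```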
lollms_client/lollms_core.py
CHANGED
@@ -147,9 +147,6 @@ class LollmsClient():
             available = self.binding_manager.get_available_bindings()
             raise ValueError(f"Failed to create LLM binding: {binding_name}. Available: {available}")

-        # Determine the effective host address (use LLM binding's if initial was None)
-        effective_host_address = self.host_address
-
         # --- Modality Binding Setup ---
         self.tts_binding_manager = LollmsTTSBindingManager(tts_bindings_dir)
         self.tti_binding_manager = LollmsTTIBindingManager(tti_bindings_dir)
@@ -2961,7 +2958,6 @@ Provide the final aggregated answer in {output_format} format, directly addressi
             callback("Deep analysis complete.", MSG_TYPE.MSG_TYPE_STEP_END)
         return final_output

-
     def summarize(
         self,
         text_to_summarize: str,
@@ -2990,6 +2986,7 @@ Provide the final aggregated answer in {output_format} format, directly addressi
                 is not lost at the boundaries. Defaults to 250.
             streaming_callback (Optional[Callable], optional): A callback function to receive real-time updates
                 on the process (e.g., which chunk is being processed).
+                It receives a message, a message type, and optional metadata.
                 Defaults to None.
             **kwargs: Additional keyword arguments to be passed to the generation method (e.g., temperature, top_p).

@@ -3004,12 +3001,17 @@ Provide the final aggregated answer in {output_format} format, directly addressi

         if len(tokens) <= chunk_size_tokens:
             if streaming_callback:
-                streaming_callback("Text is short enough for a single summary.", MSG_TYPE.MSG_TYPE_STEP)
+                streaming_callback("Text is short enough for a single summary.", MSG_TYPE.MSG_TYPE_STEP, {"progress": 0})

             prompt_objective = contextual_prompt or "Provide a comprehensive summary of the following text."
             final_prompt = f"{prompt_objective}\n\n--- Text to Summarize ---\n{text_to_summarize}"

-
+            summary = self.generate_text(final_prompt, **kwargs)
+
+            if streaming_callback:
+                streaming_callback("Summary generated.", MSG_TYPE.MSG_TYPE_STEP, {"progress": 100})
+
+            return summary

         # --- Stage 1: Chunking and Independent Summarization ---
         chunks = []
@@ -3021,13 +3023,21 @@ Provide the final aggregated answer in {output_format} format, directly addressi

         chunk_summaries = []

+        # Total steps include each chunk plus the final synthesis step
+        total_steps = len(chunks) + 1
+
         # Define the prompt for summarizing each chunk
         summarization_objective = contextual_prompt or "Summarize the key points of the following text excerpt."
         chunk_summary_prompt_template = f"{summarization_objective}\n\n--- Text Excerpt ---\n{{chunk_text}}"

         for i, chunk in enumerate(chunks):
+            progress_before = (i / total_steps) * 100
             if streaming_callback:
-                streaming_callback(
+                streaming_callback(
+                    f"Summarizing chunk {i + 1} of {len(chunks)}...",
+                    MSG_TYPE.MSG_TYPE_STEP_START,
+                    {"id": f"chunk_{i+1}", "progress": progress_before}
+                )

             prompt = chunk_summary_prompt_template.format(chunk_text=chunk)

@@ -3035,8 +3045,14 @@ Provide the final aggregated answer in {output_format} format, directly addressi
                 # Generate summary for the current chunk
                 chunk_summary = self.generate_text(prompt, **kwargs)
                 chunk_summaries.append(chunk_summary)
+
+                progress_after = ((i + 1) / total_steps) * 100
                 if streaming_callback:
-                    streaming_callback(
+                    streaming_callback(
+                        f"Chunk {i + 1} summarized. Progress: {progress_after:.0f}%",
+                        MSG_TYPE.MSG_TYPE_STEP_END,
+                        {"id": f"chunk_{i+1}", "summary_snippet": chunk_summary[:100], "progress": progress_after}
+                    )
             except Exception as e:
                 trace_exception(e)
                 if streaming_callback:
@@ -3045,8 +3061,13 @@ Provide the final aggregated answer in {output_format} format, directly addressi
                 chunk_summaries.append(f"[Error summarizing chunk {i+1}]")

         # --- Stage 2: Final Synthesis of All Chunk Summaries ---
+        progress_before_synthesis = (len(chunks) / total_steps) * 100
         if streaming_callback:
-            streaming_callback(
+            streaming_callback(
+                "Synthesizing all chunk summaries into a final version...",
+                MSG_TYPE.MSG_TYPE_STEP_START,
+                {"id": "final_synthesis", "progress": progress_before_synthesis}
+            )

         combined_summaries = "\n\n---\n\n".join(chunk_summaries)

@@ -3064,7 +3085,11 @@ Provide the final aggregated answer in {output_format} format, directly addressi
         final_summary = self.generate_text(final_synthesis_prompt, **kwargs)

         if streaming_callback:
-            streaming_callback(
+            streaming_callback(
+                "Final summary synthesized.",
+                MSG_TYPE.MSG_TYPE_STEP_END,
+                {"id": "final_synthesis", "progress": 100}
+            )

         return final_summary.strip()
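The reworked `summarize()` now reports progress through `streaming_callback` with `MSG_TYPE_STEP_START` / `MSG_TYPE_STEP_END` events whose metadata dict carries keys such as `id`, `progress`, and `summary_snippet`. A minimal sketch (not part of the package) of a callback that consumes this richer signature; the continue-flag return value is a defensive assumption:

```python
from typing import Optional

from lollms_client import MSG_TYPE  # exported at package level, as the README examples in this diff show


def progress_printer(message: str, msg_type: MSG_TYPE, metadata: Optional[dict] = None) -> bool:
    # `metadata` is the optional third argument that 0.29.3 passes from summarize();
    # the "id", "progress" and "summary_snippet" keys come from the diff above.
    md = metadata or {}
    progress = md.get("progress")
    prefix = f"[{progress:.0f}%] " if progress is not None else ""
    print(f"{prefix}{md.get('id', 'step')}: {message}")
    return True  # assumption: a truthy return keeps generation going if the caller checks it


# Usage sketch (lc is an assumed LollmsClient instance, long_text a long document):
# summary = lc.summarize(long_text, streaming_callback=progress_printer)
```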
lollms_client/lollms_discussion.py
CHANGED
@@ -1180,6 +1180,7 @@ class LollmsDiscussion:
             "- Key decisions or conclusions reached.\n"
             "- Important entities, projects, or topics mentioned that are likely to recur.\n"
             "Format the output as a concise list of bullet points. Be brief and factual. "
+            "Do not repeat information that is already in the User Data Zone or the Memory"
             "If no new, significant long-term information is present, output the single word: 'NOTHING'."
         )

@@ -1251,19 +1252,16 @@ class LollmsDiscussion:
         text_to_count = "\n".join(full_content)

         return self.lollmsClient.count_tokens(text_to_count)
-
     def get_context_status(self, branch_tip_id: Optional[str] = None) -> Dict[str, Any]:
         """
         Returns a detailed breakdown of the context size and its components.

-        This provides a comprehensive snapshot of the context usage
-
-
-        "lollms_text" export format, which is the format used for pruning calculations.
+        This provides a comprehensive snapshot of the context usage. It accurately calculates
+        the token count of the combined system context (prompt, all data zones, summary)
+        and the message history, reflecting how the `lollms_text` export format works.

         Args:
-            branch_tip_id: The ID of the message branch to measure. Defaults
-                to the active branch.
+            branch_tip_id: The ID of the message branch to measure. Defaults to the active branch.

         Returns:
             A dictionary with a detailed breakdown:
@@ -1271,72 +1269,97 @@ class LollmsDiscussion:
                 "max_tokens": int | None,
                 "current_tokens": int,
                 "zones": {
-                    "
-
-
-
-
-
-
+                    "system_context": {
+                        "content": str,
+                        "tokens": int,
+                        "breakdown": {
+                            "system_prompt": {"content": str, "tokens": int},
+                            "memory": {"content": str, "tokens": int},
+                            ...
+                        }
+                    },
+                    "message_history": {
+                        "content": str,
+                        "tokens": int,
+                        "message_count": int
+                    }
                 }
             }
-        Zones are only included if they contain content.
+        Zones and breakdown components are only included if they contain content.
         """
         result = {
             "max_tokens": self.max_context_size,
             "current_tokens": 0,
             "zones": {}
         }
-
+        tokenizer = self.lollmsClient.count_tokens

-        # 1. System
+        # --- 1. Assemble and Tokenize the Entire System Context Block ---
         system_prompt_text = (self._system_prompt or "").strip()
-
-
-
-
-
-
-
-
-
+        data_zone_text = self.get_full_data_zone()
+        pruning_summary_content = (self.pruning_summary or "").strip()
+
+        pruning_summary_block = ""
+        if pruning_summary_content and self.pruning_point_id:
+            pruning_summary_block = f"--- Conversation Summary ---\n{pruning_summary_content}"
+
+        full_system_content_parts = [
+            part for part in [system_prompt_text, data_zone_text, pruning_summary_block] if part
+        ]
+        full_system_content = "\n\n".join(full_system_content_parts).strip()
+
+        if full_system_content:
+            system_block = f"!@>system:\n{full_system_content}\n"
+            system_tokens = tokenizer(system_block)

-
-
-
-
-
-
-
+            breakdown = {}
+            if system_prompt_text:
+                breakdown["system_prompt"] = {
+                    "content": system_prompt_text,
+                    "tokens": tokenizer(system_prompt_text)
+                }
+
+            memory_text = (self.memory or "").strip()
+            if memory_text:
+                breakdown["memory"] = {
+                    "content": memory_text,
+                    "tokens": tokenizer(memory_text)
+                }

-
-
-
-
-
-        full_block = f"{header}{content_text}"
-        # In lollms_text format, zones are part of the system message, so we add separators
-        # This counts the standalone block.
-        tokens = self.lollmsClient.count_tokens(full_block)
-        result["zones"][name] = {
-            "content": content_text,
-            "tokens": tokens
+            user_data_text = (self.user_data_zone or "").strip()
+            if user_data_text:
+                breakdown["user_data_zone"] = {
+                    "content": user_data_text,
+                    "tokens": tokenizer(user_data_text)
                 }
-
-
-
-
-
-
-
-
-
-
-        "
+
+            discussion_data_text = (self.discussion_data_zone or "").strip()
+            if discussion_data_text:
+                breakdown["discussion_data_zone"] = {
+                    "content": discussion_data_text,
+                    "tokens": tokenizer(discussion_data_text)
+                }
+
+            personality_data_text = (self.personality_data_zone or "").strip()
+            if personality_data_text:
+                breakdown["personality_data_zone"] = {
+                    "content": personality_data_text,
+                    "tokens": tokenizer(personality_data_text)
+                }
+
+            if pruning_summary_content:
+                breakdown["pruning_summary"] = {
+                    "content": pruning_summary_content,
+                    "tokens": tokenizer(pruning_summary_content)
+                }
+
+            result["zones"]["system_context"] = {
+                "content": full_system_content,
+                "tokens": system_tokens,
+                "breakdown": breakdown
             }
-
-
-        # 4. Message History
+
+        # --- 2. Assemble and Tokenize the Message History Block ---
         branch_tip_id = branch_tip_id or self.active_branch_id
         messages_text = ""
         message_count = 0
@@ -1344,7 +1367,6 @@ class LollmsDiscussion:
         branch = self.get_branch(branch_tip_id)
         messages_to_render = branch

-        # Adjust for pruning to get the active set of messages
         if self.pruning_summary and self.pruning_point_id:
             pruning_index = -1
             for i, msg in enumerate(branch):
@@ -1367,19 +1389,18 @@ class LollmsDiscussion:
         message_count = len(messages_to_render)

         if messages_text:
-            tokens =
+            tokens = tokenizer(messages_text)
             result["zones"]["message_history"] = {
                 "content": messages_text,
                 "tokens": tokens,
                 "message_count": message_count
             }
-            total_tokens += tokens

-        #
-        # for maximum accuracy, as combining zones can slightly change tokenization.
+        # --- 3. Finalize the Total Count ---
         result["current_tokens"] = self.count_discussion_tokens("lollms_text", branch_tip_id)

         return result
+
     def switch_to_branch(self, branch_id):
         self.active_branch_id = branch_id

@@ -1416,4 +1437,4 @@ class LollmsDiscussion:
         new_metadata = (self.metadata or {}).copy()
         new_metadata[itemname] = item_value
         self.metadata = new_metadata
-        self.commit()
+        self.commit()
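A subtlety worth noting in the rewritten `get_context_status()`: `system_context["tokens"]` is counted over the fully assembled `!@>system:` block, while each `breakdown` entry is tokenized on its own, so the parts need not add up exactly to the whole (the header and the separators cost tokens, and tokenizing concatenated text can differ slightly). A small sketch, assuming `discussion` is an already-populated `LollmsDiscussion`:

```python
# Illustration only; `discussion` is assumed to be an existing, populated LollmsDiscussion.
status = discussion.get_context_status()
sys_ctx = status["zones"].get("system_context")
if sys_ctx:
    parts_total = sum(part["tokens"] for part in sys_ctx["breakdown"].values())
    print(f"combined system block : {sys_ctx['tokens']} tokens")
    print(f"sum of breakdown parts: {parts_total} tokens")
# current_tokens stays authoritative: it is recomputed via count_discussion_tokens("lollms_text", ...)
print(f"total prompt          : {status['current_tokens']} tokens")
```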
{lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.29.1
+Version: 0.29.3
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
@@ -296,9 +296,22 @@ This example showcases how `lollms-client` allows you to build powerful, knowled

 ### Building Stateful Agents with Memory and Data Zones

-The
+The `LollmsDiscussion` class provides a sophisticated system for creating stateful agents that can remember information across conversations. This is achieved through a layered system of "context zones" that are automatically combined into the AI's system prompt.

-
+#### Understanding the Context Zones
+
+The AI's context is more than just chat history. It's built from several distinct components, each with a specific purpose:
+
+* **`system_prompt`**: The foundational layer defining the AI's core identity, persona, and primary instructions.
+* **`memory`**: The AI's long-term, persistent memory. It stores key facts about the user or topics, built up over time using the `memorize()` method.
+* **`user_data_zone`**: Holds session-specific information about the user's current state or goals (e.g., "User is currently working on 'file.py'").
+* **`discussion_data_zone`**: Contains state or meta-information about the current conversational task (e.g., "Step 1 of the plan is complete").
+* **`personality_data_zone`**: A knowledge base or set of rules automatically injected from a `LollmsPersonality`'s `data_source`.
+* **`pruning_summary`**: An automatic, AI-generated summary of the oldest messages in a very long chat, used to conserve tokens without losing the gist of the early conversation.
+
+The `get_context_status()` method is your window into this system, showing you exactly how these zones are combined and how many tokens they consume.
+
+Let's see this in action with a "Personal Assistant" agent that learns about the user over time.

 ```python
 from lollms_client import LollmsClient, LollmsDataManager, LollmsDiscussion, MSG_TYPE
@@ -320,7 +333,8 @@ if not discussion:
         id=discussion_id,
         autosave=True # Important for persistence
     )
-    # Let's preset some
+    # Let's preset some data in different zones
+    discussion.system_prompt = "You are a helpful Personal Assistant."
     discussion.user_data_zone = "User's Name: Alex\nUser's Goal: Learn about AI development."
     discussion.commit()
 else:
@@ -331,13 +345,24 @@ def run_chat_turn(prompt: str):
     """Helper function to run a single chat turn and print details."""
     ASCIIColors.cyan(f"\n> User: {prompt}")

-    # --- A. Check context status BEFORE the turn ---
+    # --- A. Check context status BEFORE the turn using get_context_status() ---
     ASCIIColors.magenta("\n--- Context Status (Before Generation) ---")
     status = discussion.get_context_status()
-    print(f"Max Tokens: {status.get('max_tokens')}, Current
-
-
-
+    print(f"Max Tokens: {status.get('max_tokens')}, Current Tokens: {status.get('current_tokens')}")
+
+    # Print the system context details
+    if 'system_context' in status['zones']:
+        sys_ctx = status['zones']['system_context']
+        print(f"  - System Context Tokens: {sys_ctx['tokens']}")
+        # The 'breakdown' shows the individual zones that were combined
+        for name, content in sys_ctx.get('breakdown', {}).items():
+            print(f"    -> Contains '{name}': {content.split(chr(10))[0]}...")
+
+    # Print the message history details
+    if 'message_history' in status['zones']:
+        msg_hist = status['zones']['message_history']
+        print(f"  - Message History Tokens: {msg_hist['tokens']} ({msg_hist['message_count']} messages)")
+
     print("------------------------------------------")

     # --- B. Run the chat ---
@@ -348,7 +373,7 @@ def run_chat_turn(prompt: str):
     )
     print() # Newline after stream

-    # --- C. Trigger memorization ---
+    # --- C. Trigger memorization to update the 'memory' zone ---
     ASCIIColors.yellow("\nTriggering memorization process...")
     discussion.memorize()
     discussion.commit() # Save the new memory to the DB
@@ -359,24 +384,30 @@ run_chat_turn("Hi there! Can you recommend a good Python library for building we
 run_chat_turn("That sounds great. By the way, my favorite programming language is Rust, I find its safety features amazing.")
 run_chat_turn("What was my favorite programming language again?")

-# --- Final Inspection ---
+# --- Final Inspection of Memory ---
 ASCIIColors.magenta("\n--- Final Context Status ---")
 status = discussion.get_context_status()
-print(f"Max Tokens: {status.get('max_tokens')}, Current
-
-
-print(f"
+print(f"Max Tokens: {status.get('max_tokens')}, Current Tokens: {status.get('current_tokens')}")
+if 'system_context' in status['zones']:
+    sys_ctx = status['zones']['system_context']
+    print(f"  - System Context Tokens: {sys_ctx['tokens']}")
+    for name, content in sys_ctx.get('breakdown', {}).items():
+        # Print the full content of the memory zone to verify it was updated
+        if name == 'memory':
+            ASCIIColors.yellow(f"  -> Full '{name}' content:\n{content}")
+        else:
+            print(f"  -> Contains '{name}': {content.split(chr(10))[0]}...")
 print("------------------------------------------")

 ```

 #### How it Works:

-1. **Persistence:** The `LollmsDataManager` and
-2. **`
-3. **`
-4.
-
+1. **Persistence & Initialization:** The `LollmsDataManager` saves and loads the discussion. We initialize the `system_prompt` and `user_data_zone` to provide initial context.
+2. **`get_context_status()`:** Before each generation, we call this method. The output shows a `system_context` block with a token count for all combined zones and a `breakdown` field that lets us see the content of each individual zone that contributed to it.
+3. **`memorize()`:** After the user mentions their favorite language, `memorize()` is called. The LLM analyzes the last turn, identifies this new, important fact, and appends it to the `discussion.memory` zone.
+4. **Recall:** In the final turn, when asked to recall the favorite language, the AI has access to the updated `memory` content within its system context and can correctly answer "Rust". This demonstrates true long-term, stateful memory.
+
 ## Documentation

@@ -922,33 +953,54 @@ discussion.commit() # Save the updated memory to the database
 ```

 #### `get_context_status()`
-Provides a detailed, real-time breakdown of the current prompt context, showing exactly what will be sent to the model and how many tokens each part occupies.

-
-
+Provides a detailed, real-time breakdown of the current prompt context, showing exactly what will be sent to the model and how many tokens each major component occupies. This is crucial for debugging context issues and understanding token usage.
+
+The method accurately reflects the structure of the `lollms_text` format, where all system-level instructions (the main prompt, all data zones, and the pruning summary) are combined into a single system block.
+
+- **Return Value:** A dictionary containing:
+  - `max_tokens`: The configured maximum token limit for the discussion.
+  - `current_tokens`: The total, most accurate token count for the entire prompt, calculated using the same logic as the `chat()` method.
+  - `zones`: A dictionary with up to two keys:
+    - **`system_context`**: Present if there is any system-level content. It contains:
+      - `tokens`: The total token count for the **entire combined system block** (e.g., `!@>system:\n...\n`).
+      - `content`: The full string content of the system block, showing exactly how all zones are merged.
+      - `breakdown`: A sub-dictionary showing the raw text of each individual component (e.g., `system_prompt`, `memory`, `user_data_zone`) that was used to build the `content`.
+    - **`message_history`**: Present if there are messages in the branch. It contains:
+      - `tokens`: The total token count for the message history part of the prompt.
+      - `content`: The full string of the formatted message history.
+      - `message_count`: The number of messages included in the history.
+
+- **Use Case:** Essential for debugging context issues, visualizing how different data zones contribute to the final prompt, and monitoring token consumption.

 ```python
 import json

+# Assuming 'discussion' is an LollmsDiscussion object with some data
+discussion.system_prompt = "You are a helpful AI."
+discussion.user_data_zone = "User is named Bob."
+discussion.add_message(sender="user", content="Hello!")
+discussion.add_message(sender="assistant", content="Hi Bob!")
+
 status = discussion.get_context_status()
 print(json.dumps(status, indent=2))

 # Expected Output Structure:
 # {
-#   "max_tokens":
-#   "current_tokens":
+#   "max_tokens": null,
+#   "current_tokens": 46,
 #   "zones": {
-#     "
-#       "content": "You are a helpful
-#       "tokens":
-#
-#
-#
-#
+#     "system_context": {
+#       "content": "You are a helpful AI.\n\n-- User Data Zone --\nUser is named Bob.",
+#       "tokens": 25,
+#       "breakdown": {
+#         "system_prompt": "You are a helpful AI.",
+#         "user_data_zone": "User is named Bob."
+#       }
+#     },
 #     "message_history": {
-#       "content": "!@>user:\
-#       "tokens":
+#       "content": "!@>user:\nHello!\n!@>assistant:\nHi Bob!\n",
+#       "tokens": 21,
 #       "message_count": 2
 #     }
 #   }
{lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/RECORD
CHANGED
@@ -29,10 +29,10 @@ examples/mcp_examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE
 examples/mcp_examples/run_remote_mcp_example_v2.py,sha256=bbNn93NO_lKcFzfIsdvJJijGx2ePFTYfknofqZxMuRM,14626
 examples/mcp_examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
 examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
-lollms_client/__init__.py,sha256=
+lollms_client/__init__.py,sha256=5WRehZnsWKKGP_lLPJ_PCHk1NMQBGRcezXExtvvtKzg,1147
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=
-lollms_client/lollms_discussion.py,sha256=
+lollms_client/lollms_core.py,sha256=ABfUq13P_zo_qpLwHNhtvzmiA1nHZyqbBLKoaVECNi4,171407
+lollms_client/lollms_discussion.py,sha256=zdm02lzd3cQNPaZfJ3zCa8yQTYw7mogqWk1cve3UOao,67697
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=cU0cmxZfIrp-ofutbRLx7W_59dxzPXpU-vO98MqVnQA,14788
 lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933
@@ -53,7 +53,7 @@ lollms_client/llm_bindings/grok/__init__.py,sha256=5tIf3348RgAEaSp6FdG-LM9N8R7aR
 lollms_client/llm_bindings/groq/__init__.py,sha256=zyWKM78qHwSt5g0Bb8Njj7Jy8CYuLMyplx2maOKFFpg,12218
 lollms_client/llm_bindings/hugging_face_inference_api/__init__.py,sha256=PxgeRqT8dpa9GZoXwtSncy9AUgAN2cDKrvp_nbaWq0E,14027
 lollms_client/llm_bindings/litellm/__init__.py,sha256=pNkwyRPeENvTM4CDh6Pj3kQfxHfhX2pvXhGJDjKjp30,12340
-lollms_client/llm_bindings/llamacpp/__init__.py,sha256=
+lollms_client/llm_bindings/llamacpp/__init__.py,sha256=4cotP3cYhiA0501UnGVljlEBBVatNyfIyrZsHUPJk24,63878
 lollms_client/llm_bindings/lollms/__init__.py,sha256=scGHEKzlGX5fw2XwefVicsf28GrwgN3wU5nl4EPJ_Sk,24424
 lollms_client/llm_bindings/lollms_webui/__init__.py,sha256=Thoq3PJR2e03Y2Kd_FBb-DULJK0zT5-2ID1YIJLcPlw,17864
 lollms_client/llm_bindings/mistral/__init__.py,sha256=624Gr462yBh52ttHFOapKgJOn8zZ1vZcTEcC3i4FYt8,12750
@@ -92,8 +92,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
 lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.29.
-lollms_client-0.29.
-lollms_client-0.29.
-lollms_client-0.29.
-lollms_client-0.29.
+lollms_client-0.29.3.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.29.3.dist-info/METADATA,sha256=wWn-0CasMd51exqHdQXynjYTcPtYVSiycyCGMv7aTII,47847
+lollms_client-0.29.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-0.29.3.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+lollms_client-0.29.3.dist-info/RECORD,,

{lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/WHEEL
File without changes
{lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/licenses/LICENSE
File without changes
{lollms_client-0.29.1.dist-info → lollms_client-0.29.3.dist-info}/top_level.txt
File without changes