lollms-client 0.29.2__py3-none-any.whl → 0.29.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/lollms_core.py +35 -10
- lollms_client/lollms_discussion.py +53 -28
- {lollms_client-0.29.2.dist-info → lollms_client-0.29.3.dist-info}/METADATA +1 -1
- {lollms_client-0.29.2.dist-info → lollms_client-0.29.3.dist-info}/RECORD +8 -8
- {lollms_client-0.29.2.dist-info → lollms_client-0.29.3.dist-info}/WHEEL +0 -0
- {lollms_client-0.29.2.dist-info → lollms_client-0.29.3.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.29.2.dist-info → lollms_client-0.29.3.dist-info}/top_level.txt +0 -0
lollms_client/__init__.py
CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager

-__version__ = "0.29.2"
+__version__ = "0.29.3" # Updated version

 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
lollms_client/lollms_core.py
CHANGED
@@ -147,9 +147,6 @@ class LollmsClient():
             available = self.binding_manager.get_available_bindings()
             raise ValueError(f"Failed to create LLM binding: {binding_name}. Available: {available}")

-        # Determine the effective host address (use LLM binding's if initial was None)
-        effective_host_address = self.host_address
-
         # --- Modality Binding Setup ---
         self.tts_binding_manager = LollmsTTSBindingManager(tts_bindings_dir)
         self.tti_binding_manager = LollmsTTIBindingManager(tti_bindings_dir)

@@ -2961,7 +2958,6 @@ Provide the final aggregated answer in {output_format} format, directly addressing
             callback("Deep analysis complete.", MSG_TYPE.MSG_TYPE_STEP_END)
         return final_output

-
    def summarize(
        self,
        text_to_summarize: str,

@@ -2990,6 +2986,7 @@ Provide the final aggregated answer in {output_format} format, directly addressing
                is not lost at the boundaries. Defaults to 250.
            streaming_callback (Optional[Callable], optional): A callback function to receive real-time updates
                on the process (e.g., which chunk is being processed).
+               It receives a message, a message type, and optional metadata.
                Defaults to None.
            **kwargs: Additional keyword arguments to be passed to the generation method (e.g., temperature, top_p).

@@ -3004,12 +3001,17 @@ Provide the final aggregated answer in {output_format} format, directly addressing

        if len(tokens) <= chunk_size_tokens:
            if streaming_callback:
-                streaming_callback("Text is short enough for a single summary.", MSG_TYPE.MSG_TYPE_STEP)
+                streaming_callback("Text is short enough for a single summary.", MSG_TYPE.MSG_TYPE_STEP, {"progress": 0})

            prompt_objective = contextual_prompt or "Provide a comprehensive summary of the following text."
            final_prompt = f"{prompt_objective}\n\n--- Text to Summarize ---\n{text_to_summarize}"

-
+            summary = self.generate_text(final_prompt, **kwargs)
+
+            if streaming_callback:
+                streaming_callback("Summary generated.", MSG_TYPE.MSG_TYPE_STEP, {"progress": 100})
+
+            return summary

        # --- Stage 1: Chunking and Independent Summarization ---
        chunks = []

@@ -3021,13 +3023,21 @@ Provide the final aggregated answer in {output_format} format, directly addressing

        chunk_summaries = []

+        # Total steps include each chunk plus the final synthesis step
+        total_steps = len(chunks) + 1
+
        # Define the prompt for summarizing each chunk
        summarization_objective = contextual_prompt or "Summarize the key points of the following text excerpt."
        chunk_summary_prompt_template = f"{summarization_objective}\n\n--- Text Excerpt ---\n{{chunk_text}}"

        for i, chunk in enumerate(chunks):
+            progress_before = (i / total_steps) * 100
            if streaming_callback:
-                streaming_callback(
+                streaming_callback(
+                    f"Summarizing chunk {i + 1} of {len(chunks)}...",
+                    MSG_TYPE.MSG_TYPE_STEP_START,
+                    {"id": f"chunk_{i+1}", "progress": progress_before}
+                )

            prompt = chunk_summary_prompt_template.format(chunk_text=chunk)

@@ -3035,8 +3045,14 @@ Provide the final aggregated answer in {output_format} format, directly addressing
                # Generate summary for the current chunk
                chunk_summary = self.generate_text(prompt, **kwargs)
                chunk_summaries.append(chunk_summary)
+
+                progress_after = ((i + 1) / total_steps) * 100
                if streaming_callback:
-                    streaming_callback(
+                    streaming_callback(
+                        f"Chunk {i + 1} summarized. Progress: {progress_after:.0f}%",
+                        MSG_TYPE.MSG_TYPE_STEP_END,
+                        {"id": f"chunk_{i+1}", "summary_snippet": chunk_summary[:100], "progress": progress_after}
+                    )
            except Exception as e:
                trace_exception(e)
                if streaming_callback:

@@ -3045,8 +3061,13 @@ Provide the final aggregated answer in {output_format} format, directly addressing
                chunk_summaries.append(f"[Error summarizing chunk {i+1}]")

        # --- Stage 2: Final Synthesis of All Chunk Summaries ---
+        progress_before_synthesis = (len(chunks) / total_steps) * 100
        if streaming_callback:
-            streaming_callback(
+            streaming_callback(
+                "Synthesizing all chunk summaries into a final version...",
+                MSG_TYPE.MSG_TYPE_STEP_START,
+                {"id": "final_synthesis", "progress": progress_before_synthesis}
+            )

        combined_summaries = "\n\n---\n\n".join(chunk_summaries)

@@ -3064,7 +3085,11 @@ Provide the final aggregated answer in {output_format} format, directly addressing
        final_summary = self.generate_text(final_synthesis_prompt, **kwargs)

        if streaming_callback:
-            streaming_callback(
+            streaming_callback(
+                "Final summary synthesized.",
+                MSG_TYPE.MSG_TYPE_STEP_END,
+                {"id": "final_synthesis", "progress": 100}
+            )

        return final_summary.strip()

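Taken together, the lollms_core.py changes make summarize() report progress through its streaming_callback: each call now passes a message, a MSG_TYPE step constant, and a metadata dict whose "progress" value runs from 0 to 100 (with N chunks there are N+1 steps, so e.g. 3 chunks reach 25/50/75% and the final synthesis closes at 100%). A minimal consumer sketch, assuming MSG_TYPE is importable from lollms_client.lollms_types and that the LollmsClient constructor arguments (binding, model, host) are filled in for your setup; file names and variable names are illustrative only:

    from lollms_client import LollmsClient
    from lollms_client.lollms_types import MSG_TYPE  # assumed import path for the MSG_TYPE enum

    def report_progress(message, msg_type, metadata=None):
        # metadata is kept optional in case some call sites still omit it.
        progress = (metadata or {}).get("progress")
        prefix = f"[{progress:5.1f}%] " if progress is not None else " " * 9
        if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
            print(prefix + "start: " + message)
        elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
            print(prefix + "done:  " + message)
        else:
            print(prefix + message)

    client = LollmsClient()  # binding/model/host arguments omitted for brevity
    long_text = open("report.txt", encoding="utf-8").read()  # illustrative input
    summary = client.summarize(long_text, streaming_callback=report_progress)
    print(summary)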
lollms_client/lollms_discussion.py
CHANGED

@@ -1180,6 +1180,7 @@ class LollmsDiscussion:
            "- Key decisions or conclusions reached.\n"
            "- Important entities, projects, or topics mentioned that are likely to recur.\n"
            "Format the output as a concise list of bullet points. Be brief and factual. "
+            "Do not repeat information that is already in the User Data Zone or the Memory"
            "If no new, significant long-term information is present, output the single word: 'NOTHING'."
        )

@@ -1272,8 +1273,8 @@ class LollmsDiscussion:
                "content": str,
                "tokens": int,
                "breakdown": {
-                    "system_prompt": str,
-                    "memory": str,
+                    "system_prompt": {"content": str, "tokens": int},
+                    "memory": {"content": str, "tokens": int},
                    ...
                }
            },

@@ -1291,40 +1292,66 @@ class LollmsDiscussion:
            "current_tokens": 0,
            "zones": {}
        }
+        tokenizer = self.lollmsClient.count_tokens

        # --- 1. Assemble and Tokenize the Entire System Context Block ---
        system_prompt_text = (self._system_prompt or "").strip()
-        data_zone_text = self.get_full_data_zone()
-
-
-
-
+        data_zone_text = self.get_full_data_zone()
+        pruning_summary_content = (self.pruning_summary or "").strip()
+
+        pruning_summary_block = ""
+        if pruning_summary_content and self.pruning_point_id:
+            pruning_summary_block = f"--- Conversation Summary ---\n{pruning_summary_content}"

-        # Combine all parts that go into the system block, separated by newlines
        full_system_content_parts = [
-            part for part in [system_prompt_text, data_zone_text,
+            part for part in [system_prompt_text, data_zone_text, pruning_summary_block] if part
        ]
        full_system_content = "\n\n".join(full_system_content_parts).strip()

        if full_system_content:
-            # Create the final system block as it would be exported
            system_block = f"!@>system:\n{full_system_content}\n"
-            system_tokens =
+            system_tokens = tokenizer(system_block)

-            # Create the breakdown for user visibility
            breakdown = {}
            if system_prompt_text:
-                breakdown["system_prompt"] =
-
-
-
-
-
-
-
-
-
-
+                breakdown["system_prompt"] = {
+                    "content": system_prompt_text,
+                    "tokens": tokenizer(system_prompt_text)
+                }
+
+            memory_text = (self.memory or "").strip()
+            if memory_text:
+                breakdown["memory"] = {
+                    "content": memory_text,
+                    "tokens": tokenizer(memory_text)
+                }
+
+            user_data_text = (self.user_data_zone or "").strip()
+            if user_data_text:
+                breakdown["user_data_zone"] = {
+                    "content": user_data_text,
+                    "tokens": tokenizer(user_data_text)
+                }
+
+            discussion_data_text = (self.discussion_data_zone or "").strip()
+            if discussion_data_text:
+                breakdown["discussion_data_zone"] = {
+                    "content": discussion_data_text,
+                    "tokens": tokenizer(discussion_data_text)
+                }
+
+            personality_data_text = (self.personality_data_zone or "").strip()
+            if personality_data_text:
+                breakdown["personality_data_zone"] = {
+                    "content": personality_data_text,
+                    "tokens": tokenizer(personality_data_text)
+                }
+
+            if pruning_summary_content:
+                breakdown["pruning_summary"] = {
+                    "content": pruning_summary_content,
+                    "tokens": tokenizer(pruning_summary_content)
+                }

            result["zones"]["system_context"] = {
                "content": full_system_content,

@@ -1340,7 +1367,6 @@ class LollmsDiscussion:
        branch = self.get_branch(branch_tip_id)
        messages_to_render = branch

-        # Adjust for pruning to get the active set of messages
        if self.pruning_summary and self.pruning_point_id:
            pruning_index = -1
            for i, msg in enumerate(branch):

@@ -1363,7 +1389,7 @@ class LollmsDiscussion:
        message_count = len(messages_to_render)

        if messages_text:
-            tokens =
+            tokens = tokenizer(messages_text)
            result["zones"]["message_history"] = {
                "content": messages_text,
                "tokens": tokens,

@@ -1371,11 +1397,10 @@ class LollmsDiscussion:
            }

        # --- 3. Finalize the Total Count ---
-        # This remains the most accurate way to get the final count, as it uses the
-        # exact same export logic as the chat method.
        result["current_tokens"] = self.count_discussion_tokens("lollms_text", branch_tip_id)

        return result
+
    def switch_to_branch(self, branch_id):
        self.active_branch_id = branch_id

@@ -1412,4 +1437,4 @@ class LollmsDiscussion:
        new_metadata = (self.metadata or {}).copy()
        new_metadata[itemname] = item_value
        self.metadata = new_metadata
-        self.commit()
+        self.commit()
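With the lollms_discussion.py changes above, each breakdown entry is now a dict carrying both the zone text and its token count, so a caller can show where the context budget is going. A short consumer sketch, assuming the method that documents this schema is get_context_status() and that `discussion` is an already-constructed LollmsDiscussion bound to a LollmsClient; local variable names are illustrative:

    # 'discussion' is an existing LollmsDiscussion instance (construction omitted).
    status = discussion.get_context_status()
    print(f"context tokens in use: {status['current_tokens']}")

    for zone_name, zone in status["zones"].items():
        print(f"- {zone_name}: {zone['tokens']} tokens")
        # system_context now exposes a per-part breakdown, e.g. system_prompt, memory,
        # user_data_zone, discussion_data_zone, personality_data_zone, pruning_summary.
        for part_name, part in zone.get("breakdown", {}).items():
            print(f"    {part_name}: {part['tokens']} tokens")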
{lollms_client-0.29.2.dist-info → lollms_client-0.29.3.dist-info}/RECORD
CHANGED

@@ -29,10 +29,10 @@ examples/mcp_examples/openai_mcp.py,sha256=7IEnPGPXZgYZyiES_VaUbQ6viQjenpcUxGiHE
 examples/mcp_examples/run_remote_mcp_example_v2.py,sha256=bbNn93NO_lKcFzfIsdvJJijGx2ePFTYfknofqZxMuRM,14626
 examples/mcp_examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
 examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
-lollms_client/__init__.py,sha256=
+lollms_client/__init__.py,sha256=5WRehZnsWKKGP_lLPJ_PCHk1NMQBGRcezXExtvvtKzg,1147
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=
-lollms_client/lollms_discussion.py,sha256=
+lollms_client/lollms_core.py,sha256=ABfUq13P_zo_qpLwHNhtvzmiA1nHZyqbBLKoaVECNi4,171407
+lollms_client/lollms_discussion.py,sha256=zdm02lzd3cQNPaZfJ3zCa8yQTYw7mogqWk1cve3UOao,67697
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=cU0cmxZfIrp-ofutbRLx7W_59dxzPXpU-vO98MqVnQA,14788
 lollms_client/lollms_mcp_binding.py,sha256=0rK9HQCBEGryNc8ApBmtOlhKE1Yfn7X7xIQssXxS2Zc,8933

@@ -92,8 +92,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
 lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.29.
-lollms_client-0.29.
-lollms_client-0.29.
-lollms_client-0.29.
-lollms_client-0.29.
+lollms_client-0.29.3.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.29.3.dist-info/METADATA,sha256=wWn-0CasMd51exqHdQXynjYTcPtYVSiycyCGMv7aTII,47847
+lollms_client-0.29.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-0.29.3.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+lollms_client-0.29.3.dist-info/RECORD,,
File without changes
File without changes
File without changes