lollms-client 1.5.6__py3-none-any.whl → 1.7.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/azure_openai/__init__.py +2 -2
- lollms_client/llm_bindings/claude/__init__.py +125 -35
- lollms_client/llm_bindings/gemini/__init__.py +261 -159
- lollms_client/llm_bindings/grok/__init__.py +52 -15
- lollms_client/llm_bindings/groq/__init__.py +2 -2
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +2 -2
- lollms_client/llm_bindings/litellm/__init__.py +1 -1
- lollms_client/llm_bindings/llama_cpp_server/__init__.py +605 -0
- lollms_client/llm_bindings/llamacpp/__init__.py +18 -11
- lollms_client/llm_bindings/lollms/__init__.py +76 -21
- lollms_client/llm_bindings/lollms_webui/__init__.py +1 -1
- lollms_client/llm_bindings/mistral/__init__.py +2 -2
- lollms_client/llm_bindings/novita_ai/__init__.py +142 -6
- lollms_client/llm_bindings/ollama/__init__.py +345 -89
- lollms_client/llm_bindings/open_router/__init__.py +2 -2
- lollms_client/llm_bindings/openai/__init__.py +81 -20
- lollms_client/llm_bindings/openllm/__init__.py +362 -506
- lollms_client/llm_bindings/openwebui/__init__.py +333 -171
- lollms_client/llm_bindings/perplexity/__init__.py +2 -2
- lollms_client/llm_bindings/pythonllamacpp/__init__.py +3 -3
- lollms_client/llm_bindings/tensor_rt/__init__.py +1 -1
- lollms_client/llm_bindings/transformers/__init__.py +428 -632
- lollms_client/llm_bindings/vllm/__init__.py +1 -1
- lollms_client/lollms_agentic.py +4 -2
- lollms_client/lollms_base_binding.py +61 -0
- lollms_client/lollms_core.py +512 -1890
- lollms_client/lollms_discussion.py +65 -39
- lollms_client/lollms_llm_binding.py +126 -261
- lollms_client/lollms_mcp_binding.py +49 -77
- lollms_client/lollms_stt_binding.py +99 -52
- lollms_client/lollms_tti_binding.py +38 -38
- lollms_client/lollms_ttm_binding.py +38 -42
- lollms_client/lollms_tts_binding.py +43 -18
- lollms_client/lollms_ttv_binding.py +38 -42
- lollms_client/lollms_types.py +4 -2
- lollms_client/stt_bindings/whisper/__init__.py +108 -23
- lollms_client/stt_bindings/whispercpp/__init__.py +7 -1
- lollms_client/tti_bindings/diffusers/__init__.py +464 -803
- lollms_client/tti_bindings/diffusers/server/main.py +1062 -0
- lollms_client/tti_bindings/gemini/__init__.py +182 -239
- lollms_client/tti_bindings/leonardo_ai/__init__.py +6 -3
- lollms_client/tti_bindings/lollms/__init__.py +4 -1
- lollms_client/tti_bindings/novita_ai/__init__.py +5 -2
- lollms_client/tti_bindings/openai/__init__.py +10 -11
- lollms_client/tti_bindings/stability_ai/__init__.py +5 -3
- lollms_client/ttm_bindings/audiocraft/__init__.py +7 -12
- lollms_client/ttm_bindings/beatoven_ai/__init__.py +7 -3
- lollms_client/ttm_bindings/lollms/__init__.py +4 -17
- lollms_client/ttm_bindings/replicate/__init__.py +7 -4
- lollms_client/ttm_bindings/stability_ai/__init__.py +7 -4
- lollms_client/ttm_bindings/topmediai/__init__.py +6 -3
- lollms_client/tts_bindings/bark/__init__.py +7 -10
- lollms_client/tts_bindings/lollms/__init__.py +6 -1
- lollms_client/tts_bindings/piper_tts/__init__.py +8 -11
- lollms_client/tts_bindings/xtts/__init__.py +157 -74
- lollms_client/tts_bindings/xtts/server/main.py +241 -280
- {lollms_client-1.5.6.dist-info → lollms_client-1.7.13.dist-info}/METADATA +113 -5
- lollms_client-1.7.13.dist-info/RECORD +90 -0
- lollms_client-1.5.6.dist-info/RECORD +0 -87
- {lollms_client-1.5.6.dist-info → lollms_client-1.7.13.dist-info}/WHEEL +0 -0
- {lollms_client-1.5.6.dist-info → lollms_client-1.7.13.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.5.6.dist-info → lollms_client-1.7.13.dist-info}/top_level.txt +0 -0
The expanded diff below covers lollms_client/lollms_discussion.py (+65 -39); the remaining files are summarized in the list above.

```diff
@@ -34,8 +34,7 @@ if False:
 
 from lollms_client.lollms_utilities import build_image_dicts, robust_json_parser
 from ascii_colors import ASCIIColors, trace_exception
-
-# from .lollms_types import MSG_TYPE
+from lollms_client.lollms_types import MSG_TYPE
 
 class EncryptedString(TypeDecorator):
     """A SQLAlchemy TypeDecorator for field-level database encryption.
```
```diff
@@ -1054,7 +1053,7 @@ class LollmsDiscussion:
         debug: bool = False,
         remove_thinking_blocks:bool = True,
         **kwargs
-    ) -> Dict[str,
+    ) -> Dict[str, Any]:
        """Main interaction method that can invoke the dynamic, multi-modal agent.
 
         This method orchestrates the entire response generation process. It can
```
```diff
@@ -1095,14 +1094,35 @@ class LollmsDiscussion:
             where the 'ai_message' will contain rich metadata if an agentic turn was used.
         """
         callback = kwargs.get("streaming_callback")
+        collected_sources = []
+
+
+        # Step 1: Add user message, now including any images.
+        if add_user_message:
+            user_msg = self.add_message(
+                sender=kwargs.get("user_name", "user"),
+                sender_type="user",
+                content=user_message,
+                images=images,
+                **kwargs
+            )
+        else: # Regeneration logic
+            # _validate_and_set_active_branch ensures active_branch_id is valid and a leaf.
+            # So, if we are regenerating, active_branch_id must be valid.
+            if self.active_branch_id not in self._message_index: # Redundant check, but safe
+                raise ValueError("Regeneration failed: active branch tip not found or is invalid.")
+            user_msg_orm = self._message_index[self.active_branch_id]
+            if user_msg_orm.sender_type != 'user':
+                raise ValueError(f"Regeneration failed: active branch tip is a '{user_msg_orm.sender_type}' message, not 'user'.")
+            user_msg = LollmsMessage(self, user_msg_orm)
+            images = user_msg.images
+
         # extract personality data
         if personality is not None:
             object.__setattr__(self, '_system_prompt', personality.system_prompt)
 
             # --- New Data Source Handling Logic ---
             if hasattr(personality, 'data_source') and personality.data_source is not None:
-                # Placeholder for MSG_TYPE if not imported
-                MSG_TYPE = SimpleNamespace(MSG_TYPE_STEP="step", MSG_TYPE_STEP_START="step_start", MSG_TYPE_STEP_END="step_end", MSG_TYPE_EXCEPTION="exception")
 
                 if isinstance(personality.data_source, str):
                     # --- Static Data Source ---
```
```diff
@@ -1117,7 +1137,7 @@ class LollmsDiscussion:
                     if callback:
                         qg_id = callback("Generating query for dynamic personality data...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "dynamic_data_query_gen"})
 
-                    context_for_query = self.export('markdown')
+                    context_for_query = self.export('markdown', suppress_system_prompt=True)
                     query_prompt = (
                         "You are an expert query generator. Based on the current conversation, formulate a concise and specific query to retrieve relevant information from a knowledge base. "
                         "The query will be used to fetch data that will help you answer the user's latest request.\n\n"
```
```diff
@@ -1154,6 +1174,15 @@ class LollmsDiscussion:
 
                         if retrieved_data:
                             self.personality_data_zone = retrieved_data.strip()
+                            source_item = {
+                                "title": "Personality Data Source",
+                                "content": retrieved_data,
+                                "source": personality.name if hasattr(personality, 'name') else "Personality",
+                                "query": generated_query
+                            }
+                            collected_sources.append(source_item)
+                            if callback:
+                                callback([source_item], MSG_TYPE.MSG_TYPE_SOURCES_LIST)
 
                     except Exception as e:
                         trace_exception(e)
```
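When retrieval succeeds, the callback now receives the source list immediately under the new `MSG_TYPE.MSG_TYPE_SOURCES_LIST` tag, so a UI can render citations while generation is still running. A minimal consumer sketch, assuming the `(data, msg_type, params)` callback shape inferred from the calls visible in this diff:

```python
from lollms_client.lollms_types import MSG_TYPE

seen_sources = []

def on_stream(data, msg_type, params=None):
    # Source notifications carry a list of dicts with the keys built
    # above: title, content, source, query.
    if msg_type == MSG_TYPE.MSG_TYPE_SOURCES_LIST:
        seen_sources.extend(data)
    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
        print(f"[step] {data}")
    return True  # by lollms convention, returning False aborts streaming
```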
```diff
@@ -1175,26 +1204,6 @@ class LollmsDiscussion:
         if self.max_context_size is not None:
             self.summarize_and_prune(self.max_context_size)
 
-        # Step 1: Add user message, now including any images.
-        if add_user_message:
-            user_msg = self.add_message(
-                sender=kwargs.get("user_name", "user"),
-                sender_type="user",
-                content=user_message,
-                images=images,
-                **kwargs
-            )
-        else: # Regeneration logic
-            # _validate_and_set_active_branch ensures active_branch_id is valid and a leaf.
-            # So, if we are regenerating, active_branch_id must be valid.
-            if self.active_branch_id not in self._message_index: # Redundant check, but safe
-                raise ValueError("Regeneration failed: active branch tip not found or is invalid.")
-            user_msg_orm = self._message_index[self.active_branch_id]
-            if user_msg_orm.sender_type != 'user':
-                raise ValueError(f"Regeneration failed: active branch tip is a '{user_msg_orm.sender_type}' message, not 'user'.")
-            user_msg = LollmsMessage(self, user_msg_orm)
-            images = user_msg.images
-
         is_agentic_turn = (effective_use_mcps is not None and effective_use_mcps) or (use_data_store is not None and use_data_store)
 
         start_time = datetime.now()
```
```diff
@@ -1205,17 +1214,30 @@ class LollmsDiscussion:
         final_content = ""
 
         if is_agentic_turn:
-            prompt_for_agent = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id)
+            prompt_for_agent = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id, suppress_system_prompt=True)
             if debug:
                 ASCIIColors.cyan("\n" + "="*50 + "\n--- DEBUG: AGENTIC TURN TRIGGERED ---\n" + f"--- PROMPT FOR AGENT (from discussion history) ---\n{prompt_for_agent}\n" + "="*50 + "\n")
-
+
+
+            # Combine system prompt and data zones
+            system_prompt_part = (self._system_prompt or "").strip()
+            data_zone_part = self.get_full_data_zone() # This now returns a clean, multi-part block or an empty string
+            full_system_prompt = ""
+
+            # Combine them intelligently
+            if system_prompt_part and data_zone_part:
+                full_system_prompt = f"{system_prompt_part}\n\n{data_zone_part}"
+            elif system_prompt_part:
+                full_system_prompt = system_prompt_part
+            else:
+                full_system_prompt = data_zone_part
             agent_result = self.lollmsClient.generate_with_mcp_rag(
                 prompt=prompt_for_agent,
                 use_mcps=effective_use_mcps,
                 use_data_store=use_data_store,
                 max_reasoning_steps=max_reasoning_steps,
                 images=images,
-                system_prompt =
+                system_prompt = full_system_prompt,
                 debug=debug,
                 **kwargs
             )
```
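The prompt assembly added above is self-contained enough to restate: the persona system prompt and the data zone are joined with a blank line, and either side may be empty. A standalone restatement of that rule, for illustration only (not part of the package):

```python
def combine_system_prompt(system_prompt, data_zone):
    """Join a persona system prompt and a data zone, as the agentic path does."""
    system_prompt_part = (system_prompt or "").strip()
    if system_prompt_part and data_zone:
        return f"{system_prompt_part}\n\n{data_zone}"
    # Whichever part is non-empty wins; both empty yields "".
    return system_prompt_part or data_zone

assert combine_system_prompt("You are concise.", "") == "You are concise."
assert combine_system_prompt(None, "## Project notes") == "## Project notes"
assert combine_system_prompt(" A ", "B") == "A\n\nB"
```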
```diff
@@ -1249,9 +1271,12 @@ class LollmsDiscussion:
         message_meta = {}
         if is_agentic_turn and isinstance(agent_result, dict):
             if "tool_calls" in agent_result: message_meta["tool_calls"] = agent_result["tool_calls"]
-            if "sources" in agent_result:
+            if "sources" in agent_result: collected_sources.extend(agent_result["sources"])
             if agent_result.get("clarification_required", False): message_meta["clarification_required"] = True
 
+        if collected_sources:
+            message_meta["sources"] = collected_sources
+
         ai_message_obj = self.add_message(
             sender=personality.name if personality else "assistant",
             sender_type="assistant",
```
```diff
@@ -1269,9 +1294,9 @@ class LollmsDiscussion:
         if self._is_db_backed and self.autosave:
             self.commit()
 
-        return {"user_message": user_msg, "ai_message": ai_message_obj}
+        return {"user_message": user_msg, "ai_message": ai_message_obj, "sources": collected_sources}
 
-    def regenerate_branch(self, branch_tip_id: Optional[str] = None, **kwargs) -> Dict[str,
+    def regenerate_branch(self, branch_tip_id: Optional[str] = None, **kwargs) -> Dict[str, Any]:
         """Regenerates the AI response for a given message or the active branch's AI response.
 
         If the target is an AI message, it's deleted and its children are re-parented to its parent
```
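Callers now get the aggregated sources back alongside the message pair, whether they came from personality data or from the agentic RAG result. A hypothetical consumer sketch — the interaction method's name is not visible in these hunks, so `chat` and the `.content` attribute are assumptions:

```python
# discussion: a LollmsDiscussion bound to a LollmsClient (setup omitted).
# The method name `chat` is assumed; only its return shape is shown above.
result = discussion.chat(
    user_message="Summarize the design notes",
    add_user_message=True,
)
print(result["ai_message"].content)  # .content assumed on LollmsMessage
for src in result.get("sources", []):
    # Each entry carries title/content/source/query, as built above.
    print(f"- {src['title']} (query: {src.get('query')})")
```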
```diff
@@ -1426,7 +1451,7 @@ class LollmsDiscussion:
         self.touch() # Mark for update and auto-save if configured
         print(f"Branch starting at {message_id} ({len(messages_to_delete_ids)} messages) removed. New active branch: {self.active_branch_id}")
 
-    def export(self, format_type: str, branch_tip_id: Optional[str] = None, max_allowed_tokens: Optional[int] = None) -> Union[List[Dict], str]:
+    def export(self, format_type: str, branch_tip_id: Optional[str] = None, max_allowed_tokens: Optional[int] = None, suppress_system_prompt=False) -> Union[List[Dict], str]:
         """Exports the discussion history into a specified format.
 
         This method can format the conversation for different backends like OpenAI,
```
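The new keyword is what lets the agentic path above re-export the history without duplicating the system prompt it already passes to `generate_with_mcp_rag`. A sketch based on the signature shown here, with `discussion` assumed to be an existing `LollmsDiscussion`:

```python
# History only, as the agentic turn now exports it.
history_only = discussion.export("markdown", suppress_system_prompt=True)

# Default behaviour is unchanged: the combined system prompt and data
# zone still lead the exported transcript.
full_transcript = discussion.export("markdown")
```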
```diff
@@ -1464,12 +1489,13 @@ class LollmsDiscussion:
         full_system_prompt = ""
 
         # Combine them intelligently
-        if
-
-
-
-
-
+        if not suppress_system_prompt:
+            if system_prompt_part and data_zone_part:
+                full_system_prompt = f"{system_prompt_part}\n\n{data_zone_part}"
+            elif system_prompt_part:
+                full_system_prompt = system_prompt_part
+            else:
+                full_system_prompt = data_zone_part
 
 
         participants = self.participants or {}
```