lollms-client 0.24.2__py3-none-any.whl → 0.27.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lollms_client/__init__.py +3 -2
- lollms_client/llm_bindings/azure_openai/__init__.py +364 -0
- lollms_client/llm_bindings/claude/__init__.py +549 -0
- lollms_client/llm_bindings/gemini/__init__.py +501 -0
- lollms_client/llm_bindings/grok/__init__.py +536 -0
- lollms_client/llm_bindings/groq/__init__.py +292 -0
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +307 -0
- lollms_client/llm_bindings/litellm/__init__.py +201 -0
- lollms_client/llm_bindings/lollms/__init__.py +2 -0
- lollms_client/llm_bindings/mistral/__init__.py +298 -0
- lollms_client/llm_bindings/open_router/__init__.py +304 -0
- lollms_client/llm_bindings/openai/__init__.py +30 -9
- lollms_client/lollms_core.py +338 -162
- lollms_client/lollms_discussion.py +135 -37
- lollms_client/lollms_llm_binding.py +4 -0
- lollms_client/lollms_types.py +9 -1
- lollms_client/lollms_utilities.py +68 -0
- lollms_client/mcp_bindings/remote_mcp/__init__.py +82 -4
- lollms_client-0.27.0.dist-info/METADATA +604 -0
- {lollms_client-0.24.2.dist-info → lollms_client-0.27.0.dist-info}/RECORD +23 -14
- lollms_client-0.24.2.dist-info/METADATA +0 -239
- {lollms_client-0.24.2.dist-info → lollms_client-0.27.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.24.2.dist-info → lollms_client-0.27.0.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.24.2.dist-info → lollms_client-0.27.0.dist-info}/top_level.txt +0 -0
lollms_client/lollms_discussion.py
CHANGED
@@ -29,6 +29,8 @@ if False:
     from lollms_client import LollmsClient
     from lollms_personality import LollmsPersonality

+from lollms_client.lollms_utilities import build_image_dicts, robust_json_parser
+from ascii_colors import ASCIIColors, trace_exception

 class EncryptedString(TypeDecorator):
     """A SQLAlchemy TypeDecorator for field-level database encryption.
@@ -372,7 +374,9 @@ class LollmsDiscussion:
         object.__setattr__(self, '_message_index', None)
         object.__setattr__(self, '_messages_to_delete_from_db', set())
         object.__setattr__(self, '_is_db_backed', db_manager is not None)
-
+
+        object.__setattr__(self, '_system_prompt', None)
+
         if self._is_db_backed:
             if not db_discussion_obj and not discussion_id:
                 raise ValueError("Either discussion_id or db_discussion_obj must be provided for DB-backed discussions.")
@@ -421,6 +425,31 @@ class LollmsDiscussion:
         else:
             return cls(lollmsClient=lollms_client, discussion_id=kwargs.get('id'), **init_args)

+    def get_messages(self, branch_id: Optional[str] = None) -> Optional[List[LollmsMessage]]:
+        """
+        Returns a list of messages forming a branch, from root to a specific leaf.
+
+        - If no branch_id is provided, it returns the full message list of the
+          currently active branch.
+        - If a branch_id is provided, it returns the list of all messages from the
+          root up to (and including) the message with that ID.
+
+        Args:
+            branch_id: The ID of the leaf message of the desired branch.
+                       If None, the active branch's leaf is used.
+
+        Returns:
+            A list of LollmsMessage objects for the specified branch, ordered
+            from root to leaf, or None if the branch_id does not exist.
+        """
+        # Determine which leaf message ID to use
+        leaf_id = branch_id if branch_id is not None else self.active_branch_id
+
+        # Return the full branch leading to that leaf
+        # We assume self.get_branch() correctly handles non-existent IDs by returning None or an empty list.
+        return self.get_branch(leaf_id)
+
+
     def __getattr__(self, name: str) -> Any:
         """Proxies attribute getting to the underlying discussion object."""
         if name == 'metadata':
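The new `get_messages` helper is a thin wrapper over `get_branch` that defaults to the active branch. A minimal usage sketch; the client and discussion construction below is an assumption for illustration, only the `get_messages` calls reflect the API added in this hunk:

```python
# Hypothetical setup; only the get_messages() calls come from this diff.
from lollms_client import LollmsClient
from lollms_client.lollms_discussion import LollmsDiscussion

lc = LollmsClient()                              # assumed default construction
discussion = LollmsDiscussion(lollmsClient=lc)   # in-memory (no db_manager)

# Root-to-leaf messages of the currently active branch
for msg in discussion.get_messages() or []:
    print(msg.sender, msg.content[:80])

# Same walk, but ending at an explicit leaf message ID (hypothetical ID)
branch = discussion.get_messages(branch_id="some-leaf-message-id")
```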
@@ -564,11 +593,13 @@ class LollmsDiscussion:
         self,
         user_message: str,
         personality: Optional['LollmsPersonality'] = None,
+        branch_tip_id: Optional[str | None] = None,
         use_mcps: Union[None, bool, List[str]] = None,
         use_data_store: Union[None, Dict[str, Callable]] = None,
         add_user_message: bool = True,
-        max_reasoning_steps: int =
+        max_reasoning_steps: int = 20,
         images: Optional[List[str]] = None,
+        debug: bool = False,
         **kwargs
     ) -> Dict[str, 'LollmsMessage']:
         """Main interaction method that can invoke the dynamic, multi-modal agent.
@@ -597,6 +628,7 @@ class LollmsDiscussion:
                 before it must provide a final answer.
             images: A list of base64-encoded images provided by the user, which will
                 be passed to the agent or a multi-modal LLM.
+            debug: If True, prints full prompts and raw AI responses to the console.
             **kwargs: Additional keyword arguments passed to the underlying generation
                 methods, such as 'streaming_callback'.

@@ -604,6 +636,9 @@ class LollmsDiscussion:
             A dictionary with 'user_message' and 'ai_message' LollmsMessage objects,
             where the 'ai_message' will contain rich metadata if an agentic turn was used.
         """
+        if personality is not None:
+            object.__setattr__(self, '_system_prompt', personality.system_prompt)
+
         if self.max_context_size is not None:
             self.summarize_and_prune(self.max_context_size)

@@ -640,12 +675,22 @@ class LollmsDiscussion:
         # Step 3: Execute the appropriate generation logic.
         if is_agentic_turn:
             # --- AGENTIC TURN ---
+            prompt_for_agent = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id)
+            if debug:
+                ASCIIColors.cyan("\n" + "="*50)
+                ASCIIColors.cyan("--- DEBUG: AGENTIC TURN TRIGGERED ---")
+                ASCIIColors.cyan(f"--- PROMPT FOR AGENT (from discussion history) ---")
+                ASCIIColors.magenta(prompt_for_agent)
+                ASCIIColors.cyan("="*50 + "\n")
+
             agent_result = self.lollmsClient.generate_with_mcp_rag(
-                prompt=
+                prompt=prompt_for_agent,
                 use_mcps=use_mcps,
                 use_data_store=use_data_store,
                 max_reasoning_steps=max_reasoning_steps,
                 images=images,
+                system_prompt = self._system_prompt,
+                debug=debug, # Pass the debug flag down
                 **kwargs
             )
             final_content = agent_result.get("final_answer", "The agent did not produce a final answer.")
@@ -654,9 +699,27 @@ class LollmsDiscussion:

         else:
             # --- SIMPLE CHAT TURN ---
+            if debug:
+                prompt_for_chat = self.export("markdown", branch_tip_id if branch_tip_id else self.active_branch_id)
+                ASCIIColors.cyan("\n" + "="*50)
+                ASCIIColors.cyan("--- DEBUG: SIMPLE CHAT PROMPT ---")
+                ASCIIColors.magenta(prompt_for_chat)
+                ASCIIColors.cyan("="*50 + "\n")
+
             # For simple chat, we also need to consider images if the model is multi-modal
             final_raw_response = self.lollmsClient.chat(self, images=images, **kwargs) or ""
-
+
+            if debug:
+                ASCIIColors.cyan("\n" + "="*50)
+                ASCIIColors.cyan("--- DEBUG: RAW SIMPLE CHAT RESPONSE ---")
+                ASCIIColors.magenta(final_raw_response)
+                ASCIIColors.cyan("="*50 + "\n")
+
+            if isinstance(final_raw_response, dict) and final_raw_response.get("status") == "error":
+                raise Exception(final_raw_response.get("message", "Unknown error from lollmsClient.chat"))
+            else:
+                final_content = self.lollmsClient.remove_thinking_blocks(final_raw_response)
+
         final_scratchpad = None # No agentic scratchpad in a simple turn

         # Step 4: Post-generation processing and statistics.
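Taken together, the hunks above give `chat()` two new parameters (`branch_tip_id` and `debug`) and a concrete default for `max_reasoning_steps`, and both the agentic and simple paths now build their prompt from the `"markdown"` export. A hedged sketch of the new call surface, continuing the setup from the earlier sketch:

```python
# Simple chat turn with the new debug flag; everything outside the chat()
# call is assumed setup carried over from the earlier sketch.
result = discussion.chat(
    user_message="Summarize our discussion so far.",
    use_mcps=None,               # no tools/data store -> simple chat turn
    max_reasoning_steps=20,      # new default shown in this diff
    debug=True,                  # prints the exported prompt and raw response
)
print(result["ai_message"].content)
```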
@@ -694,7 +757,7 @@ class LollmsDiscussion:

         return {"user_message": user_msg, "ai_message": ai_message_obj}

-    def regenerate_branch(self, **kwargs) -> Dict[str, 'LollmsMessage']:
+    def regenerate_branch(self, branch_tip_id=None, **kwargs) -> Dict[str, 'LollmsMessage']:
         """Regenerates the last AI response in the active branch.

         It deletes the previous AI response and calls chat() again with the
@@ -706,8 +769,15 @@ class LollmsDiscussion:
         Returns:
             A dictionary with the user and the newly generated AI message.
         """
+        if not branch_tip_id:
+            branch_tip_id = self.active_branch_id
         if not self.active_branch_id or self.active_branch_id not in self._message_index:
-
+            if len(self._message_index)>0:
+                ASCIIColors.warning("No active message to regenerate from.\n")
+                ASCIIColors.warning(f"Using last available message:{list(self._message_index.keys())[-1]}\n")
+            else:
+                branch_tip_id = list(self._message_index.keys())[-1]
+                raise ValueError("No active message to regenerate from.")

         last_message_orm = self._message_index[self.active_branch_id]

@@ -722,11 +792,8 @@ class LollmsDiscussion:
         if self._is_db_backed:
             self._messages_to_delete_from_db.add(last_message_id)

-
-
-
-        prompt_to_regenerate = self._message_index[self.active_branch_id].content
-        return self.chat(user_message=prompt_to_regenerate, add_user_message=False, **kwargs)
+        return self.chat(user_message="", add_user_message=False, branch_tip_id=branch_tip_id, **kwargs)
+
     def delete_branch(self, message_id: str):
         """Deletes a message and its entire descendant branch.

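`regenerate_branch` now regenerates in place: instead of replaying the previous user prompt it re-enters `chat()` with an empty `user_message`, `add_user_message=False`, and the resolved `branch_tip_id`. A usage sketch, under the same assumed setup as above:

```python
# Regenerate the last AI answer on the active branch.
result = discussion.regenerate_branch()

# Or pin the regeneration to a specific leaf (hypothetical message ID).
result = discussion.regenerate_branch(branch_tip_id="leaf-message-id")
```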
@@ -801,7 +868,7 @@ class LollmsDiscussion:

         Args:
             format_type: The target format. Can be "lollms_text", "openai_chat",
-                or "
+                "ollama_chat", or "markdown".
             branch_tip_id: The ID of the message to use as the end of the context.
                 Defaults to the active branch ID.
             max_allowed_tokens: The maximum number of tokens the final prompt can contain.
@@ -809,17 +876,17 @@ class LollmsDiscussion:

         Returns:
             A string for "lollms_text" or a list of dictionaries for "openai_chat"
-            and "ollama_chat".
+            and "ollama_chat". For "markdown", returns a Markdown-formatted string.

         Raises:
             ValueError: If an unsupported format_type is provided.
         """
         branch_tip_id = branch_tip_id or self.active_branch_id
-        if not branch_tip_id and format_type in ["lollms_text", "openai_chat", "ollama_chat"]:
+        if not branch_tip_id and format_type in ["lollms_text", "openai_chat", "ollama_chat", "markdown"]:
             return "" if format_type == "lollms_text" else []
-
+
         branch = self.get_branch(branch_tip_id)
-        full_system_prompt = self.
+        full_system_prompt = self._system_prompt # Simplified for clarity
         participants = self.participants or {}

         def get_full_content(msg: 'LollmsMessage') -> str:
@@ -829,14 +896,12 @@ class LollmsDiscussion:

         # --- NATIVE LOLLMS_TEXT FORMAT ---
         if format_type == "lollms_text":
-            # --- FIX STARTS HERE ---
             final_prompt_parts = []
             message_parts = [] # Temporary list for correctly ordered messages
-
+
             current_tokens = 0
             messages_to_render = branch

-            # 1. Handle non-destructive pruning summary
             summary_text = ""
             if self.pruning_summary and self.pruning_point_id:
                 pruning_index = -1
@@ -848,7 +913,6 @@ class LollmsDiscussion:
                 messages_to_render = branch[pruning_index:]
                 summary_text = f"!@>system:\n--- Conversation Summary ---\n{self.pruning_summary.strip()}\n"

-            # 2. Add main system prompt to the final list
             sys_msg_text = ""
             if full_system_prompt:
                 sys_msg_text = f"!@>system:\n{full_system_prompt.strip()}\n"
@@ -856,15 +920,13 @@ class LollmsDiscussion:
             if max_allowed_tokens is None or sys_tokens <= max_allowed_tokens:
                 final_prompt_parts.append(sys_msg_text)
                 current_tokens += sys_tokens
-
-            # 3. Add pruning summary (if it exists) to the final list
+
             if summary_text:
                 summary_tokens = self.lollmsClient.count_tokens(summary_text)
                 if max_allowed_tokens is None or current_tokens + summary_tokens <= max_allowed_tokens:
                     final_prompt_parts.append(summary_text)
                     current_tokens += summary_tokens

-            # 4. Build the message list in correct order, respecting token limits
             for msg in reversed(messages_to_render):
                 sender_str = msg.sender.replace(':', '').replace('!@>', '')
                 content = get_full_content(msg)
@@ -872,24 +934,24 @@ class LollmsDiscussion:
                     content += f"\n({len(msg.images)} image(s) attached)"
                 msg_text = f"!@>{sender_str}:\n{content}\n"
                 msg_tokens = self.lollmsClient.count_tokens(msg_text)
-
+
                 if max_allowed_tokens is not None and current_tokens + msg_tokens > max_allowed_tokens:
                     break
-
-                # Always insert at the beginning of the temporary list
+
                 message_parts.insert(0, msg_text)
                 current_tokens += msg_tokens
-
-            # 5. Combine system/summary prompts with the message parts
+
             final_prompt_parts.extend(message_parts)
             return "".join(final_prompt_parts).strip()
-
-
-        # --- OPENAI & OLLAMA CHAT FORMATS (remains the same and is correct) ---
+
+        # --- OPENAI & OLLAMA CHAT FORMATS ---
         messages = []
         if full_system_prompt:
-
-
+            if format_type == "markdown":
+                messages.append(f"system: {full_system_prompt}")
+            else:
+                messages.append({"role": "system", "content": full_system_prompt})
+
         for msg in branch:
             if msg.sender_type == 'user':
                 role = participants.get(msg.sender, "user")
@@ -897,6 +959,8 @@ class LollmsDiscussion:
                 role = participants.get(msg.sender, "assistant")

             content, images = get_full_content(msg), msg.images or []
+            images = build_image_dicts(images)
+
             if format_type == "openai_chat":
                 if images:
@@ -908,18 +972,29 @@ class LollmsDiscussion:
                     messages.append({"role": role, "content": content_parts})
                 else:
                     messages.append({"role": role, "content": content})
-
+
             elif format_type == "ollama_chat":
                 message_dict = {"role": role, "content": content}
+
                 base64_images = [img['data'] for img in images if img['type'] == 'base64']
                 if base64_images:
                     message_dict["images"] = base64_images
                 messages.append(message_dict)

+            elif format_type == "markdown":
+                # Create Markdown content based on the role and content
+                markdown_line = f"**{role.capitalize()}**: {content}\n"
+                if images:
+                    for img in images:
+                        img_data = img['data']
+                        url = f"![Image](data:image/jpeg;base64,{img_data})" if img['type'] == 'base64' else f"![Image]({img_data})"
+                        markdown_line += f"\n{url}\n"
+                messages.append(markdown_line)
+
             else:
                 raise ValueError(f"Unsupported export format_type: {format_type}")
-
-        return messages
+
+        return "\n".join(messages) if format_type == "markdown" else messages


     def summarize_and_prune(self, max_tokens: int, preserve_last_n: int = 4):
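With the `"markdown"` branch in place, `export` returns a single newline-joined string for markdown and keeps returning lists of dicts for the chat formats. A quick side-by-side sketch, assuming the `discussion` object from the earlier sketches:

```python
openai_msgs = discussion.export("openai_chat")   # list[dict]; images as content parts
ollama_msgs = discussion.export("ollama_chat")   # list[dict]; raw base64 under "images"
as_markdown = discussion.export("markdown")      # one "\n"-joined string
print(as_markdown[:500])
```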
@@ -966,4 +1041,27 @@ class LollmsDiscussion:
             self.pruning_point_id = pruning_point_message.id

         self.touch()
-        print(f"[INFO] Discussion auto-pruned. {len(messages_to_prune)} messages summarized. History preserved.")
+        print(f"[INFO] Discussion auto-pruned. {len(messages_to_prune)} messages summarized. History preserved.")
+
+    def switch_to_branch(self, branch_id):
+        self.active_branch_id = branch_id
+
+    def auto_title(self):
+        try:
+            if self.metadata is None:
+                self.metadata = {}
+            discussion = self.export("markdown")[0:1000]
+            prompt = f"""You are a title builder. Your oibjective is to build a title for the following discussion:
+{discussion}
+...
+"""
+            template = """{
+    "title": "An short but comprehensive discussion title"
+}"""
+            infos = self.lollmsClient.generate_code(prompt = prompt, template = template)
+            discussion_title = robust_json_parser(infos)["title"]
+            self.metadata['title'] = discussion_title
+            self.commit()
+            return discussion_title
+        except Exception as ex:
+            trace_exception(ex)
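`auto_title` feeds the first 1000 characters of the markdown export to `generate_code` with a JSON template, extracts the title with `robust_json_parser`, and persists it in the discussion metadata. Usage sketch:

```python
# Returns the generated title, or None if generation/parsing failed
# (the except branch only traces the exception).
title = discussion.auto_title()
if title:
    print(f"Discussion titled: {title}")
```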
lollms_client/lollms_llm_binding.py
CHANGED
@@ -302,3 +302,7 @@ class LollmsLLMBindingManager:
             list[str]: List of binding names.
         """
         return [binding_dir.name for binding_dir in self.llm_bindings_dir.iterdir() if binding_dir.is_dir() and (binding_dir / "__init__.py").exists()]
+
+def get_available_bindings():
+    bindings_dir = Path(__file__).parent/"llm_bindings"
+    return [binding_dir.name for binding_dir in bindings_dir.iterdir() if binding_dir.is_dir() and (binding_dir / "__init__.py").exists()]
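The new module-level `get_available_bindings` mirrors the manager method but needs no instance: it lists the subdirectories of `llm_bindings` that contain an `__init__.py`. Sketch:

```python
from lollms_client.lollms_llm_binding import get_available_bindings

# With the bindings added in this release, the result should include entries
# such as 'azure_openai', 'claude', 'gemini', 'grok', 'groq' (per the file list above).
print(get_available_bindings())
```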
lollms_client/lollms_types.py
CHANGED
@@ -1,7 +1,7 @@
 from enum import Enum
 class MSG_TYPE(Enum):
     # Messaging
-    MSG_TYPE_CHUNK
+    MSG_TYPE_CHUNK = 0 # A chunk of a message (used for classical chat)
     MSG_TYPE_CONTENT = 1 # A full message (for some personality the answer is sent in bulk)
     MSG_TYPE_CONTENT_INVISIBLE_TO_AI = 2 # A full message (for some personality the answer is sent in bulk)
     MSG_TYPE_CONTENT_INVISIBLE_TO_USER = 3 # A full message (for some personality the answer is sent in bulk)
@@ -36,6 +36,14 @@ class MSG_TYPE(Enum):
     MSG_TYPE_TOOL_CALL = 19# a tool call
     MSG_TYPE_TOOL_OUTPUT = 20# the output of the tool

+    MSG_TYPE_REASONING = 21# the ai shows its reasoning
+    MSG_TYPE_SCRATCHPAD = 22# the ai shows its scratchpad
+    MSG_TYPE_OBSERVATION = 23# the ai shows its reasoning
+
+    MSG_TYPE_ERROR = 24#a severe error hapened
+    MSG_TYPE_GENERATING_TITLE_START = 25#a severe error hapened
+    MSG_TYPE_GENERATING_TITLE_END = 26#a severe error hapened
+

 class SENDER_TYPES(Enum):
     SENDER_TYPES_USER = 0 # Sent by user
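The new members let a streaming callback distinguish reasoning, scratchpad, and title-generation events from ordinary chunks. A minimal callback sketch; the `(chunk, msg_type)` signature and boolean return are assumptions based on the `streaming_callback` kwarg mentioned earlier in this diff, not something this hunk defines:

```python
from lollms_client.lollms_types import MSG_TYPE

def on_stream(chunk: str, msg_type: MSG_TYPE) -> bool:
    """Assumed callback shape; returning False would stop streaming."""
    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        print(chunk, end="", flush=True)          # normal answer text
    elif msg_type in (MSG_TYPE.MSG_TYPE_REASONING, MSG_TYPE.MSG_TYPE_SCRATCHPAD):
        pass                                       # hide the model's working notes
    elif msg_type == MSG_TYPE.MSG_TYPE_ERROR:
        print(f"\n[error] {chunk}")
        return False
    return True
```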
lollms_client/lollms_utilities.py
CHANGED
@@ -11,6 +11,74 @@ import numpy as np
 import json
 from ascii_colors import ASCIIColors, trace_exception

+def dict_to_markdown(d, indent=0):
+    """
+    Formats a dictionary (with potential nested lists and dicts) as a markdown list.
+
+    Args:
+        d (dict): The dictionary to format.
+        indent (int): Current indentation level (used recursively).
+
+    Returns:
+        str: The formatted markdown string.
+    """
+    lines = []
+    indent_str = ' ' * (indent * 2)
+
+    for key, value in d.items():
+        if isinstance(value, dict):
+            # Recursively handle nested dictionary
+            lines.append(f"{indent_str}- {key}:")
+            lines.append(dict_to_markdown(value, indent + 1))
+        elif isinstance(value, list):
+            lines.append(f"{indent_str}- {key}:")
+            for item in value:
+                if isinstance(item, dict):
+                    # Render nested dicts in the list
+                    lines.append(dict_to_markdown(item, indent + 1))
+                else:
+                    # Render strings or other simple items in the list
+                    lines.append(f"{' ' * (indent + 1) * 2}- {item}")
+        else:
+            # Simple key-value pair
+            lines.append(f"{indent_str}- {key}: {value}")
+
+    return "\n".join(lines)
+
+def is_base64(s):
+    """Check if the string is a valid base64 encoded string."""
+    try:
+        # Try to decode and then encode back to check for validity
+        import base64
+        base64.b64decode(s)
+        return True
+    except Exception as e:
+        return False
+
+def build_image_dicts(images):
+    """
+    Convert a list of image strings (base64 or URLs) into a list of dictionaries with type and data.
+
+    Args:
+        images (list): List of image strings (either base64-encoded or URLs).
+
+    Returns:
+        list: List of dictionaries in the format {'type': 'base64'/'url', 'data': <image string>}.
+    """
+    result = []
+
+    for img in images:
+        if isinstance(img, str):
+            if is_base64(img):
+                result.append({'type': 'base64', 'data': img})
+            else:
+                # Assuming it's a URL if not base64
+                result.append({'type': 'url', 'data': img})
+        else:
+            result.append(img)
+
+    return result
+
 def robust_json_parser(json_string: str) -> dict:
     """
     Parses a possibly malformed JSON string using a series of corrective strategies.
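The three new utilities are self-contained, so their behavior can be shown directly. Note that `is_base64` is permissive: any string whose base64-alphabet characters happen to pad out to a multiple of four decodes without error, so some URL-like strings can be misclassified as base64. Sketch:

```python
from lollms_client.lollms_utilities import dict_to_markdown, build_image_dicts

print(dict_to_markdown({"model": "gpt-4", "tags": ["chat", {"nested": True}]}))
# - model: gpt-4
# - tags:
#   - chat
#   - nested: True

# Normalizes mixed image inputs into {'type': ..., 'data': ...} dicts;
# entries that are already dicts pass through unchanged.
imgs = build_image_dicts(["aGVsbG8=", {"type": "url", "data": "https://example.com/cat.png"}])
```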
lollms_client/mcp_bindings/remote_mcp/__init__.py
CHANGED
@@ -119,7 +119,26 @@ class RemoteMCPBinding(LollmsMCPBinding):
         future = asyncio.run_coroutine_threadsafe(coro, self._loop)
         return future.result(timeout)

-
+    def _prepare_headers(self, alias: str) -> Dict[str, str]:
+        """Prepares the headers dictionary from the server's auth_config."""
+        server_info = self.servers[alias]
+        auth_config = server_info.get("auth_config", {})
+        headers = {}
+        auth_type = auth_config.get("type")
+        if auth_type == "api_key":
+            api_key = auth_config.get("key")
+            header_name = auth_config.get("header_name", "X-API-Key") # Default to X-API-Key
+            if api_key:
+                headers[header_name] = api_key
+                ASCIIColors.info(f"{self.binding_name}: Using API Key authentication for server '{alias}'.")
+
+        elif auth_type == "bearer": # <-- NEW BLOCK
+            token = auth_config.get("token")
+            if token:
+                headers["Authorization"] = f"Bearer {token}"
+
+        return headers
+
     async def _initialize_connection_async(self, alias: str) -> bool:
         server_info = self.servers[alias]
         if server_info["initialized"]:
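`_prepare_headers` reads an `auth_config` entry from each server's stored info and supports two schemes. A configuration sketch; the `servers_infos` name is taken from the `set_auth_config` docstring later in this diff, and the URLs and keys are placeholders:

```python
# Placeholder configuration showing the two auth schemes _prepare_headers handles.
servers_infos = {
    "tools_server": {
        "url": "https://mcp.example.com/tools",
        "auth_config": {
            "type": "api_key",
            "key": "my-api-key",
            "header_name": "X-API-Key",   # optional; X-API-Key is the default
        },
    },
    "search_server": {
        "url": "https://mcp.example.com/search",
        "auth_config": {"type": "bearer", "token": "my-bearer-token"},
    },
}
```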
@@ -128,10 +147,13 @@ class RemoteMCPBinding(LollmsMCPBinding):
         server_url = server_info["url"]
         ASCIIColors.info(f"{self.binding_name}: Initializing connection to '{alias}' ({server_url})...")
         try:
+            # Prepare authentication headers
+            auth_headers = self._prepare_headers(alias)
+
             exit_stack = AsyncExitStack()

             client_streams = await exit_stack.enter_async_context(
-                streamablehttp_client(server_url)
+                streamablehttp_client(url=server_url, headers=auth_headers) # Pass the headers here
             )
             read_stream, write_stream, _ = client_streams
@@ -294,7 +316,7 @@ class RemoteMCPBinding(LollmsMCPBinding):

         try:
             # Ensure this specific server is connected before executing
-            self._ensure_initialized_sync(alias, timeout=
+            self._ensure_initialized_sync(alias, timeout=timeout)
             return self._run_async(self._execute_tool_async(alias, actual_tool_name, params), timeout=timeout)
         except (ConnectionError, RuntimeError) as e:
             return {"error": f"{self.binding_name}: Connection issue for server '{alias}': {e}", "status_code": 503}
@@ -342,4 +364,60 @@ class RemoteMCPBinding(LollmsMCPBinding):
         ASCIIColors.info(f"{self.binding_name}: Remote connection binding closed.")

     def get_binding_config(self) -> Dict[str, Any]:
-        return self.config
+        return self.config
+
+
+    def set_auth_config(self, alias: str, auth_config: Dict[str, Any]):
+        """
+        Dynamically updates the authentication configuration for a specific server.
+
+        If a connection was already active for this server, it will be closed to force
+        a new connection with the new authentication details on the next call.
+
+        Args:
+            alias (str): The alias of the server to update (the key in servers_infos).
+            auth_config (Dict[str, Any]): The new authentication configuration dictionary.
+                Example: {"type": "bearer", "token": "new-token-here"}
+        """
+        ASCIIColors.info(f"{self.binding_name}: Updating auth_config for server '{alias}'.")
+
+        server_info = self.servers.get(alias)
+        if not server_info:
+            raise ValueError(f"Server alias '{alias}' does not exist in the configuration.")
+
+        # Update the configuration in the binding's internal state
+        server_info["config"]["auth_config"] = auth_config
+
+        # If the server was already initialized, its connection is now obsolete.
+        # We must close it and mark it as uninitialized.
+        if server_info["initialized"]:
+            ASCIIColors.warning(f"{self.binding_name}: Existing connection for '{alias}' is outdated due to new authentication. It will be reset.")
+            try:
+                # Execute the close operation asynchronously on the event loop thread
+                self._run_async(self._close_connection_async(alias), timeout=10.0)
+            except Exception as e:
+                ASCIIColors.error(f"{self.binding_name}: Error while closing the outdated connection for '{alias}': {e}")
+                # Even on error, reset the state to force a new connection attempt
+                server_info.update({"session": None, "exit_stack": None, "initialized": False})
+
+
+    # --- NEW INTERNAL HELPER METHOD ---
+    async def _close_connection_async(self, alias: str):
+        """Cleanly closes the connection for a specific server alias."""
+        server_info = self.servers.get(alias)
+        if not server_info or not server_info.get("exit_stack"):
+            return # Nothing to do.
+
+        ASCIIColors.info(f"{self.binding_name}: Closing connection for '{alias}'...")
+        try:
+            await server_info["exit_stack"].aclose()
+        except Exception as e:
+            trace_exception(e)
+            ASCIIColors.error(f"{self.binding_name}: Exception while closing the exit_stack for '{alias}': {e}")
+        finally:
+            # Reset the state for this alias, no matter what.
+            server_info.update({
+                "session": None,
+                "exit_stack": None,
+                "initialized": False
+            })
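Typical use of `set_auth_config` is token rotation: swap in fresh credentials and let the binding tear down the stale connection so the next tool call reconnects with the new headers. Sketch, assuming `binding` is an already constructed `RemoteMCPBinding` for the servers shown earlier:

```python
# Rotate a bearer token on a live binding; any existing connection for the
# alias is closed and the next call re-initializes with the new header.
binding.set_auth_config("search_server", {"type": "bearer", "token": "new-token-here"})
```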