lollms-client 0.17.1__py3-none-any.whl → 0.19.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (25)
  1. examples/function_calling_with_local_custom_mcp.py +250 -0
  2. examples/local_mcp.py +171 -0
  3. examples/text_2_image.py +8 -3
  4. examples/text_2_image_diffusers.py +274 -0
  5. lollms_client/__init__.py +7 -6
  6. lollms_client/llm_bindings/llamacpp/__init__.py +8 -8
  7. lollms_client/lollms_core.py +345 -10
  8. lollms_client/lollms_mcp_binding.py +198 -0
  9. lollms_client/mcp_bindings/local_mcp/__init__.py +311 -0
  10. lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +74 -0
  11. lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +195 -0
  12. lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +107 -0
  13. lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +141 -0
  14. lollms_client/stt_bindings/whisper/__init__.py +1 -1
  15. lollms_client/tti_bindings/dalle/__init__.py +433 -0
  16. lollms_client/tti_bindings/diffusers/__init__.py +692 -0
  17. lollms_client/tti_bindings/gemini/__init__.py +0 -0
  18. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/METADATA +1 -1
  19. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/RECORD +22 -13
  20. examples/function_call/functions_call_with images.py +0 -52
  21. lollms_client/lollms_functions.py +0 -72
  22. lollms_client/lollms_tasks.py +0 -691
  23. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/WHEEL +0 -0
  24. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/licenses/LICENSE +0 -0
  25. {lollms_client-0.17.1.dist-info → lollms_client-0.19.0.dist-info}/top_level.txt +0 -0
lollms_client/lollms_core.py
@@ -10,12 +10,13 @@ from lollms_client.lollms_tti_binding import LollmsTTIBinding, LollmsTTIBindingM
  from lollms_client.lollms_stt_binding import LollmsSTTBinding, LollmsSTTBindingManager
  from lollms_client.lollms_ttv_binding import LollmsTTVBinding, LollmsTTVBindingManager
  from lollms_client.lollms_ttm_binding import LollmsTTMBinding, LollmsTTMBindingManager
+ from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager

- import re
+ import json
  from enum import Enum
  import base64
  import requests
- from typing import List, Optional, Callable, Union, Dict
+ from typing import List, Optional, Callable, Union, Dict, Any
  import numpy as np
  from pathlib import Path
  import os
@@ -32,7 +33,7 @@ class LollmsClient():
  models_path: Optional[str] = None, # Shared models folder path (for local file based bindings) default for all bindings if not specified
  model_name: str = "",
  llm_bindings_dir: Path = Path(__file__).parent / "llm_bindings",
- llm_binding_config: Optional[Dict[str, any]] = None, # Renamed for clarity
+ llm_binding_config: Optional[Dict[str, any]] = None,

  # Optional Modality Binding Names
  tts_binding_name: Optional[str] = None,
@@ -40,6 +41,7 @@ class LollmsClient():
  stt_binding_name: Optional[str] = None,
  ttv_binding_name: Optional[str] = None,
  ttm_binding_name: Optional[str] = None,
+ mcp_binding_name: Optional[str] = None,

  # Modality Binding Directories
  tts_bindings_dir: Path = Path(__file__).parent / "tts_bindings",
@@ -47,13 +49,15 @@ class LollmsClient():
  stt_bindings_dir: Path = Path(__file__).parent / "stt_bindings",
  ttv_bindings_dir: Path = Path(__file__).parent / "ttv_bindings",
  ttm_bindings_dir: Path = Path(__file__).parent / "ttm_bindings",
+ mcp_bindings_dir: Path = Path(__file__).parent / "mcp_bindings",

  # Configurations
- tts_binding_config: Optional[Dict[str, any]] = None, # Renamed for clarity
- tti_binding_config: Optional[Dict[str, any]] = None, # Renamed for clarity
- stt_binding_config: Optional[Dict[str, any]] = None, # Renamed for clarity
- ttv_binding_config: Optional[Dict[str, any]] = None, # Renamed for clarity
- ttm_binding_config: Optional[Dict[str, any]] = None, # Renamed for clarity
+ tts_binding_config: Optional[Dict[str, any]] = None,
+ tti_binding_config: Optional[Dict[str, any]] = None,
+ stt_binding_config: Optional[Dict[str, any]] = None,
+ ttv_binding_config: Optional[Dict[str, any]] = None,
+ ttm_binding_config: Optional[Dict[str, any]] = None,
+ mcp_binding_config: Optional[Dict[str, any]] = None,

  # General Parameters (mostly defaults for LLM generation)
  service_key: Optional[str] = None, # Shared service key/client_id
@@ -147,12 +151,14 @@ class LollmsClient():
  self.stt_binding_manager = LollmsSTTBindingManager(stt_bindings_dir)
  self.ttv_binding_manager = LollmsTTVBindingManager(ttv_bindings_dir)
  self.ttm_binding_manager = LollmsTTMBindingManager(ttm_bindings_dir)
+ self.mcp_binding_manager = LollmsMCPBindingManager(mcp_bindings_dir)

  self.tts: Optional[LollmsTTSBinding] = None
  self.tti: Optional[LollmsTTIBinding] = None
  self.stt: Optional[LollmsSTTBinding] = None
  self.ttv: Optional[LollmsTTVBinding] = None
  self.ttm: Optional[LollmsTTMBinding] = None
+ self.mcp: Optional[LollmsMCPBinding] = None

  if tts_binding_name:
  self.tts = self.tts_binding_manager.create_binding(
@@ -213,6 +219,19 @@ class LollmsClient():
  if self.ttm is None:
  ASCIIColors.warning(f"Failed to create TTM binding: {ttm_binding_name}. Available: {self.ttm_binding_manager.get_available_bindings()}")

+ if mcp_binding_name:
+ if mcp_binding_config:
+ self.mcp = self.mcp_binding_manager.create_binding(
+ mcp_binding_name,
+ **mcp_binding_config
+ )
+ else:
+ self.mcp = self.mcp_binding_manager.create_binding(
+ mcp_binding_name
+ )
+ if self.mcp is None:
+ ASCIIColors.warning(f"Failed to create MCP binding: {mcp_binding_name}. Available: {self.mcp_binding_manager.get_available_bindings()}")
+
  # --- Store Default Generation Parameters ---
  self.default_ctx_size = ctx_size
  self.default_n_predict = n_predict
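The constructor wiring above makes MCP support opt-in: nothing changes unless mcp_binding_name is supplied. A minimal usage sketch follows (hedged: only the parameters added in this release are shown and the other LollmsClient arguments are omitted; "local_mcp" matches the new mcp_bindings/local_mcp package shipped in 0.19.0):

    from lollms_client import LollmsClient

    # Only the new 0.19.0 parameters are shown; the usual LLM binding/model
    # arguments are omitted here. mcp_binding_config is optional and is forwarded
    # as **kwargs to the binding's constructor by create_binding().
    client = LollmsClient(
        model_name="my-model",
        mcp_binding_name="local_mcp",
        mcp_binding_config=None,
    )

    if client.mcp is None:
        print("MCP binding failed to load; generate_with_mcp() will report an error.")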
@@ -340,7 +359,7 @@ class LollmsClient():
  return self.binding.load_model(model_name)
  raise RuntimeError("LLM binding not initialized.")

- def get_available_llm_bindings(self) -> List[str]: # Renamed for clarity
+ def get_available_llm_bindings(self) -> List[str]:
  """
  Get list of available LLM binding names.

@@ -519,6 +538,322 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
  codes = self.extract_code_blocks(response, format=code_tag_format)
  return codes

+ # --- Function Calling with MCP ---
+ def generate_with_mcp(
+ self,
+ prompt: str,
+ discussion_history: Optional[List[Dict[str, str]]] = None, # e.g. [{"role":"user", "content":"..."}, {"role":"assistant", "content":"..."}]
+ images: Optional[List[str]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None, # List of MCP tool definitions
+ max_tool_calls: int = 5,
+ max_llm_iterations: int = 10, # Safety break for LLM deciding to call tools repeatedly
+ tool_call_decision_temperature: float = 0.1, # Lower temp for more deterministic decision making
+ final_answer_temperature: float = None, # Use instance default if None
+ streaming_callback: Optional[Callable[[str, MSG_TYPE, Optional[Dict], Optional[List]], bool]] = None,
+ interactive_tool_execution: bool = False, # If true, prompts user before executing a tool
+ **llm_generation_kwargs
+ ) -> Dict[str, Any]:
+ """
+ Generates a response that may involve calling one or more tools via MCP.
+
+ Args:
+ prompt (str): The user's initial prompt.
+ discussion_history (Optional[List[Dict[str, str]]]): Previous turns of conversation.
+ images (Optional[List[str]]): Images provided with the current user prompt.
+ tools (Optional[List[Dict[str, Any]]]): A list of MCP tool definitions available for this call.
+ If None, tools will be discovered from the MCP binding.
+ max_tool_calls (int): Maximum number of distinct tool calls allowed in one interaction turn.
+ max_llm_iterations (int): Maximum number of times the LLM can decide to call a tool
+ before being forced to generate a final answer.
+ tool_call_decision_temperature (float): Temperature for LLM when deciding on tool calls.
+ final_answer_temperature (float): Temperature for LLM when generating the final answer.
+ streaming_callback (Optional[Callable]): Callback for streaming LLM responses (tool decisions/final answer).
+ Signature: (chunk_str, msg_type, metadata_dict, history_list_of_dicts_for_this_turn) -> bool
+ interactive_tool_execution (bool): If True, ask user for confirmation before executing each tool.
+
+ Returns:
+ Dict[str, Any]: A dictionary containing:
+ - "final_answer" (str): The LLM's final textual answer.
+ - "tool_calls" (List[Dict]): A list of tools called, their params, and results.
+ - "error" (Optional[str]): Error message if something went wrong.
+ """
+ if not self.binding:
+ return {"final_answer": "", "tool_calls": [], "error": "LLM binding not initialized."}
+ if not self.mcp:
+ return {"final_answer": "", "tool_calls": [], "error": "MCP binding not initialized."}
+
+ turn_history: List[Dict[str, Any]] = [] # Tracks this specific turn's interactions (LLM thoughts, tool calls, tool results)
+
+ # 1. Discover tools if not provided
+ if tools is None:
+ try:
+ tools = self.mcp.discover_tools()
+ if not tools:
+ ASCIIColors.warning("No MCP tools discovered by the binding.")
+ except Exception as e_disc:
+ return {"final_answer": "", "tool_calls": [], "error": f"Failed to discover MCP tools: {e_disc}"}
+
+ if not tools: # If still no tools after discovery attempt
+ ASCIIColors.info("No tools available for function calling. Generating direct response.")
+ final_answer = self.remove_thinking_blocks(self.generate_text(
+ prompt=prompt,
+ system_prompt= (discussion_history[0]['content'] if discussion_history and discussion_history[0]['role'] == 'system' else "") + "\nYou are a helpful assistant.", # Basic system prompt
+ images=images,
+ stream=streaming_callback is not None, # stream if callback is provided
+ streaming_callback=lambda chunk, msg_type: streaming_callback(chunk, msg_type, None, turn_history) if streaming_callback else None, # Adapt callback
+ temperature=final_answer_temperature if final_answer_temperature is not None else self.default_temperature,
+ **(llm_generation_kwargs or {})
+ ))
+ if isinstance(final_answer, dict) and "error" in final_answer: # Handle generation error
+ return {"final_answer": "", "tool_calls": [], "error": final_answer["error"]}
+ return {"final_answer": final_answer, "tool_calls": [], "error": None}
+
+
+ formatted_tools_list = "\n".join([
+ f"- Name: {t.get('name')}\n Description: {t.get('description')}\n Input Schema: {json.dumps(t.get('input_schema'))}"
+ for t in tools
+ ])
+
+ current_conversation: List[Dict[str, str]] = []
+ if discussion_history:
+ current_conversation.extend(discussion_history)
+ current_conversation.append({"role": "user", "content": prompt})
+ if images: # Add image representations to the last user message if supported by LLM and chat format
+ # This part is highly dependent on how the specific LLM binding handles images in chat.
+ # For simplicity, we'll assume if images are passed, the underlying generate_text handles it.
+ # A more robust solution would modify current_conversation[-1]['content'] structure.
+ ASCIIColors.info("Images provided. Ensure LLM binding's generate_text handles them with chat history.")
+
+
+ tool_calls_made_this_turn = []
+ llm_iterations = 0
+
+ while llm_iterations < max_llm_iterations:
+ llm_iterations += 1
+
+ # 2. Construct prompt for LLM to decide on tool call or direct answer
+ # We need to convert current_conversation into a single string prompt for `generate_code`
+ # or adapt `generate_code` to take a message list if underlying LLM supports chat for structured output.
+ # For now, let's assume `generate_code` takes a flat prompt.
+
+ # Create a string representation of the conversation history
+ history_str = ""
+ for msg in current_conversation:
+ role_prefix = self.user_custom_header(msg["role"]) if msg["role"]=="user" else self.ai_custom_header(msg["role"]) if msg["role"]=="assistant" else self.system_custom_header(msg["role"]) if msg["role"]=="system" else "!@>unknown:"
+ history_str += f"{role_prefix}{msg['content']}\n"
+
+ # Add tool execution results from previous iterations in this turn to the history string
+ for tc_info in tool_calls_made_this_turn:
+ if tc_info.get("result"): # Only add if there's a result (successful or error)
+ history_str += f"{self.ai_full_header}(Executed tool '{tc_info['name']}' with params {tc_info['params']}. Result: {json.dumps(tc_info['result'])})\n"
+
+
+ decision_prompt_template = f"""You are an AI assistant that can use tools to answer user requests.
+ Available tools:
+ {formatted_tools_list}
+
+ Current conversation:
+ {history_str}
+
+ Based on the available tools and the current conversation, decide the next step.
+ Respond with a JSON object containing ONE of the following structures:
+ 1. If you need to use a tool:
+ {{"action": "call_tool", "tool_name": "<name_of_tool_to_call>", "tool_params": {{<parameters_for_tool_as_json_object>}}}}
+ 2. If you can answer directly without using a tool OR if you have sufficient information from previous tool calls:
+ {{"action": "final_answer"}}
+ 3. If the user's request is unclear or you need more information before deciding:
+ {{"action": "clarify", "clarification_request": "<your_question_to_the_user>"}}
+ """ # No {self.ai_full_header} here, generate_code will get raw JSON
+
+ if streaming_callback:
+ streaming_callback(f"LLM deciding next step (iteration {llm_iterations})...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "decision_making"}, turn_history)
+
+ # Use generate_code to get structured JSON output from LLM
+ # Note: generate_code itself uses generate_text. We are asking for JSON here.
+ raw_llm_decision_json = self.generate_text(
+ prompt=decision_prompt_template, # This is the full prompt for the LLM
+ n_predict=512, # Reasonable size for decision JSON
+ temperature=tool_call_decision_temperature,
+ images=images
+ # `images` are part of the history_str if relevant to the binding
+ # streaming_callback=None, # Decisions are usually not streamed chunk by chunk
+ )
+ if streaming_callback:
+ streaming_callback(f"LLM decision received.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "decision_making"}, turn_history)
+
+
+ if not raw_llm_decision_json:
+ ASCIIColors.error("LLM failed to provide a decision JSON.")
+ turn_history.append({"type": "error", "content": "LLM failed to provide a decision."})
+ return {"final_answer": "I'm sorry, I encountered an issue trying to process your request.", "tool_calls": tool_calls_made_this_turn, "error": "LLM decision JSON was empty."}
+
+ processed_raw_json = raw_llm_decision_json.strip() # Strip whitespace first
+ try:
+ llm_decision = json.loads(processed_raw_json)
+ turn_history.append({"type": "llm_decision", "content": llm_decision})
+ except json.JSONDecodeError:
+ ASCIIColors.error(f"Failed to parse LLM decision JSON: {raw_llm_decision_json}")
+ try:
+ decoder = json.JSONDecoder()
+ # Try to decode the first JSON object from the (stripped) string
+ llm_decision, end_index = decoder.raw_decode(processed_raw_json)
+ turn_history.append({"type": "llm_decision_extracted", "content": llm_decision, "raw_trimmed": processed_raw_json[:end_index]})
+
+ remaining_text = processed_raw_json[end_index:].strip()
+ if remaining_text:
+ ASCIIColors.warning(f"LLM output contained additional text after the first JSON object: '{remaining_text}'. Processing only the first object.")
+ turn_history.append({"type": "llm_extra_output_ignored", "content": remaining_text})
+ except json.JSONDecodeError as e_inner:
+ ASCIIColors.error(f"Failed to parse LLM decision JSON even after attempting to extract first object: {raw_llm_decision_json}. Error: {e_inner}")
+ turn_history.append({"type": "error", "content": "Failed to parse LLM decision JSON.", "raw_json": raw_llm_decision_json, "error_details": str(e_inner)})
+ # Provide a generic error message, as the LLM's output was malformed.
+ # Adding the raw output or a snippet to the conversation history might help the LLM recover or inform the user.
+ current_conversation.append({
+ "role": "assistant",
+ "content": "(I encountered an internal error trying to understand my next step. I will try to answer directly based on what I have so far.)"
+ })
+ break # Break to generate final answer with current info
+
+ if llm_decision is None: # If parsing failed and couldn't recover
+ return {"final_answer": "I'm sorry, I had trouble understanding the next step due to a formatting issue.", "tool_calls": tool_calls_made_this_turn, "error": "Invalid JSON from LLM for decision."}
+
+ action = llm_decision.get("action")
+
+ if action == "call_tool":
+ if len(tool_calls_made_this_turn) >= max_tool_calls:
+ ASCIIColors.warning("Maximum tool calls reached for this turn. Forcing final answer.")
+ current_conversation.append({"role":"assistant", "content":"(Max tool calls reached. I will now try to formulate an answer based on available information.)"})
+ break # Exit loop to generate final answer
+
+ tool_name = llm_decision.get("tool_name")
+ tool_params = llm_decision.get("tool_params", {})
+
+ if not tool_name:
+ ASCIIColors.warning("LLM decided to call a tool but didn't specify tool_name.")
+ current_conversation.append({"role":"assistant", "content":"(I decided to use a tool, but I'm unsure which one. Could you clarify?)"})
+ break # Or ask LLM to try again without this faulty decision in history
+
+ tool_call_info = {"type": "tool_call_request", "name": tool_name, "params": tool_params}
+ turn_history.append(tool_call_info)
+ if streaming_callback:
+ streaming_callback(f"LLM requests to call tool: {tool_name} with params: {tool_params}", MSG_TYPE.MSG_TYPE_INFO, tool_call_info, turn_history)
+
+ # Interactive execution if enabled
+ if interactive_tool_execution:
+ try:
+ user_confirmation = input(f"AI wants to execute tool '{tool_name}' with params {tool_params}. Allow? (yes/no/details): ").lower()
+ if user_confirmation == "details":
+ tool_def_for_details = next((t for t in tools if t.get("name") == tool_name), None)
+ print(f"Tool details: {json.dumps(tool_def_for_details, indent=2)}")
+ user_confirmation = input(f"Allow execution of '{tool_name}'? (yes/no): ").lower()
+
+ if user_confirmation != "yes":
+ ASCIIColors.info("Tool execution cancelled by user.")
+ tool_result = {"error": "Tool execution cancelled by user."}
+ # Add this info to conversation for LLM
+ current_conversation.append({"role": "assistant", "content": f"(Tool '{tool_name}' execution was cancelled by the user. What should I do next?)"})
+ tool_call_info["result"] = tool_result # Record cancellation
+ tool_calls_made_this_turn.append(tool_call_info)
+ continue # Back to LLM for next decision
+ except Exception as e_input: # Catch issues with input() e.g. in non-interactive env
+ ASCIIColors.warning(f"Error during interactive confirmation: {e_input}. Proceeding without confirmation.")
+
+
+ if streaming_callback:
+ streaming_callback(f"Executing tool: {tool_name}...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "tool_execution", "tool_name": tool_name}, turn_history)
+
+ tool_result = self.mcp.execute_tool(tool_name, tool_params, lollms_client_instance=self)
+
+ tool_call_info["result"] = tool_result # Add result to this call's info
+ tool_calls_made_this_turn.append(tool_call_info) # Log the completed call
+
+ if streaming_callback:
+ streaming_callback(f"Tool {tool_name} execution finished. Result: {json.dumps(tool_result)}", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "tool_execution", "tool_name": tool_name, "result": tool_result}, turn_history)
+
+ # Add tool execution result to conversation for the LLM
+ # The format of this message can influence how the LLM uses the tool output.
+ # current_conversation.append({"role": "tool_result", "tool_name": tool_name, "content": json.dumps(tool_result)}) # More structured
+ current_conversation.append({"role": "assistant", "content": f"(Tool '{tool_name}' executed. Result: {json.dumps(tool_result)})"})
+
+
+ elif action == "clarify":
+ clarification_request = llm_decision.get("clarification_request", "I need more information. Could you please clarify?")
+ if streaming_callback:
+ streaming_callback(clarification_request, MSG_TYPE.MSG_TYPE_FULL, {"type": "clarification_request"}, turn_history)
+ turn_history.append({"type":"clarification_request_sent", "content": clarification_request})
+ return {"final_answer": clarification_request, "tool_calls": tool_calls_made_this_turn, "error": None}
+
+ elif action == "final_answer":
+ ASCIIColors.info("LLM decided to formulate a final answer.")
+ current_conversation.append({"role":"assistant", "content":"(I will now formulate the final answer based on the information gathered.)"}) # Inform LLM's "thought process"
+ break # Exit loop to generate final answer
+
+ else:
+ ASCIIColors.warning(f"LLM returned unknown action: {action}")
+ current_conversation.append({"role":"assistant", "content":f"(Received an unexpected decision: {action}. I will try to answer directly.)"})
+ break # Exit loop
+
+ # Safety break if too many iterations without reaching final answer or max_tool_calls
+ if llm_iterations >= max_llm_iterations:
+ ASCIIColors.warning("Max LLM iterations reached. Forcing final answer.")
+ current_conversation.append({"role":"assistant", "content":"(Max iterations reached. I will now try to formulate an answer.)"})
+ break
+
+ # 3. Generate final answer if LLM decided to, or if loop broke
+ if streaming_callback:
+ streaming_callback("LLM generating final answer...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "final_answer_generation"}, turn_history)
+
+ # Construct the final prompt string for generate_text from current_conversation
+ final_prompt_str = ""
+ final_system_prompt = ""
+
+ # Consolidate system messages if any
+ interim_history_for_final_answer = []
+ for msg in current_conversation:
+ if msg["role"] == "system":
+ final_system_prompt += msg["content"] + "\n"
+ else:
+ interim_history_for_final_answer.append(msg)
+
+ if not any(msg['role'] == 'user' for msg in interim_history_for_final_answer): # Ensure there's a user turn if only system + tool calls
+ interim_history_for_final_answer.append({'role':'user', 'content': prompt}) # Add original prompt if lost
+
+
+ # The generate_text method needs a single prompt and an optional system_prompt.
+ # We need to format the interim_history_for_final_answer into a single prompt string,
+ # or modify generate_text to accept a list of messages.
+ # For now, flatten to string:
+ current_prompt_for_final_answer = ""
+ for i, msg in enumerate(interim_history_for_final_answer):
+ role_prefix = self.user_custom_header(msg["role"]) if msg["role"]=="user" else self.ai_custom_header(msg["role"]) if msg["role"]=="assistant" else f"!@>{msg['role']}:"
+ current_prompt_for_final_answer += f"{role_prefix}{msg['content']}"
+ if i < len(interim_history_for_final_answer) -1 : # Add newline separator except for last
+ current_prompt_for_final_answer += "\n"
+ # Add AI header to prompt AI to speak
+ current_prompt_for_final_answer += f"\n{self.ai_full_header}"
+
+
+ final_answer_text = self.generate_text(
+ prompt=current_prompt_for_final_answer, # Pass the conversation history as the prompt
+ system_prompt=final_system_prompt.strip(),
+ images=images if not tool_calls_made_this_turn else None, # Only pass initial images if no tool calls happened (context might be lost)
+ stream=streaming_callback is not None,
+ streaming_callback=lambda chunk, msg_type: streaming_callback(chunk, msg_type, {"type":"final_answer_chunk"}, turn_history) if streaming_callback else None,
+ temperature=final_answer_temperature if final_answer_temperature is not None else self.default_temperature,
+ **(llm_generation_kwargs or {})
+ )
+
+ if streaming_callback:
+ streaming_callback("Final answer generation complete.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "final_answer_generation"}, turn_history)
+
+ if isinstance(final_answer_text, dict) and "error" in final_answer_text: # Handle generation error
+ turn_history.append({"type":"error", "content":f"LLM failed to generate final answer: {final_answer_text['error']}"})
+ return {"final_answer": "", "tool_calls": tool_calls_made_this_turn, "error": final_answer_text["error"]}
+
+ turn_history.append({"type":"final_answer_generated", "content":final_answer_text})
+ return {"final_answer": final_answer_text, "tool_calls": tool_calls_made_this_turn, "error": None}
+
+
  def generate_code(
  self,
  prompt,
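Taken together, generate_with_mcp runs a decide/act loop: the LLM returns a JSON decision of call_tool, clarify, or final_answer; tools are executed through the MCP binding; and each result is folded back into the conversation until a final answer is produced. A hedged usage sketch (the callback body and the "local_mcp" binding name are assumptions, not part of the diff):

    from lollms_client import LollmsClient

    def on_event(chunk, msg_type, metadata, turn_history):
        # Matches the documented callback signature:
        # (chunk_str, msg_type, metadata_dict, turn_history) -> bool
        print(chunk, end="", flush=True)
        return True  # returning False asks generation to stop

    client = LollmsClient(model_name="my-model", mcp_binding_name="local_mcp")
    result = client.generate_with_mcp(
        prompt="Search the web for today's weather in Paris and summarize it.",
        streaming_callback=on_event,
        interactive_tool_execution=True,  # confirm each tool run on the console
    )

    print(result["final_answer"])
    for call in result["tool_calls"]:
        print(call["name"], call["params"], call["result"])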
@@ -540,7 +875,7 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
  Handles potential continuation if the code block is incomplete.
  """

- system_prompt = f"""{self.system_full_header}Act as a code generation assistant that generates code from user prompt."""
+ system_prompt = f"""Act as a code generation assistant that generates code from user prompt."""

  if template:
  system_prompt += "Here is a template of the answer:\n"
lollms_client/lollms_mcp_binding.py (new file)
@@ -0,0 +1,198 @@
+ # lollms_client/lollms_mcp_binding.py
+ from abc import ABC, abstractmethod
+ import importlib
+ from pathlib import Path
+ from typing import Optional, List, Dict, Any, Union
+ from ascii_colors import trace_exception, ASCIIColors
+
+ class LollmsMCPBinding(ABC):
+ """
+ Abstract Base Class for LOLLMS Model Context Protocol (MCP) Bindings.
+
+ MCP bindings are responsible for interacting with MCP-compliant tool servers
+ or emulating MCP tool interactions locally. They handle tool discovery
+ and execution based on requests, typically orchestrated by an LLM.
+ """
+
+ def __init__(self,
+ binding_name: str
+ ):
+ """
+ Initialize the LollmsMCPBinding.
+
+ Args:
+ binding_name (str): The unique name of this binding.
+ """
+ self.binding_name = binding_name
+
+
+ @abstractmethod
+ def discover_tools(self, **kwargs) -> List[Dict[str, Any]]:
+ """
+ Discover available tools compliant with the MCP specification.
+
+ Each tool definition should follow the MCP standard, typically including:
+ - name (str): Unique name of the tool.
+ - description (str): Natural language description of what the tool does.
+ - input_schema (dict): JSON schema defining the tool's input parameters.
+ - output_schema (dict): JSON schema defining the tool's output.
+ (Other MCP fields like `prompts`, `resources` could be supported by specific bindings)
+
+ Args:
+ **kwargs: Additional arguments specific to the binding's discovery mechanism
+ (e.g., tool_server_url, specific_tool_names_to_filter).
+
+ Returns:
+ List[Dict[str, Any]]: A list of tool definitions. Each dictionary
+ should conform to the MCP tool definition structure.
+ Returns an empty list if no tools are found or an error occurs.
+ """
+ pass
+
+ @abstractmethod
+ def execute_tool(self,
+ tool_name: str,
+ params: Dict[str, Any],
+ **kwargs) -> Dict[str, Any]:
+ """
+ Execute a specified tool with the given parameters.
+
+ The execution should adhere to the input and output schemas defined in the
+ tool's MCP definition.
+
+ Args:
+ tool_name (str): The name of the tool to execute.
+ params (Dict[str, Any]): A dictionary of parameters to pass to the tool,
+ conforming to the tool's `input_schema`.
+ **kwargs: Additional arguments specific to the binding's execution mechanism
+ (e.g., timeout, user_context).
+
+ Returns:
+ Dict[str, Any]: The result of the tool execution, conforming to the
+ tool's `output_schema`. If an error occurs during
+ execution, the dictionary should ideally include an 'error'
+ key with a descriptive message.
+ Example success: {"result": "Weather is sunny"}
+ Example error: {"error": "API call failed", "details": "..."}
+ """
+ pass
+
+ def get_binding_config(self) -> Dict[str, Any]:
+ """
+ Returns the configuration of the binding.
+
+ Returns:
+ Dict[str, Any]: The configuration dictionary.
+ """
+ return self.config
+
+ class LollmsMCPBindingManager:
+ """
+ Manages discovery and instantiation of MCP bindings.
+ """
+
+ def __init__(self, mcp_bindings_dir: Union[str, Path] = Path(__file__).parent / "mcp_bindings"):
+ """
+ Initialize the LollmsMCPBindingManager.
+
+ Args:
+ mcp_bindings_dir (Union[str, Path]): Directory containing MCP binding implementations.
+ Defaults to "mcp_bindings" subdirectory relative to this file.
+ """
+ self.mcp_bindings_dir = Path(mcp_bindings_dir)
+ if not self.mcp_bindings_dir.is_absolute():
+ # If relative, assume it's relative to the parent of this file (lollms_client directory)
+ self.mcp_bindings_dir = (Path(__file__).parent.parent / mcp_bindings_dir).resolve()
+
+ self.available_bindings: Dict[str, type[LollmsMCPBinding]] = {}
+ ASCIIColors.info(f"LollmsMCPBindingManager initialized. Bindings directory: {self.mcp_bindings_dir}")
+
+
+ def _load_binding_class(self, binding_name: str) -> Optional[type[LollmsMCPBinding]]:
+ """
+ Dynamically load a specific MCP binding class from the mcp_bindings directory.
+ Assumes each binding is in a subdirectory named after the binding_name,
+ and has an __init__.py that defines a `BindingName` variable and the binding class.
+ """
+ binding_dir = self.mcp_bindings_dir / binding_name
+ if binding_dir.is_dir():
+ init_file_path = binding_dir / "__init__.py"
+ if init_file_path.exists():
+ try:
+ module_spec = importlib.util.spec_from_file_location(
+ f"lollms_client.mcp_bindings.{binding_name}",
+ str(init_file_path)
+ )
+ if module_spec and module_spec.loader:
+ module = importlib.util.module_from_spec(module_spec)
+ module_spec.loader.exec_module(module)
+
+ # Ensure BindingName is defined in the module, and it matches the class name
+ if not hasattr(module, 'BindingName'):
+ ASCIIColors.warning(f"Binding '{binding_name}' __init__.py does not define BindingName variable.")
+ return None
+
+ binding_class_name = module.BindingName
+ if not hasattr(module, binding_class_name):
+ ASCIIColors.warning(f"Binding '{binding_name}' __init__.py defines BindingName='{binding_class_name}', but class not found.")
+ return None
+
+ binding_class = getattr(module, binding_class_name)
+ if not issubclass(binding_class, LollmsMCPBinding):
+ ASCIIColors.warning(f"Class {binding_class_name} in {binding_name} is not a subclass of LollmsMCPBinding.")
+ return None
+ return binding_class
+ else:
+ ASCIIColors.warning(f"Could not create module spec for MCP binding '{binding_name}'.")
+ except Exception as e:
+ ASCIIColors.error(f"Failed to load MCP binding '{binding_name}': {e}")
+ trace_exception(e)
+ return None
+
+ def create_binding(self,
+ binding_name: str,
+ **kwargs
+ ) -> Optional[LollmsMCPBinding]:
+ """
+ Create an instance of a specific MCP binding.
+
+ Args:
+ binding_name (str): Name of the MCP binding to create.
+ config (Optional[Dict[str, Any]]): Configuration for the binding.
+ lollms_paths (Optional[Dict[str, Union[str, Path]]]): LOLLMS specific paths.
+
+
+ Returns:
+ Optional[LollmsMCPBinding]: Binding instance or None if creation failed.
+ """
+ if binding_name not in self.available_bindings:
+ binding_class = self._load_binding_class(binding_name)
+ if binding_class:
+ self.available_bindings[binding_name] = binding_class
+ else:
+ ASCIIColors.error(f"MCP binding '{binding_name}' class not found or failed to load.")
+ return None
+
+ binding_class_to_instantiate = self.available_bindings.get(binding_name)
+ if binding_class_to_instantiate:
+ try:
+ return binding_class_to_instantiate(
+ **kwargs
+ )
+ except Exception as e:
+ ASCIIColors.error(f"Failed to instantiate MCP binding '{binding_name}': {e}")
+ trace_exception(e)
+ return None
+ return None
+
+ def get_available_bindings(self) -> List[str]:
+ """
+ Return list of available MCP binding names based on subdirectories.
+ This method scans the directory structure.
+ """
+ available = []
+ if self.mcp_bindings_dir.is_dir():
+ for item in self.mcp_bindings_dir.iterdir():
+ if item.is_dir() and (item / "__init__.py").exists():
+ available.append(item.name)
+ return available
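The manager loads bindings by convention: a subdirectory of mcp_bindings/ whose __init__.py defines a BindingName variable naming a LollmsMCPBinding subclass. A minimal custom binding might look like the following sketch (the echo_mcp name and echo tool are illustrative only, not part of the release):

    # Hedged sketch, e.g. lollms_client/mcp_bindings/echo_mcp/__init__.py
    # (layout assumed from _load_binding_class above).
    from typing import Any, Dict, List
    from lollms_client.lollms_mcp_binding import LollmsMCPBinding

    BindingName = "EchoMCPBinding"  # must name the class defined below

    class EchoMCPBinding(LollmsMCPBinding):
        def __init__(self, **kwargs):
            super().__init__(binding_name="echo_mcp")
            self.config = kwargs  # returned by get_binding_config()

        def discover_tools(self, **kwargs) -> List[Dict[str, Any]]:
            # One trivial tool whose schemas follow the MCP fields documented above.
            return [{
                "name": "echo",
                "description": "Returns the provided text unchanged.",
                "input_schema": {"type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"]},
                "output_schema": {"type": "object", "properties": {"result": {"type": "string"}}},
            }]

        def execute_tool(self, tool_name: str, params: Dict[str, Any], **kwargs) -> Dict[str, Any]:
            # generate_with_mcp passes lollms_client_instance=self; it is absorbed by **kwargs here.
            if tool_name != "echo":
                return {"error": f"Unknown tool: {tool_name}"}
            return {"result": params.get("text", "")}

With such a module in place, LollmsClient(..., mcp_binding_name="echo_mcp") would discover and execute the tool through the loop shown in generate_with_mcp.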