sonika-langchain-bot 0.0.12__py3-none-any.whl → 0.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -142,28 +142,44 @@ class LangChainBot:
         return self.language_model.model
 
     def _build_modern_instructions(self) -> str:
-        """
-        Build modern system instructions with automatic tool descriptions.
-
-        This method enhances the base instructions with professional tool descriptions
-        that leverage native function calling capabilities, eliminating the need for
-        manual tool instruction formatting.
-
-        Returns:
-            str: Complete system instructions including tool descriptions
-        """
         instructions = self.base_instructions
 
         if self.tools:
-            tools_description = "\n\nYou have access to the following tools:\n"
+            tools_description = "\n\n# Available Tools\n\n"
+
             for tool in self.tools:
-                tools_description += f"- {tool.name}: {tool.description}\n"
+                tools_description += f"## {tool.name}\n"
+                tools_description += f"**Description:** {tool.description}\n\n"
+
+                # Option 1: Tool with an explicit args_schema (your HTTPTool)
+                if hasattr(tool, 'args_schema') and tool.args_schema:
+                    if hasattr(tool.args_schema, '__fields__'):
+                        tools_description += f"**Parameters:**\n"
+                        for field_name, field_info in tool.args_schema.__fields__.items():
+                            required = "**REQUIRED**" if field_info.is_required() else "*optional*"
+                            tools_description += f"- `{field_name}` ({field_info.annotation.__name__}, {required}): {field_info.description}\n"
+
+                # Option 2: Basic tool without args_schema (EmailTool)
+                elif hasattr(tool, '_run'):
+                    tools_description += f"**Parameters:**\n"
+                    import inspect
+                    sig = inspect.signature(tool._run)
+                    for param_name, param in sig.parameters.items():
+                        if param_name != 'self':
+                            param_type = param.annotation.__name__ if param.annotation != inspect.Parameter.empty else 'any'
+                            required = "*optional*" if param.default != inspect.Parameter.empty else "**REQUIRED**"
+                            default_info = f" (default: {param.default})" if param.default != inspect.Parameter.empty else ""
+                            tools_description += f"- `{param_name}` ({param_type}, {required}){default_info}\n"
+
+                tools_description += "\n"
 
-            tools_description += ("\nCall these tools when needed using the standard function calling format. "
-                                  "You can call multiple tools in sequence if necessary to fully answer the user's question.")
+            tools_description += ("## Usage Instructions\n"
+                                  "- Use the standard function calling format\n"
+                                  "- **MUST** provide all REQUIRED parameters\n"
+                                  "- Do NOT call tools with empty arguments\n")
 
             instructions += tools_description
-
+
         return instructions
 
     def _create_modern_workflow(self) -> StateGraph:
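For readers comparing the two prompt formats: the new `_build_modern_instructions` renders one Markdown section per tool and, for tools exposing a Pydantic `args_schema`, documents each field's type, requiredness, and description. Below is a minimal sketch (not taken from the package) of that introspection step against a hypothetical `HTTPArgs` schema; the package reads the same field mapping through the `__fields__` alias visible in the diff.

    # Sketch only: a hypothetical Pydantic schema and the kind of parameter
    # bullet the new per-field loop in _build_modern_instructions produces.
    from pydantic import BaseModel, Field

    class HTTPArgs(BaseModel):
        # Hypothetical fields for illustration; not part of sonika-langchain-bot.
        url: str = Field(description="Target URL")
        timeout: int = Field(default=30, description="Request timeout in seconds")

    lines = []
    for field_name, field_info in HTTPArgs.model_fields.items():  # the diff accesses this via __fields__
        required = "**REQUIRED**" if field_info.is_required() else "*optional*"
        lines.append(f"- `{field_name}` ({field_info.annotation.__name__}, {required}): {field_info.description}")

    print("\n".join(lines))
    # - `url` (str, **REQUIRED**): Target URL
    # - `timeout` (int, *optional*): Request timeout in seconds

In 0.0.12 the same tools were summarized as a single "- name: description" bullet each, so the model received no parameter-level guidance.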
@@ -542,181 +558,4 @@ class LangChainBot:
         if self.vector_store:
             docs = self.vector_store.similarity_search(query, k=4)
             return "\n".join([doc.page_content for doc in docs])
-        return ""
-
-    # ===== MODERN ENHANCED CAPABILITIES =====
-
-    def get_response_with_thread(self, user_input: str, thread_id: str) -> ResponseModel:
-        """
-        Generate response with automatic conversation persistence using thread IDs.
-
-        This method leverages LangGraph's checkpointing system to automatically
-        persist and retrieve conversation state based on thread identifiers.
-
-        Args:
-            user_input (str): The user's message or query
-            thread_id (str): Unique identifier for the conversation thread
-
-        Returns:
-            ResponseModel: Structured response with token usage and content
-
-        Raises:
-            ValueError: If checkpointer is not configured during initialization
-
-        Note:
-            Each thread_id maintains independent conversation state, enabling
-            multiple concurrent conversations per user or session.
-        """
-        if not self.checkpointer:
-            raise ValueError("Checkpointer not configured. Initialize with use_checkpointer=True")
-
-        config = {"configurable": {"thread_id": thread_id}}
-
-        initial_state = {
-            "messages": [HumanMessage(content=user_input)],
-            "context": ""
-        }
-
-        result = self.graph.invoke(initial_state, config=config)
-
-        # Extract final response
-        final_response = ""
-        for msg in reversed(result["messages"]):
-            if isinstance(msg, AIMessage) and msg.content:
-                final_response = msg.content
-                break
-
-        # Extract token usage
-        token_usage = {}
-        last_message = result["messages"][-1]
-        if hasattr(last_message, 'response_metadata'):
-            token_usage = last_message.response_metadata.get('token_usage', {})
-
-        return ResponseModel(
-            user_tokens=token_usage.get('prompt_tokens', 0),
-            bot_tokens=token_usage.get('completion_tokens', 0),
-            response=final_response
-        )
-
-    def stream_with_thread(self, user_input: str, thread_id: str) -> Generator[Dict[str, Any], None, None]:
-        """
-        Stream response with automatic conversation persistence.
-
-        This method combines streaming capabilities with thread-based persistence,
-        allowing real-time response generation while maintaining conversation state.
-
-        Args:
-            user_input (str): The user's message or query
-            thread_id (str): Unique identifier for the conversation thread
-
-        Yields:
-            Dict[str, Any]: Workflow execution chunks containing intermediate states
-
-        Raises:
-            ValueError: If checkpointer is not configured during initialization
-        """
-        if not self.checkpointer:
-            raise ValueError("Checkpointer not configured. Initialize with use_checkpointer=True")
-
-        config = {"configurable": {"thread_id": thread_id}}
-
-        initial_state = {
-            "messages": [HumanMessage(content=user_input)],
-            "context": ""
-        }
-
-        for chunk in self.graph.stream(initial_state, config=config):
-            yield chunk
-
-    def get_mcp_status(self) -> Dict[str, Any]:
-        """
-        Retrieve the current status of MCP (Model Context Protocol) integration.
-
-        This method provides diagnostic information about MCP server connections
-        and tool availability for monitoring and debugging purposes.
-
-        Returns:
-            Dict[str, Any]: MCP status information containing:
-                - mcp_enabled: Whether MCP is active
-                - servers: List of connected server names
-                - tools_count: Number of MCP-sourced tools
-                - total_tools: Total number of available tools
-        """
-        if not self.mcp_client:
-            return {"mcp_enabled": False, "servers": [], "tools_count": 0}
-
-        mcp_tools_count = len([
-            tool for tool in self.tools
-            if hasattr(tool, '__module__') and tool.__module__ and 'mcp' in tool.__module__
-        ])
-
-        return {
-            "mcp_enabled": True,
-            "servers": list(getattr(self.mcp_client, '_servers', {}).keys()),
-            "tools_count": mcp_tools_count,
-            "total_tools": len(self.tools)
-        }
-
-    def add_tool_dynamically(self, tool: BaseTool):
-        """
-        Add a tool to the bot's capabilities at runtime.
-
-        This method allows dynamic tool addition after initialization, automatically
-        updating the model binding and workflow configuration.
-
-        Args:
-            tool (BaseTool): The LangChain tool to add to the bot's capabilities
-
-        Note:
-            Adding tools dynamically triggers a complete workflow reconstruction
-            to ensure proper tool integration and binding.
-        """
-        self.tools.append(tool)
-        # Reconstruct model binding and workflow with new tool
-        self.model_with_tools = self._prepare_model_with_tools()
-        self.instructions = self._build_modern_instructions()
-        self.graph = self._create_modern_workflow()
-
-    # ===== UTILITY AND DIAGNOSTIC METHODS =====
-
-    def get_workflow_state(self) -> Dict[str, Any]:
-        """
-        Get current workflow configuration for debugging and monitoring.
-
-        Returns:
-            Dict[str, Any]: Workflow state information including:
-                - tools_count: Number of available tools
-                - has_checkpointer: Whether persistence is enabled
-                - has_vector_store: Whether file processing is active
-                - chat_history_length: Current conversation length
-        """
-        return {
-            "tools_count": len(self.tools),
-            "has_checkpointer": self.checkpointer is not None,
-            "has_vector_store": self.vector_store is not None,
-            "chat_history_length": len(self.chat_history),
-            "mcp_enabled": self.mcp_client is not None
-        }
-
-    def reset_conversation(self):
-        """
-        Reset conversation state while preserving configuration and processed files.
-
-        This method clears only the conversation history while maintaining
-        tool configurations, file context, and other persistent settings.
-        """
-        self.chat_history.clear()
-
-    def get_tool_names(self) -> List[str]:
-        """
-        Get list of available tool names for diagnostic purposes.
-
-        Returns:
-            List[str]: Names of all currently available tools
-        """
-        return [tool.name for tool in self.tools]
-
-    # ===== END OF CLASS =====
-    # No unnecessary legacy methods
-
-
+        return ""
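The block removed above consisted of convenience wrappers around LangGraph's checkpointing and diagnostics (thread-scoped invoke/stream, MCP status, dynamic tool registration, workflow introspection); whether 0.0.13 exposes replacements elsewhere is not visible in this diff. As a rough sketch of what get_response_with_thread did internally, assuming (as in 0.0.12) a bot built with use_checkpointer=True, the equivalent direct call against the compiled graph is:

    # Sketch only, mirroring the removed 0.0.12 wrapper; `bot` is assumed to be a
    # LangChainBot instance whose `graph` attribute is the compiled StateGraph.
    from typing import Any, Dict
    from langchain_core.messages import HumanMessage

    def invoke_with_thread(bot: Any, user_input: str, thread_id: str) -> Dict[str, Any]:
        """One persisted turn: state is checkpointed under the given thread_id."""
        config = {"configurable": {"thread_id": thread_id}}
        initial_state = {"messages": [HumanMessage(content=user_input)], "context": ""}
        return bot.graph.invoke(initial_state, config=config)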
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sonika-langchain-bot
-Version: 0.0.12
+Version: 0.0.13
 Summary: Agente langchain con LLM
 Author: Erley Blanco Carvajal
 License: MIT License
@@ -1,14 +1,14 @@
 sonika_langchain_bot/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sonika_langchain_bot/langchain_bdi.py,sha256=ithc55azP5XSPb8AGRUrDGYnVI6I4IqpqElLNat4BAQ,7024
-sonika_langchain_bot/langchain_bot_agent.py,sha256=SBqiLWWTpSHi_v_pC6XelHyMpiSC-g2n1fGipZbgUQk,28631
+sonika_langchain_bot/langchain_bot_agent.py,sha256=3K8HiUzizIz7v_KmTFX9geOqiXTEwEqlm5jPXdPQeaM,23072
 sonika_langchain_bot/langchain_bot_agent_bdi.py,sha256=Ev0hhRQYe6kyGAHiFDhFsfu6QnTwUFaA9oB8DfNV7u4,8613
 sonika_langchain_bot/langchain_clasificator.py,sha256=GR85ZAliymBSoDa5PXB31BvJkuiokGjS2v3RLdXnzzk,1381
 sonika_langchain_bot/langchain_class.py,sha256=5anB6v_wCzEoAJRb8fV9lPPS72E7-k51y_aeiip8RAw,1114
 sonika_langchain_bot/langchain_files.py,sha256=SEyqnJgBc_nbCIG31eypunBbO33T5AHFOhQZcghTks4,381
 sonika_langchain_bot/langchain_models.py,sha256=vqSSZ48tNofrTMLv1QugDdyey2MuIeSdlLSD37AnzkI,2235
 sonika_langchain_bot/langchain_tools.py,sha256=y7wLf1DbUua3QIvz938Ek-JIMOuQhrOIptJadW8OIsU,466
-sonika_langchain_bot-0.0.12.dist-info/licenses/LICENSE,sha256=O8VZ4aU_rUMAArvYTm2bshcZ991huv_tpfB5BKHH9Q8,1064
-sonika_langchain_bot-0.0.12.dist-info/METADATA,sha256=hgBZ7RuN4683itsMD6gggfJrOtg-IrqV5tNbcIgnWb0,6380
-sonika_langchain_bot-0.0.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-sonika_langchain_bot-0.0.12.dist-info/top_level.txt,sha256=UsTTSZFEw2wrPSVh4ufu01e2m_E7O_QVYT_k4zCQaAE,21
-sonika_langchain_bot-0.0.12.dist-info/RECORD,,
+sonika_langchain_bot-0.0.13.dist-info/licenses/LICENSE,sha256=O8VZ4aU_rUMAArvYTm2bshcZ991huv_tpfB5BKHH9Q8,1064
+sonika_langchain_bot-0.0.13.dist-info/METADATA,sha256=RyRtV63QD_s53I3GCCa8uuJasHU1811CoORRIYDmmuY,6380
+sonika_langchain_bot-0.0.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+sonika_langchain_bot-0.0.13.dist-info/top_level.txt,sha256=UsTTSZFEw2wrPSVh4ufu01e2m_E7O_QVYT_k4zCQaAE,21
+sonika_langchain_bot-0.0.13.dist-info/RECORD,,