lollms-client 0.24.2__tar.gz → 0.25.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of lollms-client has been flagged as potentially problematic.

Files changed (91)
  1. {lollms_client-0.24.2/lollms_client.egg-info → lollms_client-0.25.0}/PKG-INFO +1 -1
  2. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/__init__.py +1 -1
  3. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/lollms/__init__.py +1 -0
  4. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/openai/__init__.py +3 -2
  5. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_core.py +226 -158
  6. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_discussion.py +98 -34
  7. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_types.py +9 -1
  8. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_utilities.py +68 -0
  9. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/mcp_bindings/remote_mcp/__init__.py +2 -2
  10. {lollms_client-0.24.2 → lollms_client-0.25.0/lollms_client.egg-info}/PKG-INFO +1 -1
  11. {lollms_client-0.24.2 → lollms_client-0.25.0}/LICENSE +0 -0
  12. {lollms_client-0.24.2 → lollms_client-0.25.0}/README.md +0 -0
  13. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/article_summary/article_summary.py +0 -0
  14. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/console_discussion/console_app.py +0 -0
  15. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/console_discussion.py +0 -0
  16. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/deep_analyze/deep_analyse.py +0 -0
  17. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  18. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/function_calling_with_local_custom_mcp.py +0 -0
  19. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/generate_a_benchmark_for_safe_store.py +0 -0
  20. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/generate_and_speak/generate_and_speak.py +0 -0
  21. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  22. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/generate_text_with_multihop_rag_example.py +0 -0
  23. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/gradio_chat_app.py +0 -0
  24. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/gradio_lollms_chat.py +0 -0
  25. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/internet_search_with_rag.py +0 -0
  26. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/lollms_discussions_test.py +0 -0
  27. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/mcp_examples/external_mcp.py +0 -0
  28. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/mcp_examples/local_mcp.py +0 -0
  29. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/mcp_examples/openai_mcp.py +0 -0
  30. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/mcp_examples/run_remote_mcp_example_v2.py +0 -0
  31. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/mcp_examples/run_standard_mcp_example.py +0 -0
  32. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/simple_text_gen_test.py +0 -0
  33. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/simple_text_gen_with_image_test.py +0 -0
  34. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/test_local_models/local_chat.py +0 -0
  35. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/text_2_audio.py +0 -0
  36. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/text_2_image.py +0 -0
  37. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/text_2_image_diffusers.py +0 -0
  38. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/text_and_image_2_audio.py +0 -0
  39. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/text_gen.py +0 -0
  40. {lollms_client-0.24.2 → lollms_client-0.25.0}/examples/text_gen_system_prompt.py +0 -0
  41. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/__init__.py +0 -0
  42. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  43. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  44. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  45. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  46. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  47. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  48. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  49. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_config.py +0 -0
  50. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_js_analyzer.py +0 -0
  51. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_llm_binding.py +0 -0
  52. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_mcp_binding.py +0 -0
  53. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_personality.py +0 -0
  54. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_python_analyzer.py +0 -0
  55. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_stt_binding.py +0 -0
  56. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_tti_binding.py +0 -0
  57. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_ttm_binding.py +0 -0
  58. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_tts_binding.py +0 -0
  59. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/lollms_ttv_binding.py +0 -0
  60. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  61. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  62. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  63. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  64. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  65. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  66. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/stt_bindings/__init__.py +0 -0
  67. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  68. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  69. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  70. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tti_bindings/__init__.py +0 -0
  71. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
  72. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  73. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  74. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  75. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/ttm_bindings/__init__.py +0 -0
  76. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  77. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  78. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  79. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tts_bindings/__init__.py +0 -0
  80. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  81. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  82. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  83. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  84. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/ttv_bindings/__init__.py +0 -0
  85. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  86. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client.egg-info/SOURCES.txt +0 -0
  87. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client.egg-info/dependency_links.txt +0 -0
  88. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client.egg-info/requires.txt +0 -0
  89. {lollms_client-0.24.2 → lollms_client-0.25.0}/lollms_client.egg-info/top_level.txt +0 -0
  90. {lollms_client-0.24.2 → lollms_client-0.25.0}/pyproject.toml +0 -0
  91. {lollms_client-0.24.2 → lollms_client-0.25.0}/setup.cfg +0 -0
--- lollms_client-0.24.2/lollms_client.egg-info/PKG-INFO
+++ lollms_client-0.25.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.24.2
+Version: 0.25.0
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License

--- lollms_client-0.24.2/lollms_client/__init__.py
+++ lollms_client-0.25.0/lollms_client/__init__.py
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager


-__version__ = "0.24.2" # Updated version
+__version__ = "0.25.0" # Updated version

 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [

--- lollms_client-0.24.2/lollms_client/llm_bindings/lollms/__init__.py
+++ lollms_client-0.25.0/lollms_client/llm_bindings/lollms/__init__.py
@@ -4,6 +4,7 @@ from lollms_client.lollms_llm_binding import LollmsLLMBinding
 from lollms_client.lollms_types import MSG_TYPE
 from lollms_client.lollms_utilities import encode_image
 from lollms_client.lollms_types import ELF_COMPLETION_FORMAT
+from lollms_client.lollms_discussion import LollmsDiscussion
 from ascii_colors import ASCIIColors, trace_exception
 from typing import Optional, Callable, List, Union
 import json

--- lollms_client-0.24.2/lollms_client/llm_bindings/openai/__init__.py
+++ lollms_client-0.25.0/lollms_client/llm_bindings/openai/__init__.py
@@ -30,7 +30,8 @@ class OpenAIBinding(LollmsLLMBinding):
                  model_name: str = "",
                  service_key: str = None,
                  verify_ssl_certificate: bool = True,
-                 default_completion_format: ELF_COMPLETION_FORMAT = ELF_COMPLETION_FORMAT.Chat):
+                 default_completion_format: ELF_COMPLETION_FORMAT = ELF_COMPLETION_FORMAT.Chat,
+                 **kwargs):
        """
        Initialize the OpenAI binding.

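The added `**kwargs` makes the constructor tolerant of extra configuration keys instead of raising `TypeError`. A minimal sketch of the effect, assuming an importable `OpenAIBinding`; `some_future_option` is purely illustrative, not a real parameter:

```python
# Hedged sketch: unknown keyword arguments are now absorbed by **kwargs.
binding = OpenAIBinding(
    model_name="gpt-4o-mini",
    service_key="sk-...",
    some_future_option=True,  # hypothetical key: previously a TypeError, now ignored
)
```
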
@@ -52,7 +53,7 @@ class OpenAIBinding(LollmsLLMBinding):
         if not self.service_key:
             self.service_key = os.getenv("OPENAI_API_KEY", self.service_key)
-        self.client = openai.OpenAI(api_key=self.service_key, base_url=host_address)
+        self.client = openai.OpenAI(api_key=self.service_key, base_url=None if host_address is None else host_address if len(host_address)>0 else None)
         self.completion_format = ELF_COMPLETION_FORMAT.Chat


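The new expression treats both `None` and an empty string as "no custom host", so the client falls back to the official OpenAI endpoint instead of being constructed with `base_url=""`. A minimal sketch of the same normalization, factored out for clarity (the helper name is ours, not the package's):

```python
def normalize_base_url(host_address):
    """Equivalent of the inline expression in this diff: map None or "" to None
    so openai.OpenAI() falls back to its default endpoint."""
    if host_address is None:
        return None
    return host_address if len(host_address) > 0 else None

assert normalize_base_url(None) is None
assert normalize_base_url("") is None
assert normalize_base_url("http://localhost:11434/v1") == "http://localhost:11434/v1"
```
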
--- lollms_client-0.24.2/lollms_client/lollms_core.py
+++ lollms_client-0.25.0/lollms_client/lollms_core.py
@@ -13,6 +13,8 @@ from lollms_client.lollms_ttm_binding import LollmsTTMBinding, LollmsTTMBindingM
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager

 from lollms_client.lollms_discussion import LollmsDiscussion
+
+from lollms_client.lollms_utilities import build_image_dicts, dict_to_markdown
 import json, re
 from enum import Enum
 import base64

@@ -846,7 +848,7 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
             "2. **Check for a Single-Step Solution:** Scrutinize the available tools. Can a single tool call directly achieve the user's current goal? \n"
             "3. **Formulate a Plan:** Based on your analysis, create a concise, numbered list of steps to achieve the goal. If the goal is simple, this may be only one step. If it is complex or multi-turn, it may be several steps.\n\n"
             "**CRITICAL RULES:**\n"
-            "* **MANDATORY: NEVER add steps the user did not ask for.** Do not embellish or add 'nice-to-have' features.\n"
+            "* **MANDATORY: Be helpful, curious and creative.\n"
             "* **Focus on the Goal:** Your plan should directly address the user's request as it stands now in the conversation.\n\n"
             "---\n"
             "**Available Tools:**\n"

@@ -1078,7 +1080,7 @@ Provide your response as a single JSON object with one key, "query".
 """
         try:
             raw_initial_query_response = self.generate_code(initial_query_gen_prompt, system_prompt="You are a query generation expert.", temperature=0.0)
-            initial_plan = json.loads(raw_initial_query_response)
+            initial_plan = robust_json_parser(raw_initial_query_response)
             current_query_for_rag = initial_plan.get("query")
             if not current_query_for_rag:
                 raise ValueError("LLM returned an empty initial query.")

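Several call sites in this release swap `json.loads` for `robust_json_parser`, which tolerates the imperfect JSON LLMs tend to emit. The package's actual implementation is not shown in this diff (it presumably lives in `lollms_client/lollms_utilities.py`, which gains 68 lines); the sketch below only illustrates the kind of repairs such a parser typically performs:

```python
import json
import re

def robust_json_parser_sketch(raw: str) -> dict:
    """Illustrative only -- not the package's implementation.
    Strips markdown fences and trailing commas before parsing."""
    text = raw.strip()
    # Drop a ```json ... ``` fence if the model wrapped its answer in one.
    fence = re.search(r"```(?:json)?\s*(.*?)\s*```", text, re.DOTALL)
    if fence:
        text = fence.group(1)
    # Remove trailing commas before closing braces/brackets.
    text = re.sub(r",\s*([}\]])", r"\1", text)
    return json.loads(text)

print(robust_json_parser_sketch('```json\n{"query": "latest results",}\n```'))
# {'query': 'latest results'}
```
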
@@ -1434,7 +1436,6 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
         new_scratchpad_text = self.generate_text(prompt=synthesis_prompt, n_predict=1024, temperature=0.0)
         return self.remove_thinking_blocks(new_scratchpad_text).strip()

-    # In lollms_client/lollms_discussion.py -> LollmsClient class

     def generate_with_mcp_rag(
         self,
@@ -1444,13 +1445,14 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
         system_prompt: str = None,
         reasoning_system_prompt: str = "You are a logical and adaptive AI assistant.",
         images: Optional[List[str]] = None,
-        max_reasoning_steps: int = 10,
-        decision_temperature: float = 0.0,
+        max_reasoning_steps: int = None,
+        decision_temperature: float = None,
         final_answer_temperature: float = None,
         streaming_callback: Optional[Callable[[str, 'MSG_TYPE', Optional[Dict], Optional[List]], bool]] = None,
-        rag_top_k: int = 5,
-        rag_min_similarity_percent: float = 70.0,
-        output_summarization_threshold: int = 500, # In tokens
+        rag_top_k: int = None,
+        rag_min_similarity_percent: float = None,
+        output_summarization_threshold: int = None, # In tokens
+        debug: bool = False,
         **llm_generation_kwargs
     ) -> Dict[str, Any]:
         """Generates a response using a dynamic agent with stateful, ID-based step tracking.
@@ -1483,6 +1485,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
             rag_min_similarity_percent: Minimum similarity for RAG results.
             output_summarization_threshold: The token count that triggers automatic
                 summarization of a tool's text output.
+            debug: If true, we'll report the detailed prompting and response information
             **llm_generation_kwargs: Additional keyword arguments for LLM calls.

         Returns:
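Taken together with the signature change above, the new `debug` flag can be toggled per call. A minimal usage sketch, assuming an already-initialized `LollmsClient` instance named `lc`; parameter names come straight from the signature shown in this diff, the tool name is one of the package's bundled local tools:

```python
# Hypothetical call; `lc` is assumed to be a configured LollmsClient.
result = lc.generate_with_mcp_rag(
    prompt="Plot the average of column 'price' from sales.csv",
    use_mcps=["python_interpreter"],  # a list now restricts discovery to named tools
    max_reasoning_steps=6,
    debug=True,                       # dumps each prompt/response via log_prompt
)
print(result["final_answer"])
print(result["tool_calls"])
```
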
@@ -1490,12 +1493,28 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
             answer, the complete internal scratchpad, a log of tool calls,
             any retrieved RAG sources, and other metadata.
         """
+        reasoning_step_id = None
         if not self.binding:
             return {"final_answer": "", "tool_calls": [], "sources": [], "error": "LLM binding not initialized."}

+        if not max_reasoning_steps:
+            max_reasoning_steps= 10
+        if not rag_min_similarity_percent:
+            rag_min_similarity_percent= 50
+        if not rag_top_k:
+            rag_top_k = 5
+        if not decision_temperature:
+            decision_temperature = 0.7
+        if not output_summarization_threshold:
+            output_summarization_threshold = 500
+
+        events = []
+
+
         # --- Initialize Agent State ---
         sources_this_turn: List[Dict[str, Any]] = []
         tool_calls_this_turn: List[Dict[str, Any]] = []
+        generated_code_store: Dict[str, str] = {} # NEW: Store for UUID -> code
         original_user_prompt = prompt

         initial_state_parts = [
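Note that these fallbacks use truthiness (`if not x`) rather than `is None` checks. One consequence worth knowing: explicitly passing a falsy value such as `decision_temperature=0.0` (the old default) now triggers the 0.7 fallback. A contrast sketch, shown only to make the behavior concrete:

```python
# Contrast sketch: truthiness vs. identity check for optional defaults.
def pick_temperature_truthy(decision_temperature=None):
    if not decision_temperature:        # 0.0 also falls through to the default
        decision_temperature = 0.7
    return decision_temperature

def pick_temperature_strict(decision_temperature=None):
    if decision_temperature is None:    # only an omitted value gets the default
        decision_temperature = 0.7
    return decision_temperature

assert pick_temperature_truthy(0.0) == 0.7   # a greedy-decoding request is lost
assert pick_temperature_strict(0.0) == 0.0
```
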
@@ -1507,41 +1526,48 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
             initial_state_parts.append(f"- The user has provided {len(images)} image(s) for context.")
         current_scratchpad = "\n".join(initial_state_parts)

-        # --- Define Inner Helper Function for Stateful Step Logging ---
-        def log_step(
+        def log_prompt(prompt, type="prompt"):
+            ASCIIColors.cyan(f"** DEBUG: {type} **")
+            ASCIIColors.magenta(prompt[-15000:])
+            ASCIIColors.cyan(f"** DEBUG: DONE **")
+
+        # --- Define Inner Helper Functions ---
+        def log_event(
             description: str,
-            step_type: str,
+            event_type: MSG_TYPE = MSG_TYPE.MSG_TYPE_CHUNK,
             metadata: Optional[Dict] = None,
-            is_start: bool = True
+            event_id=None
         ) -> Optional[str]:
-            """
-            Logs a step start or end, generating a unique ID for correlation.
-            This is an inner function that has access to the `streaming_callback`.
-
-            Returns the ID for start events so it can be used for the end event.
-            """
-            if not streaming_callback:
-                return None
-
-            event_id = str(uuid.uuid4()) if is_start else None
-
-            params = {"type": step_type, "description": description, **(metadata or {})}
-
-            if is_start:
-                params["id"] = event_id
-                streaming_callback(description, MSG_TYPE.MSG_TYPE_STEP_START, params)
-                return event_id
-            else:
-                if 'id' in params:
-                    streaming_callback(description, MSG_TYPE.MSG_TYPE_STEP_END, params)
-                else: # Fallback for simple, non-duration steps
-                    streaming_callback(description, MSG_TYPE.MSG_TYPE_STEP, params)
-                return None
+            if not streaming_callback: return None
+            event_id = str(uuid.uuid4()) if event_type==MSG_TYPE.MSG_TYPE_STEP_START else event_id
+            params = {"type": event_type, "description": description, **(metadata or {})}
+            params["id"] = event_id
+            streaming_callback(description, event_type, params)
+            return event_id
+
+        def _substitute_code_uuids_recursive(data: Any, code_store: Dict[str, str]):
+            """Recursively finds and replaces code UUIDs in tool parameters."""
+            if isinstance(data, dict):
+                for key, value in data.items():
+                    if isinstance(value, str) and value in code_store:
+                        data[key] = code_store[value]
+                    else:
+                        _substitute_code_uuids_recursive(value, code_store)
+            elif isinstance(data, list):
+                for i, item in enumerate(data):
+                    if isinstance(item, str) and item in code_store:
+                        data[i] = code_store[item]
+                    else:
+                        _substitute_code_uuids_recursive(item, code_store)

+        discovery_step_id = log_event("Discovering tools",MSG_TYPE.MSG_TYPE_STEP_START)
         # --- 1. Discover Available Tools ---
         available_tools = []
         if use_mcps and self.mcp:
-            available_tools.extend(self.mcp.discover_tools(force_refresh=True))
+            discovered_tools = self.mcp.discover_tools(force_refresh=True)
+            if isinstance(use_mcps, list):
+                available_tools.extend([t for t in discovered_tools if t["name"] in use_mcps])
+
         if use_data_store:
             for store_name in use_data_store:
                 available_tools.append({
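`_substitute_code_uuids_recursive` mutates the parameter tree in place, replacing any string that matches a buffered code ID. A self-contained sketch of the round trip (the store mirrors `generated_code_store` above; the ID and code values are made up):

```python
from typing import Any, Dict

def substitute(data: Any, code_store: Dict[str, str]) -> None:
    """Same logic as _substitute_code_uuids_recursive in this diff."""
    if isinstance(data, dict):
        for key, value in data.items():
            if isinstance(value, str) and value in code_store:
                data[key] = code_store[value]
            else:
                substitute(value, code_store)
    elif isinstance(data, list):
        for i, item in enumerate(data):
            if isinstance(item, str) and item in code_store:
                data[i] = code_store[item]
            else:
                substitute(item, code_store)

store = {"3f2c-fake-uuid": "print('hello')"}
params = {"code": "3f2c-fake-uuid", "timeout": 30}
substitute(params, store)
assert params["code"] == "print('hello')"   # UUID swapped for the buffered code
```
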
@@ -1550,19 +1576,33 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
                     "input_schema": {"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}
                 })

-        formatted_tools_list = "\n".join([f"- {t['name']}: {t['description']}" for t in available_tools])
-        formatted_tools_list += "\n- request_clarification: Use if the user's request is ambiguous."
-        formatted_tools_list += "\n- final_answer: Use when you are ready to respond to the user."
+        # Add the new put_code_in_buffer tool definition
+        available_tools.append({
+            "name": "put_code_in_buffer",
+            "description": "Generates a block of code (e.g., Python, SQL) to be used by another tool. It returns a unique 'code_id'. You must then use this 'code_id' as the value for the code parameter in the subsequent tool call. This **does not** execute the code. It only buffers it for future use. Only use it if another tool requires code.",
+            "input_schema": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed natural language description of the code's purpose and requirements."}}, "required": ["prompt"]}
+        })
+        # Add the new refactor_scratchpad tool definition
+        available_tools.append({
+            "name": "refactor_scratchpad",
+            "description": "Rewrites the scratchpad content to clean it and reorganize it. Only use if the scratchpad is messy or contains too much information compared to what you need.",
+            "input_schema": {"type": "object", "properties": {}}
+        })
+
+        formatted_tools_list = "\n".join([f"**{t['name']}**:\n{t['description']}\ninput schema:\n{json.dumps(t['input_schema'])}" for t in available_tools])
+        formatted_tools_list += "\n**request_clarification**:\nUse if the user's request is ambiguous and you can not infer a clear idea of his intent. this tool has no parameters."
+        formatted_tools_list += "\n**final_answer**:\nUse when you are ready to respond to the user. this tool has no parameters."
+
+        if discovery_step_id: log_event("Discovering tools",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)

         # --- 2. Dynamic Reasoning Loop ---
         for i in range(max_reasoning_steps):
-            reasoning_step_id = log_step(f"Reasoning Step {i+1}/{max_reasoning_steps}", "reasoning_step", is_start=True)
-
-            user_context = f'Original User Request: "{original_user_prompt}"'
-            if images:
-                user_context += f'\n(Note: {len(images)} image(s) were provided with this request.)'
-
-            reasoning_prompt_template = f"""You are a logical AI assistant. Your task is to achieve the user's goal by thinking step-by-step and using the available tools.
+            try:
+                reasoning_step_id = log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_START)
+                user_context = f'Original User Request: "{original_user_prompt}"'
+                if images: user_context += f'\n(Note: {len(images)} image(s) were provided with this request.)'
+
+                reasoning_prompt_template = f"""You are a logical AI assistant. Your task is to achieve the user's goal by thinking step-by-step and using the available tools.

 --- AVAILABLE TOOLS ---
 {formatted_tools_list}
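The `put_code_in_buffer` tool splits "write the code" from "run the code": the agent first buffers code and receives a `code_id`, then passes that ID to an executing tool, where the UUID substitution above swaps the real code back in. Two consecutive action objects in the shape the reasoning prompt requests might look like this, written as Python dicts; the executing tool's name is illustrative, not one shipped by the package:

```python
# Step N: buffer the code (the action JSON the agent would emit, as a dict).
action_1 = {
    "thought": "I need a script before I can run it.",
    "action": {
        "tool_name": "put_code_in_buffer",
        "tool_params": {"prompt": "Python script that prints the first 10 primes"},
    },
}
# The observation returns e.g. {"status": "success", "code_id": "<uuid>"}.
# Step N+1: pass the returned code_id where the executing tool expects code;
# _substitute_code_uuids_recursive swaps the buffered code in before execution.
action_2 = {
    "thought": "The code is buffered; now execute it.",
    "action": {
        "tool_name": "python_interpreter::run_code",  # hypothetical tool name
        "tool_params": {"code": "<code_id from step N>"},
    },
}
```
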
@@ -1577,120 +1617,150 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
 2. **THINK:**
     - Does the latest observation completely fulfill the user's original request?
     - If YES, your next action MUST be to use the `final_answer` tool.
-    - If NO, what is the single next logical step needed?
+    - If NO, what is the single next logical step needed? This may involve writing code first with `put_code_in_buffer`, then using another tool.
     - If you are stuck or the request is ambiguous, use `request_clarification`.
 3. **ACT:** Formulate your decision as a JSON object.
 """
-            action_template = {
-                "thought": "My detailed analysis of the last observation and my reasoning for the next action.",
-                "action": {
-                    "tool_name": "The single tool to use (e.g., 'time_machine::get_current_time', 'final_answer').",
-                    "tool_params": {"param1": "value1"},
-                    "clarification_question": "(string, ONLY if tool_name is 'request_clarification')"
+                action_template = {
+                    "thought": "My detailed analysis of the last observation and my reasoning for the next action and how it integrates with my global plan.",
+                    "action": {
+                        "tool_name": "The single tool to use (e.g., 'put_code_in_buffer', 'time_machine::get_current_time', 'final_answer').",
+                        "tool_params": {"param1": "value1"},
+                        "clarification_question": "(string, ONLY if tool_name is 'request_clarification')"
+                    }
                 }
-            }
-
-            structured_action_response = self.generate_code(
-                prompt=reasoning_prompt_template,
-                template=json.dumps(action_template, indent=2),
-                system_prompt=reasoning_system_prompt,
-                temperature=decision_temperature,
-                images=images if i == 0 else None
-            )
-
-            try:
-                action_data = json.loads(structured_action_response)
-                thought = action_data.get("thought", "No thought was generated.")
-                action = action_data.get("action", {})
-                tool_name = action.get("tool_name")
-                tool_params = action.get("tool_params", {})
-            except (json.JSONDecodeError, TypeError) as e:
-                current_scratchpad += f"\n\n### Step {i+1} Failure\n- **Error:** Failed to generate a valid JSON action: {e}"
-                log_step(f"\n\n### Step {i+1} Failure\n- **Error:** Failed to generate a valid JSON action: {e}", "scratchpad", is_start=False)
-                if reasoning_step_id:
-                    log_step(f"Reasoning Step {i+1}/{max_reasoning_steps}", "reasoning_step", metadata={"id": reasoning_step_id, "error": str(e)}, is_start=False)
-                break
+                if debug: log_prompt(reasoning_prompt_template, f"REASONING PROMPT (Step {i+1})")
+                structured_action_response = self.generate_code(
+                    prompt=reasoning_prompt_template, template=json.dumps(action_template, indent=2),
+                    system_prompt=reasoning_system_prompt, temperature=decision_temperature,
+                    images=images if i == 0 else None
+                )
+                if debug: log_prompt(structured_action_response, f"RAW REASONING RESPONSE (Step {i+1})")

-            current_scratchpad += f"\n\n### Step {i+1}: Thought\n{thought}"
-            log_step(f"\n\n### Step {i+1}: Thought\n{thought}", "scratchpad", is_start=False)
+                try:
+                    action_data = robust_json_parser(structured_action_response)
+                    thought = action_data.get("thought", "No thought was generated.")
+                    action = action_data.get("action", {})
+                    if isinstance(action,str):
+                        tool_name = action
+                        tool_params = {}
+                    else:
+                        tool_name = action.get("tool_name")
+                        tool_params = action.get("tool_params", {})
+                except (json.JSONDecodeError, TypeError) as e:
+                    current_scratchpad += f"\n\n### Step {i+1} Failure\n- **Error:** Failed to generate a valid JSON action: {e}"
+                    log_event(f"Step Failure: Invalid JSON action.", MSG_TYPE.MSG_TYPE_EXCEPTION, metadata={"details": str(e)})
+                    if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_END, metadata={"error": str(e)}, event_id=reasoning_step_id)
+

-            if not tool_name:
-                current_scratchpad += f"\n\n### Step {i+1} Failure\n- **Error:** Did not specify a tool name."
-                log_step(f"\n\n### Step {i+1} Failure\n- **Error:** Did not specify a tool name.", "scratchpad", is_start=False)
-                if reasoning_step_id:
-                    log_step(f"Reasoning Step {i+1}/{max_reasoning_steps}", "reasoning_step", metadata={"id": reasoning_step_id}, is_start=False)
-                break
+                current_scratchpad += f"\n\n### Step {i+1}: Thought\n{thought}"
+                log_event(f"Thought: {thought}", MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT)

-            if tool_name == "request_clarification":
-                clarification_question = action.get("clarification_question", "Could you please provide more details?")
-                current_scratchpad += f"\n\n### Step {i+1}: Action\n- **Action:** Decided to request clarification.\n- **Question:** {clarification_question}"
-                log_step(f"\n\n### Step {i+1}: Action\n- **Action:** Decided to request clarification.\n- **Question:** {clarification_question}", "scratchpad", is_start=False)
-                if reasoning_step_id:
-                    log_step(f"Reasoning Step {i+1}/{max_reasoning_steps}", "reasoning_step", metadata={"id": reasoning_step_id}, is_start=False)
-                return {"final_answer": clarification_question, "final_scratchpad": current_scratchpad, "tool_calls": tool_calls_this_turn, "sources": sources_this_turn, "clarification_required": True, "error": None}
-
-            if tool_name == "final_answer":
-                current_scratchpad += f"\n\n### Step {i+1}: Action\n- **Action:** Decided to formulate the final answer."
-                log_step(f"\n\n### Step {i+1}: Action\n- **Action:** Decided to formulate the final answer.", "scratchpad", is_start=False)
-                if reasoning_step_id:
-                    log_step(f"Reasoning Step {i+1}/{max_reasoning_steps}", "reasoning_step", metadata={"id": reasoning_step_id}, is_start=False)
-                break
+                if not tool_name:
+                    # Handle error...
+                    break
+
+                # --- Handle special, non-executing tools ---
+                if tool_name == "request_clarification":
+                    # Handle clarification...
+                    return {"final_answer": action.get("clarification_question", "Could you please provide more details?"), "final_scratchpad": current_scratchpad, "tool_calls": tool_calls_this_turn, "sources": sources_this_turn, "clarification_required": True, "error": None}
+
+                if tool_name == "final_answer":
+                    current_scratchpad += f"\n\n### Step {i+1}: Action\n- **Action:** Decided to formulate the final answer."
+                    log_event("Action: Formulate final answer.", MSG_TYPE.MSG_TYPE_THOUGHT_CHUNK)
+                    if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}",MSG_TYPE.MSG_TYPE_STEP_END, event_id=reasoning_step_id)
+                    break

-            tool_call_id = log_step(f"Executing tool: {tool_name}", "tool_call", metadata={"name": tool_name, "parameters": tool_params}, is_start=True)
-            tool_result = None
-            try:
-                if tool_name.startswith("research::") and use_data_store:
-                    store_name = tool_name.split("::")[1]
-                    rag_callable = use_data_store.get(store_name, {}).get("callable")
-                    query = tool_params.get("query", "")
-                    retrieved_chunks = rag_callable(query, rag_top_k=rag_top_k, rag_min_similarity_percent=rag_min_similarity_percent)
-                    if retrieved_chunks:
-                        sources_this_turn.extend(retrieved_chunks)
-                        tool_result = {"status": "success", "summary": f"Found {len(retrieved_chunks)} relevant chunks.", "chunks": retrieved_chunks}
+                # --- Handle the `put_code_in_buffer` tool specifically ---
+                if tool_name == 'put_code_in_buffer':
+                    code_gen_id = log_event(f"Generating code...", MSG_TYPE.MSG_TYPE_STEP_START, metadata={"name": "put_code_in_buffer", "id": "gencode"})
+                    code_prompt = tool_params.get("prompt", "Generate the requested code.")
+
+                    # Use a specific system prompt to get raw code
+                    code_generation_system_prompt = "You are a code generation assistant. Generate ONLY the raw code based on the user's request. Do not add any explanations, markdown code fences, or other text outside of the code itself."
+                    generated_code = self.generate_code(prompt=code_prompt, system_prompt=code_generation_system_prompt + "\n----\n" + reasoning_prompt_template, **llm_generation_kwargs)
+
+                    code_uuid = str(uuid.uuid4())
+                    generated_code_store[code_uuid] = generated_code
+
+                    tool_result = {"status": "success", "code_id": code_uuid, "summary": f"Code generated successfully. Use this ID in the next tool call that requires code."}
+                    tool_calls_this_turn.append({"name": "put_code_in_buffer", "params": tool_params, "result": tool_result})
+                    observation_text = f"```json\n{json.dumps(tool_result, indent=2)}\n```"
+                    current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
+                    log_event(f"Observation: Code generated with ID: {code_uuid}", MSG_TYPE.MSG_TYPE_OBSERVATION)
+                    if code_gen_id: log_event(f"Generating code...", MSG_TYPE.MSG_TYPE_TOOL_CALL, metadata={"id": code_gen_id, "result": tool_result})
+                    if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_END, event_id= reasoning_step_id)
+                    continue # Go to the next reasoning step immediately
+                if tool_name == 'refactor_scratchpad':
+                    scratchpad_cleaning_prompt = f"""Enhance this scratchpad content to be more organized and comprehensive. Keep relevant experience information and remove any useless redundancies. Try to log learned things from the context so that you won't make the same mistakes again. Do not remove the main objective information or any crucial information that may be useful for the next iterations. Answer directly with the new scratchpad content without any comments.
+--- YOUR INTERNAL SCRATCHPAD (Work History & Analysis) ---
+{current_scratchpad}
+--- END OF SCRATCHPAD ---"""
+                    current_scratchpad = self.generate_text(scratchpad_cleaning_prompt)
+                    log_event(f"New scratchpad:\n{current_scratchpad}")
+
+                # --- Substitute UUIDs and Execute Standard Tools ---
+                log_event(f"Calling tool: `{tool_name}` with params:\n{dict_to_markdown(tool_params)}", MSG_TYPE.MSG_TYPE_STEP)
+                _substitute_code_uuids_recursive(tool_params, generated_code_store)
+
+                tool_call_id = log_event(f"Executing tool: {tool_name}",MSG_TYPE.MSG_TYPE_STEP_START, metadata={"name": tool_name, "parameters": tool_params, "id":"executing tool"})
+                tool_result = None
+                try:
+                    if tool_name.startswith("research::") and use_data_store:
+                        store_name = tool_name.split("::")[1]
+                        rag_callable = use_data_store.get(store_name, {}).get("callable")
+                        query = tool_params.get("query", "")
+                        retrieved_chunks = rag_callable(query, rag_top_k=rag_top_k, rag_min_similarity_percent=rag_min_similarity_percent)
+                        if retrieved_chunks:
+                            sources_this_turn.extend(retrieved_chunks)
+                            tool_result = {"status": "success", "summary": f"Found {len(retrieved_chunks)} relevant chunks.", "chunks": retrieved_chunks}
+                        else:
+                            tool_result = {"status": "success", "summary": "No relevant documents found."}
+                    elif use_mcps and self.mcp:
+                        mcp_result = self.mcp.execute_tool(tool_name, tool_params, lollms_client_instance=self)
+                        tool_result = {"status": "success", "output": mcp_result} if not (isinstance(mcp_result, dict) and "error" in mcp_result) else {"status": "failure", **mcp_result}
                     else:
-                        tool_result = {"status": "success", "summary": "No relevant documents found."}
-                elif use_mcps and self.mcp:
-                    mcp_result = self.mcp.execute_tool(tool_name, tool_params, lollms_client_instance=self)
-                    tool_result = {"status": "success", "output": mcp_result} if not (isinstance(mcp_result, dict) and "error" in mcp_result) else {"status": "failure", **mcp_result}
+                        tool_result = {"status": "failure", "error": f"Tool '{tool_name}' not found."}
+                except Exception as e:
+                    trace_exception(e)
+                    tool_result = {"status": "failure", "error": f"Exception executing tool: {str(e)}"}
+
+                if tool_call_id: log_event(f"Executing tool: {tool_name}", MSG_TYPE.MSG_TYPE_STEP_END, metadata={"result": tool_result}, event_id= tool_call_id)
+
+                observation_text = ""
+                sanitized_result = {}
+                if isinstance(tool_result, dict):
+                    sanitized_result = tool_result.copy()
+                    summarized_fields = {}
+                    for key, value in tool_result.items():
+                        if isinstance(value, str) and key.endswith("_base64") and len(value) > 256:
+                            sanitized_result[key] = f"[Image was generated. Size: {len(value)} bytes]"
+                            continue
+                        if isinstance(value, str) and len(self.tokenize(value)) > output_summarization_threshold:
+                            if streaming_callback: streaming_callback(f"Summarizing long output from field '{key}'...", MSG_TYPE.MSG_TYPE_STEP, {"type": "summarization"})
+                            summary = self.sequential_summarize(text=value, chunk_processing_prompt=f"Summarize key info from this chunk of '{key}'.", callback=streaming_callback)
+                            summarized_fields[key] = summary
+                            sanitized_result[key] = f"[Content summarized, see summary below. Original length: {len(value)} chars]"
+                    observation_text = f"```json\n{json.dumps(sanitized_result, indent=2)}\n```"
+                    if summarized_fields:
+                        observation_text += "\n\n**Summaries of Long Outputs:**"
+                        for key, summary in summarized_fields.items():
+                            observation_text += f"\n- **Summary of '{key}':**\n{summary}"
                 else:
-                    tool_result = {"status": "failure", "error": f"Tool '{tool_name}' not found."}
-            except Exception as e:
-                trace_exception(e)
-                tool_result = {"status": "failure", "error": f"Exception executing tool: {str(e)}"}
-
-            if tool_call_id:
-                log_step(f"Executing tool: {tool_name}", "tool_call", metadata={"id": tool_call_id, "result": tool_result}, is_start=False)
-
-            observation_text = ""
-            if isinstance(tool_result, dict):
-                sanitized_result = tool_result.copy()
-                summarized_fields = {}
-                for key, value in tool_result.items():
-                    if isinstance(value, str) and key.endswith("_base64") and len(value) > 256:
-                        sanitized_result[key] = f"[Image was generated. Size: {len(value)} bytes]"
-                        continue
-                    if isinstance(value, str) and len(self.tokenize(value)) > output_summarization_threshold:
-                        if streaming_callback: streaming_callback(f"Summarizing long output from field '{key}'...", MSG_TYPE.MSG_TYPE_STEP, {"type": "summarization"})
-                        summary = self.sequential_summarize(text=value, chunk_processing_prompt=f"Summarize key info from this chunk of '{key}'.", callback=streaming_callback)
-                        summarized_fields[key] = summary
-                        sanitized_result[key] = f"[Content summarized, see summary below. Original length: {len(value)} chars]"
-                observation_text = f"```json\n{json.dumps(sanitized_result, indent=2)}\n```"
-                if summarized_fields:
-                    observation_text += "\n\n**Summaries of Long Outputs:**"
-                    for key, summary in summarized_fields.items():
-                        observation_text += f"\n- **Summary of '{key}':**\n{summary}"
-            else:
-                observation_text = f"Tool returned non-dictionary output: {str(tool_result)}"
-
-            tool_calls_this_turn.append({"name": tool_name, "params": tool_params, "result": tool_result})
-            current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
-            log_step(f"### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n", "scratchpad", is_start=False)
-
-            if reasoning_step_id:
-                log_step(f"Reasoning Step {i+1}/{max_reasoning_steps}", "reasoning_step", metadata={"id": reasoning_step_id}, is_start=False)
-
+                    observation_text = f"Tool returned non-dictionary output: {str(tool_result)}"
+
+                tool_calls_this_turn.append({"name": tool_name, "params": tool_params, "result": tool_result})
+                current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
+                log_event(f"Observation: Result from `{tool_name}`:\n{dict_to_markdown(sanitized_result)}", MSG_TYPE.MSG_TYPE_OBSERVATION)
+
+                if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_END, event_id = reasoning_step_id)
+            except Exception as ex:
+                trace_exception(ex)
+                current_scratchpad += f"\n\n### Error : {ex}"
+                if reasoning_step_id: log_event(f"Reasoning Step {i+1}/{max_reasoning_steps}", MSG_TYPE.MSG_TYPE_STEP_END, event_id = reasoning_step_id)
+
         # --- Final Answer Synthesis ---
-        synthesis_id = log_step("Synthesizing final answer...", "final_answer_synthesis", is_start=True)
+        synthesis_id = log_event("Synthesizing final answer...", MSG_TYPE.MSG_TYPE_STEP_START)

         final_answer_prompt = f"""You are an AI assistant. Provide a final, comprehensive answer based on your work.
 --- Original User Request ---
1702
1772
  - If images were provided by the user, incorporate your analysis of them into the answer.
1703
1773
  - Do not talk about your internal process unless it's necessary to explain why you couldn't find an answer.
1704
1774
  """
1775
+ if debug: log_prompt(final_answer_prompt, "FINAL ANSWER SYNTHESIS PROMPT")
1705
1776
  final_answer_text = self.generate_text(prompt=final_answer_prompt, system_prompt=system_prompt, images=images, stream=streaming_callback is not None, streaming_callback=streaming_callback, temperature=final_answer_temperature, **llm_generation_kwargs)
1706
1777
  final_answer = self.remove_thinking_blocks(final_answer_text)
1778
+ if debug: log_prompt(final_answer_text, "FINAL ANSWER RESPONSE")
1707
1779
 
1708
- if synthesis_id:
1709
- log_step("Synthesizing final answer...", "final_answer_synthesis", metadata={"id": synthesis_id}, is_start=False)
1780
+ if synthesis_id: log_event("Synthesizing final answer...", MSG_TYPE.MSG_TYPE_STEP_END, event_id= synthesis_id)
1710
1781
 
1711
1782
  return {
1712
1783
  "final_answer": final_answer,
@@ -1716,7 +1787,6 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
             "clarification_required": False,
             "error": None
         }
-
     def generate_code(
         self,
         prompt,
@@ -1795,7 +1865,7 @@ Do not split the code in multiple tags.
         while not last_code["is_complete"] and retries < max_retries:
             retries += 1
             ASCIIColors.info(f"Code block seems incomplete. Attempting continuation ({retries}/{max_retries})...")
-            continuation_prompt = f"{full_prompt}{code_content}\n\n{self.user_full_header}The previous code block was incomplete. Continue the code exactly from where it left off. Do not repeat the previous part. Only provide the continuation inside a single {code_tag_format} code tag.\n{self.ai_full_header}"
+            continuation_prompt = f"{prompt}\n\nAssistant:\n{code_content}\n\n{self.user_full_header}The previous code block was incomplete. Continue the code exactly from where it left off. Do not repeat the previous part. Only provide the continuation inside a single {code_tag_format} code tag.\n{self.ai_full_header}"

             continuation_response = self.generate_text(
                 continuation_prompt,
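The continuation prompt is now rebuilt from the raw `prompt` plus an explicit `Assistant:` turn rather than from `full_prompt`, presumably because `full_prompt` already carried the chat scaffolding. A shape-only illustration of the string being assembled; the header strings below are placeholders, not the package's actual tokens:

```python
# Placeholder headers, for shape only; names mirror the surrounding diff.
user_full_header = "!@>user:\n"
ai_full_header = "!@>assistant:\n"
prompt = "Write a function that parses a CSV file."
code_content = "def parse_csv(path):\n    ..."
code_tag_format = "python"

continuation_prompt = (
    f"{prompt}\n\nAssistant:\n{code_content}\n\n"
    f"{user_full_header}The previous code block was incomplete. "
    f"Continue the code exactly from where it left off. Do not repeat the "
    f"previous part. Only provide the continuation inside a single "
    f"{code_tag_format} code tag.\n{ai_full_header}"
)
print(continuation_prompt)
```
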
@@ -2065,7 +2135,7 @@ Do not split the code in multiple tags.
         response_json_str = re.sub(r",\s*}", "}", response_json_str)
         response_json_str = re.sub(r",\s*]", "]", response_json_str)

-        parsed_response = json.loads(response_json_str)
+        parsed_response = robust_json_parser(response_json_str)
         answer = parsed_response.get("answer")
         explanation = parsed_response.get("explanation", "")

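The two `re.sub` calls strip trailing commas, and `robust_json_parser` now backs them up for anything the regexes miss. A quick, self-contained demonstration of exactly these two substitutions:

```python
import re

response_json_str = '{"answer": true, "explanation": "ok",}'
response_json_str = re.sub(r",\s*}", "}", response_json_str)
response_json_str = re.sub(r",\s*]", "]", response_json_str)
print(response_json_str)  # {"answer": true, "explanation": "ok"}
```
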
@@ -2159,7 +2229,7 @@ Do not split the code in multiple tags.
         response_json_str = re.sub(r",\s*}", "}", response_json_str)
         response_json_str = re.sub(r",\s*]", "]", response_json_str)

-        result = json.loads(response_json_str)
+        result = robust_json_parser(response_json_str)
         index = result.get("index")
         explanation = result.get("explanation", "")

@@ -2232,7 +2302,7 @@ Do not split the code in multiple tags.
         response_json_str = re.sub(r",\s*}", "}", response_json_str)
         response_json_str = re.sub(r",\s*]", "]", response_json_str)

-        result = json.loads(response_json_str)
+        result = robust_json_parser(response_json_str)
         ranking = result.get("ranking")
         explanations = result.get("explanations", []) if return_explanation else None

@@ -2856,5 +2926,3 @@ def chunk_text(text, tokenizer, detokenizer, chunk_size, overlap, use_separators
             break

     return chunks
-
-