dasein-core 0.2.9__py3-none-any.whl → 0.2.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dasein/api.py +157 -75
- dasein/capture.py +0 -54
- dasein/wrappers.py +7 -6
- {dasein_core-0.2.9.dist-info → dasein_core-0.2.12.dist-info}/METADATA +1 -1
- {dasein_core-0.2.9.dist-info → dasein_core-0.2.12.dist-info}/RECORD +8 -8
- {dasein_core-0.2.9.dist-info → dasein_core-0.2.12.dist-info}/WHEEL +0 -0
- {dasein_core-0.2.9.dist-info → dasein_core-0.2.12.dist-info}/licenses/LICENSE +0 -0
- {dasein_core-0.2.9.dist-info → dasein_core-0.2.12.dist-info}/top_level.txt +0 -0
dasein/api.py
CHANGED
@@ -173,10 +173,8 @@ class DaseinLLMWrapper(BaseChatModel):
             self._vprint(f"[DASEIN][TRACE] LLM result: {result_text[:100]}...")
             self._vprint(f"[DASEIN][METRICS] Tokens: {step['tokens_input']}->{output_tokens} | Time: {duration_ms}ms | Success: {'OK' if success else 'FAIL'}")
 
-            # 🚨 MICROTURN ENFORCEMENT -
-
-            print(f"[DASEIN][MICROTURN_DEBUG] run_number={run_number}, callback_handler={self._callback_handler is not None}")
-            if run_number == 1 and self._callback_handler:
+            # 🚨 MICROTURN ENFORCEMENT - DISABLED (can interfere with tool execution)
+            if False: # Disabled
                 try:
                     proposed_func_name = None
                     print(f"[DASEIN][MICROTURN_DEBUG] Checking result for function call...")
@@ -883,28 +881,12 @@ class CognateProxy:
         # Initialize KPI tracking
         self._last_run_kpis = None
 
+        # Initialize wrapped LLM (will be set by _wrap_agent_llm if applicable)
+        self._wrapped_llm = None
+
         # Wrap the agent's LLM with our trace capture wrapper
         self._wrap_agent_llm()
 
-        # Wrap the agent's tools for pipecleaner deduplication
-        print(f"\n{'='*70}")
-        print(f"[DASEIN] Patching tool execution for pipecleaner...")
-        print(f"{'='*70}")
-        try:
-            from .wrappers import wrap_tools_for_pipecleaner
-            verbose = getattr(self._callback_handler, '_verbose', False)
-            success = wrap_tools_for_pipecleaner(self._agent, self._callback_handler, verbose=verbose)
-            if success:
-                print(f"[DASEIN] ✅ Tool execution patched successfully")
-            else:
-                print(f"[DASEIN] ⚠️ Tool execution patching failed")
-            print(f"{'='*70}\n")
-        except Exception as e:
-            print(f"[DASEIN] ❌ ERROR patching tool execution: {e}")
-            import traceback
-            traceback.print_exc()
-            print(f"{'='*70}\n")
-
         # Inject universal dead-letter tool
         self._inject_deadletter_tool()
 
@@ -1238,6 +1220,10 @@ class CognateProxy:
                 tool = tool_tuple
                 node_name = None
 
+            # Unwrap DaseinToolWrapper to get complete metadata (especially args_schema)
+            if hasattr(tool, 'original_tool'):
+                tool = tool.original_tool
+
             tool_meta = {
                 'name': getattr(tool, 'name', str(tool.__class__.__name__)),
                 'description': getattr(tool, 'description', ''),
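Editor's note: the unwrap above matters because tool metadata is read off the tool object itself, and a proxy wrapper that does not re-expose args_schema would yield incomplete tool_meta. A minimal sketch of the idea (illustrative only; tool_metadata is not a function in this package):

```python
# Illustrative sketch: prefer the wrapped tool's original_tool when building metadata.
def tool_metadata(tool):
    if hasattr(tool, 'original_tool'):        # DaseinToolWrapper-style proxy
        tool = tool.original_tool             # unwrap to reach args_schema etc.
    return {
        'name': getattr(tool, 'name', type(tool).__name__),
        'description': getattr(tool, 'description', ''),
        'args_schema': getattr(tool, 'args_schema', None),
    }
```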
@@ -1697,7 +1683,69 @@ Follow these rules when planning your actions."""
         self._deadletter_fn = None
 
     def _wrap_agent_llm(self):
-        """
+        """Wrap LLM instance (for SQL agent callbacks) AND monkey-patch (for Pipecleaner)."""
+        try:
+            # STEP 1: Wrap the main agent LLM with DaseinLLMWrapper (captures all _generate calls)
+            # This is critical for SQL agents where callbacks don't propagate properly
+            llm = self._find_llm_recursively(self._agent, max_depth=5)
+            if llm:
+                wrapped_llm = DaseinLLMWrapper(llm, self._callback_handler, verbose=self._verbose)
+                # Replace the original LLM with our wrapped version
+                self._replace_llm_in_structure(self._agent, llm, wrapped_llm, max_depth=5)
+                self._wrapped_llm = wrapped_llm
+                self._vprint(f"[DASEIN][WRAPPER] Successfully wrapped {type(llm).__name__} LLM")
+            else:
+                self._vprint(f"[DASEIN][WRAPPER] Could not find any LLM in agent structure")
+                self._wrapped_llm = None
+
+            # STEP 2: Monkey-patch LLM classes for Pipecleaner deduplication
+            # This is critical for research agents with Summary calls that need deduplication
+            self._monkey_patch_llm_classes()
+
+        except Exception as e:
+            self._vprint(f"[DASEIN][WRAPPER] Failed to wrap agent LLM: {e}")
+            import traceback
+            traceback.print_exc()
+            self._wrapped_llm = None
+
+    def _replace_llm_in_structure(self, obj, original_llm, wrapped_llm, max_depth=5, path=""):
+        """Replace the original LLM with wrapped LLM in the structure."""
+        if max_depth <= 0:
+            return
+
+        # Special handling for RunnableSequence - check steps
+        if hasattr(obj, 'steps') and hasattr(obj, '__iter__'):
+            for i, step in enumerate(obj.steps):
+                if step is original_llm:
+                    self._vprint(f"[DASEIN][WRAPPER] Replacing LLM at {path}.steps[{i}]")
+                    obj.steps[i] = wrapped_llm
+                    return
+                # Check if step has bound attribute (RunnableBinding)
+                if hasattr(step, 'bound') and step.bound is original_llm:
+                    self._vprint(f"[DASEIN][WRAPPER] Replacing LLM at {path}.steps[{i}].bound")
+                    step.bound = wrapped_llm
+                    return
+                # Recursively search in the step
+                self._replace_llm_in_structure(step, original_llm, wrapped_llm, max_depth - 1, f"{path}.steps[{i}]")
+
+        # Search in attributes
+        for attr_name in dir(obj):
+            if attr_name.startswith('_'):
+                continue
+            try:
+                attr_value = getattr(obj, attr_name)
+                if attr_value is original_llm:
+                    self._vprint(f"[DASEIN][WRAPPER] Replacing LLM at {path}.{attr_name}")
+                    setattr(obj, attr_name, wrapped_llm)
+                    return
+                # Recursively search in the attribute
+                if hasattr(attr_value, '__dict__') or hasattr(attr_value, '__iter__'):
+                    self._replace_llm_in_structure(attr_value, original_llm, wrapped_llm, max_depth - 1, f"{path}.{attr_name}")
+            except:
+                continue
+
+    def _monkey_patch_llm_classes(self):
+        """Monkey-patch ALL LLM classes found in agent + tools for Pipecleaner deduplication."""
         try:
             # Find ALL LLMs in agent structure + tools
             print(f"[DASEIN][WRAPPER] Searching for ALL LLMs in agent+tools...")
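Editor's note: the new _replace_llm_in_structure relies on two langchain-core attributes, RunnableSequence.steps and RunnableBinding.bound. A minimal sketch of the shapes it walks (assumes langchain-core is installed; the lambdas stand in for a real chat model and are not the package's code):

```python
from langchain_core.runnables import RunnableLambda

fake_llm = RunnableLambda(lambda x: x)            # stand-in for the agent's chat model
chain = RunnableLambda(str.strip) | fake_llm.bind(stop=["\n"])

print(type(chain).__name__)               # RunnableSequence -> exposes .steps
print(type(chain.steps[1]).__name__)      # RunnableBinding  -> exposes .bound
print(chain.steps[1].bound is fake_llm)   # True: the slot the replacement walk rewrites
```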
@@ -1730,10 +1778,10 @@ Follow these rules when planning your actions."""
                 print(f"[DASEIN][WRAPPER] Patching {llm_class.__name__} (found in {location})...")
 
                 # Check what methods the LLM class has
-                #
+                # Patch both user-facing AND internal methods since SQL agents bypass invoke
                 print(f"[DASEIN][WRAPPER] Checking LLM methods...")
                 methods_to_patch = []
-                for method in ['invoke', 'ainvoke']: #
+                for method in ['invoke', 'ainvoke', '_generate', '_agenerate']: # Include internal methods for SQL agents
                     if hasattr(llm_class, method):
                         print(f"[DASEIN][WRAPPER] - Has {method}")
                         methods_to_patch.append(method)
@@ -1828,7 +1876,7 @@ Follow these rules when planning your actions."""
                     prompt_strings.append(msg.content)
                 elif isinstance(msg, str):
                     prompt_strings.append(msg)
-
+                else:
                     prompt_strings.append(str(msg))
 
             # =============================================================
@@ -1946,6 +1994,7 @@ Follow these rules when planning your actions."""
                         break
 
             if should_dedupe:
+                try:
                     # Deduplicate each prompt
                     from .pipecleaner import get_or_create_corpus
                     import hashlib
@@ -1986,53 +2035,56 @@ Follow these rules when planning your actions."""
                         kwargs['messages'] = messages_to_dedupe
                     elif 'prompts' in kwargs:
                         kwargs['prompts'] = messages_to_dedupe
+                except Exception as e:
+                    print(f"[🔥 HOTPATH] ⚠️ Deduplication error: {e}")
+                    import traceback
+                    traceback.print_exc()
             except Exception as e:
-                print(f"[🔥 HOTPATH] ⚠️
+                print(f"[🔥 HOTPATH] ⚠️ Error in pipecleaner preprocessing: {e}")
                 import traceback
                 traceback.print_exc()
 
             try:
+                # CRITICAL FIX: Ensure callbacks propagate to _generate/_agenerate
+                # AgentExecutor doesn't pass run_manager to these internal methods
+                # So we need to manually inject the callback handler
+                if meth_name in ['_generate', '_agenerate'] and callback_handler:
+                    if 'run_manager' not in kwargs and hasattr(callback_handler, 'on_llm_start'):
+                        # Manually trigger on_llm_start since no run_manager
+                        import uuid
+                        run_id = uuid.uuid4()
+                        # Extract messages for on_llm_start
+                        messages = args[0] if args else []
+                        prompts = []
+                        for msg in (messages if isinstance(messages, list) else [messages]):
+                            if hasattr(msg, 'content'):
+                                prompts.append(str(msg.content))
+                            else:
+                                prompts.append(str(msg))
+
+                        # Call on_llm_start
+                        callback_handler.on_llm_start(
+                            serialized={'name': type(self_llm).__name__},
+                            prompts=prompts,
+                            run_id=run_id
+                        )
+
+                        # Store run_id for on_llm_end
+                        if not hasattr(self_llm, '_dasein_pending_run_ids'):
+                            self_llm._dasein_pending_run_ids = []
+                        self_llm._dasein_pending_run_ids.append(run_id)
+
                 result = await orig_method(self_llm, *args, **kwargs)
 
-                #
-
-
-
-
-
-
-
-
-                    # No tool_end rules - silently skip microturn
-                    pass
-                else:
-                    # Check if we've already processed these specific tool calls (prevents duplicate checks as call stack unwinds)
-                    temp_names, temp_msg = extract_proposed_function_calls(result)
-                    if temp_msg:
-                        temp_sigs = extract_tool_call_signatures(temp_msg)
-                        tool_calls_sig = f"{','.join(sorted(temp_sigs.values()))}" if temp_sigs else "empty"
-                    else:
-                        tool_calls_sig = f"{','.join(sorted(temp_names))}" if temp_names else "empty"
-
-                    if not hasattr(_patch_depth, 'processed_tool_calls'):
-                        _patch_depth.processed_tool_calls = set()
-
-                    if tool_calls_sig not in _patch_depth.processed_tool_calls:
-                        # Mark these specific tool calls as processed
-                        _patch_depth.processed_tool_calls.add(tool_calls_sig)
-
-                        # Run microturn enforcement (for tool CALLS)
-                        from .microturn import run_microturn_enforcement
-                        try:
-                            await run_microturn_enforcement(
-                                result=result,
-                                callback_handler=callback_handler,
-                                self_llm=self_llm,
-                                patch_depth=_patch_depth,
-                                use_llm_microturn=USE_LLM_MICROTURN
-                            )
-                        except Exception as e:
-                            print(f"[DASEIN][MICROTURN] ⚠️ Microturn error: {e}")
+                # Call on_llm_end if we called on_llm_start
+                if meth_name in ['_generate', '_agenerate'] and callback_handler:
+                    if hasattr(self_llm, '_dasein_pending_run_ids') and self_llm._dasein_pending_run_ids:
+                        run_id = self_llm._dasein_pending_run_ids.pop(0)
+                        callback_handler.on_llm_end(result, run_id=run_id)
+
+                # 🚨 MICROTURN ENFORCEMENT - DISABLED
+                # Microturn can interfere with tool execution, so it's disabled
+                # TODO: Re-enable with proper gating if needed for specific use cases
 
                 return result
             finally:
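Editor's note: the block above (and its sync twin in the next hunk) implements a manual callback bracket around the patched internal methods, since no run_manager is supplied to them. Reduced to its essentials, the pattern looks like this (a sketch, not the package's code; llm_class and callback_handler are assumed to exist):

```python
import uuid

def patch_generate(llm_class, callback_handler):
    orig = llm_class._generate

    def patched(self, messages, *args, **kwargs):
        run_id = uuid.uuid4()
        prompts = [getattr(m, "content", str(m)) for m in messages]
        callback_handler.on_llm_start({"name": type(self).__name__}, prompts, run_id=run_id)
        result = orig(self, messages, *args, **kwargs)
        callback_handler.on_llm_end(result, run_id=run_id)
        return result

    patched._dasein_patched = True   # marker used to avoid double-patching
    llm_class._generate = patched
```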
@@ -2258,15 +2310,45 @@ Follow these rules when planning your actions."""
                 traceback.print_exc()
 
             try:
+                # CRITICAL FIX: Ensure callbacks propagate to _generate/_agenerate
+                # AgentExecutor doesn't pass run_manager to these internal methods
+                # So we need to manually inject the callback handler
+                if meth_name in ['_generate', '_agenerate'] and callback_handler:
+                    if 'run_manager' not in kwargs and hasattr(callback_handler, 'on_llm_start'):
+                        # Manually trigger on_llm_start since no run_manager
+                        import uuid
+                        run_id = uuid.uuid4()
+                        # Extract messages for on_llm_start
+                        messages = args[0] if args else []
+                        prompts = []
+                        for msg in (messages if isinstance(messages, list) else [messages]):
+                            if hasattr(msg, 'content'):
+                                prompts.append(str(msg.content))
+                            else:
+                                prompts.append(str(msg))
+
+                        # Call on_llm_start
+                        callback_handler.on_llm_start(
+                            serialized={'name': type(self_llm).__name__},
+                            prompts=prompts,
+                            run_id=run_id
+                        )
+
+                        # Store run_id for on_llm_end
+                        if not hasattr(self_llm, '_dasein_pending_run_ids'):
+                            self_llm._dasein_pending_run_ids = []
+                        self_llm._dasein_pending_run_ids.append(run_id)
+
                 result = orig_method(self_llm, *args, **kwargs)
 
-                #
-                if
-
-
-
-
-
+                # Call on_llm_end if we called on_llm_start
+                if meth_name in ['_generate', '_agenerate'] and callback_handler:
+                    if hasattr(self_llm, '_dasein_pending_run_ids') and self_llm._dasein_pending_run_ids:
+                        run_id = self_llm._dasein_pending_run_ids.pop(0)
+                        callback_handler.on_llm_end(result, run_id=run_id)
+
+                # 🚨 MICROTURN ENFORCEMENT - DISABLED (can interfere with tool execution)
+                # TODO: Re-enable with proper gating if needed
 
                 return result
             finally:
@@ -2286,7 +2368,7 @@ Follow these rules when planning your actions."""
                 # Mark and apply the patch
                 patched_method._dasein_patched = True
                 setattr(llm_class, method_name, patched_method)
-                print(f"[DASEIN][WRAPPER]
+                print(f"[DASEIN][WRAPPER] Patched {method_name}")
 
             # Mark this class as patched
             patched_classes.add(llm_class)
dasein/capture.py
CHANGED
@@ -87,9 +87,6 @@ class DaseinToolWrapper(BaseTool):
             # Use original input
             result = self.original_tool._run(*args, **kwargs)
 
-            # 🧹 PIPECLEANER: Apply deduplication to tool result (microturn-style interception)
-            result = self._apply_pipecleaner_to_result(result)
-
             # Capture the tool output in the trace
             self._vprint(f"[DASEIN][TOOL_WRAPPER] About to capture tool output for {self.name}")
             self._capture_tool_output(self.name, args, kwargs, result)
@@ -144,9 +141,6 @@ class DaseinToolWrapper(BaseTool):
             # Use original input
             result = await self.original_tool._arun(*args, **kwargs)
 
-            # 🧹 PIPECLEANER: Apply deduplication to tool result (microturn-style interception)
-            result = self._apply_pipecleaner_to_result(result)
-
             # Capture the tool output in the trace
             self._vprint(f"[DASEIN][TOOL_WRAPPER] About to capture tool output for {self.name}")
             self._capture_tool_output(self.name, args, kwargs, result)
@@ -180,9 +174,6 @@ class DaseinToolWrapper(BaseTool):
             # Use original input
             result = await self.original_tool.ainvoke(input_data, config, **kwargs)
 
-            # 🧹 PIPECLEANER: Apply deduplication to tool result (microturn-style interception)
-            result = self._apply_pipecleaner_to_result(result)
-
             return result
 
     def _apply_micro_turn_injection(self, original_input: str) -> str:
@@ -308,51 +299,6 @@ Apply the rule to fix the input. Return only the corrected input, nothing else."
             self._vprint(f"[DASEIN][MICROTURN] Error executing micro-turn LLM call: {e}")
             return original_input
 
-    def _apply_pipecleaner_to_result(self, result):
-        """
-        Apply pipecleaner deduplication to tool result (microturn-style interception).
-
-        This is called right after tool execution, before returning result to agent.
-        Similar to how microturn intercepts LLM responses.
-        """
-        try:
-            # Get callback handler's rules
-            if not self.callback_handler or not hasattr(self.callback_handler, '_selected_rules'):
-                return result
-
-            # Convert result to string
-            result_str = str(result)
-
-            print(f"[PIPECLEANER DEBUG] Tool wrapper intercepted: {self.name}")
-            print(f"[PIPECLEANER DEBUG] Result length: {len(result_str)} chars")
-            print(f"[PIPECLEANER DEBUG] Rules count: {len(self.callback_handler._selected_rules)}")
-
-            # Apply pipecleaner if filter search rule exists
-            from .pipecleaner import apply_pipecleaner_if_applicable
-
-            # Get or initialize cached model from callback handler
-            cached_model = getattr(self.callback_handler, '_pipecleaner_embedding_model', None)
-
-            deduplicated_str, model = apply_pipecleaner_if_applicable(
-                self.name,
-                result_str,
-                self.callback_handler._selected_rules,
-                cached_model=cached_model
-            )
-
-            # Cache model for next search
-            if model is not None:
-                self.callback_handler._pipecleaner_embedding_model = model
-
-            # Return deduplicated result (or original if no filter applied)
-            return deduplicated_str
-
-        except Exception as e:
-            print(f"[PIPECLEANER] Error in result interception: {e}")
-            import traceback
-            traceback.print_exc()
-            return result
-
     def _capture_tool_output(self, tool_name, args, kwargs, result):
         """Capture tool output in the trace."""
         try:
dasein/wrappers.py
CHANGED
@@ -29,13 +29,14 @@ def get_pipecleaner_context():
 
 def wrap_tools_for_pipecleaner(agent: Any, callback_handler: Any, verbose: bool = False) -> bool:
     """
-
-
-    Wraps:
-    - ToolNode._run_one/_arun_one (per-invocation choke point)
-    - BaseTool.invoke/ainvoke (catches @tool/StructuredTool)
-    - ToolMessage.__init__ (message construction)
+    DISABLED: This function has been disabled to avoid interfering with tool execution.
     """
+    print(f"[WRAPPERS DISABLED] wrap_tools_for_pipecleaner called but DISABLED - no patching will occur")
+    return False  # Return False to indicate nothing was done
+
+    # ORIGINAL CODE BELOW - COMPLETELY DISABLED
+    if False:
+        pass
     try:
         import importlib
 
{dasein_core-0.2.9.dist-info → dasein_core-0.2.12.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 dasein/__init__.py,sha256=RY0lhaaWB6yJ_5YMRmaHDvQ0eFbc0BGbYNe5OVyxzYE,2316
 dasein/advice_format.py,sha256=5-h4J24L_B2Y9dlmyDuIYtmPCWOGAYoinBEXqpcNg2s,5386
-dasein/api.py,sha256=
-dasein/capture.py,sha256=
+dasein/api.py,sha256=dDOtZVXnfZ3VbiY6N_1u2m3JtiSUwmKC9AO6gxevHxY,256483
+dasein/capture.py,sha256=D4DvknI2wbmVup5WqvNcgw-zW5riEstYG81Rl98uz6o,110942
 dasein/config.py,sha256=lXO8JG4RXbodn3gT5yEnuB0VRwWdrRVwhX3Rm06IZmU,1957
 dasein/events.py,sha256=mG-lnOvQoZUhXbrPSjrG4RME6ywUcbSZ04PscoJ15GI,12896
 dasein/extractors.py,sha256=fUFBVH9u2x4cJaM-8Zw4qiIpBF2LvjcdYkMvoXQUpL8,3986
@@ -11,7 +11,7 @@ dasein/microturn.py,sha256=raFDQquEPFu5wxYPv9od0Nli9tdV55kXRn7Mvk1pyso,19081
 dasein/pipecleaner.py,sha256=Rgw-gJ6NP2k4K7hNt_Lg0Bvs1BOdr4Cf6wFCXdtM0DU,78872
 dasein/trace_buffer.py,sha256=bIyTpU8ZrNFR_TCwS43HvzUrDHpZ2F8pLVDeUE9jpwM,4117
 dasein/types.py,sha256=FjGXZowiRYZzWj5GzSnAnA_-xwYqqE7WmXFCosVyGI8,2974
-dasein/wrappers.py,sha256=
+dasein/wrappers.py,sha256=YY7Iht-0xhuMKH3bPnUerlaIyCXhHN0dgXKkEyj9OsA,13389
 dasein/models/en_core_web_sm/en_core_web_sm/__init__.py,sha256=yOtXB5wD_EFXwif2QXgfvLPp0RQ5q-G_C3LkwPp4o40,237
 dasein/models/en_core_web_sm/en_core_web_sm/meta.json,sha256=X8R1--W7Axckn9flHCLVMFI0W7I_E-rxSf9ZAiOWTRw,10085
 dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/LICENSE,sha256=OTPBdpebaLxtC8yQLH1sEw8dEn9Hbxe6XNuo2Zz9ABI,1056
@@ -45,7 +45,7 @@ dasein/services/post_run_client.py,sha256=UjK3eqf7oWGSuWkKe0vQmeMS0yUUOhYFD4-SZ7
 dasein/services/pre_run_client.py,sha256=tXmz_PQaSfq0xwypiWUAqNkXOmREZ6EwXLC4OM89J-A,4317
 dasein/services/service_adapter.py,sha256=YHk41lR3PXh8WTmxOzzwKf6hwPYGqIdApI92vQKlkAY,7350
 dasein/services/service_config.py,sha256=8_4tpV4mZvfaOc5_yyHbOyL4rYsPHzkLTEY1rtYgLs8,1629
-dasein_core-0.2.
+dasein_core-0.2.12.dist-info/licenses/LICENSE,sha256=7FHjIFEKl_3hSc3tGUVEWmufC_3oi8rh_2zVuL7jMKs,1091
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSE,sha256=OTPBdpebaLxtC8yQLH1sEw8dEn9Hbxe6XNuo2Zz9ABI,1056
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSES_SOURCES,sha256=INnfrNIVESJR8VNB7dGkex-Yvzk6IS8Q8ZT_3H7pipA,2347
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/METADATA,sha256=-vGqRxa_M2RwKtLjBhc4JlBQdJ3k7CwOnseT_ReYcic,2958
@@ -53,7 +53,7 @@ dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/RECORD,sha256=dDb6U7
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/entry_points.txt,sha256=OkWs-KxPJtDdpvIFCVXzDC9ECtejhPxv7pP3Tgk2cNg,47
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/top_level.txt,sha256=56OIuRbEuhr12HsM9XpCMnTtHRMgNC5Hje4Xeo8wF2c,15
-dasein_core-0.2.
-dasein_core-0.2.
-dasein_core-0.2.
-dasein_core-0.2.
+dasein_core-0.2.12.dist-info/METADATA,sha256=43WrPvrzNviksaZxsnZNnLh0nPU7vBO8-rdI2J9R-JM,10297
+dasein_core-0.2.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dasein_core-0.2.12.dist-info/top_level.txt,sha256=6yYY9kltjvvPsg9K6KyMKRtzEr5qM7sHXN7VzmrDtp0,7
+dasein_core-0.2.12.dist-info/RECORD,,
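Editor's note: the sha256 values in RECORD follow the standard wheel convention, urlsafe base64 of the raw digest with padding stripped. A quick way to check an entry locally, assuming the wheel has been unpacked into the current directory:

```python
import base64, hashlib

def record_hash(path):
    # Compute a RECORD-style hash string for a file inside the unpacked wheel.
    digest = hashlib.sha256(open(path, 'rb').read()).digest()
    return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode()

print(record_hash('dasein/api.py'))  # expected: sha256=dDOtZVXnfZ3VbiY6N_1u2m3JtiSUwmKC9AO6gxevHxY
```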
{dasein_core-0.2.9.dist-info → dasein_core-0.2.12.dist-info}/WHEEL
File without changes
{dasein_core-0.2.9.dist-info → dasein_core-0.2.12.dist-info}/licenses/LICENSE
File without changes
{dasein_core-0.2.9.dist-info → dasein_core-0.2.12.dist-info}/top_level.txt
File without changes