dasein-core 0.2.4__py3-none-any.whl → 0.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dasein/api.py +81 -3
- dasein/services/post_run_client.py +2 -0
- dasein/services/service_adapter.py +3 -2
- {dasein_core-0.2.4.dist-info → dasein_core-0.2.6.dist-info}/METADATA +1 -1
- {dasein_core-0.2.4.dist-info → dasein_core-0.2.6.dist-info}/RECORD +8 -8
- {dasein_core-0.2.4.dist-info → dasein_core-0.2.6.dist-info}/WHEEL +0 -0
- {dasein_core-0.2.4.dist-info → dasein_core-0.2.6.dist-info}/licenses/LICENSE +0 -0
- {dasein_core-0.2.4.dist-info → dasein_core-0.2.6.dist-info}/top_level.txt +0 -0
dasein/api.py
CHANGED
@@ -2163,12 +2163,14 @@ Follow these rules when planning your actions."""
         tool_calls = len([step for step in trace if step.get('step_type') == 'tool_start'])
         total_turns = len(trace)
 
-        # Sum up tokens and time
+        # Sum up tokens and calculate average time
         input_tokens = sum(step.get('tokens_input', 0) for step in trace)
         output_tokens = sum(step.get('tokens_output', 0) for step in trace)
         total_tokens = input_tokens + output_tokens
 
-
+        # Calculate average duration_ms across all steps that have timing data
+        durations = [step.get('duration_ms', 0) for step in trace if step.get('duration_ms', 0) > 0]
+        trace_time_ms = int(sum(durations) / len(durations)) if durations else 0
 
         # Calculate wall time from timestamps (they are ISO format strings)
         if len(trace) > 1:
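For illustration, a minimal sketch of the new timing calculation on a hypothetical trace (the field names follow the diff above; the sample steps and values are invented). Steps that report no positive duration_ms are dropped, and trace_time_ms becomes the average of the rest rather than a sum:

    # Hypothetical trace: two steps report duration_ms, one does not
    trace = [
        {'step_type': 'tool_start', 'tokens_input': 120, 'tokens_output': 30, 'duration_ms': 400},
        {'step_type': 'llm_end', 'tokens_input': 800, 'tokens_output': 150, 'duration_ms': 1200},
        {'step_type': 'tool_start', 'tokens_input': 0, 'tokens_output': 0},  # no timing data recorded
    ]

    # Only steps with a positive duration_ms contribute to the average
    durations = [step.get('duration_ms', 0) for step in trace if step.get('duration_ms', 0) > 0]
    trace_time_ms = int(sum(durations) / len(durations)) if durations else 0
    print(trace_time_ms)  # 800 -> (400 + 1200) / 2, not the 1600 a plain sum would give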
@@ -2823,6 +2825,81 @@ Follow these rules when planning your actions."""
             return getattr(agent, 'agent_id', None) or f"agent_{id(agent)}"
 
         agent_fingerprint = _minimal_agent_fingerprint(self._agent)
+
+        # Extract tool metadata for Stage 3.5 tool grounding
+        def _extract_tool_metadata(agent):
+            """
+            Extract tool metadata (name, description, args_schema) from agent.
+
+            CRITICAL: Extracts ALL available tools from the agent, not just tools used in trace.
+            Why: If agent used wrong tool (e.g., extract_text instead of get_elements),
+            the trace won't show the correct tool. Stage 3.5 needs to see all options
+            to suggest better alternatives.
+            """
+            tools_metadata = []
+            tools_to_process = []
+
+            # Get ALL tools from agent (LangChain or LangGraph) - not filtered by trace usage
+            tools_attr = getattr(agent, 'tools', None)
+            if tools_attr:
+                try:
+                    tools_to_process = list(tools_attr)
+                except Exception:
+                    pass
+            elif getattr(agent, 'toolkit', None):
+                tk = getattr(agent, 'toolkit')
+                tk_tools = getattr(tk, 'tools', None) or getattr(tk, 'get_tools', None)
+                try:
+                    tools_to_process = list(tk_tools() if callable(tk_tools) else tk_tools or [])
+                except Exception:
+                    pass
+
+            # Also try LangGraph tools from compiled graph
+            if hasattr(agent, 'nodes') and 'tools' in agent.nodes:
+                tools_node = agent.nodes['tools']
+                if hasattr(tools_node, 'node') and hasattr(tools_node.node, 'steps'):
+                    for step in tools_node.node.steps:
+                        if hasattr(step, 'tools_by_name'):
+                            tools_to_process.extend(step.tools_by_name.values())
+                            break
+
+            # Extract metadata from each tool
+            for tool in tools_to_process:
+                try:
+                    tool_meta = {
+                        'name': getattr(tool, 'name', str(tool.__class__.__name__)),
+                        'description': getattr(tool, 'description', ''),
+                    }
+
+                    # Extract args_schema if available
+                    if hasattr(tool, 'args_schema') and tool.args_schema:
+                        try:
+                            # Try Pydantic v2 method
+                            if hasattr(tool.args_schema, 'model_json_schema'):
+                                tool_meta['args_schema'] = tool.args_schema.model_json_schema()
+                            # Fallback to Pydantic v1 method
+                            elif hasattr(tool.args_schema, 'schema'):
+                                tool_meta['args_schema'] = tool.args_schema.schema()
+                            else:
+                                tool_meta['args_schema'] = {}
+                        except Exception:
+                            tool_meta['args_schema'] = {}
+                    else:
+                        tool_meta['args_schema'] = {}
+
+                    tools_metadata.append(tool_meta)
+                except Exception as e:
+                    # Skip tools that fail to extract
+                    pass
+
+            return tools_metadata
+
+        tools_metadata = _extract_tool_metadata(self._agent)
+        print(f"[DASEIN] Extracted metadata for {len(tools_metadata)} tools")
+        if tools_metadata:
+            print(f"[DASEIN] Sample tool: {tools_metadata[0].get('name', 'unknown')}")
+        else:
+            print(f"[DASEIN] WARNING: No tools extracted! Agent type: {type(self._agent)}")
 
         response = self._service_adapter.synthesize_rules(
             run_id=None,  # Will use stored run_id from pre-run phase
@@ -2837,7 +2914,8 @@ Follow these rules when planning your actions."""
             agent_fingerprint=agent_fingerprint,  # Reuse fingerprint from pre-run (line 2613)
             step_id=self._current_step_id,  # Pass step_id for parallel execution tracking
             post_run_mode=self._post_run,  # Pass post_run mode ("full" or "kpi_only")
-            wait_for_synthesis=wait_for_synthesis  # Wait for synthesis on retry runs (except last)
+            wait_for_synthesis=wait_for_synthesis,  # Wait for synthesis on retry runs (except last)
+            tools_metadata=tools_metadata  # Tool metadata for Stage 3.5 tool grounding
         )
 
         # response is a dict from ServiceAdapter; handle accordingly
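For context, a sketch of the shape each tools_metadata entry takes after extraction; the field names mirror the code above, while the get_elements tool and its schema are invented for illustration:

    # Hypothetical result of _extract_tool_metadata for one LangChain-style tool
    example_entry = {
        'name': 'get_elements',                                         # from tool.name
        'description': 'Return DOM elements matching a CSS selector',   # from tool.description
        'args_schema': {                                                # from args_schema.model_json_schema() (Pydantic v2)
            'title': 'GetElementsInput',
            'type': 'object',
            'properties': {'selector': {'type': 'string'}},
            'required': ['selector'],
        },
    }

Tools without an args_schema fall back to an empty dict, so downstream consumers can rely on the 'args_schema' key always being present.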
dasein/services/post_run_client.py
CHANGED
@@ -32,6 +32,7 @@ class RuleSynthesisRequest:
     skip_synthesis: bool = False
     wait_for_synthesis: bool = False
     step_id: Optional[str] = None
+    tools_metadata: Optional[List[Dict[str, Any]]] = None  # Tool metadata for Stage 3.5 grounding
 
 
 @dataclass
@@ -95,6 +96,7 @@ class PostRunClient:
             "performance_tracking_id": request.performance_tracking_id,
             "skip_synthesis": request.skip_synthesis,
             "wait_for_synthesis": request.wait_for_synthesis,
+            "tools_metadata": request.tools_metadata or [],  # Tool metadata for Stage 3.5 grounding
         }
 
         logger.info(f"Synthesizing rules for run: {request.run_id}")
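Taken together, the two post_run_client.py changes mean the synthesis request body now always carries a tools_metadata key. A sketch of the relevant slice of the payload, with placeholder values, unrelated fields omitted, and the sample entry reusing the invented get_elements example above:

    payload = {
        "performance_tracking_id": "run-123",   # placeholder
        "skip_synthesis": False,
        "wait_for_synthesis": True,
        "tools_metadata": [                     # new in 0.2.6; [] when no tools were extracted
            {"name": "get_elements", "description": "Return DOM elements matching a CSS selector", "args_schema": {}},
        ],
    }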
dasein/services/service_adapter.py
CHANGED
@@ -95,7 +95,7 @@ class ServiceAdapter:
                          max_rules: Optional[int] = 5, performance_tracking_id: Optional[str] = None,
                          skip_synthesis: bool = False, agent_fingerprint: Optional[str] = None,
                          step_id: Optional[str] = None, post_run_mode: str = "full",
-                         wait_for_synthesis: bool = False) -> Dict[str, Any]:
+                         wait_for_synthesis: bool = False, tools_metadata: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]:
         """
         Synthesize rules from run telemetry (replaces local rule synthesis)
 
@@ -138,7 +138,8 @@ class ServiceAdapter:
            performance_tracking_id=performance_tracking_id,
            skip_synthesis=should_skip_synthesis,
            wait_for_synthesis=wait_for_synthesis,
-           step_id=step_id
+           step_id=step_id,
+           tools_metadata=tools_metadata
        )
 
        response = self.post_run_client.synthesize_rules(request)
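End to end, the new argument flows from the agent-side extraction in api.py through ServiceAdapter.synthesize_rules into RuleSynthesisRequest and the HTTP payload. A minimal calling sketch, assuming adapter, agent_fingerprint, and tools_metadata are already defined as in the api.py hunks above; only parameters visible in this diff are shown, and the literal values are placeholders:

    response = adapter.synthesize_rules(
        run_id=None,                          # reuse the run_id stored during the pre-run phase
        agent_fingerprint=agent_fingerprint,  # fingerprint computed earlier (assumed defined)
        step_id="step-1",                     # placeholder
        post_run_mode="full",
        wait_for_synthesis=True,
        tools_metadata=tools_metadata,        # new in 0.2.6: forwarded into RuleSynthesisRequest.tools_metadata
    )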
{dasein_core-0.2.4.dist-info → dasein_core-0.2.6.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
 dasein/__init__.py,sha256=RY0lhaaWB6yJ_5YMRmaHDvQ0eFbc0BGbYNe5OVyxzYE,2316
 dasein/advice_format.py,sha256=5-h4J24L_B2Y9dlmyDuIYtmPCWOGAYoinBEXqpcNg2s,5386
-dasein/api.py,sha256=
+dasein/api.py,sha256=Ii4_RNFtml3fA2Kt2Et_19RkXoL6OkQ91gkvvFNnP5g,176478
 dasein/capture.py,sha256=XrEPsteG5__csqcqXzOmBSzPYgeI-OFzu3IRVMPYj3w,83814
 dasein/config.py,sha256=lXO8JG4RXbodn3gT5yEnuB0VRwWdrRVwhX3Rm06IZmU,1957
 dasein/events.py,sha256=mG-lnOvQoZUhXbrPSjrG4RME6ywUcbSZ04PscoJ15GI,12896
@@ -10,12 +10,12 @@ dasein/injector.py,sha256=EItWhlG6oMAf_D7YJnRNyDwAQIK5MsaATu1ig3OENqM,7256
 dasein/trace_buffer.py,sha256=bIyTpU8ZrNFR_TCwS43HvzUrDHpZ2F8pLVDeUE9jpwM,4117
 dasein/types.py,sha256=FjGXZowiRYZzWj5GzSnAnA_-xwYqqE7WmXFCosVyGI8,2974
 dasein/services/__init__.py,sha256=0o6vKEVSYgGo-u-xDFf7Z4cQr8gIht2YovD6eEXUquE,356
-dasein/services/post_run_client.py,sha256=
+dasein/services/post_run_client.py,sha256=qYT9qp2O1MecYiTLRjFBIJy8mfZ_FTntmK-_I-7-YlI,4552
 dasein/services/pre_run_client.py,sha256=tXmz_PQaSfq0xwypiWUAqNkXOmREZ6EwXLC4OM89J-A,4317
-dasein/services/service_adapter.py,sha256=
+dasein/services/service_adapter.py,sha256=0oMDoKDWkZ17jMCVBuDMJ9TMVmfRie4q4PLMIpiV_uw,7230
 dasein/services/service_config.py,sha256=8_4tpV4mZvfaOc5_yyHbOyL4rYsPHzkLTEY1rtYgLs8,1629
-dasein_core-0.2.
-dasein_core-0.2.
-dasein_core-0.2.
-dasein_core-0.2.
-dasein_core-0.2.
+dasein_core-0.2.6.dist-info/licenses/LICENSE,sha256=7FHjIFEKl_3hSc3tGUVEWmufC_3oi8rh_2zVuL7jMKs,1091
+dasein_core-0.2.6.dist-info/METADATA,sha256=Z30TeYgJc6ms5g7gaKCpfvOV9TiOehgXI9sO-uuowdQ,10192
+dasein_core-0.2.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dasein_core-0.2.6.dist-info/top_level.txt,sha256=6yYY9kltjvvPsg9K6KyMKRtzEr5qM7sHXN7VzmrDtp0,7
+dasein_core-0.2.6.dist-info/RECORD,,
{dasein_core-0.2.4.dist-info → dasein_core-0.2.6.dist-info}/WHEEL
File without changes
{dasein_core-0.2.4.dist-info → dasein_core-0.2.6.dist-info}/licenses/LICENSE
File without changes
{dasein_core-0.2.4.dist-info → dasein_core-0.2.6.dist-info}/top_level.txt
File without changes