dasein-core 0.2.16__tar.gz → 0.2.18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {dasein_core-0.2.16/src/dasein_core.egg-info → dasein_core-0.2.18}/PKG-INFO +1 -1
  2. {dasein_core-0.2.16 → dasein_core-0.2.18}/pyproject.toml +1 -1
  3. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/api.py +199 -48
  4. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/capture.py +31 -5
  5. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/services/post_run_client.py +4 -0
  6. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/services/pre_run_client.py +3 -1
  7. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/services/service_adapter.py +9 -4
  8. {dasein_core-0.2.16 → dasein_core-0.2.18/src/dasein_core.egg-info}/PKG-INFO +1 -1
  9. {dasein_core-0.2.16 → dasein_core-0.2.18}/LICENSE +0 -0
  10. {dasein_core-0.2.16 → dasein_core-0.2.18}/MANIFEST.in +0 -0
  11. {dasein_core-0.2.16 → dasein_core-0.2.18}/README.md +0 -0
  12. {dasein_core-0.2.16 → dasein_core-0.2.18}/examples/dasein_examples.ipynb +0 -0
  13. {dasein_core-0.2.16 → dasein_core-0.2.18}/setup.cfg +0 -0
  14. {dasein_core-0.2.16 → dasein_core-0.2.18}/setup.py +0 -0
  15. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/__init__.py +0 -0
  16. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/advice_format.py +0 -0
  17. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/config.py +0 -0
  18. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/events.py +0 -0
  19. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/extractors.py +0 -0
  20. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/injection_strategies.py +0 -0
  21. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/injector.py +0 -0
  22. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/microturn.py +0 -0
  23. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/__init__.py +0 -0
  24. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/LICENSE +0 -0
  25. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/LICENSES_SOURCES +0 -0
  26. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/README.md +0 -0
  27. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/accuracy.json +0 -0
  28. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/attribute_ruler/patterns +0 -0
  29. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/config.cfg +0 -0
  30. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/lemmatizer/lookups/lookups.bin +0 -0
  31. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/meta.json +0 -0
  32. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/ner/cfg +0 -0
  33. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/ner/model +0 -0
  34. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/ner/moves +0 -0
  35. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/parser/cfg +0 -0
  36. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/parser/model +0 -0
  37. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/parser/moves +0 -0
  38. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/senter/cfg +0 -0
  39. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/senter/model +0 -0
  40. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tagger/cfg +0 -0
  41. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tagger/model +0 -0
  42. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tok2vec/cfg +0 -0
  43. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tok2vec/model +0 -0
  44. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tokenizer +0 -0
  45. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/key2row +0 -0
  46. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/lookups.bin +0 -0
  47. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/strings.json +0 -0
  48. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/vectors +0 -0
  49. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/vectors.cfg +0 -0
  50. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/meta.json +0 -0
  51. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSE +0 -0
  52. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSES_SOURCES +0 -0
  53. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/METADATA +0 -0
  54. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/RECORD +0 -0
  55. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/WHEEL +0 -0
  56. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/entry_points.txt +0 -0
  57. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/top_level.txt +0 -0
  58. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/pipecleaner.py +0 -0
  59. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/services/__init__.py +0 -0
  60. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/services/service_config.py +0 -0
  61. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/trace_buffer.py +0 -0
  62. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/types.py +0 -0
  63. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein/wrappers.py +0 -0
  64. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein_core.egg-info/SOURCES.txt +0 -0
  65. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein_core.egg-info/dependency_links.txt +0 -0
  66. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein_core.egg-info/requires.txt +0 -0
  67. {dasein_core-0.2.16 → dasein_core-0.2.18}/src/dasein_core.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dasein-core
- Version: 0.2.16
+ Version: 0.2.18
  Summary: Universal memory for agentic AI. Attach a brain to any LangChain/LangGraph agent in a single line.
  Author-email: Dasein Team <support@dasein.ai>
  License: MIT
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "dasein-core"
- version = "0.2.16"
+ version = "0.2.18"
  description = "Universal memory for agentic AI. Attach a brain to any LangChain/LangGraph agent in a single line."
  readme = "README.md"
  requires-python = ">=3.8"
@@ -3044,11 +3043,9 @@ Follow these rules when planning your actions."""
  print(f"Query: {query}")
  print("-" * 50)

- # Clear trace before each run (except the first one)
+ # Reset callback handler state before each run (except the first one)
+ # Note: reset_run_state() already clears trace, function calls, guards, and timers
  if i > 0:
- from .capture import clear_trace
- clear_trace()
- # Reset callback handler state (function calls, injection guard)
  if hasattr(self, '_callback_handler') and hasattr(self._callback_handler, 'reset_run_state'):
  self._callback_handler.reset_run_state()

@@ -3088,11 +3086,9 @@ Follow these rules when planning your actions."""
  print(f"Query: {query}")
  print("-" * 50)

- # Clear trace before each run (except the first one)
+ # Reset callback handler state before each run (except the first one)
+ # Note: reset_run_state() already clears trace, function calls, guards, and timers
  if i > 0:
- from .capture import clear_trace
- clear_trace()
- # Reset callback handler state (function calls, injection guard)
  if hasattr(self, '_callback_handler') and hasattr(self._callback_handler, 'reset_run_state'):
  self._callback_handler.reset_run_state()

@@ -3750,28 +3746,95 @@ Follow these rules when planning your actions."""
  print(f"[DASEIN] Calling pre-run service for query: {str(query)[:50]}...")

  # Generate agent fingerprint (stable categorical tag)
- def _minimal_agent_fingerprint(agent) -> str:
+ def _minimal_agent_fingerprint(agent, original_agent) -> str:
+ """Generate fingerprint from ORIGINAL unwrapped agent to avoid wrapper contamination"""
  try:
+ # Use original_agent for fingerprinting, not wrapped agent
+ agent_to_fingerprint = original_agent if original_agent else agent
+
  # agent class
- agent_cls = getattr(agent, '__class__', None)
+ agent_cls = getattr(agent_to_fingerprint, '__class__', None)
  agent_name = getattr(agent_cls, '__name__', '') if agent_cls else ''
  # framework top-level module
- module = getattr(agent, '__module__', '') or ''
+ module = getattr(agent_to_fingerprint, '__module__', '') or ''
  framework = module.split('.')[0] if module else ''
- # model id (best-effort)
+
+ # model id (comprehensive search through agent structure)
  model_id = ''
- llm = getattr(agent, 'llm', None)
- if llm is not None:
- model_id = (
- getattr(llm, 'model', None)
- or getattr(llm, 'model_name', None)
- or getattr(llm, 'model_id', None)
- or getattr(llm, 'model_tag', None)
- or ''
- )
- # tools/toolkit
+
+ # Helper to extract model from LLM instance
+ def _extract_model_from_llm(llm_obj):
+ if llm_obj is None:
+ return None
+ type_name = type(llm_obj).__name__
+ if 'Language' in type_name or 'Chat' in type_name or 'LLM' in type_name:
+ return (
+ getattr(llm_obj, 'model', None)
+ or getattr(llm_obj, 'model_name', None)
+ or getattr(llm_obj, 'model_id', None)
+ or getattr(llm_obj, 'model_tag', None)
+ )
+ return None
+
+ # 1. Direct llm
+ llm = getattr(agent_to_fingerprint, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+
+ # 2. Legacy ReAct: agent.llm_chain.llm
+ if not model_id:
+ llm_chain = getattr(agent_to_fingerprint, 'llm_chain', None)
+ if llm_chain:
+ llm = getattr(llm_chain, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+
+ # 3. Nested agent.agent
+ if not model_id:
+ inner_agent = getattr(agent_to_fingerprint, 'agent', None)
+ if inner_agent:
+ llm = getattr(inner_agent, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+ if not model_id:
+ llm_chain = getattr(inner_agent, 'llm_chain', None)
+ if llm_chain:
+ llm = getattr(llm_chain, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+
+ # 4. LCEL runnable graph
+ if not model_id:
+ runnable = getattr(agent_to_fingerprint, 'runnable', None)
+ if runnable:
+ model_id = _extract_model_from_llm(runnable)
+ if not model_id and hasattr(runnable, 'steps'):
+ for step in runnable.steps:
+ model_id = _extract_model_from_llm(step)
+ if model_id:
+ break
+
+ # 5. Toolkit
+ if not model_id:
+ toolkit = getattr(agent_to_fingerprint, 'toolkit', None)
+ if toolkit:
+ llm = getattr(toolkit, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+
+ # 6. Tools list
+ if not model_id:
+ tools_attr = getattr(agent_to_fingerprint, 'tools', None)
+ if tools_attr:
+ try:
+ for tool in tools_attr:
+ llm = getattr(tool, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+ if model_id:
+ break
+ except Exception:
+ pass
+
+ model_id = str(model_id) if model_id else ''
+
+ # tools/toolkit (from original agent)
  tool_names = []
- tools_attr = getattr(agent, 'tools', None)
+ tools_attr = getattr(agent_to_fingerprint, 'tools', None)
  if tools_attr:
  try:
  for t in tools_attr:
@@ -3782,8 +3845,8 @@ Follow these rules when planning your actions."""
  tool_names.append(str(name))
  except Exception:
  pass
- elif getattr(agent, 'toolkit', None):
- tk = getattr(agent, 'toolkit')
+ elif getattr(agent_to_fingerprint, 'toolkit', None):
+ tk = getattr(agent_to_fingerprint, 'toolkit')
  tk_tools = getattr(tk, 'tools', None) or getattr(tk, 'get_tools', None)
  try:
  iterable = tk_tools() if callable(tk_tools) else tk_tools
@@ -3803,12 +3866,12 @@ Follow these rules when planning your actions."""
  tool_names = [norm(n) for n in tool_names if n]
  tools_joined = ','.join(sorted(set(tool_names)))
  # fixed-order segments (keep keys even if empty to preserve format)
- return f"[[FINGERPRINT]] agent={agent_name} | framework={framework} | model={model_id} | tools={tools_joined}"
+ return f"agent={agent_name}|framework={framework}|model={model_id}|tools={tools_joined}"
  except Exception:
  # Fallback to prior behavior on any error
- return getattr(agent, 'agent_id', None) or f"agent_{id(agent)}"
+ return getattr(agent_to_fingerprint, 'agent_id', None) or f"agent_{id(agent_to_fingerprint)}"

- agent_fingerprint = _minimal_agent_fingerprint(self._agent)
+ agent_fingerprint = _minimal_agent_fingerprint(self._agent, self._original_agent)

  # Call pre-run service (it will handle baseline flag internally)
  selected_rules = self._service_adapter.select_rules(
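Note: the fingerprint string returned above now drops the "[[FINGERPRINT]]" prefix and the padded " | " separators. A minimal sketch of parsing the new pipe-delimited format; the sample value is hypothetical, not emitted by this package:

    # Hypothetical fingerprint in the new format (agent/model/tool names are examples).
    sample = "agent=AgentExecutor|framework=langchain|model=example-chat-model|tools=sql_db_query,sql_db_schema"
    parsed = dict(part.split("=", 1) for part in sample.split("|"))
    tools = parsed["tools"].split(",") if parsed["tools"] else []
    print(parsed["agent"], parsed["framework"], parsed["model"], tools)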
@@ -3816,7 +3879,8 @@ Follow these rules when planning your actions."""
  agent_fingerprint=agent_fingerprint,
  max_rules_per_layer=self._top_k, # Configurable via top_k parameter
  performance_tracking_id=self._performance_tracking_id, # For rule isolation
- is_baseline=is_baseline # Skip rule selection for baselines
+ is_baseline=is_baseline, # Skip rule selection for baselines
+ verbose=self._verbose # Pass verbose flag through
  )

  print(f"[DASEIN] Pre-run service returned {len(selected_rules)} rules")
@@ -4009,24 +4073,92 @@ Follow these rules when planning your actions."""
  print(f"[DASEIN] Calling post-run service for rule synthesis ({mode_str} mode)")

  # Compute agent fingerprint for post-run (mirror pre-run minimal fingerprint)
- def _minimal_agent_fingerprint(agent) -> str:
+ def _minimal_agent_fingerprint(agent, original_agent) -> str:
+ """Generate fingerprint from ORIGINAL unwrapped agent to avoid wrapper contamination"""
  try:
- agent_cls = getattr(agent, '__class__', None)
+ # Use original_agent for fingerprinting, not wrapped agent
+ agent_to_fingerprint = original_agent if original_agent else agent
+
+ agent_cls = getattr(agent_to_fingerprint, '__class__', None)
  agent_name = getattr(agent_cls, '__name__', '') if agent_cls else ''
- module = getattr(agent, '__module__', '') or ''
+ module = getattr(agent_to_fingerprint, '__module__', '') or ''
  framework = module.split('.')[0] if module else ''
+
+ # model id (comprehensive search through agent structure)
  model_id = ''
- llm = getattr(agent, 'llm', None)
- if llm is not None:
- model_id = (
- getattr(llm, 'model', None)
- or getattr(llm, 'model_name', None)
- or getattr(llm, 'model_id', None)
- or getattr(llm, 'model_tag', None)
- or ''
- )
+
+ # Helper to extract model from LLM instance
+ def _extract_model_from_llm(llm_obj):
+ if llm_obj is None:
+ return None
+ type_name = type(llm_obj).__name__
+ if 'Language' in type_name or 'Chat' in type_name or 'LLM' in type_name:
+ return (
+ getattr(llm_obj, 'model', None)
+ or getattr(llm_obj, 'model_name', None)
+ or getattr(llm_obj, 'model_id', None)
+ or getattr(llm_obj, 'model_tag', None)
+ )
+ return None
+
+ # 1. Direct llm
+ llm = getattr(agent_to_fingerprint, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+
+ # 2. Legacy ReAct: agent.llm_chain.llm
+ if not model_id:
+ llm_chain = getattr(agent_to_fingerprint, 'llm_chain', None)
+ if llm_chain:
+ llm = getattr(llm_chain, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+
+ # 3. Nested agent.agent
+ if not model_id:
+ inner_agent = getattr(agent_to_fingerprint, 'agent', None)
+ if inner_agent:
+ llm = getattr(inner_agent, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+ if not model_id:
+ llm_chain = getattr(inner_agent, 'llm_chain', None)
+ if llm_chain:
+ llm = getattr(llm_chain, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+
+ # 4. LCEL runnable graph
+ if not model_id:
+ runnable = getattr(agent_to_fingerprint, 'runnable', None)
+ if runnable:
+ model_id = _extract_model_from_llm(runnable)
+ if not model_id and hasattr(runnable, 'steps'):
+ for step in runnable.steps:
+ model_id = _extract_model_from_llm(step)
+ if model_id:
+ break
+
+ # 5. Toolkit
+ if not model_id:
+ toolkit = getattr(agent_to_fingerprint, 'toolkit', None)
+ if toolkit:
+ llm = getattr(toolkit, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+
+ # 6. Tools list
+ if not model_id:
+ tools_attr = getattr(agent_to_fingerprint, 'tools', None)
+ if tools_attr:
+ try:
+ for tool in tools_attr:
+ llm = getattr(tool, 'llm', None)
+ model_id = _extract_model_from_llm(llm)
+ if model_id:
+ break
+ except Exception:
+ pass
+
+ model_id = str(model_id) if model_id else ''
+
  tool_names = []
- tools_attr = getattr(agent, 'tools', None)
+ tools_attr = getattr(agent_to_fingerprint, 'tools', None)
  if tools_attr:
  try:
  for t in tools_attr:
@@ -4035,8 +4167,8 @@ Follow these rules when planning your actions."""
  tool_names.append(str(name))
  except Exception:
  pass
- elif getattr(agent, 'toolkit', None):
- tk = getattr(agent, 'toolkit')
+ elif getattr(agent_to_fingerprint, 'toolkit', None):
+ tk = getattr(agent_to_fingerprint, 'toolkit')
  tk_tools = getattr(tk, 'tools', None) or getattr(tk, 'get_tools', None)
  try:
  iterable = tk_tools() if callable(tk_tools) else tk_tools
@@ -4052,11 +4184,11 @@ Follow these rules when planning your actions."""
  model_id = norm(model_id)
  tool_names = [norm(n) for n in tool_names if n]
  tools_joined = ','.join(sorted(set(tool_names)))
- return f"[[FINGERPRINT]] agent={agent_name} | framework={framework} | model={model_id} | tools={tools_joined}"
+ return f"agent={agent_name}|framework={framework}|model={model_id}|tools={tools_joined}"
  except Exception:
- return getattr(agent, 'agent_id', None) or f"agent_{id(agent)}"
+ return getattr(agent_to_fingerprint, 'agent_id', None) or f"agent_{id(agent_to_fingerprint)}"

- agent_fingerprint = _minimal_agent_fingerprint(self._agent)
+ agent_fingerprint = _minimal_agent_fingerprint(self._agent, self._original_agent)

  # Get tool metadata from callback handler (extracted during runtime)
  tools_metadata = []
@@ -4087,6 +4219,23 @@ Follow these rules when planning your actions."""
  else:
  print(f"[DASEIN] WARNING: No tools extracted! Agent type: {type(self._agent)}")

+ # Extract rules_applied from selected_rules (rule IDs that were actually selected by pre-run)
+ rules_applied = []
+ for rule in selected_rules:
+ if isinstance(rule, dict):
+ rule_id = rule.get('id', '')
+ if rule_id:
+ rules_applied.append(rule_id)
+ elif hasattr(rule, 'id'):
+ rules_applied.append(rule.id)
+ print(f"[DASEIN] Passing {len(rules_applied)} rule IDs to post-run: {rules_applied}")
+
+ # Compute context_hash: represents query + agent fingerprint (what context node contains)
+ import hashlib
+ combined_context = f"{query}:{agent_fingerprint}"
+ context_hash = f"ctx_{hashlib.sha256(combined_context.encode()).hexdigest()[:9]}"
+ print(f"[DASEIN] Computed context_hash: {context_hash}")
+
  response = self._service_adapter.synthesize_rules(
  run_id=None, # Will use stored run_id from pre-run phase
  trace=cleaned_trace,
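Note: context_hash is deterministic, derived only from the query and the agent fingerprint. A standalone sketch reproducing the computation shown above, with placeholder inputs:

    import hashlib

    # Placeholder inputs; any query/fingerprint pair yields a stable ctx_ key.
    query = "How many employees are there?"
    agent_fingerprint = "agent=AgentExecutor|framework=langchain|model=example-chat-model|tools=sql_db_query"
    combined_context = f"{query}:{agent_fingerprint}"
    context_hash = f"ctx_{hashlib.sha256(combined_context.encode()).hexdigest()[:9]}"
    print(context_hash)  # "ctx_" plus the first 9 hex chars of the SHA-256 digest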
@@ -4102,7 +4251,9 @@ Follow these rules when planning your actions."""
  post_run_mode=self._post_run, # Pass post_run mode ("full" or "kpi_only")
  wait_for_synthesis=wait_for_synthesis, # Wait for synthesis on retry runs (except last)
  tools_metadata=tools_metadata, # Tool metadata for Stage 3.5 tool grounding
- graph_metadata=graph_metadata # Graph metadata for Stage 3.5 node grounding
+ graph_metadata=graph_metadata, # Graph metadata for Stage 3.5 node grounding
+ rules_applied=rules_applied, # Rule IDs selected by pre-run
+ context_hash=context_hash # Context hash for graph grouping
  )

  # response is a dict from ServiceAdapter; handle accordingly
@@ -379,6 +379,7 @@ class DaseinCallbackHandler(BaseCallbackHandler):
  self._compiled_tools_metadata = [] # Store extracted tools
  self._pipecleaner_embedding_model = None # Cache embedding model for this run
  self._current_tool_name = None # Track currently executing tool for hotpath deduplication
+ self._last_reset_ts = None # Debounce guard for reset_run_state()

  # Generate stable run_id for corpus deduplication
  import uuid
@@ -397,6 +398,24 @@ class DaseinCallbackHandler(BaseCallbackHandler):

  def reset_run_state(self):
  """Reset state that should be cleared between runs."""
+ # Debounce: suppress duplicate rapid invocations (e.g., from multiple callers in same tick)
+ try:
+ from time import monotonic
+ now = monotonic()
+ if getattr(self, '_last_reset_ts', None) is not None and (now - self._last_reset_ts) < 0.05:
+ # Too soon since last reset; skip
+ return
+ self._last_reset_ts = now
+ except Exception:
+ pass
+ # Optional debug: print caller stack to trace root cause of unexpected resets
+ try:
+ import os, traceback
+ if os.getenv("DASEIN_DEBUG_RESET", "0") == "1":
+ stack_excerpt = ''.join(traceback.format_stack(limit=8))
+ self._vprint("[DASEIN][CALLBACK] reset_run_state() caller stack (set DASEIN_DEBUG_RESET=0 to disable):\n" + stack_excerpt, True)
+ except Exception:
+ pass
  self._function_calls_made = {}
  self._injection_guard = set()
  self._trace = [] # Clear instance trace
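Note: the debounce drops a second reset arriving within 50 ms, which matters now that both clear_trace() and the retry loop can invoke reset_run_state(); setting DASEIN_DEBUG_RESET=1 prints the caller stack when hunting unexpected resets. A standalone sketch of the same monotonic-clock pattern, with illustrative names rather than the package's handler class:

    from time import monotonic

    class DebouncedReset:
        """Sketch of the 50 ms debounce used by reset_run_state()."""
        def __init__(self, window: float = 0.05):
            self._window = window
            self._last_reset_ts = None

        def reset(self) -> bool:
            now = monotonic()
            if self._last_reset_ts is not None and (now - self._last_reset_ts) < self._window:
                return False  # duplicate call inside the window is skipped
            self._last_reset_ts = now
            # ... clear per-run state here ...
            return True

    d = DebouncedReset()
    print(d.reset(), d.reset())  # True False when called back-to-back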
@@ -1796,7 +1815,7 @@ EXECUTION STATE (functions called so far in this run):

  """

- combined_injection = f""" SYSTEM OVERRIDE — PLANNING TURN ONLY
+ combined_injection = f""" SYSTEM OVERRIDE
  These rules OVERRIDE all defaults. You MUST enforce them exactly or the task FAILS.

  Tags: AVOID (absolute ban), SKIP (force bypass), FIX (mandatory params), PREFER (ranked choice), HINT (optional).
@@ -1809,8 +1828,7 @@ Precedence: AVOID/SKIP > FIX > PREFER > HINT. On conflict, the higher rule ALWAY
  - PREFER: when multiple compliant options exist, choose the preferred—NO exceptions.
  - Recovery: if a banned/skipped item already failed, IMMEDIATELY switch to a compliant alternative.

- Output Contract: Produce ONE compliant tool/function call (or direct answer if none is needed).
- NO reasoning, NO justification, NO markdown.
+ Honor the agent's existing output contract verbatim; do not add or change fields or formatting.

  Rules to Enforce:

@@ -2147,9 +2165,17 @@ def clear_trace() -> None:
  # Try to clear traces in active CognateProxy instances
  try:
  import gc
+ seen_handlers = set()
  for obj in gc.get_objects():
- if hasattr(obj, '_callback_handler') and hasattr(obj._callback_handler, 'reset_run_state'):
- obj._callback_handler.reset_run_state()
+ if hasattr(obj, '_callback_handler'):
+ handler = getattr(obj, '_callback_handler', None)
+ if handler is None or not hasattr(handler, 'reset_run_state'):
+ continue
+ handler_id = id(handler)
+ if handler_id in seen_handlers:
+ continue
+ seen_handlers.add(handler_id)
+ handler.reset_run_state()
  except Exception:
  pass # Ignore if not available

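Note: several proxy objects discovered through gc.get_objects() can share one callback handler, so the rewrite above de-duplicates by id() and resets each handler once. A self-contained sketch of that pattern with dummy classes (names are illustrative, not from the package):

    class Handler:
        def __init__(self):
            self.resets = 0
        def reset_run_state(self):
            self.resets += 1

    class Proxy:
        def __init__(self, handler):
            self._callback_handler = handler

    shared = Handler()
    proxies = [Proxy(shared), Proxy(shared)]  # two proxies, one shared handler

    seen_handlers = set()
    for obj in proxies:
        handler = getattr(obj, '_callback_handler', None)
        if handler is None or not hasattr(handler, 'reset_run_state'):
            continue
        if id(handler) in seen_handlers:
            continue
        seen_handlers.add(id(handler))
        handler.reset_run_state()

    print(shared.resets)  # 1, not 2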
@@ -34,6 +34,8 @@ class RuleSynthesisRequest:
  step_id: Optional[str] = None
  tools_metadata: Optional[List[Dict[str, Any]]] = None # Tool metadata for Stage 3.5 tool grounding
  graph_metadata: Optional[Dict[str, Any]] = None # Graph metadata for Stage 3.5 node grounding
+ rules_applied: Optional[List[str]] = None # Rule IDs that were selected by pre-run and applied during execution
+ context_hash: Optional[str] = None # Context hash for grouping traces (query + agent fingerprint)


  @dataclass
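Note: the two new fields ride along on the existing synthesis request. A trimmed, illustrative dataclass covering only the fields visible in this hunk (the real RuleSynthesisRequest carries more):

    from dataclasses import dataclass
    from typing import Any, Dict, List, Optional

    @dataclass
    class RuleSynthesisRequestSketch:
        step_id: Optional[str] = None
        tools_metadata: Optional[List[Dict[str, Any]]] = None
        graph_metadata: Optional[Dict[str, Any]] = None
        rules_applied: Optional[List[str]] = None   # new in 0.2.18
        context_hash: Optional[str] = None          # new in 0.2.18

    req = RuleSynthesisRequestSketch(rules_applied=["rule_abc123"], context_hash="ctx_0a1b2c3d4")
    print(req.rules_applied, req.context_hash)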
@@ -99,6 +101,8 @@ class PostRunClient:
  "wait_for_synthesis": request.wait_for_synthesis,
  "tools_metadata": request.tools_metadata or [], # Tool metadata for Stage 3.5 tool grounding
  "graph_metadata": request.graph_metadata or {}, # Graph metadata for Stage 3.5 node grounding
+ "rules_applied": request.rules_applied or [], # Rule IDs selected and applied
+ "context_hash": request.context_hash, # Context hash for graph grouping
  }

  logger.info(f"Synthesizing rules for run: {request.run_id}")
@@ -28,6 +28,7 @@ class RuleSelectionRequest:
  max_rules_per_layer: Optional[int] = 5
  performance_tracking_id: Optional[str] = None
  is_baseline: bool = False
+ verbose: bool = False


  @dataclass
@@ -87,7 +88,8 @@ class PreRunClient:
  "run_id": request.run_id,
  "max_rules_per_layer": request.max_rules_per_layer,
  "performance_tracking_id": request.performance_tracking_id,
- "is_baseline": request.is_baseline
+ "is_baseline": request.is_baseline,
+ "verbose": request.verbose
  }

  logger.info(f"Selecting rules for query: {str(request.query)[:50]}...")
@@ -43,7 +43,8 @@ class ServiceAdapter:
  def select_rules(self, query: str, agent_fingerprint: Optional[str] = None,
  artifacts: Optional[List[str]] = None, limits: Optional[Dict[str, int]] = None,
  run_id: Optional[str] = None, max_rules_per_layer: Optional[int] = 5,
- performance_tracking_id: Optional[str] = None, is_baseline: bool = False) -> List[Dict[str, Any]]:
+ performance_tracking_id: Optional[str] = None, is_baseline: bool = False,
+ verbose: bool = False) -> List[Dict[str, Any]]:
  """
  Select rules for an incoming run (replaces local rule selection)

@@ -68,7 +69,8 @@ class ServiceAdapter:
  run_id=run_id,
  max_rules_per_layer=max_rules_per_layer,
  performance_tracking_id=performance_tracking_id,
- is_baseline=is_baseline
+ is_baseline=is_baseline,
+ verbose=verbose
  )

  response = self.pre_run_client.select_rules(request)
@@ -96,7 +98,8 @@ class ServiceAdapter:
  skip_synthesis: bool = False, agent_fingerprint: Optional[str] = None,
  step_id: Optional[str] = None, post_run_mode: str = "full",
  wait_for_synthesis: bool = False, tools_metadata: Optional[List[Dict[str, Any]]] = None,
- graph_metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+ graph_metadata: Optional[Dict[str, Any]] = None, rules_applied: Optional[List[str]] = None,
+ context_hash: Optional[str] = None) -> Dict[str, Any]:
  """
  Synthesize rules from run telemetry (replaces local rule synthesis)

@@ -141,7 +144,9 @@ class ServiceAdapter:
  wait_for_synthesis=wait_for_synthesis,
  step_id=step_id,
  tools_metadata=tools_metadata,
- graph_metadata=graph_metadata
+ graph_metadata=graph_metadata,
+ rules_applied=rules_applied,
+ context_hash=context_hash
  )

  response = self.post_run_client.synthesize_rules(request)
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dasein-core
- Version: 0.2.16
+ Version: 0.2.18
  Summary: Universal memory for agentic AI. Attach a brain to any LangChain/LangGraph agent in a single line.
  Author-email: Dasein Team <support@dasein.ai>
  License: MIT