dasein-core 0.2.17__tar.gz → 0.2.19__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {dasein_core-0.2.17/src/dasein_core.egg-info → dasein_core-0.2.19}/PKG-INFO +1 -1
  2. {dasein_core-0.2.17 → dasein_core-0.2.19}/pyproject.toml +1 -1
  3. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/api.py +202 -40
  4. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/capture.py +1 -2
  5. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/services/post_run_client.py +7 -2
  6. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/services/pre_run_client.py +3 -1
  7. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/services/service_adapter.py +9 -4
  8. {dasein_core-0.2.17 → dasein_core-0.2.19/src/dasein_core.egg-info}/PKG-INFO +1 -1
  9. {dasein_core-0.2.17 → dasein_core-0.2.19}/LICENSE +0 -0
  10. {dasein_core-0.2.17 → dasein_core-0.2.19}/MANIFEST.in +0 -0
  11. {dasein_core-0.2.17 → dasein_core-0.2.19}/README.md +0 -0
  12. {dasein_core-0.2.17 → dasein_core-0.2.19}/examples/dasein_examples.ipynb +0 -0
  13. {dasein_core-0.2.17 → dasein_core-0.2.19}/setup.cfg +0 -0
  14. {dasein_core-0.2.17 → dasein_core-0.2.19}/setup.py +0 -0
  15. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/__init__.py +0 -0
  16. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/advice_format.py +0 -0
  17. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/config.py +0 -0
  18. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/events.py +0 -0
  19. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/extractors.py +0 -0
  20. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/injection_strategies.py +0 -0
  21. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/injector.py +0 -0
  22. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/microturn.py +0 -0
  23. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/__init__.py +0 -0
  24. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/LICENSE +0 -0
  25. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/LICENSES_SOURCES +0 -0
  26. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/README.md +0 -0
  27. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/accuracy.json +0 -0
  28. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/attribute_ruler/patterns +0 -0
  29. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/config.cfg +0 -0
  30. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/lemmatizer/lookups/lookups.bin +0 -0
  31. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/meta.json +0 -0
  32. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/ner/cfg +0 -0
  33. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/ner/model +0 -0
  34. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/ner/moves +0 -0
  35. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/parser/cfg +0 -0
  36. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/parser/model +0 -0
  37. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/parser/moves +0 -0
  38. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/senter/cfg +0 -0
  39. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/senter/model +0 -0
  40. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tagger/cfg +0 -0
  41. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tagger/model +0 -0
  42. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tok2vec/cfg +0 -0
  43. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tok2vec/model +0 -0
  44. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tokenizer +0 -0
  45. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/key2row +0 -0
  46. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/lookups.bin +0 -0
  47. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/strings.json +0 -0
  48. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/vectors +0 -0
  49. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/vectors.cfg +0 -0
  50. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm/meta.json +0 -0
  51. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSE +0 -0
  52. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSES_SOURCES +0 -0
  53. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/METADATA +0 -0
  54. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/RECORD +0 -0
  55. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/WHEEL +0 -0
  56. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/entry_points.txt +0 -0
  57. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/top_level.txt +0 -0
  58. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/pipecleaner.py +0 -0
  59. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/services/__init__.py +0 -0
  60. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/services/service_config.py +0 -0
  61. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/trace_buffer.py +0 -0
  62. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/types.py +0 -0
  63. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/wrappers.py +0 -0
  64. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein_core.egg-info/SOURCES.txt +0 -0
  65. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein_core.egg-info/dependency_links.txt +0 -0
  66. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein_core.egg-info/requires.txt +0 -0
  67. {dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein_core.egg-info/top_level.txt +0 -0

{dasein_core-0.2.17/src/dasein_core.egg-info → dasein_core-0.2.19}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dasein-core
-Version: 0.2.17
+Version: 0.2.19
 Summary: Universal memory for agentic AI. Attach a brain to any LangChain/LangGraph agent in a single line.
 Author-email: Dasein Team <support@dasein.ai>
 License: MIT

{dasein_core-0.2.17 → dasein_core-0.2.19}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "dasein-core"
-version = "0.2.17"
+version = "0.2.19"
 description = "Universal memory for agentic AI. Attach a brain to any LangChain/LangGraph agent in a single line."
 readme = "README.md"
 requires-python = ">=3.8"

{dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/api.py

@@ -889,6 +889,9 @@ class CognateProxy:
         # Track which LLM classes have been monkey-patched (to avoid double-patching)
         self._patched_llm_classes = set()
 
+        # Generate agent fingerprint ONCE and cache it (must be consistent across pre-run and post-run)
+        self._agent_fingerprint = None
+
         # Wrap the agent's LLM with our trace capture wrapper
         self._wrap_agent_llm()
 
@@ -3746,28 +3749,95 @@ Follow these rules when planning your actions."""
         print(f"[DASEIN] Calling pre-run service for query: {str(query)[:50]}...")
 
         # Generate agent fingerprint (stable categorical tag)
-        def _minimal_agent_fingerprint(agent) -> str:
+        def _minimal_agent_fingerprint(agent, original_agent) -> str:
+            """Generate fingerprint from ORIGINAL unwrapped agent to avoid wrapper contamination"""
             try:
+                # Use original_agent for fingerprinting, not wrapped agent
+                agent_to_fingerprint = original_agent if original_agent else agent
+
                 # agent class
-                agent_cls = getattr(agent, '__class__', None)
+                agent_cls = getattr(agent_to_fingerprint, '__class__', None)
                 agent_name = getattr(agent_cls, '__name__', '') if agent_cls else ''
                 # framework top-level module
-                module = getattr(agent, '__module__', '') or ''
+                module = getattr(agent_to_fingerprint, '__module__', '') or ''
                 framework = module.split('.')[0] if module else ''
-                # model id (best-effort)
+
+                # model id (comprehensive search through agent structure)
                 model_id = ''
-                llm = getattr(agent, 'llm', None)
-                if llm is not None:
-                    model_id = (
-                        getattr(llm, 'model', None)
-                        or getattr(llm, 'model_name', None)
-                        or getattr(llm, 'model_id', None)
-                        or getattr(llm, 'model_tag', None)
-                        or ''
-                    )
-                # tools/toolkit
+
+                # Helper to extract model from LLM instance
+                def _extract_model_from_llm(llm_obj):
+                    if llm_obj is None:
+                        return None
+                    type_name = type(llm_obj).__name__
+                    if 'Language' in type_name or 'Chat' in type_name or 'LLM' in type_name:
+                        return (
+                            getattr(llm_obj, 'model', None)
+                            or getattr(llm_obj, 'model_name', None)
+                            or getattr(llm_obj, 'model_id', None)
+                            or getattr(llm_obj, 'model_tag', None)
+                        )
+                    return None
+
+                # 1. Direct llm
+                llm = getattr(agent_to_fingerprint, 'llm', None)
+                model_id = _extract_model_from_llm(llm)
+
+                # 2. Legacy ReAct: agent.llm_chain.llm
+                if not model_id:
+                    llm_chain = getattr(agent_to_fingerprint, 'llm_chain', None)
+                    if llm_chain:
+                        llm = getattr(llm_chain, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+
+                # 3. Nested agent.agent
+                if not model_id:
+                    inner_agent = getattr(agent_to_fingerprint, 'agent', None)
+                    if inner_agent:
+                        llm = getattr(inner_agent, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+                        if not model_id:
+                            llm_chain = getattr(inner_agent, 'llm_chain', None)
+                            if llm_chain:
+                                llm = getattr(llm_chain, 'llm', None)
+                                model_id = _extract_model_from_llm(llm)
+
+                # 4. LCEL runnable graph
+                if not model_id:
+                    runnable = getattr(agent_to_fingerprint, 'runnable', None)
+                    if runnable:
+                        model_id = _extract_model_from_llm(runnable)
+                        if not model_id and hasattr(runnable, 'steps'):
+                            for step in runnable.steps:
+                                model_id = _extract_model_from_llm(step)
+                                if model_id:
+                                    break
+
+                # 5. Toolkit
+                if not model_id:
+                    toolkit = getattr(agent_to_fingerprint, 'toolkit', None)
+                    if toolkit:
+                        llm = getattr(toolkit, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+
+                # 6. Tools list
+                if not model_id:
+                    tools_attr = getattr(agent_to_fingerprint, 'tools', None)
+                    if tools_attr:
+                        try:
+                            for tool in tools_attr:
+                                llm = getattr(tool, 'llm', None)
+                                model_id = _extract_model_from_llm(llm)
+                                if model_id:
+                                    break
+                        except Exception:
+                            pass
+
+                model_id = str(model_id) if model_id else ''
+
+                # tools/toolkit (from original agent)
                 tool_names = []
-                tools_attr = getattr(agent, 'tools', None)
+                tools_attr = getattr(agent_to_fingerprint, 'tools', None)
                 if tools_attr:
                     try:
                         for t in tools_attr:
@@ -3778,8 +3848,8 @@ Follow these rules when planning your actions."""
                                 tool_names.append(str(name))
                     except Exception:
                         pass
-                elif getattr(agent, 'toolkit', None):
-                    tk = getattr(agent, 'toolkit')
+                elif getattr(agent_to_fingerprint, 'toolkit', None):
+                    tk = getattr(agent_to_fingerprint, 'toolkit')
                     tk_tools = getattr(tk, 'tools', None) or getattr(tk, 'get_tools', None)
                     try:
                         iterable = tk_tools() if callable(tk_tools) else tk_tools
@@ -3799,12 +3869,15 @@ Follow these rules when planning your actions."""
                 tool_names = [norm(n) for n in tool_names if n]
                 tools_joined = ','.join(sorted(set(tool_names)))
                 # fixed-order segments (keep keys even if empty to preserve format)
-                return f"[[FINGERPRINT]] agent={agent_name} | framework={framework} | model={model_id} | tools={tools_joined}"
+                return f"agent={agent_name}|framework={framework}|model={model_id}|tools={tools_joined}"
             except Exception:
                 # Fallback to prior behavior on any error
-                return getattr(agent, 'agent_id', None) or f"agent_{id(agent)}"
+                return getattr(agent_to_fingerprint, 'agent_id', None) or f"agent_{id(agent_to_fingerprint)}"
 
-        agent_fingerprint = _minimal_agent_fingerprint(self._agent)
+        # Generate fingerprint once and cache it for reuse in post-run
+        if self._agent_fingerprint is None:
+            self._agent_fingerprint = _minimal_agent_fingerprint(self._agent, self._original_agent)
+        agent_fingerprint = self._agent_fingerprint
 
         # Call pre-run service (it will handle baseline flag internally)
         selected_rules = self._service_adapter.select_rules(
@@ -3812,7 +3885,8 @@ Follow these rules when planning your actions."""
            agent_fingerprint=agent_fingerprint,
            max_rules_per_layer=self._top_k,  # Configurable via top_k parameter
            performance_tracking_id=self._performance_tracking_id,  # For rule isolation
-           is_baseline=is_baseline  # Skip rule selection for baselines
+           is_baseline=is_baseline,  # Skip rule selection for baselines
+           verbose=self._verbose  # Pass verbose flag through
         )
 
         print(f"[DASEIN] Pre-run service returned {len(selected_rules)} rules")
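
Note on the fingerprint format: the hunks above replace the bracketed "[[FINGERPRINT]] agent=... | framework=... | ..." string with a compact pipe-delimited form and cache it on the proxy so pre-run and post-run report the same value. A minimal sketch of what the cached string looks like, using illustrative values rather than anything taken from a real agent run:

    # Illustrative only: values below are hypothetical
    agent_name = "AgentExecutor"                  # type(original_agent).__name__
    framework = "langchain"                       # top-level module of the agent class
    model_id = "gemini-2.0-flash"                 # first of model/model_name/model_id/model_tag found
    tools_joined = ",".join(sorted({"sql_db_list_tables", "sql_db_query"}))
    fingerprint = f"agent={agent_name}|framework={framework}|model={model_id}|tools={tools_joined}"
    # -> "agent=AgentExecutor|framework=langchain|model=gemini-2.0-flash|tools=sql_db_list_tables,sql_db_query"
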
@@ -4005,24 +4079,92 @@ Follow these rules when planning your actions."""
         print(f"[DASEIN] Calling post-run service for rule synthesis ({mode_str} mode)")
 
         # Compute agent fingerprint for post-run (mirror pre-run minimal fingerprint)
-        def _minimal_agent_fingerprint(agent) -> str:
+        def _minimal_agent_fingerprint(agent, original_agent) -> str:
+            """Generate fingerprint from ORIGINAL unwrapped agent to avoid wrapper contamination"""
             try:
-                agent_cls = getattr(agent, '__class__', None)
+                # Use original_agent for fingerprinting, not wrapped agent
+                agent_to_fingerprint = original_agent if original_agent else agent
+
+                agent_cls = getattr(agent_to_fingerprint, '__class__', None)
                 agent_name = getattr(agent_cls, '__name__', '') if agent_cls else ''
-                module = getattr(agent, '__module__', '') or ''
+                module = getattr(agent_to_fingerprint, '__module__', '') or ''
                 framework = module.split('.')[0] if module else ''
+
+                # model id (comprehensive search through agent structure)
                 model_id = ''
-                llm = getattr(agent, 'llm', None)
-                if llm is not None:
-                    model_id = (
-                        getattr(llm, 'model', None)
-                        or getattr(llm, 'model_name', None)
-                        or getattr(llm, 'model_id', None)
-                        or getattr(llm, 'model_tag', None)
-                        or ''
-                    )
+
+                # Helper to extract model from LLM instance
+                def _extract_model_from_llm(llm_obj):
+                    if llm_obj is None:
+                        return None
+                    type_name = type(llm_obj).__name__
+                    if 'Language' in type_name or 'Chat' in type_name or 'LLM' in type_name:
+                        return (
+                            getattr(llm_obj, 'model', None)
+                            or getattr(llm_obj, 'model_name', None)
+                            or getattr(llm_obj, 'model_id', None)
+                            or getattr(llm_obj, 'model_tag', None)
+                        )
+                    return None
+
+                # 1. Direct llm
+                llm = getattr(agent_to_fingerprint, 'llm', None)
+                model_id = _extract_model_from_llm(llm)
+
+                # 2. Legacy ReAct: agent.llm_chain.llm
+                if not model_id:
+                    llm_chain = getattr(agent_to_fingerprint, 'llm_chain', None)
+                    if llm_chain:
+                        llm = getattr(llm_chain, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+
+                # 3. Nested agent.agent
+                if not model_id:
+                    inner_agent = getattr(agent_to_fingerprint, 'agent', None)
+                    if inner_agent:
+                        llm = getattr(inner_agent, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+                        if not model_id:
+                            llm_chain = getattr(inner_agent, 'llm_chain', None)
+                            if llm_chain:
+                                llm = getattr(llm_chain, 'llm', None)
+                                model_id = _extract_model_from_llm(llm)
+
+                # 4. LCEL runnable graph
+                if not model_id:
+                    runnable = getattr(agent_to_fingerprint, 'runnable', None)
+                    if runnable:
+                        model_id = _extract_model_from_llm(runnable)
+                        if not model_id and hasattr(runnable, 'steps'):
+                            for step in runnable.steps:
+                                model_id = _extract_model_from_llm(step)
+                                if model_id:
+                                    break
+
+                # 5. Toolkit
+                if not model_id:
+                    toolkit = getattr(agent_to_fingerprint, 'toolkit', None)
+                    if toolkit:
+                        llm = getattr(toolkit, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+
+                # 6. Tools list
+                if not model_id:
+                    tools_attr = getattr(agent_to_fingerprint, 'tools', None)
+                    if tools_attr:
+                        try:
+                            for tool in tools_attr:
+                                llm = getattr(tool, 'llm', None)
+                                model_id = _extract_model_from_llm(llm)
+                                if model_id:
+                                    break
+                        except Exception:
+                            pass
+
+                model_id = str(model_id) if model_id else ''
+
                 tool_names = []
-                tools_attr = getattr(agent, 'tools', None)
+                tools_attr = getattr(agent_to_fingerprint, 'tools', None)
                 if tools_attr:
                     try:
                         for t in tools_attr:
@@ -4031,8 +4173,8 @@ Follow these rules when planning your actions."""
                                 tool_names.append(str(name))
                     except Exception:
                         pass
-                elif getattr(agent, 'toolkit', None):
-                    tk = getattr(agent, 'toolkit')
+                elif getattr(agent_to_fingerprint, 'toolkit', None):
+                    tk = getattr(agent_to_fingerprint, 'toolkit')
                     tk_tools = getattr(tk, 'tools', None) or getattr(tk, 'get_tools', None)
                     try:
                         iterable = tk_tools() if callable(tk_tools) else tk_tools
@@ -4048,11 +4190,12 @@ Follow these rules when planning your actions."""
                 model_id = norm(model_id)
                 tool_names = [norm(n) for n in tool_names if n]
                 tools_joined = ','.join(sorted(set(tool_names)))
-                return f"[[FINGERPRINT]] agent={agent_name} | framework={framework} | model={model_id} | tools={tools_joined}"
+                return f"agent={agent_name}|framework={framework}|model={model_id}|tools={tools_joined}"
             except Exception:
-                return getattr(agent, 'agent_id', None) or f"agent_{id(agent)}"
+                return getattr(agent_to_fingerprint, 'agent_id', None) or f"agent_{id(agent_to_fingerprint)}"
 
-        agent_fingerprint = _minimal_agent_fingerprint(self._agent)
+        # Reuse cached fingerprint from pre-run (guaranteed to be identical)
+        agent_fingerprint = self._agent_fingerprint
 
         # Get tool metadata from callback handler (extracted during runtime)
         tools_metadata = []
@@ -4083,6 +4226,23 @@ Follow these rules when planning your actions."""
         else:
             print(f"[DASEIN] WARNING: No tools extracted! Agent type: {type(self._agent)}")
 
+        # Extract rules_applied from selected_rules (rule IDs that were actually selected by pre-run)
+        rules_applied = []
+        for rule in selected_rules:
+            if isinstance(rule, dict):
+                rule_id = rule.get('id', '')
+                if rule_id:
+                    rules_applied.append(rule_id)
+            elif hasattr(rule, 'id'):
+                rules_applied.append(rule.id)
+        print(f"[DASEIN] Passing {len(rules_applied)} rule IDs to post-run: {rules_applied}")
+
+        # Compute context_hash: represents query + agent fingerprint (what context node contains)
+        import hashlib
+        combined_context = f"{query}:{agent_fingerprint}"
+        context_hash = f"ctx_{hashlib.sha256(combined_context.encode()).hexdigest()[:9]}"
+        print(f"[DASEIN] Computed context_hash: {context_hash}")
+
         response = self._service_adapter.synthesize_rules(
             run_id=None,  # Will use stored run_id from pre-run phase
             trace=cleaned_trace,
@@ -4098,7 +4258,9 @@ Follow these rules when planning your actions."""
             post_run_mode=self._post_run,  # Pass post_run mode ("full" or "kpi_only")
             wait_for_synthesis=wait_for_synthesis,  # Wait for synthesis on retry runs (except last)
             tools_metadata=tools_metadata,  # Tool metadata for Stage 3.5 tool grounding
-            graph_metadata=graph_metadata  # Graph metadata for Stage 3.5 node grounding
+            graph_metadata=graph_metadata,  # Graph metadata for Stage 3.5 node grounding
+            rules_applied=rules_applied,  # Rule IDs selected by pre-run
+            context_hash=context_hash  # Context hash for graph grouping
         )
 
         # response is a dict from ServiceAdapter; handle accordingly
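
Note on context_hash: it ties a trace to the query/fingerprint pair it ran under, using the computation shown in the hunk above. A minimal standalone sketch of the same computation; the query and fingerprint values here are made up for illustration:

    import hashlib

    # Hypothetical inputs; in api.py these come from the current run
    query = "How many tracks are in the database?"
    agent_fingerprint = "agent=AgentExecutor|framework=langchain|model=gemini-2.0-flash|tools=sql_db_query"

    combined_context = f"{query}:{agent_fingerprint}"
    context_hash = f"ctx_{hashlib.sha256(combined_context.encode()).hexdigest()[:9]}"
    print(context_hash)  # "ctx_" followed by the first 9 hex chars of the SHA-256 digest
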

{dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/capture.py

@@ -1828,8 +1828,7 @@ Precedence: AVOID/SKIP > FIX > PREFER > HINT. On conflict, the higher rule ALWAY
 - PREFER: when multiple compliant options exist, choose the preferred—NO exceptions.
 - Recovery: if a banned/skipped item already failed, IMMEDIATELY switch to a compliant alternative.
 
-Output Contract: Produce ONE compliant tool/function call (or direct answer if none is needed).
-NO reasoning, NO justification, NO markdown.
+Honor the agent's existing output contract verbatim; do not add or change fields or formatting.
 
 Rules to Enforce:
 

{dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/services/post_run_client.py

@@ -34,6 +34,8 @@ class RuleSynthesisRequest:
     step_id: Optional[str] = None
     tools_metadata: Optional[List[Dict[str, Any]]] = None  # Tool metadata for Stage 3.5 tool grounding
     graph_metadata: Optional[Dict[str, Any]] = None  # Graph metadata for Stage 3.5 node grounding
+    rules_applied: Optional[List[str]] = None  # Rule IDs that were selected by pre-run and applied during execution
+    context_hash: Optional[str] = None  # Context hash for grouping traces (query + agent fingerprint)
 
 
 @dataclass
@@ -97,8 +99,11 @@ class PostRunClient:
             "performance_tracking_id": request.performance_tracking_id,
             "skip_synthesis": request.skip_synthesis,
             "wait_for_synthesis": request.wait_for_synthesis,
-            "tools_metadata": request.tools_metadata or [],  # Tool metadata for Stage 3.5 tool grounding
-            "graph_metadata": request.graph_metadata or {},  # Graph metadata for Stage 3.5 node grounding
+            "step_id": request.step_id,
+            "tools_metadata": request.tools_metadata or [],
+            "graph_metadata": request.graph_metadata or {},
+            "rules_applied": request.rules_applied or [],
+            "context_hash": request.context_hash,
         }
 
         logger.info(f"Synthesizing rules for run: {request.run_id}")
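
The hunk above also starts sending step_id, rules_applied, and context_hash in the synthesize-rules request body. An illustrative fragment of that payload after this change; the values are hypothetical and the other payload keys are omitted:

    # Sketch of the new keys in the post-run payload (values are made up)
    payload_fragment = {
        "step_id": None,
        "tools_metadata": [],
        "graph_metadata": {},
        "rules_applied": ["rule_ab12cd34", "rule_ef56gh78"],
        "context_hash": "ctx_1a2b3c4d5",
    }
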

{dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/services/pre_run_client.py

@@ -28,6 +28,7 @@ class RuleSelectionRequest:
     max_rules_per_layer: Optional[int] = 5
     performance_tracking_id: Optional[str] = None
     is_baseline: bool = False
+    verbose: bool = False
 
 
 @dataclass
@@ -87,7 +88,8 @@ class PreRunClient:
             "run_id": request.run_id,
             "max_rules_per_layer": request.max_rules_per_layer,
             "performance_tracking_id": request.performance_tracking_id,
-            "is_baseline": request.is_baseline
+            "is_baseline": request.is_baseline,
+            "verbose": request.verbose
         }
 
         logger.info(f"Selecting rules for query: {str(request.query)[:50]}...")

{dasein_core-0.2.17 → dasein_core-0.2.19}/src/dasein/services/service_adapter.py

@@ -43,7 +43,8 @@ class ServiceAdapter:
     def select_rules(self, query: str, agent_fingerprint: Optional[str] = None,
                      artifacts: Optional[List[str]] = None, limits: Optional[Dict[str, int]] = None,
                      run_id: Optional[str] = None, max_rules_per_layer: Optional[int] = 5,
-                     performance_tracking_id: Optional[str] = None, is_baseline: bool = False) -> List[Dict[str, Any]]:
+                     performance_tracking_id: Optional[str] = None, is_baseline: bool = False,
+                     verbose: bool = False) -> List[Dict[str, Any]]:
         """
         Select rules for an incoming run (replaces local rule selection)
 
@@ -68,7 +69,8 @@ class ServiceAdapter:
             run_id=run_id,
             max_rules_per_layer=max_rules_per_layer,
             performance_tracking_id=performance_tracking_id,
-            is_baseline=is_baseline
+            is_baseline=is_baseline,
+            verbose=verbose
         )
 
         response = self.pre_run_client.select_rules(request)
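
The verbose flag is threaded from the proxy through ServiceAdapter into the pre-run request above. A minimal illustration of calling the adapter with the new flag; the adapter instance and all argument values are hypothetical, mirroring the call made from api.py's pre-run path:

    # service_adapter is an already-constructed ServiceAdapter (hypothetical here)
    selected_rules = service_adapter.select_rules(
        query="How many tracks are in the database?",
        agent_fingerprint="agent=AgentExecutor|framework=langchain|model=gemini-2.0-flash|tools=sql_db_query",
        max_rules_per_layer=5,
        performance_tracking_id=None,
        is_baseline=False,
        verbose=True,  # new in 0.2.19: forwarded to the pre-run service payload
    )
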
@@ -96,7 +98,8 @@ class ServiceAdapter:
                          skip_synthesis: bool = False, agent_fingerprint: Optional[str] = None,
                          step_id: Optional[str] = None, post_run_mode: str = "full",
                          wait_for_synthesis: bool = False, tools_metadata: Optional[List[Dict[str, Any]]] = None,
-                         graph_metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+                         graph_metadata: Optional[Dict[str, Any]] = None, rules_applied: Optional[List[str]] = None,
+                         context_hash: Optional[str] = None) -> Dict[str, Any]:
         """
         Synthesize rules from run telemetry (replaces local rule synthesis)
 
@@ -141,7 +144,9 @@ class ServiceAdapter:
             wait_for_synthesis=wait_for_synthesis,
             step_id=step_id,
             tools_metadata=tools_metadata,
-            graph_metadata=graph_metadata
+            graph_metadata=graph_metadata,
+            rules_applied=rules_applied,
+            context_hash=context_hash
         )
 
         response = self.post_run_client.synthesize_rules(request)

{dasein_core-0.2.17 → dasein_core-0.2.19/src/dasein_core.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dasein-core
-Version: 0.2.17
+Version: 0.2.19
 Summary: Universal memory for agentic AI. Attach a brain to any LangChain/LangGraph agent in a single line.
 Author-email: Dasein Team <support@dasein.ai>
 License: MIT