dasein-core 0.2.17__tar.gz → 0.2.18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {dasein_core-0.2.17/src/dasein_core.egg-info → dasein_core-0.2.18}/PKG-INFO +1 -1
  2. {dasein_core-0.2.17 → dasein_core-0.2.18}/pyproject.toml +1 -1
  3. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/api.py +195 -40
  4. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/capture.py +1 -2
  5. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/services/post_run_client.py +4 -0
  6. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/services/pre_run_client.py +3 -1
  7. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/services/service_adapter.py +9 -4
  8. {dasein_core-0.2.17 → dasein_core-0.2.18/src/dasein_core.egg-info}/PKG-INFO +1 -1
  9. {dasein_core-0.2.17 → dasein_core-0.2.18}/LICENSE +0 -0
  10. {dasein_core-0.2.17 → dasein_core-0.2.18}/MANIFEST.in +0 -0
  11. {dasein_core-0.2.17 → dasein_core-0.2.18}/README.md +0 -0
  12. {dasein_core-0.2.17 → dasein_core-0.2.18}/examples/dasein_examples.ipynb +0 -0
  13. {dasein_core-0.2.17 → dasein_core-0.2.18}/setup.cfg +0 -0
  14. {dasein_core-0.2.17 → dasein_core-0.2.18}/setup.py +0 -0
  15. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/__init__.py +0 -0
  16. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/advice_format.py +0 -0
  17. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/config.py +0 -0
  18. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/events.py +0 -0
  19. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/extractors.py +0 -0
  20. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/injection_strategies.py +0 -0
  21. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/injector.py +0 -0
  22. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/microturn.py +0 -0
  23. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/__init__.py +0 -0
  24. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/LICENSE +0 -0
  25. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/LICENSES_SOURCES +0 -0
  26. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/README.md +0 -0
  27. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/accuracy.json +0 -0
  28. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/attribute_ruler/patterns +0 -0
  29. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/config.cfg +0 -0
  30. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/lemmatizer/lookups/lookups.bin +0 -0
  31. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/meta.json +0 -0
  32. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/ner/cfg +0 -0
  33. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/ner/model +0 -0
  34. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/ner/moves +0 -0
  35. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/parser/cfg +0 -0
  36. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/parser/model +0 -0
  37. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/parser/moves +0 -0
  38. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/senter/cfg +0 -0
  39. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/senter/model +0 -0
  40. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tagger/cfg +0 -0
  41. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tagger/model +0 -0
  42. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tok2vec/cfg +0 -0
  43. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tok2vec/model +0 -0
  44. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/tokenizer +0 -0
  45. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/key2row +0 -0
  46. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/lookups.bin +0 -0
  47. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/strings.json +0 -0
  48. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/vectors +0 -0
  49. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/vectors.cfg +0 -0
  50. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm/meta.json +0 -0
  51. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSE +0 -0
  52. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSES_SOURCES +0 -0
  53. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/METADATA +0 -0
  54. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/RECORD +0 -0
  55. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/WHEEL +0 -0
  56. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/entry_points.txt +0 -0
  57. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/top_level.txt +0 -0
  58. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/pipecleaner.py +0 -0
  59. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/services/__init__.py +0 -0
  60. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/services/service_config.py +0 -0
  61. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/trace_buffer.py +0 -0
  62. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/types.py +0 -0
  63. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/wrappers.py +0 -0
  64. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein_core.egg-info/SOURCES.txt +0 -0
  65. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein_core.egg-info/dependency_links.txt +0 -0
  66. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein_core.egg-info/requires.txt +0 -0
  67. {dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein_core.egg-info/top_level.txt +0 -0
{dasein_core-0.2.17/src/dasein_core.egg-info → dasein_core-0.2.18}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dasein-core
-Version: 0.2.17
+Version: 0.2.18
 Summary: Universal memory for agentic AI. Attach a brain to any LangChain/LangGraph agent in a single line.
 Author-email: Dasein Team <support@dasein.ai>
 License: MIT
{dasein_core-0.2.17 → dasein_core-0.2.18}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "dasein-core"
-version = "0.2.17"
+version = "0.2.18"
 description = "Universal memory for agentic AI. Attach a brain to any LangChain/LangGraph agent in a single line."
 readme = "README.md"
 requires-python = ">=3.8"
{dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/api.py
@@ -3746,28 +3746,95 @@ Follow these rules when planning your actions."""
         print(f"[DASEIN] Calling pre-run service for query: {str(query)[:50]}...")
 
         # Generate agent fingerprint (stable categorical tag)
-        def _minimal_agent_fingerprint(agent) -> str:
+        def _minimal_agent_fingerprint(agent, original_agent) -> str:
+            """Generate fingerprint from ORIGINAL unwrapped agent to avoid wrapper contamination"""
             try:
+                # Use original_agent for fingerprinting, not wrapped agent
+                agent_to_fingerprint = original_agent if original_agent else agent
+
                 # agent class
-                agent_cls = getattr(agent, '__class__', None)
+                agent_cls = getattr(agent_to_fingerprint, '__class__', None)
                 agent_name = getattr(agent_cls, '__name__', '') if agent_cls else ''
                 # framework top-level module
-                module = getattr(agent, '__module__', '') or ''
+                module = getattr(agent_to_fingerprint, '__module__', '') or ''
                 framework = module.split('.')[0] if module else ''
-                # model id (best-effort)
+
+                # model id (comprehensive search through agent structure)
                 model_id = ''
-                llm = getattr(agent, 'llm', None)
-                if llm is not None:
-                    model_id = (
-                        getattr(llm, 'model', None)
-                        or getattr(llm, 'model_name', None)
-                        or getattr(llm, 'model_id', None)
-                        or getattr(llm, 'model_tag', None)
-                        or ''
-                    )
-                # tools/toolkit
+
+                # Helper to extract model from LLM instance
+                def _extract_model_from_llm(llm_obj):
+                    if llm_obj is None:
+                        return None
+                    type_name = type(llm_obj).__name__
+                    if 'Language' in type_name or 'Chat' in type_name or 'LLM' in type_name:
+                        return (
+                            getattr(llm_obj, 'model', None)
+                            or getattr(llm_obj, 'model_name', None)
+                            or getattr(llm_obj, 'model_id', None)
+                            or getattr(llm_obj, 'model_tag', None)
+                        )
+                    return None
+
+                # 1. Direct llm
+                llm = getattr(agent_to_fingerprint, 'llm', None)
+                model_id = _extract_model_from_llm(llm)
+
+                # 2. Legacy ReAct: agent.llm_chain.llm
+                if not model_id:
+                    llm_chain = getattr(agent_to_fingerprint, 'llm_chain', None)
+                    if llm_chain:
+                        llm = getattr(llm_chain, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+
+                # 3. Nested agent.agent
+                if not model_id:
+                    inner_agent = getattr(agent_to_fingerprint, 'agent', None)
+                    if inner_agent:
+                        llm = getattr(inner_agent, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+                        if not model_id:
+                            llm_chain = getattr(inner_agent, 'llm_chain', None)
+                            if llm_chain:
+                                llm = getattr(llm_chain, 'llm', None)
+                                model_id = _extract_model_from_llm(llm)
+
+                # 4. LCEL runnable graph
+                if not model_id:
+                    runnable = getattr(agent_to_fingerprint, 'runnable', None)
+                    if runnable:
+                        model_id = _extract_model_from_llm(runnable)
+                        if not model_id and hasattr(runnable, 'steps'):
+                            for step in runnable.steps:
+                                model_id = _extract_model_from_llm(step)
+                                if model_id:
+                                    break
+
+                # 5. Toolkit
+                if not model_id:
+                    toolkit = getattr(agent_to_fingerprint, 'toolkit', None)
+                    if toolkit:
+                        llm = getattr(toolkit, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+
+                # 6. Tools list
+                if not model_id:
+                    tools_attr = getattr(agent_to_fingerprint, 'tools', None)
+                    if tools_attr:
+                        try:
+                            for tool in tools_attr:
+                                llm = getattr(tool, 'llm', None)
+                                model_id = _extract_model_from_llm(llm)
+                                if model_id:
+                                    break
+                        except Exception:
+                            pass
+
+                model_id = str(model_id) if model_id else ''
+
+                # tools/toolkit (from original agent)
                 tool_names = []
-                tools_attr = getattr(agent, 'tools', None)
+                tools_attr = getattr(agent_to_fingerprint, 'tools', None)
                 if tools_attr:
                     try:
                         for t in tools_attr:
@@ -3778,8 +3845,8 @@ Follow these rules when planning your actions."""
                             tool_names.append(str(name))
                     except Exception:
                         pass
-                elif getattr(agent, 'toolkit', None):
-                    tk = getattr(agent, 'toolkit')
+                elif getattr(agent_to_fingerprint, 'toolkit', None):
+                    tk = getattr(agent_to_fingerprint, 'toolkit')
                     tk_tools = getattr(tk, 'tools', None) or getattr(tk, 'get_tools', None)
                     try:
                         iterable = tk_tools() if callable(tk_tools) else tk_tools
@@ -3799,12 +3866,12 @@ Follow these rules when planning your actions."""
                 tool_names = [norm(n) for n in tool_names if n]
                 tools_joined = ','.join(sorted(set(tool_names)))
                 # fixed-order segments (keep keys even if empty to preserve format)
-                return f"[[FINGERPRINT]] agent={agent_name} | framework={framework} | model={model_id} | tools={tools_joined}"
+                return f"agent={agent_name}|framework={framework}|model={model_id}|tools={tools_joined}"
             except Exception:
                 # Fallback to prior behavior on any error
-                return getattr(agent, 'agent_id', None) or f"agent_{id(agent)}"
+                return getattr(agent_to_fingerprint, 'agent_id', None) or f"agent_{id(agent_to_fingerprint)}"
 
-        agent_fingerprint = _minimal_agent_fingerprint(self._agent)
+        agent_fingerprint = _minimal_agent_fingerprint(self._agent, self._original_agent)
 
         # Call pre-run service (it will handle baseline flag internally)
         selected_rules = self._service_adapter.select_rules(
@@ -3812,7 +3879,8 @@ Follow these rules when planning your actions."""
             agent_fingerprint=agent_fingerprint,
             max_rules_per_layer=self._top_k,  # Configurable via top_k parameter
             performance_tracking_id=self._performance_tracking_id,  # For rule isolation
-            is_baseline=is_baseline  # Skip rule selection for baselines
+            is_baseline=is_baseline,  # Skip rule selection for baselines
+            verbose=self._verbose  # Pass verbose flag through
         )
 
         print(f"[DASEIN] Pre-run service returned {len(selected_rules)} rules")
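For reference, the reworked fingerprint drops the "[[FINGERPRINT]]" prefix and the spaces around "|". A minimal illustrative sketch of the resulting string (the agent, model, and tool names below are hypothetical, not taken from this diff):

    agent_name = "AgentExecutor"          # hypothetical agent class name
    framework = "langchain"               # top-level module of the unwrapped agent
    model_id = "gemini-2.5-flash"         # hypothetical result of the model-id search above
    tools_joined = "sql_db_list_tables,sql_db_query"
    fingerprint = f"agent={agent_name}|framework={framework}|model={model_id}|tools={tools_joined}"
    # -> agent=AgentExecutor|framework=langchain|model=gemini-2.5-flash|tools=sql_db_list_tables,sql_db_query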
@@ -4005,24 +4073,92 @@ Follow these rules when planning your actions."""
         print(f"[DASEIN] Calling post-run service for rule synthesis ({mode_str} mode)")
 
         # Compute agent fingerprint for post-run (mirror pre-run minimal fingerprint)
-        def _minimal_agent_fingerprint(agent) -> str:
+        def _minimal_agent_fingerprint(agent, original_agent) -> str:
+            """Generate fingerprint from ORIGINAL unwrapped agent to avoid wrapper contamination"""
             try:
-                agent_cls = getattr(agent, '__class__', None)
+                # Use original_agent for fingerprinting, not wrapped agent
+                agent_to_fingerprint = original_agent if original_agent else agent
+
+                agent_cls = getattr(agent_to_fingerprint, '__class__', None)
                 agent_name = getattr(agent_cls, '__name__', '') if agent_cls else ''
-                module = getattr(agent, '__module__', '') or ''
+                module = getattr(agent_to_fingerprint, '__module__', '') or ''
                 framework = module.split('.')[0] if module else ''
+
+                # model id (comprehensive search through agent structure)
                 model_id = ''
-                llm = getattr(agent, 'llm', None)
-                if llm is not None:
-                    model_id = (
-                        getattr(llm, 'model', None)
-                        or getattr(llm, 'model_name', None)
-                        or getattr(llm, 'model_id', None)
-                        or getattr(llm, 'model_tag', None)
-                        or ''
-                    )
+
+                # Helper to extract model from LLM instance
+                def _extract_model_from_llm(llm_obj):
+                    if llm_obj is None:
+                        return None
+                    type_name = type(llm_obj).__name__
+                    if 'Language' in type_name or 'Chat' in type_name or 'LLM' in type_name:
+                        return (
+                            getattr(llm_obj, 'model', None)
+                            or getattr(llm_obj, 'model_name', None)
+                            or getattr(llm_obj, 'model_id', None)
+                            or getattr(llm_obj, 'model_tag', None)
+                        )
+                    return None
+
+                # 1. Direct llm
+                llm = getattr(agent_to_fingerprint, 'llm', None)
+                model_id = _extract_model_from_llm(llm)
+
+                # 2. Legacy ReAct: agent.llm_chain.llm
+                if not model_id:
+                    llm_chain = getattr(agent_to_fingerprint, 'llm_chain', None)
+                    if llm_chain:
+                        llm = getattr(llm_chain, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+
+                # 3. Nested agent.agent
+                if not model_id:
+                    inner_agent = getattr(agent_to_fingerprint, 'agent', None)
+                    if inner_agent:
+                        llm = getattr(inner_agent, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+                        if not model_id:
+                            llm_chain = getattr(inner_agent, 'llm_chain', None)
+                            if llm_chain:
+                                llm = getattr(llm_chain, 'llm', None)
+                                model_id = _extract_model_from_llm(llm)
+
+                # 4. LCEL runnable graph
+                if not model_id:
+                    runnable = getattr(agent_to_fingerprint, 'runnable', None)
+                    if runnable:
+                        model_id = _extract_model_from_llm(runnable)
+                        if not model_id and hasattr(runnable, 'steps'):
+                            for step in runnable.steps:
+                                model_id = _extract_model_from_llm(step)
+                                if model_id:
+                                    break
+
+                # 5. Toolkit
+                if not model_id:
+                    toolkit = getattr(agent_to_fingerprint, 'toolkit', None)
+                    if toolkit:
+                        llm = getattr(toolkit, 'llm', None)
+                        model_id = _extract_model_from_llm(llm)
+
+                # 6. Tools list
+                if not model_id:
+                    tools_attr = getattr(agent_to_fingerprint, 'tools', None)
+                    if tools_attr:
+                        try:
+                            for tool in tools_attr:
+                                llm = getattr(tool, 'llm', None)
+                                model_id = _extract_model_from_llm(llm)
+                                if model_id:
+                                    break
+                        except Exception:
+                            pass
+
+                model_id = str(model_id) if model_id else ''
+
                 tool_names = []
-                tools_attr = getattr(agent, 'tools', None)
+                tools_attr = getattr(agent_to_fingerprint, 'tools', None)
                 if tools_attr:
                     try:
                         for t in tools_attr:
@@ -4031,8 +4167,8 @@ Follow these rules when planning your actions."""
                             tool_names.append(str(name))
                     except Exception:
                         pass
-                elif getattr(agent, 'toolkit', None):
-                    tk = getattr(agent, 'toolkit')
+                elif getattr(agent_to_fingerprint, 'toolkit', None):
+                    tk = getattr(agent_to_fingerprint, 'toolkit')
                     tk_tools = getattr(tk, 'tools', None) or getattr(tk, 'get_tools', None)
                     try:
                         iterable = tk_tools() if callable(tk_tools) else tk_tools
@@ -4048,11 +4184,11 @@ Follow these rules when planning your actions."""
                 model_id = norm(model_id)
                 tool_names = [norm(n) for n in tool_names if n]
                 tools_joined = ','.join(sorted(set(tool_names)))
-                return f"[[FINGERPRINT]] agent={agent_name} | framework={framework} | model={model_id} | tools={tools_joined}"
+                return f"agent={agent_name}|framework={framework}|model={model_id}|tools={tools_joined}"
             except Exception:
-                return getattr(agent, 'agent_id', None) or f"agent_{id(agent)}"
+                return getattr(agent_to_fingerprint, 'agent_id', None) or f"agent_{id(agent_to_fingerprint)}"
 
-        agent_fingerprint = _minimal_agent_fingerprint(self._agent)
+        agent_fingerprint = _minimal_agent_fingerprint(self._agent, self._original_agent)
 
         # Get tool metadata from callback handler (extracted during runtime)
         tools_metadata = []
@@ -4083,6 +4219,23 @@ Follow these rules when planning your actions."""
         else:
             print(f"[DASEIN] WARNING: No tools extracted! Agent type: {type(self._agent)}")
 
+        # Extract rules_applied from selected_rules (rule IDs that were actually selected by pre-run)
+        rules_applied = []
+        for rule in selected_rules:
+            if isinstance(rule, dict):
+                rule_id = rule.get('id', '')
+                if rule_id:
+                    rules_applied.append(rule_id)
+            elif hasattr(rule, 'id'):
+                rules_applied.append(rule.id)
+        print(f"[DASEIN] Passing {len(rules_applied)} rule IDs to post-run: {rules_applied}")
+
+        # Compute context_hash: represents query + agent fingerprint (what context node contains)
+        import hashlib
+        combined_context = f"{query}:{agent_fingerprint}"
+        context_hash = f"ctx_{hashlib.sha256(combined_context.encode()).hexdigest()[:9]}"
+        print(f"[DASEIN] Computed context_hash: {context_hash}")
+
         response = self._service_adapter.synthesize_rules(
             run_id=None,  # Will use stored run_id from pre-run phase
             trace=cleaned_trace,
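The context_hash introduced above is a deterministic digest of the query plus the agent fingerprint. A standalone sketch of the same computation (input values hypothetical):

    import hashlib

    query = "How many tables are in the database?"  # hypothetical query
    agent_fingerprint = "agent=AgentExecutor|framework=langchain|model=gemini-2.5-flash|tools=sql_db_query"
    combined_context = f"{query}:{agent_fingerprint}"
    # "ctx_" plus the first 9 hex characters of the SHA-256 digest
    context_hash = f"ctx_{hashlib.sha256(combined_context.encode()).hexdigest()[:9]}"
    print(context_hash)  # ctx_ followed by 9 hex characters; exact value depends on the inputs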
@@ -4098,7 +4251,9 @@ Follow these rules when planning your actions."""
             post_run_mode=self._post_run,  # Pass post_run mode ("full" or "kpi_only")
             wait_for_synthesis=wait_for_synthesis,  # Wait for synthesis on retry runs (except last)
             tools_metadata=tools_metadata,  # Tool metadata for Stage 3.5 tool grounding
-            graph_metadata=graph_metadata  # Graph metadata for Stage 3.5 node grounding
+            graph_metadata=graph_metadata,  # Graph metadata for Stage 3.5 node grounding
+            rules_applied=rules_applied,  # Rule IDs selected by pre-run
+            context_hash=context_hash  # Context hash for graph grouping
         )
 
         # response is a dict from ServiceAdapter; handle accordingly
{dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/capture.py
@@ -1828,8 +1828,7 @@ Precedence: AVOID/SKIP > FIX > PREFER > HINT. On conflict, the higher rule ALWAY
 - PREFER: when multiple compliant options exist, choose the preferred—NO exceptions.
 - Recovery: if a banned/skipped item already failed, IMMEDIATELY switch to a compliant alternative.
 
-Output Contract: Produce ONE compliant tool/function call (or direct answer if none is needed).
-NO reasoning, NO justification, NO markdown.
+Honor the agent's existing output contract verbatim; do not add or change fields or formatting.
 
 Rules to Enforce:
 
{dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/services/post_run_client.py
@@ -34,6 +34,8 @@ class RuleSynthesisRequest:
     step_id: Optional[str] = None
     tools_metadata: Optional[List[Dict[str, Any]]] = None  # Tool metadata for Stage 3.5 tool grounding
     graph_metadata: Optional[Dict[str, Any]] = None  # Graph metadata for Stage 3.5 node grounding
+    rules_applied: Optional[List[str]] = None  # Rule IDs that were selected by pre-run and applied during execution
+    context_hash: Optional[str] = None  # Context hash for grouping traces (query + agent fingerprint)
 
 
 @dataclass
@@ -99,6 +101,8 @@ class PostRunClient:
             "wait_for_synthesis": request.wait_for_synthesis,
             "tools_metadata": request.tools_metadata or [],  # Tool metadata for Stage 3.5 tool grounding
             "graph_metadata": request.graph_metadata or {},  # Graph metadata for Stage 3.5 node grounding
+            "rules_applied": request.rules_applied or [],  # Rule IDs selected and applied
+            "context_hash": request.context_hash,  # Context hash for graph grouping
         }
 
         logger.info(f"Synthesizing rules for run: {request.run_id}")
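Together with the RuleSynthesisRequest fields added above, the post-run payload now carries two extra keys. A sketch of just those keys (values hypothetical, existing keys elided):

    synthesis_payload = {
        # ...existing keys such as "run_id", "tools_metadata", "graph_metadata"...
        "rules_applied": ["rule_abc123", "rule_def456"],  # rule IDs selected by pre-run and applied
        "context_hash": "ctx_1a2b3c4d5",                  # groups traces by query + agent fingerprint
    }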
{dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/services/pre_run_client.py
@@ -28,6 +28,7 @@ class RuleSelectionRequest:
     max_rules_per_layer: Optional[int] = 5
     performance_tracking_id: Optional[str] = None
     is_baseline: bool = False
+    verbose: bool = False
 
 
 @dataclass
@@ -87,7 +88,8 @@ class PreRunClient:
             "run_id": request.run_id,
             "max_rules_per_layer": request.max_rules_per_layer,
             "performance_tracking_id": request.performance_tracking_id,
-            "is_baseline": request.is_baseline
+            "is_baseline": request.is_baseline,
+            "verbose": request.verbose
         }
 
         logger.info(f"Selecting rules for query: {str(request.query)[:50]}...")
{dasein_core-0.2.17 → dasein_core-0.2.18}/src/dasein/services/service_adapter.py
@@ -43,7 +43,8 @@ class ServiceAdapter:
     def select_rules(self, query: str, agent_fingerprint: Optional[str] = None,
                      artifacts: Optional[List[str]] = None, limits: Optional[Dict[str, int]] = None,
                      run_id: Optional[str] = None, max_rules_per_layer: Optional[int] = 5,
-                     performance_tracking_id: Optional[str] = None, is_baseline: bool = False) -> List[Dict[str, Any]]:
+                     performance_tracking_id: Optional[str] = None, is_baseline: bool = False,
+                     verbose: bool = False) -> List[Dict[str, Any]]:
         """
         Select rules for an incoming run (replaces local rule selection)
 
@@ -68,7 +69,8 @@
             run_id=run_id,
             max_rules_per_layer=max_rules_per_layer,
             performance_tracking_id=performance_tracking_id,
-            is_baseline=is_baseline
+            is_baseline=is_baseline,
+            verbose=verbose
         )
 
         response = self.pre_run_client.select_rules(request)
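With the new parameter in place, callers can opt into verbose pre-run logging. A usage sketch, assuming an already-configured ServiceAdapter instance named adapter (query and fingerprint values hypothetical):

    selected_rules = adapter.select_rules(
        query="How many tables are in the database?",
        agent_fingerprint="agent=AgentExecutor|framework=langchain|model=gemini-2.5-flash|tools=sql_db_query",
        max_rules_per_layer=5,
        is_baseline=False,
        verbose=True,  # new in 0.2.18: forwarded to the pre-run service payload
    )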
@@ -96,7 +98,8 @@
                          skip_synthesis: bool = False, agent_fingerprint: Optional[str] = None,
                          step_id: Optional[str] = None, post_run_mode: str = "full",
                          wait_for_synthesis: bool = False, tools_metadata: Optional[List[Dict[str, Any]]] = None,
-                         graph_metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+                         graph_metadata: Optional[Dict[str, Any]] = None, rules_applied: Optional[List[str]] = None,
+                         context_hash: Optional[str] = None) -> Dict[str, Any]:
         """
         Synthesize rules from run telemetry (replaces local rule synthesis)
 
@@ -141,7 +144,9 @@
             wait_for_synthesis=wait_for_synthesis,
             step_id=step_id,
             tools_metadata=tools_metadata,
-            graph_metadata=graph_metadata
+            graph_metadata=graph_metadata,
+            rules_applied=rules_applied,
+            context_hash=context_hash
         )
 
         response = self.post_run_client.synthesize_rules(request)
{dasein_core-0.2.17 → dasein_core-0.2.18/src/dasein_core.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dasein-core
-Version: 0.2.17
+Version: 0.2.18
 Summary: Universal memory for agentic AI. Attach a brain to any LangChain/LangGraph agent in a single line.
 Author-email: Dasein Team <support@dasein.ai>
 License: MIT