deja-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deja/__init__.py ADDED
File without changes
deja/config.py ADDED
@@ -0,0 +1,127 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import re
5
+ from pathlib import Path
6
+ from typing import Optional
7
+
8
+ import yaml
9
+ from pydantic import BaseModel, field_validator
10
+
11
+
12
def _substitute_env(value: str) -> str:
    """Replace each ``${NAME}`` placeholder in *value* with ``os.environ[NAME]``.

    Placeholders whose variable is unset are left intact (the literal
    ``${NAME}`` text survives), so missing secrets are visible rather than
    silently replaced by empty strings.
    """
    def replacer(match: re.Match) -> str:
        var_name = match.group(1)
        # Fall back to the full match (the raw "${NAME}" text) when unset.
        return os.environ.get(var_name, match.group(0))

    return re.sub(r"\$\{([^}]+)\}", replacer, value)


def _expand_paths(data: dict) -> dict:
    """Return a copy of *data* with ``${ENV_VAR}`` placeholders substituted.

    Recursively walks nested dicts and lists, applying ``_substitute_env``
    to every string value.  Note: despite the historical name, tilde (~)
    expansion is NOT performed here — path fields expand ``~`` lazily via
    ``StoreConfig``'s properties.

    Args:
        data: Parsed YAML configuration mapping.

    Returns:
        A new dict mirroring *data*; non-string scalars pass through
        unchanged.
    """
    def _walk(value):
        # Recurse into containers so nested config sections and lists of
        # strings also receive env substitution.
        if isinstance(value, dict):
            return {k: _walk(v) for k, v in value.items()}
        if isinstance(value, list):
            return [_walk(v) for v in value]
        if isinstance(value, str):
            return _substitute_env(value)
        return value

    return {key: _walk(value) for key, value in data.items()}
32
+
33
+
34
class LLMProviderConfig(BaseModel):
    """Connection settings for one LLM endpoint (used per role in LLMConfig)."""

    # Backend kind; "ollama" and "anthropic" appear elsewhere in this file,
    # and "none" is used to disable a role until configured.
    provider: str = "ollama"
    model: str = "qwen2.5:3b"
    # Endpoint URL; default is the local Ollama daemon.
    base_url: str = "http://localhost:11434"
    # May be written as a ${ENV_VAR} placeholder in config; resolved below.
    api_key: Optional[str] = None

    @field_validator("api_key", mode="before")
    @classmethod
    def expand_env_vars(cls, v: Optional[str]) -> Optional[str]:
        # Resolve ${ENV_VAR} placeholders at validation time so secrets never
        # have to be stored in the config file in plain text.
        if v is None:
            return v
        return _substitute_env(v)
46
+
47
+
48
class LLMConfig(BaseModel):
    """Per-role LLM endpoints: extraction, reflection, and a hosted fallback.

    Pydantic deep-copies field defaults per instance, so sharing model
    instances as defaults here is safe.
    """

    # "none" disables the role until the user configures a provider.
    extraction: LLMProviderConfig = LLMProviderConfig(provider="none")
    reflection: LLMProviderConfig = LLMProviderConfig(provider="none")
    # Hosted fallback; the ${ANTHROPIC_API_KEY} placeholder is resolved by
    # LLMProviderConfig's api_key validator when the instance is built.
    fallback: LLMProviderConfig = LLMProviderConfig(
        provider="anthropic",
        model="claude-haiku-4-5-20251001",
        api_key="${ANTHROPIC_API_KEY}",
    )
56
+
57
+
58
class StoreConfig(BaseModel):
    """Filesystem locations for the memory database and vault."""

    # Stored as strings so "~" survives round-tripping through YAML;
    # expansion happens lazily in the properties below.
    path: str = "~/.deja/store/memories.db"
    vault_path: str = "~/.deja/store/vault/"

    @property
    def db_path(self) -> Path:
        """Memory database file path with "~" expanded to the user's home."""
        return Path(self.path).expanduser()

    @property
    def vault_dir(self) -> Path:
        """Vault directory path with "~" expanded to the user's home."""
        return Path(self.vault_path).expanduser()
69
+
70
+
71
class ReflectionConfig(BaseModel):
    """Tunables for the reflection engine (compression, decay, promotion)."""

    # Token-estimate thresholds that auto-trigger the Observer / Reflector
    # passes (see ReflectionEngine.check_and_trigger).
    observer_trigger_tokens: int = 30000
    reflector_trigger_tokens: int = 40000
    # Cron expression — daily at 02:00. Presumably consumed by a scheduler
    # for the knowledge-graph merge; not referenced in this file.
    kg_merge_schedule: str = "0 2 * * *"
    # Memories whose confidence falls to or below this are archived.
    confidence_archive_threshold: float = 0.3
    # agent memories (gotcha, decision, progress, pattern) decay at this rate.
    # Operational knowledge goes stale; 0.05/week means a memory hits the 0.3
    # archive threshold in ~14 weeks without re-confirmation.
    confidence_decay_per_week: float = 0.05
    # user memories (preferences, habits) decay ~5x slower. Personal style
    # preferences don't go stale the way project-specific gotchas do.
    user_confidence_decay_per_week: float = 0.01
    # Patterns observed in at least this many projects get promoted to
    # global scope (see ReflectionEngine.run_promote).
    min_project_pattern_count: int = 2
84
+
85
+
86
class WatchersConfig(BaseModel):
    """Which coding-agent transcript watchers are enabled."""

    # One flag per supported agent; only Claude Code is on by default.
    claude_code: bool = True
    gemini_cli: bool = False
    codex_cli: bool = False
    aider: bool = False
    # Debounce window for watcher events, in seconds.
    debounce_seconds: int = 30
92
+
93
+
94
class EmbeddingConfig(BaseModel):
    """Settings for the optional embedding backend."""

    provider: str = "none"  # none | ollama
    model: str = "nomic-embed-text"
    # Endpoint for the embedding provider; default is the local Ollama daemon.
    base_url: str = "http://localhost:11434"
98
+
99
+
100
class Config(BaseModel):
    """Root configuration aggregating every section.

    Each section defaults to its own defaults, so ``Config()`` with no
    arguments is a fully usable configuration (pydantic copies the default
    instances per Config, so they are not shared between instances).
    """

    llm: LLMConfig = LLMConfig()
    store: StoreConfig = StoreConfig()
    reflection: ReflectionConfig = ReflectionConfig()
    watchers: WatchersConfig = WatchersConfig()
    embedding: EmbeddingConfig = EmbeddingConfig()
106
+
107
+
108
def load_config(path: Optional[Path] = None) -> Config:
    """Load config from *path*, falling back to ``~/.deja/config.yaml``,
    then to the bundled default, then to pure field defaults.

    Args:
        path: Optional explicit config file; tried first when given.

    Returns:
        A validated :class:`Config`.  The first existing candidate file wins;
        if no candidate exists, a ``Config`` built from field defaults is
        returned.
    """
    candidates: list[Path] = []
    if path:
        candidates.append(Path(path).expanduser())
    candidates.append(Path("~/.deja/config.yaml").expanduser())

    # Bundled default shipped alongside the package.
    default_path = Path(__file__).parent.parent / "config" / "default.yaml"
    candidates.append(default_path)

    for candidate in candidates:
        if candidate.exists():
            # Explicit encoding: config files may contain non-ASCII values and
            # the platform default encoding is not guaranteed to be UTF-8.
            with open(candidate, encoding="utf-8") as f:
                raw = yaml.safe_load(f) or {}
            # Substitute ${ENV_VAR} placeholders before validation so
            # validators see the resolved values.
            raw = _expand_paths(raw)
            return Config.model_validate(raw)

    return Config()
deja/core/__init__.py ADDED
File without changes
deja/core/extractor.py ADDED
@@ -0,0 +1,135 @@
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from typing import Optional
5
+
6
+ from deja.llm.base import LLMAdapter
7
+
8
# System prompt for the extraction LLM.  The model must answer with JSON
# matching EXTRACTION_SCHEMA below — keep the two in sync when editing.
EXTRACTION_SYSTEM = """You are a memory extraction system for a software engineer.
Given a coding session transcript or summary, extract ONLY memories
that would be genuinely useful in a future session.

Be ruthless — most session content is NOT worth remembering.
Only extract things that are:
- Non-obvious (not derivable from reading the codebase)
- Reusable (would apply in future sessions)
- Important (would cause problems if forgotten)

Memory types:
- preference: how the user likes to code (style, tools, patterns)
- pattern: reusable solution / architectural approach that applies across contexts ("knowing what works")
- decision: non-obvious architectural choice with reasoning
- gotcha: bug, trap, or non-obvious issue to avoid
- progress: current state of in-progress work
- procedure: reusable ordered steps for a recurring class of work ("knowing how to execute"). Keep thin: numbered steps + tool hints + exit criteria only. Do NOT inline gotchas or decisions — save those as separate memory types.

Category:
- user: personal preferences and habits (applies across all projects)
- agent: operational knowledge discovered while doing work

Domain (optional, coarse routing tag — use for procedure type mainly):
- debug | build | test | deploy | research

Output ONLY valid JSON:
{
  "memories": [
    {
      "type": "preference|pattern|decision|gotcha|progress|procedure",
      "category": "user|agent",
      "content": "concise, self-contained fact. 1-2 sentences max for most types. For procedure: one-line description followed by numbered steps, tool hints, and exit criteria.",
      "scope": "global|project",
      "project": "project_name or null if global",
      "confidence": 0.0-1.0,
      "domain": "debug|build|test|deploy|research|null"
    }
  ]
}

If nothing is worth remembering, return: {"memories": []}"""

# JSON Schema counterpart of the prompt above, passed as the ``schema``
# argument to LLMAdapter.complete_structured() to constrain model output.
EXTRACTION_SCHEMA = {
    "type": "object",
    "properties": {
        "memories": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "type": {
                        "type": "string",
                        "enum": ["preference", "pattern", "decision", "gotcha", "progress", "procedure"],
                    },
                    "category": {"type": "string", "enum": ["user", "agent"]},
                    "content": {"type": "string"},
                    "scope": {"type": "string", "enum": ["global", "project"]},
                    "project": {"type": ["string", "null"]},
                    "confidence": {"type": "number"},
                    "domain": {"type": ["string", "null"]},
                },
                # project/domain are optional; null is an acceptable value.
                "required": ["type", "category", "content", "scope", "confidence"],
            },
        }
    },
    "required": ["memories"],
}


async def extract_memories(
    transcript: str,
    project: str,
    source: str,
    adapter: LLMAdapter,
) -> list[dict]:
    """Extract memories from a session transcript or summary.

    Args:
        transcript: Raw session transcript or summary text.
        project: Project name used to scope project-level memories.
        source: Provenance label stored on each returned memory.
        adapter: LLM adapter providing ``complete_structured``.

    Returns:
        List of memory dicts ready to pass to store.save().  Empty on blank
        input, LLM failure, or a malformed response — extraction is
        best-effort and must never raise into the caller.
    """
    if not transcript.strip():
        return []

    user_prompt = f"Session transcript/summary to extract memories from:\n\n{transcript}"

    try:
        result = await adapter.complete_structured(
            system=EXTRACTION_SYSTEM,
            user=user_prompt,
            schema=EXTRACTION_SCHEMA,
        )
    except Exception as e:
        # Best-effort: a failed LLM call loses this session's memories but
        # must not crash the calling watcher.
        print(f"[deja] Extraction LLM error: {e}", file=sys.stderr)
        return []

    memories = result.get("memories", [])
    if not isinstance(memories, list):
        return []

    output = []
    for mem in memories:
        if not isinstance(mem, dict):
            continue
        if not mem.get("content") or not mem.get("type"):
            continue

        # Normalize scope: if scope is "project" but no project given, use the provided project
        scope = mem.get("scope", "global")
        mem_project = mem.get("project") or (project if scope == "project" else None)
        if scope == "project" and mem_project:
            scope_value = f"project:{mem_project}"
        else:
            scope_value = "global"
            mem_project = None

        # Small local models occasionally emit a non-numeric or out-of-range
        # confidence despite the schema; fall back to 0.8 and clamp to [0, 1]
        # rather than crashing the whole batch on one bad entry.
        try:
            confidence = float(mem.get("confidence", 0.8))
        except (TypeError, ValueError):
            confidence = 0.8
        confidence = min(1.0, max(0.0, confidence))

        output.append(
            {
                "type": mem["type"],
                "category": mem.get("category", "agent"),
                "content": mem["content"],
                "scope": scope_value,
                "project": mem_project,
                "source": source,
                "confidence": confidence,
                "domain": mem.get("domain"),
            }
        )

    return output
@@ -0,0 +1,364 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import re
5
+ import sys
6
+ from datetime import datetime, timezone
7
+ from typing import Optional
8
+
9
+ from deja.config import ReflectionConfig
10
+ from deja.core.store import MemoryStore
11
+ from deja.llm.base import LLMAdapter
12
+
13
# Prompt for the Observer pass (ReflectionEngine.run_observer): compresses
# recent memories into plain-text observations, optionally followed by a
# MEMORY_UPDATES: JSON block that _parse_observer_response() splits off.
OBSERVER_SYSTEM = """You are a memory compressor for a software engineer's coding knowledge base.
Given these recent memories, extract the key observations worth preserving long-term.

Discard:
- Obvious facts derivable from the codebase
- Task-specific one-offs that won't recur
- Superseded progress updates (if newer progress exists)

Keep:
- Patterns and gotchas
- Architectural decisions and their reasoning
- User preferences and working style
- Ongoing work that is not yet complete
- Procedures (reusable step sequences) — keep steps thin

For procedure memories:
- Preserve only: numbered steps, tool hints, exit criteria
- Strip any inline gotchas or decisions — those belong in separate memories
- Merge near-duplicate procedures rather than keeping both

Output concise observations in plain text, one per line.
Each observation should be self-contained and useful without context.

After the observations, if any memories would benefit from metadata improvements, append a
MEMORY_UPDATES block with a JSON array of suggested changes. Only update what you are
confident about — omit the block entirely if no improvements are needed.

Rules for memory_updates:
- Add a trigger only if the memory is a gotcha clearly tied to a specific command or action
  boundary (e.g. kubectl apply, alembic upgrade, terraform apply, git push --force).
  Trigger phrases must be commands the agent would literally type, comma-separated.
- Change type only if clearly wrong (e.g. saved as pattern but describes a specific
  command failure → gotcha). Be conservative.
- Never update preferences, decisions, or progress entries.
- If the memory already has a trigger shown in [trigger:...], skip it.

Format (append at the end, after all observations):

MEMORY_UPDATES:
[
  {"id": "01JKB...", "trigger": "kubectl apply, helm upgrade"},
  {"id": "01JKC...", "type": "gotcha", "trigger": "alembic upgrade"}
]"""

# Prompt for the Reflector pass (ReflectionEngine.run_reflector): condenses
# the accumulated observation log; output replaces the log wholesale via
# store.replace_observations().
REFLECTOR_SYSTEM = """Given this observation log, identify which observations are:
- Superseded by a newer observation about the same topic
- Redundant (same fact stated multiple ways)
- Resolved (a gotcha that was fixed, progress that completed)

Remove or merge superseded/redundant/resolved observations.
For merged observations, keep the most recent and accurate version.

Output the condensed observation log only. Plain text, one observation per line.
Do not add commentary. Do not add headers. Just the observations."""
67
+
68
+
69
def _now_iso() -> str:
    """Current UTC time as an ISO-8601 timestamp string."""
    current = datetime.now(timezone.utc)
    return current.isoformat()
71
+
72
+
73
def _parse_observer_response(text: str) -> tuple[str, list[dict]]:
    """Split Observer LLM response into (observations_text, memory_updates).

    The LLM appends a MEMORY_UPDATES: JSON block after the plain-text
    observations.  If the block is absent or malformed, the full text is
    returned as observations with an empty updates list — observations
    always take priority.
    """
    marker = "MEMORY_UPDATES:"
    head, sep, tail = text.partition(marker)
    if not sep:
        # No marker anywhere: the whole response is observations.
        return text, []

    observations = head.strip()
    payload = tail.strip()

    # Tolerate markdown fences or stray prose around the JSON array.
    array_match = re.search(r"\[.*\]", payload, re.DOTALL)
    if array_match is None:
        return observations, []

    try:
        parsed = json.loads(array_match.group())
    except json.JSONDecodeError:
        return observations, []

    if not isinstance(parsed, list):
        return observations, []

    # Keep only well-formed entries: dicts carrying a memory "id".
    valid = [entry for entry in parsed if isinstance(entry, dict) and "id" in entry]
    return observations, valid
100
+
101
+
102
def _format_memories_for_prompt(memories: list[dict]) -> str:
    """Render memories as a blank-line-separated block for the Observer prompt.

    Each entry is a bracketed header line (type, confidence, scope label, id,
    optional trigger) followed by the memory content.
    """
    def render(m: dict) -> str:
        scope_label = m.get("project") or "global"
        trigger_part = f" [trigger:{m['trigger']}]" if m.get("trigger") else ""
        header = f"[{m['type']}] [conf:{m['confidence']:.2f}] [{scope_label}] [id:{m['id']}]{trigger_part}"
        return f"{header}\n{m['content']}"

    return "\n\n".join(render(m) for m in memories)
112
+
113
+
114
def _format_observations_for_prompt(observations: list[dict]) -> str:
    """Join observation contents into a newline-separated block, one per line."""
    contents = [entry["content"] for entry in observations]
    return "\n".join(contents)
116
+
117
+
118
class ReflectionEngine:
    """Compresses, deduplicates, decays, and promotes memories over time.

    Two reflection modes
    --------------------
    LLM mode — uses the configured ``reflection`` LLM (Ollama / Anthropic).
    Triggered automatically by token-count thresholds or manually
    via ``deja reflect``.

    Agent mode — no extra LLM call. The active coding agent (Claude Code,
    Codex, Gemini CLI) reads the output of ``agent_mode_prompt()``
    and executes ``deja archive / invalidate / save`` commands
    directly. Zero additional API cost — the agent is already
    being billed for the session.
    """

    def __init__(
        self,
        store: MemoryStore,
        config: ReflectionConfig,
        adapter: Optional[LLMAdapter] = None,
    ) -> None:
        # adapter may be None: the no-LLM maintenance passes (decay, promote,
        # archive) and agent mode work without one; run_observer/run_reflector
        # raise RuntimeError when it is missing.
        self.store = store
        self.config = config
        self.adapter = adapter

    # ── LLM-driven compression ─────────────────────────────────────────────

    async def run_observer(self, project: Optional[str] = None) -> int:
        """Compress memories into observations via LLM. Returns observations created.

        Only memories saved since the previous observer pass are considered;
        the watermark is the ``last_observer_at`` reflection metadata.
        """
        if not self.adapter:
            raise RuntimeError(
                "No LLM adapter configured for reflection. "
                "Set reflection.provider in ~/.deja/config.yaml or use --agent-mode."
            )

        meta = await self.store.get_reflection_meta(project)
        last_run = meta.get("last_observer_at") if meta else None

        memories = await self.store.list_for_reflection(project, since=last_run)
        if not memories:
            return 0

        # Rough token estimate: ~2 tokens per whitespace-delimited word.
        token_estimate = sum(len(m["content"].split()) * 2 for m in memories)
        if token_estimate < 100:
            # Too little new material to justify an LLM call.
            return 0

        user_prompt = (
            "Recent memories to compress into observations:\n\n"
            + _format_memories_for_prompt(memories)
        )

        try:
            response = await self.adapter.complete(system=OBSERVER_SYSTEM, user=user_prompt)
            observations_text = response.content.strip()
        except Exception as e:
            # Best-effort: a failed LLM call skips this pass without advancing
            # the last_observer_at watermark, so memories are retried later.
            print(f"[deja] Observer LLM error: {e}", file=sys.stderr)
            return 0

        # Split on MEMORY_UPDATES: block — observations come first, JSON updates after
        obs_part, memory_updates = _parse_observer_response(observations_text)

        new_obs = [line.strip() for line in obs_part.splitlines() if line.strip()]
        for obs_text in new_obs:
            await self.store.save_observation(project, obs_text)

        # Apply memory metadata updates suggested by the Observer
        applied = 0
        for update in memory_updates:
            mem_id = update.get("id")
            if not mem_id:
                continue
            # Whitelist: the LLM may only change "trigger" and "type".
            fields = {k: v for k, v in update.items() if k in ("trigger", "type")}
            if fields:
                ok = await self.store.update_memory(mem_id, fields)
                if ok:
                    applied += 1
        if applied:
            print(f"[deja] Observer applied {applied} memory metadata update(s).", file=sys.stderr)

        await self.store.set_reflection_meta(project, last_observer_at=_now_iso())
        return len(new_obs)

    async def run_reflector(self, project: Optional[str] = None) -> int:
        """Condense the observation log via LLM. Returns reduction in observation count."""
        if not self.adapter:
            raise RuntimeError(
                "No LLM adapter configured for reflection. "
                "Set reflection.provider in ~/.deja/config.yaml or use --agent-mode."
            )

        observations = await self.store.list_observations(project)
        if len(observations) < 3:
            # A log this small cannot meaningfully be condensed.
            return 0

        user_prompt = (
            "Observation log to condense:\n\n"
            + _format_observations_for_prompt(observations)
        )

        try:
            response = await self.adapter.complete(system=REFLECTOR_SYSTEM, user=user_prompt)
            condensed_text = response.content.strip()
        except Exception as e:
            print(f"[deja] Reflector LLM error: {e}", file=sys.stderr)
            return 0

        condensed = [line.strip() for line in condensed_text.splitlines() if line.strip()]
        original_count = len(observations)
        # NOTE(review): the condensed LLM output replaces the log wholesale —
        # an empty/garbled response would wipe the observations. Confirm
        # whether replace_observations guards against that.
        await self.store.replace_observations(project, condensed)

        # Surviving compression is a confirmation signal — increment reuse_count
        # for all active memories. We can't map observations back to specific
        # memories, so we treat all active memories as "surviving" this pass.
        await self.store.increment_reuse_count(project)

        await self.store.set_reflection_meta(project, last_reflector_at=_now_iso())
        return original_count - len(condensed)

    # ── No-LLM maintenance passes ──────────────────────────────────────────

    async def run_decay(self) -> int:
        """Reduce confidence on memories not confirmed recently.

        Two rates are applied:
        - category='agent': config.confidence_decay_per_week (default 0.05/week)
          Operational knowledge (gotchas, decisions, progress) goes stale.
        - category='user': config.user_confidence_decay_per_week (default 0.01/week)
          Preferences and habits are stable; they decay ~5x slower.

        Returns the number of memories the store reports as decayed.
        """
        count = await self.store.decay_unconfirmed(
            days_threshold=14,
            decay_per_week=self.config.confidence_decay_per_week,
            user_decay_per_week=self.config.user_confidence_decay_per_week,
        )
        # Meta is recorded globally (project=None) — decay is not per-project.
        await self.store.set_reflection_meta(None, last_decay_at=_now_iso())
        return count

    async def run_promote(self) -> int:
        """Promote patterns seen in 2+ projects to global scope.

        The threshold comes from config.min_project_pattern_count.
        Returns the number of promoted memories.
        """
        count = await self.store.promote_patterns_to_global(
            self.config.min_project_pattern_count
        )
        await self.store.set_reflection_meta(None, last_promote_at=_now_iso())
        return count

    async def run_archive(self) -> int:
        """Archive memories below confidence threshold.

        Returns the number of archived memories.
        """
        count = await self.store.archive_below_threshold(
            self.config.confidence_archive_threshold
        )
        await self.store.set_reflection_meta(None, last_archive_at=_now_iso())
        return count

    # ── Agent mode ─────────────────────────────────────────────────────────

    async def agent_mode_prompt(self, project: Optional[str] = None) -> str:
        """Return a formatted memory dump with instructions for the coding agent to reflect.

        The agent (Claude Code, Codex, Gemini CLI) reads this output and executes
        deja commands directly — no separate LLM API call needed.
        """
        memories = await self.store.list_for_reflection(project)
        project_label = project or "global"
        # Appended to the suggested `deja save` command so consolidated
        # memories keep the project scope.
        project_flag = f" --project {project}" if project else ""

        if not memories:
            return f"No active memories found for project '{project_label}'. Nothing to reflect on."

        lines = [
            f"You are acting as a memory reflector for project '{project_label}'.",
            "",
            f"Review the {len(memories)} active memories below and identify any that should be:",
            f" 1. Archived (stale, no longer relevant):",
            f" deja archive <id>",
            f" 2. Invalidated (contradicted by newer information):",
            f" deja invalidate <id>",
            f" 3. Consolidated (two memories express the same thing):",
            f" deja archive <id1>",
            f" deja archive <id2>",
            f' deja save "<condensed content>" --type <type>{project_flag}',
            f" 4. Trigger-tagged (gotcha clearly tied to a specific command but has no trigger):",
            f' deja update <id> --trigger "cmd1, cmd2"',
            f" Use this for gotchas about what to do right before/after a specific command.",
            f" Example triggers: 'kubectl apply', 'alembic upgrade', 'terraform apply'.",
            f" Only tag gotchas — not preferences, decisions, or progress.",
            f" 5. Reclassified (saved as the wrong type — e.g. pattern that is really a gotcha):",
            f" deja update <id> --type gotcha",
            "",
            "Be conservative — only act on memories that clearly need attention.",
            "For trigger tagging: if a gotcha is already tagged (shown as [trigger:...]), skip it.",
            "If everything looks good, do nothing.",
            "",
            "--- MEMORIES ---",
            "",
        ]

        # NOTE(review): header uses [ID:...] here but [id:...] in
        # _format_memories_for_prompt — confirm whether consumers care.
        for m in memories:
            scope_label = f"project:{m['project']}" if m.get("project") else "global"
            trigger_str = f" [trigger:{m['trigger']}]" if m.get("trigger") else ""
            lines.append(
                f"[{m['type']}] [conf:{m['confidence']:.2f}] [scope:{scope_label}]"
                f" [ID:{m['id']}]{trigger_str}"
            )
            lines.append(m["content"])
            lines.append("")

        lines.append("--- END MEMORIES ---")
        return "\n".join(lines)

    # ── Full pass + auto-trigger ────────────────────────────────────────────

    async def run_full(self, project: Optional[str] = None) -> dict:
        """Full reflection pass: observer → reflector → decay → promote → archive.

        Returns a dict of per-pass counts.  The LLM passes report 0 when no
        adapter is configured; the maintenance passes always run.
        """
        results: dict = {}
        if self.adapter:
            results["observer"] = await self.run_observer(project)
            results["reflector"] = await self.run_reflector(project)
        else:
            results["observer"] = 0
            results["reflector"] = 0
        results["decay"] = await self.run_decay()
        results["promote"] = await self.run_promote()
        results["archive"] = await self.run_archive()
        return results

    async def check_and_trigger(self, project: Optional[str] = None) -> None:
        """Check token thresholds and auto-trigger observer/reflector if exceeded.

        No-op without an adapter.  Uses the same ~2-tokens-per-word estimate
        as run_observer.
        """
        if not self.adapter:
            return

        meta = await self.store.get_reflection_meta(project)
        last_observer_at = meta.get("last_observer_at") if meta else None

        memories = await self.store.list_for_reflection(project, since=last_observer_at)
        token_count = sum(len(m["content"].split()) * 2 for m in memories)

        if token_count >= self.config.observer_trigger_tokens:
            n = await self.run_observer(project)
            print(f"[deja] Auto-observer triggered: {n} observations created.", file=sys.stderr)

        # Re-read after the (possible) observer pass so freshly created
        # observations count toward the reflector threshold.
        observations = await self.store.list_observations(project)
        obs_tokens = sum(len(o["content"].split()) * 2 for o in observations)

        if obs_tokens >= self.config.reflector_trigger_tokens:
            n = await self.run_reflector(project)
            print(f"[deja] Auto-reflector triggered: {n} observations reduced.", file=sys.stderr)