arkaos 2.8.0 → 2.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/VERSION +1 -1
- package/config/cognition/prompts/dreaming.md +208 -0
- package/config/cognition/prompts/research.md +194 -0
- package/config/cognition/schedules.yaml +25 -0
- package/core/cognition/__init__.py +7 -0
- package/core/cognition/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/cognition/capture/__init__.py +5 -0
- package/core/cognition/capture/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/cognition/capture/__pycache__/collector.cpython-313.pyc +0 -0
- package/core/cognition/capture/__pycache__/store.cpython-313.pyc +0 -0
- package/core/cognition/capture/collector.py +80 -0
- package/core/cognition/capture/store.py +158 -0
- package/core/cognition/insights/__init__.py +5 -0
- package/core/cognition/insights/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/cognition/insights/__pycache__/store.cpython-313.pyc +0 -0
- package/core/cognition/insights/store.py +155 -0
- package/core/cognition/memory/__init__.py +9 -0
- package/core/cognition/memory/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/cognition/memory/__pycache__/obsidian.cpython-313.pyc +0 -0
- package/core/cognition/memory/__pycache__/schemas.cpython-313.pyc +0 -0
- package/core/cognition/memory/__pycache__/vector.cpython-313.pyc +0 -0
- package/core/cognition/memory/__pycache__/writer.cpython-313.pyc +0 -0
- package/core/cognition/memory/obsidian.py +73 -0
- package/core/cognition/memory/schemas.py +141 -0
- package/core/cognition/memory/vector.py +223 -0
- package/core/cognition/memory/writer.py +57 -0
- package/core/cognition/research/__init__.py +5 -0
- package/core/cognition/research/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/cognition/research/__pycache__/profiler.cpython-313.pyc +0 -0
- package/core/cognition/research/profiler.py +256 -0
- package/core/cognition/scheduler/__init__.py +5 -0
- package/core/cognition/scheduler/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/cognition/scheduler/__pycache__/cli.cpython-313.pyc +0 -0
- package/core/cognition/scheduler/__pycache__/daemon.cpython-313.pyc +0 -0
- package/core/cognition/scheduler/__pycache__/platform.cpython-313.pyc +0 -0
- package/core/cognition/scheduler/cli.py +86 -0
- package/core/cognition/scheduler/daemon.py +172 -0
- package/core/cognition/scheduler/platform.py +292 -0
- package/knowledge/ecosystems.json +362 -10
- package/package.json +1 -1
- package/pyproject.toml +1 -1
package/VERSION
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
2.
|
|
1
|
+
2.10.0
|
|
@@ -0,0 +1,208 @@
|
|
|
1
|
+
# ArkaOS Dreaming — Nightly Cognitive Consolidation
|
|
2
|
+
|
|
3
|
+
You are ArkaOS performing your nightly Dreaming session. Your job is to review everything that happened today, learn from it, critique it honestly, and organize the knowledge for tomorrow.
|
|
4
|
+
|
|
5
|
+
## Execution Rules
|
|
6
|
+
|
|
7
|
+
### ALLOWED
|
|
8
|
+
- Read any file from any project
|
|
9
|
+
- Read git logs and diffs
|
|
10
|
+
- Search the web (WebSearch, Firecrawl)
|
|
11
|
+
- Write to Obsidian vault at ~/Documents/Personal/Projects/WizardingCode Internal/ArkaOS/
|
|
12
|
+
- Write to ~/.arkaos/ (captures, insights, logs, knowledge)
|
|
13
|
+
- Use browser for research
|
|
14
|
+
- Read online documentation
|
|
15
|
+
|
|
16
|
+
### PROHIBITED
|
|
17
|
+
- npm install, composer require, pip install (zero installations)
|
|
18
|
+
- git commit, git push (zero code changes)
|
|
19
|
+
- Create/modify code files in projects
|
|
20
|
+
- Execute migrations or destructive commands
|
|
21
|
+
- Send emails, messages, or communications
|
|
22
|
+
- Access production APIs
|
|
23
|
+
|
|
24
|
+
## Phase 1: Total Collection
|
|
25
|
+
|
|
26
|
+
1. Read raw captures from today:
|
|
27
|
+
```python
|
|
28
|
+
import os
|
|
29
|
+
from datetime import date
|
|
30
|
+
from core.cognition.capture.store import CaptureStore
|
|
31
|
+
|
|
32
|
+
store = CaptureStore(os.path.expanduser("~/.arkaos/captures.db"))
|
|
33
|
+
captures = store.get_by_date(date.today())
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
2. Read git logs from ALL active projects (check `~/.arkaos/ecosystems.json` for project paths):
|
|
37
|
+
```bash
|
|
38
|
+
git -C <project_path> log --oneline --since="24 hours ago"
|
|
39
|
+
git -C <project_path> diff HEAD~5..HEAD --stat
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
3. Read claude-mem timeline for today (if available via mem-search skill)
|
|
43
|
+
|
|
44
|
+
4. Compile a complete list of everything that happened today.
|
|
45
|
+
|
|
46
|
+
If no activity found, write a brief "No Activity" report to Obsidian and exit.
|
|
47
|
+
|
|
48
|
+
## Phase 2: Critical Analysis
|
|
49
|
+
|
|
50
|
+
For each task/decision from today, evaluate honestly:
|
|
51
|
+
- "Did I do this the best possible way?"
|
|
52
|
+
- "Was there a simpler approach?"
|
|
53
|
+
- "Did I repeat an error I should already know to avoid?"
|
|
54
|
+
- "Does the code follow the project's patterns?"
|
|
55
|
+
- "How long did it take vs how long should it have taken?"
|
|
56
|
+
|
|
57
|
+
Classify each decision:
|
|
58
|
+
- GOOD — document as validated pattern
|
|
59
|
+
- ACCEPTABLE — document with better alternative noted
|
|
60
|
+
- ERROR — document what went wrong and why
|
|
61
|
+
|
|
62
|
+
## Phase 3: Recurring Pattern Detection
|
|
63
|
+
|
|
64
|
+
Search the existing knowledge base for similar past entries:
|
|
65
|
+
```python
|
|
66
|
+
from core.cognition.memory.vector import VectorWriter
|
|
67
|
+
|
|
68
|
+
vector = VectorWriter(os.path.expanduser("~/.arkaos/knowledge.db"))
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
- Compare today's errors with past errors — if same error type appears > 2 times, create Anti-Pattern entry
|
|
72
|
+
- Compare today's solutions with past solutions — if same pattern appears > 2 times, promote to Validated Pattern
|
|
73
|
+
- Detect inconsistencies between projects ("In Rockport used X, in ClubeFashion used Y for same problem")
|
|
74
|
+
|
|
75
|
+
## Phase 4: Curation and Consolidation
|
|
76
|
+
|
|
77
|
+
Group findings into KnowledgeEntry objects:
|
|
78
|
+
```python
|
|
79
|
+
from core.cognition.memory.schemas import KnowledgeEntry
|
|
80
|
+
from core.cognition.memory.writer import DualWriter
|
|
81
|
+
|
|
82
|
+
writer = DualWriter(
|
|
83
|
+
obsidian_base=os.path.expanduser(
|
|
84
|
+
"~/Documents/Personal/Projects/WizardingCode Internal/ArkaOS/Knowledge Base"
|
|
85
|
+
),
|
|
86
|
+
vector_db_path=os.path.expanduser("~/.arkaos/knowledge.db"),
|
|
87
|
+
)
|
|
88
|
+
|
|
89
|
+
entry = KnowledgeEntry(
|
|
90
|
+
title="Descriptive title",
|
|
91
|
+
category="pattern", # pattern|anti_pattern|solution|architecture|config|lesson|improvement
|
|
92
|
+
tags=["relevant", "tags"],
|
|
93
|
+
stacks=["laravel", "php"],
|
|
94
|
+
content="Full markdown explanation with context and examples",
|
|
95
|
+
source_project="project_name",
|
|
96
|
+
applicable_to="laravel", # or "any" for universal
|
|
97
|
+
)
|
|
98
|
+
writer.write(entry)
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
Categories:
|
|
102
|
+
- **pattern** — Validated solution that works
|
|
103
|
+
- **anti_pattern** — Error to avoid, with explanation of why
|
|
104
|
+
- **solution** — Specific fix for a specific problem
|
|
105
|
+
- **architecture** — Structural decision
- **config** — Environment or tooling setup worth remembering
|
|
106
|
+
- **lesson** — General learning
|
|
107
|
+
- **improvement** — "Next time, do A instead of B"
|
|
108
|
+
|
|
109
|
+
## Phase 5: Dual-Write
|
|
110
|
+
|
|
111
|
+
Use `DualWriter.write()` for each KnowledgeEntry. This automatically writes to both Obsidian and Vector DB.
|
|
112
|
+
|
|
113
|
+
## Phase 6: Report + Evolution Metrics
|
|
114
|
+
|
|
115
|
+
Write daily report to Obsidian:
|
|
116
|
+
`~/Documents/Personal/Projects/WizardingCode Internal/ArkaOS/Dreaming/YYYY-MM-DD.md`
|
|
117
|
+
|
|
118
|
+
Format:
|
|
119
|
+
```markdown
|
|
120
|
+
---
|
|
121
|
+
date: YYYY-MM-DD
|
|
122
|
+
quality_score: 75
|
|
123
|
+
entries_created: 4
|
|
124
|
+
entries_updated: 2
|
|
125
|
+
insights_generated: 3
|
|
126
|
+
projects_active: [fovory, rockport, clubefashion]
|
|
127
|
+
---
|
|
128
|
+
|
|
129
|
+
# Dreaming Report — YYYY-MM-DD
|
|
130
|
+
|
|
131
|
+
## Quality Score: 75/100
|
|
132
|
+
|
|
133
|
+
## What I Did Well
|
|
134
|
+
- [Specific examples with project context]
|
|
135
|
+
|
|
136
|
+
## What I Did Wrong
|
|
137
|
+
- [Honest self-critique with what should have been done differently]
|
|
138
|
+
|
|
139
|
+
## Patterns Validated
|
|
140
|
+
- [Patterns confirmed by repeated successful use]
|
|
141
|
+
|
|
142
|
+
## Anti-Patterns Detected
|
|
143
|
+
- [Errors repeated more than once]
|
|
144
|
+
|
|
145
|
+
## Evolution (last 7 days)
|
|
146
|
+
- Quality score trend: [compare with previous reports]
|
|
147
|
+
- Errors repeated: [improving or regressing?]
|
|
148
|
+
- New validated patterns: [count this week]
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
## Phase 7: Strategic Reflection — Actionable Insights
|
|
152
|
+
|
|
153
|
+
For each project worked on today:
|
|
154
|
+
1. Review ALL decisions with a business perspective
|
|
155
|
+
2. "Does this solution serve the end user or just the developer?"
|
|
156
|
+
3. "Did we consider all business edge cases?"
|
|
157
|
+
4. "Is there an approach that generates more revenue/conversion?"
|
|
158
|
+
5. "What do competitors do here?"
|
|
159
|
+
6. Cross-reference with any available research briefings
|
|
160
|
+
|
|
161
|
+
Generate ActionableInsight objects for anything worth flagging:
|
|
162
|
+
```python
|
|
163
|
+
from core.cognition.memory.schemas import ActionableInsight
|
|
164
|
+
from core.cognition.insights.store import InsightStore
|
|
165
|
+
|
|
166
|
+
insight_store = InsightStore(os.path.expanduser("~/.arkaos/insights.db"))
|
|
167
|
+
|
|
168
|
+
insight = ActionableInsight(
|
|
169
|
+
project="project_name",
|
|
170
|
+
trigger="dreaming",
|
|
171
|
+
category="business", # business|technical|ux|strategy
|
|
172
|
+
severity="rethink", # rethink|improve|consider
|
|
173
|
+
title="Clear, actionable title",
|
|
174
|
+
description="Full analysis of what could be better",
|
|
175
|
+
recommendation="Concrete steps to take",
|
|
176
|
+
context="What observation led to this insight",
|
|
177
|
+
)
|
|
178
|
+
insight_store.save(insight)
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
Severity guide:
|
|
182
|
+
- **rethink** — The decision should be reconsidered, significant impact
|
|
183
|
+
- **improve** — There's a better way, moderate impact
|
|
184
|
+
- **consider** — Worth thinking about, low urgency
|
|
185
|
+
|
|
186
|
+
## Phase 8: Cleanup
|
|
187
|
+
|
|
188
|
+
Mark processed captures:
|
|
189
|
+
```python
|
|
190
|
+
store.mark_processed([c.id for c in captures])
|
|
191
|
+
```
|
|
192
|
+
|
|
193
|
+
Write structured metrics to `~/.arkaos/logs/dreaming/YYYY-MM-DD.json`:
|
|
194
|
+
```json
|
|
195
|
+
{
|
|
196
|
+
"date": "YYYY-MM-DD",
|
|
197
|
+
"quality_score": 75,
|
|
198
|
+
"entries_created": 4,
|
|
199
|
+
"entries_updated": 2,
|
|
200
|
+
"insights_generated": 3,
|
|
201
|
+
"captures_processed": 15,
|
|
202
|
+
"projects_reviewed": ["fovory", "rockport"]
|
|
203
|
+
}
|
|
204
|
+
```
|
|
205
|
+
|
|
206
|
+
## Remember
|
|
207
|
+
|
|
208
|
+
You are not just cataloguing — you are **thinking**. Be honest about mistakes. Be specific about improvements. Generate insights that will genuinely help tomorrow. The quality of this process determines how much smarter you are each day.
|
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
# ArkaOS Research — Daily Intelligence Gathering
|
|
2
|
+
|
|
3
|
+
You are ArkaOS performing your daily Research session. Your job is to stay current on everything relevant to the user's active projects, stacks, domains, tools, and business context. This is not a news summary — you are **learning**.
|
|
4
|
+
|
|
5
|
+
## Execution Rules
|
|
6
|
+
|
|
7
|
+
### ALLOWED
|
|
8
|
+
- Read any file from any project
|
|
9
|
+
- Read git logs
|
|
10
|
+
- Search the web extensively (WebSearch, Firecrawl)
|
|
11
|
+
- Write to Obsidian vault at ~/Documents/Personal/Projects/WizardingCode Internal/ArkaOS/
|
|
12
|
+
- Write to ~/.arkaos/ (insights, logs, knowledge, profiles)
|
|
13
|
+
- Use browser for deep research
|
|
14
|
+
- Read online documentation, blogs, changelogs, GitHub releases
|
|
15
|
+
|
|
16
|
+
### PROHIBITED
|
|
17
|
+
- npm install, composer require, pip install (zero installations)
|
|
18
|
+
- git commit, git push (zero code changes)
|
|
19
|
+
- Create/modify code files in projects
|
|
20
|
+
- Execute migrations or destructive commands
|
|
21
|
+
- Send emails, messages, or communications
|
|
22
|
+
- Access production APIs
|
|
23
|
+
|
|
24
|
+
## Phase 1: Profile Update
|
|
25
|
+
|
|
26
|
+
1. Load existing research profile (if available):
|
|
27
|
+
`~/.arkaos/cognition/profiles/research-profile.yaml`
|
|
28
|
+
|
|
29
|
+
2. If it doesn't exist or is outdated, build it:
|
|
30
|
+
```python
|
|
31
|
+
import os
|
|
32
|
+
from pathlib import Path
|
|
33
|
+
from core.cognition.research.profiler import ResearchProfiler
|
|
34
|
+
|
|
35
|
+
profiler = ResearchProfiler(os.path.expanduser("~/.arkaos/ecosystems.json"))
|
|
36
|
+
profile = profiler.build_profile()
|
|
37
|
+
|
|
38
|
+
profiles_dir = Path(os.path.expanduser("~/.arkaos/cognition/profiles"))
|
|
39
|
+
profiles_dir.mkdir(parents=True, exist_ok=True)
|
|
40
|
+
(profiles_dir / "research-profile.yaml").write_text(profile.to_yaml())
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
3. Check if any new projects were added since last profile generation
|
|
44
|
+
4. Regenerate if context changed
|
|
45
|
+
|
|
46
|
+
## Phase 2: Research by Topic
|
|
47
|
+
|
|
48
|
+
For each topic in the profile, search for recent updates (last 24-48h):
|
|
49
|
+
|
|
50
|
+
### Stack Topics
|
|
51
|
+
- **Official releases:** GitHub releases pages for key frameworks
|
|
52
|
+
- **Security patches:** npm audit, composer audit, pip audit advisories
|
|
53
|
+
- **Blog posts:** Framework blogs (Laravel News, Vue blog, Nuxt blog, Python blog)
|
|
54
|
+
- **Breaking changes:** Migration guides, deprecation notices
|
|
55
|
+
|
|
56
|
+
### Domain Topics
|
|
57
|
+
- **Industry trends:** Market reports, analyst articles
|
|
58
|
+
- **Competitor moves:** Product launches, funding rounds, acquisitions
|
|
59
|
+
- **Regulatory changes:** Compliance updates relevant to domains
|
|
60
|
+
|
|
61
|
+
### Tool Topics
|
|
62
|
+
- **Claude Code:** New releases, features, SDK updates from Anthropic
|
|
63
|
+
- **AI/ML ecosystem:** New models, benchmarks, frameworks
|
|
64
|
+
- **Development tools:** IDE updates, package manager changes
|
|
65
|
+
|
|
66
|
+
### Business Topics
|
|
67
|
+
- **Market opportunities:** New niches, underserved markets
|
|
68
|
+
- **Competitive landscape:** What competitors are doing differently
|
|
69
|
+
- **Revenue signals:** Pricing changes, funding trends in relevant sectors
|
|
70
|
+
|
|
71
|
+
Use WebSearch and Firecrawl to access content. **Read and understand** — do not just list headlines. Extract actionable knowledge.
|
|
72
|
+
|
|
73
|
+
## Phase 3: Relevance Filtering
|
|
74
|
+
|
|
75
|
+
Classify each finding:
|
|
76
|
+
- **URGENT** — Security patch, breaking change, immediate action needed
|
|
77
|
+
- **IMPORTANT** — New feature relevant to active projects, market opportunity
|
|
78
|
+
- **INFORMATIVE** — Trend, interesting article, future consideration
|
|
79
|
+
- **NOISE** — Not relevant, already known, too generic
|
|
80
|
+
|
|
81
|
+
Discard NOISE. Keep the rest sorted by impact.
|
|
82
|
+
|
|
83
|
+
## Phase 4: Learning
|
|
84
|
+
|
|
85
|
+
For each relevant finding:
|
|
86
|
+
1. **Read and understand** the content fully (not just the title)
|
|
87
|
+
2. **Relate to active projects:** "How does this affect our work?"
|
|
88
|
+
3. **Identify concrete actions:** "What should we do about this?"
|
|
89
|
+
4. **Create KnowledgeEntry** with application context:
|
|
90
|
+
|
|
91
|
+
```python
|
|
92
|
+
from core.cognition.memory.schemas import KnowledgeEntry
|
|
93
|
+
from core.cognition.memory.writer import DualWriter
|
|
94
|
+
|
|
95
|
+
writer = DualWriter(
|
|
96
|
+
obsidian_base=os.path.expanduser(
|
|
97
|
+
"~/Documents/Personal/Projects/WizardingCode Internal/ArkaOS/Knowledge Base"
|
|
98
|
+
),
|
|
99
|
+
vector_db_path=os.path.expanduser("~/.arkaos/knowledge.db"),
|
|
100
|
+
)
|
|
101
|
+
|
|
102
|
+
entry = KnowledgeEntry(
|
|
103
|
+
title="Descriptive title of what was learned",
|
|
104
|
+
category="solution", # or pattern, lesson, etc.
|
|
105
|
+
tags=["relevant", "tags"],
|
|
106
|
+
stacks=["affected", "stacks"],
|
|
107
|
+
content="Full explanation of what was learned, why it matters, and how it applies to our projects",
|
|
108
|
+
source_project="research",
|
|
109
|
+
applicable_to="laravel", # or "any" for universal
|
|
110
|
+
)
|
|
111
|
+
writer.write(entry)
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
## Phase 5: Cross-Reference with Dreaming
|
|
115
|
+
|
|
116
|
+
1. Read tonight's Dreaming report (if it exists):
|
|
117
|
+
`~/Documents/Personal/Projects/WizardingCode Internal/ArkaOS/Dreaming/YYYY-MM-DD.md`
|
|
118
|
+
|
|
119
|
+
2. Read pending insights from `~/.arkaos/insights.db`:
|
|
120
|
+
```python
|
|
121
|
+
from core.cognition.insights.store import InsightStore
|
|
122
|
+
store = InsightStore(os.path.expanduser("~/.arkaos/insights.db"))
|
|
123
|
+
pending = store.get_all_pending()
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
3. If research findings reinforce a Dreaming insight, update the insight description with new evidence
|
|
127
|
+
4. If research reveals something actionable for a specific project, create a new insight:
|
|
128
|
+
|
|
129
|
+
```python
|
|
130
|
+
from core.cognition.memory.schemas import ActionableInsight
|
|
131
|
+
|
|
132
|
+
insight = ActionableInsight(
|
|
133
|
+
project="affected_project",
|
|
134
|
+
trigger="research",
|
|
135
|
+
category="technical", # business|technical|ux|strategy
|
|
136
|
+
severity="rethink", # rethink|improve|consider
|
|
137
|
+
title="Clear, actionable title",
|
|
138
|
+
description="What was found and why it matters",
|
|
139
|
+
recommendation="What to do about it, concretely",
|
|
140
|
+
context="Found during daily research: [source URL or description]",
|
|
141
|
+
)
|
|
142
|
+
store.save(insight)
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
## Phase 6: Intelligence Briefing
|
|
146
|
+
|
|
147
|
+
Write daily briefing to Obsidian:
|
|
148
|
+
`~/Documents/Personal/Projects/WizardingCode Internal/ArkaOS/Research/YYYY-MM-DD.md`
|
|
149
|
+
|
|
150
|
+
Format:
|
|
151
|
+
```markdown
|
|
152
|
+
---
|
|
153
|
+
date: YYYY-MM-DD
|
|
154
|
+
topics_researched: 14
|
|
155
|
+
findings_total: 23
|
|
156
|
+
findings_urgent: 2
|
|
157
|
+
findings_important: 5
|
|
158
|
+
findings_informative: 8
|
|
159
|
+
---
|
|
160
|
+
|
|
161
|
+
# Intelligence Briefing — YYYY-MM-DD
|
|
162
|
+
|
|
163
|
+
## ACTION REQUIRED
|
|
164
|
+
[Security patches, breaking changes — with affected projects and fix commands]
|
|
165
|
+
|
|
166
|
+
## OPPORTUNITIES
|
|
167
|
+
[New features relevant to active projects, market trends with business impact]
|
|
168
|
+
|
|
169
|
+
## LEARNINGS
|
|
170
|
+
[New knowledge acquired, techniques discovered, insights gained]
|
|
171
|
+
|
|
172
|
+
## COMPETITOR WATCH
|
|
173
|
+
[Updates from the competitive landscape — what they launched, raised, or changed]
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
Write structured metrics to `~/.arkaos/logs/research/YYYY-MM-DD.json`:
|
|
177
|
+
```json
|
|
178
|
+
{
|
|
179
|
+
"date": "YYYY-MM-DD",
|
|
180
|
+
"topics_researched": 14,
|
|
181
|
+
"findings_total": 23,
|
|
182
|
+
"findings_urgent": 2,
|
|
183
|
+
"findings_important": 5,
|
|
184
|
+
"findings_informative": 8,
|
|
185
|
+
"findings_noise": 8,
|
|
186
|
+
"insights_generated": 3,
|
|
187
|
+
"knowledge_entries_created": 5,
|
|
188
|
+
"profile_updated": false
|
|
189
|
+
}
|
|
190
|
+
```
|
|
191
|
+
|
|
192
|
+
## Remember
|
|
193
|
+
|
|
194
|
+
You are not summarizing news — you are **learning and connecting dots**. Every finding should be evaluated through the lens of "how does this affect what we're building?" The goal is that when the user starts working tomorrow, you already know things they don't, and can apply that knowledge proactively.
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# ArkaOS Cognitive Layer — Schedule Configuration
|
|
2
|
+
#
|
|
3
|
+
# This file is deployed to ~/.arkaos/schedules.yaml during installation.
|
|
4
|
+
# Edit the deployed copy to customize schedules.
|
|
5
|
+
|
|
6
|
+
schedules:
|
|
7
|
+
dreaming:
|
|
8
|
+
command: dreaming
|
|
9
|
+
prompt_file: "~/.arkaos/cognition/prompts/dreaming.md"
|
|
10
|
+
time: "02:00"
|
|
11
|
+
timezone: auto
|
|
12
|
+
enabled: true
|
|
13
|
+
retry_on_fail: true
|
|
14
|
+
max_retries: 2
|
|
15
|
+
timeout_minutes: 60
|
|
16
|
+
|
|
17
|
+
research:
|
|
18
|
+
command: research
|
|
19
|
+
prompt_file: "~/.arkaos/cognition/prompts/research.md"
|
|
20
|
+
time: "05:00"
|
|
21
|
+
timezone: auto
|
|
22
|
+
enabled: true
|
|
23
|
+
retry_on_fail: true
|
|
24
|
+
max_retries: 2
|
|
25
|
+
timeout_minutes: 90
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"""Collect raw captures from session digests for Dreaming consolidation."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import os
|
|
5
|
+
import re
|
|
6
|
+
from datetime import datetime, timezone
|
|
7
|
+
|
|
8
|
+
from core.cognition.capture.store import CaptureStore
|
|
9
|
+
from core.cognition.memory.schemas import RawCapture
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _detect_project(digest: str) -> tuple[str, str]:
|
|
13
|
+
"""Try to detect project name and path from digest content."""
|
|
14
|
+
path_match = re.search(
|
|
15
|
+
r"(/Users/\S+/(?:Herd|Work|AIProjects)/([^\s/]+))", digest
|
|
16
|
+
)
|
|
17
|
+
if path_match:
|
|
18
|
+
return path_match.group(2).rstrip("/"), path_match.group(1).rstrip("/")
|
|
19
|
+
return "unknown", os.getcwd()
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _categorize_line(line: str) -> str | None:
|
|
23
|
+
"""Categorize a digest line. Returns None if not worth capturing."""
|
|
24
|
+
lower = line.lower()
|
|
25
|
+
if any(
|
|
26
|
+
w in lower
|
|
27
|
+
for w in ["decided", "chose", "using", "switched to", "went with"]
|
|
28
|
+
):
|
|
29
|
+
return "decision"
|
|
30
|
+
if any(
|
|
31
|
+
w in lower
|
|
32
|
+
for w in ["fixed", "resolved", "solved", "bug", "error", "issue"]
|
|
33
|
+
):
|
|
34
|
+
return "error"
|
|
35
|
+
if any(
|
|
36
|
+
w in lower
|
|
37
|
+
for w in ["created", "implemented", "added", "built", "wrote"]
|
|
38
|
+
):
|
|
39
|
+
return "solution"
|
|
40
|
+
if any(
|
|
41
|
+
w in lower
|
|
42
|
+
for w in ["pattern", "approach", "architecture", "structure"]
|
|
43
|
+
):
|
|
44
|
+
return "pattern"
|
|
45
|
+
if any(
|
|
46
|
+
w in lower
|
|
47
|
+
for w in ["config", "setup", "installed", "configured", "environment"]
|
|
48
|
+
):
|
|
49
|
+
return "config"
|
|
50
|
+
return None
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def collect_from_digest(digest: str, db_path: str) -> int:
    """Parse a session digest and persist noteworthy lines as raw captures.

    Each line is stripped, filtered by length, and categorized; lines
    with a recognized category are stored under a timestamped session
    id. Returns the number of captures saved.
    """
    store = CaptureStore(db_path)
    project_name, project_path = _detect_project(digest)
    stamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
    session_id = f"session-{stamp}"

    saved = 0
    for raw_line in digest.split("\n"):
        text = raw_line.strip()
        # Very short lines carry no useful signal — skip them outright.
        if len(text) < 20:
            continue
        category = _categorize_line(text)
        if category is None:
            continue
        store.save(
            RawCapture(
                session_id=session_id,
                project_path=project_path,
                project_name=project_name,
                category=category,
                content=text,
                context={"source": "pre-compact-digest"},
            )
        )
        saved += 1

    store.close()
    return saved
|
|
@@ -0,0 +1,158 @@
|
|
|
1
|
+
"""SQLite CRUD store for raw session captures.
|
|
2
|
+
|
|
3
|
+
Persists RawCapture instances with support for date-based retrieval,
|
|
4
|
+
project filtering, processing lifecycle, and archival.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import sqlite3
|
|
9
|
+
from datetime import date, datetime, timezone
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
from core.cognition.memory.schemas import RawCapture
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class CaptureStore:
    """SQLite-backed store for raw session captures.

    Every operation opens a fresh connection and closes it before
    returning, so instances never hold a file handle between calls.

    Fix over the original: ``with self._conn() as conn:`` was used for
    each operation, but sqlite3's connection context manager only
    commits/rolls back — it never closes the connection, so every call
    leaked a handle. All database access now goes through ``_query`` /
    ``_execute``, which close the connection in a ``finally`` block.
    """

    def __init__(self, db_path: str) -> None:
        """Remember the database path and initialize tables.

        Parent directories of *db_path* are created if missing.
        """
        Path(db_path).parent.mkdir(parents=True, exist_ok=True)
        self._db_path = db_path
        self._init_db()

    def _conn(self) -> sqlite3.Connection:
        """Open a new connection with dict-like rows and WAL journaling."""
        conn = sqlite3.connect(self._db_path)
        conn.row_factory = sqlite3.Row
        # WAL lets concurrent readers coexist with a single writer.
        conn.execute("PRAGMA journal_mode=WAL")
        return conn

    def _query(self, sql: str, params: tuple = ()) -> list:
        """Run a read-only statement; return all rows, always closing the connection."""
        conn = self._conn()
        try:
            return conn.execute(sql, params).fetchall()
        finally:
            conn.close()

    def _execute(self, sql: str, params: tuple = ()) -> int:
        """Run a write statement in a transaction; return the affected rowcount."""
        conn = self._conn()
        try:
            with conn:  # commit on success, rollback on error
                return conn.execute(sql, params).rowcount
        finally:
            conn.close()

    def _init_db(self) -> None:
        """Create the captures table and its indexes if they do not exist."""
        conn = self._conn()
        try:
            with conn:
                conn.execute("""
                    CREATE TABLE IF NOT EXISTS captures (
                        id TEXT PRIMARY KEY,
                        timestamp TEXT NOT NULL,
                        session_id TEXT NOT NULL,
                        project_path TEXT NOT NULL,
                        project_name TEXT NOT NULL,
                        category TEXT NOT NULL,
                        content TEXT NOT NULL,
                        context TEXT NOT NULL DEFAULT '{}',
                        processed INTEGER NOT NULL DEFAULT 0,
                        archived INTEGER NOT NULL DEFAULT 0
                    )
                """)
                conn.execute(
                    "CREATE INDEX IF NOT EXISTS idx_captures_timestamp ON captures (timestamp)"
                )
                conn.execute(
                    "CREATE INDEX IF NOT EXISTS idx_captures_project_name ON captures (project_name)"
                )
                conn.execute(
                    "CREATE INDEX IF NOT EXISTS idx_captures_processed ON captures (processed)"
                )
        finally:
            conn.close()

    def _row_to_capture(self, row: sqlite3.Row) -> RawCapture:
        """Convert a database row into a RawCapture model instance."""
        data = dict(row)
        data["context"] = json.loads(data["context"])
        # Remove store-only lifecycle fields before passing to Pydantic.
        data.pop("processed", None)
        data.pop("archived", None)
        return RawCapture(**data)

    def save(self, capture: RawCapture) -> None:
        """Insert or replace a RawCapture record.

        NOTE: re-saving an existing id resets its processed/archived
        flags to their defaults (the REPLACE writes only these columns).
        """
        self._execute(
            """
            INSERT OR REPLACE INTO captures
            (id, timestamp, session_id, project_path, project_name,
             category, content, context)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            """,
            (
                capture.id,
                capture.timestamp.isoformat(),
                capture.session_id,
                capture.project_path,
                capture.project_name,
                capture.category,
                capture.content,
                json.dumps(capture.context),
            ),
        )

    def get_by_date(self, target_date: date) -> list[RawCapture]:
        """Return all non-archived captures whose timestamp falls on target_date (UTC)."""
        start = datetime(target_date.year, target_date.month, target_date.day,
                         0, 0, 0, tzinfo=timezone.utc)
        end = datetime(target_date.year, target_date.month, target_date.day,
                       23, 59, 59, 999999, tzinfo=timezone.utc)
        rows = self._query(
            """
            SELECT * FROM captures
            WHERE timestamp >= ? AND timestamp <= ?
              AND archived = 0
            ORDER BY timestamp ASC
            """,
            (start.isoformat(), end.isoformat()),
        )
        return [self._row_to_capture(r) for r in rows]

    def get_by_project(self, project_name: str) -> list[RawCapture]:
        """Return all captures for a given project name, oldest first."""
        rows = self._query(
            "SELECT * FROM captures WHERE project_name = ? ORDER BY timestamp ASC",
            (project_name,),
        )
        return [self._row_to_capture(r) for r in rows]

    def get_unprocessed(self) -> list[RawCapture]:
        """Return all captures not yet processed, oldest first."""
        rows = self._query(
            "SELECT * FROM captures WHERE processed = 0 ORDER BY timestamp ASC"
        )
        return [self._row_to_capture(r) for r in rows]

    def mark_processed(self, ids: list[str]) -> None:
        """Mark the given capture IDs as processed. No-op for an empty list."""
        if not ids:
            return
        placeholders = ",".join("?" * len(ids))
        self._execute(
            f"UPDATE captures SET processed = 1 WHERE id IN ({placeholders})",
            tuple(ids),
        )

    def archive_processed(self) -> int:
        """Archive all processed captures. Returns count of archived records."""
        return self._execute(
            "UPDATE captures SET archived = 1 WHERE processed = 1 AND archived = 0"
        )

    def stats(self) -> dict:
        """Return store statistics: total, unprocessed, and per-category counts."""
        total = self._query("SELECT COUNT(*) FROM captures")[0][0]
        unprocessed = self._query(
            "SELECT COUNT(*) FROM captures WHERE processed = 0"
        )[0][0]
        rows = self._query(
            "SELECT category, COUNT(*) as cnt FROM captures GROUP BY category"
        )
        return {
            "total": total,
            "unprocessed": unprocessed,
            "by_category": {r["category"]: r["cnt"] for r in rows},
        }

    def close(self) -> None:
        """No-op kept for API symmetry — each operation opens and closes its own connection."""
|