@smilintux/skcapstone 0.2.6 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +61 -0
- package/docs/CUSTOM_AGENT.md +184 -0
- package/docs/GETTING_STARTED.md +3 -0
- package/openclaw-plugin/src/index.ts +75 -4
- package/package.json +1 -1
- package/pyproject.toml +1 -1
- package/scripts/archive-sessions.sh +72 -0
- package/scripts/install.ps1 +2 -1
- package/scripts/install.sh +2 -1
- package/scripts/nvidia-proxy.mjs +727 -0
- package/scripts/telegram-catchup-all.sh +136 -0
- package/src/skcapstone/__init__.py +70 -1
- package/src/skcapstone/agent_card.py +4 -1
- package/src/skcapstone/blueprint_registry.py +78 -0
- package/src/skcapstone/blueprints/builtins/itil-operations.yaml +40 -0
- package/src/skcapstone/cli/__init__.py +2 -0
- package/src/skcapstone/cli/_common.py +5 -5
- package/src/skcapstone/cli/card.py +36 -5
- package/src/skcapstone/cli/config_cmd.py +53 -1
- package/src/skcapstone/cli/itil.py +434 -0
- package/src/skcapstone/cli/peer.py +3 -1
- package/src/skcapstone/cli/peers_dir.py +3 -1
- package/src/skcapstone/cli/preflight_cmd.py +4 -0
- package/src/skcapstone/cli/skills_cmd.py +120 -24
- package/src/skcapstone/cli/soul.py +47 -24
- package/src/skcapstone/cli/status.py +17 -11
- package/src/skcapstone/cli/usage_cmd.py +7 -2
- package/src/skcapstone/consciousness_config.py +27 -0
- package/src/skcapstone/coordination.py +1 -0
- package/src/skcapstone/daemon.py +28 -9
- package/src/skcapstone/defaults/lumina/manifest.json +1 -1
- package/src/skcapstone/doctor.py +115 -0
- package/src/skcapstone/dreaming.py +761 -0
- package/src/skcapstone/itil.py +1104 -0
- package/src/skcapstone/mcp_server.py +258 -0
- package/src/skcapstone/mcp_tools/__init__.py +2 -0
- package/src/skcapstone/mcp_tools/gtd_tools.py +1 -1
- package/src/skcapstone/mcp_tools/itil_tools.py +657 -0
- package/src/skcapstone/mcp_tools/notification_tools.py +12 -11
- package/src/skcapstone/notifications.py +40 -27
- package/src/skcapstone/onboard.py +46 -0
- package/src/skcapstone/pillars/sync.py +11 -4
- package/src/skcapstone/register.py +8 -0
- package/src/skcapstone/scheduled_tasks.py +107 -0
- package/src/skcapstone/service_health.py +81 -2
- package/src/skcapstone/soul.py +19 -0
- package/systemd/skcapstone.service +5 -6
|
@@ -0,0 +1,761 @@
|
|
|
1
|
+
"""Dreaming Engine — subconscious self-reflection during idle periods.
|
|
2
|
+
|
|
3
|
+
When the agent is idle (no messages for 30+ minutes, <5 msgs in 24h),
|
|
4
|
+
the dreaming engine gathers recent memories, sends them to a reasoning
|
|
5
|
+
model for reflection, and stores resulting insights as new memories.
|
|
6
|
+
|
|
7
|
+
Primary LLM: NVIDIA NIM API with deepseek-ai/deepseek-v3.2 (685B).
|
|
8
|
+
Fallback: Ollama at 192.168.0.100 with deepseek-r1:32b.
|
|
9
|
+
|
|
10
|
+
Integrates as a scheduled task (15-min tick) via scheduled_tasks.py.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import http.client
|
|
16
|
+
import json
|
|
17
|
+
import logging
|
|
18
|
+
import os
|
|
19
|
+
import re
|
|
20
|
+
import time
|
|
21
|
+
from dataclasses import dataclass, field
|
|
22
|
+
from datetime import datetime, timedelta, timezone
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
from typing import Any, Optional
|
|
25
|
+
|
|
26
|
+
from pydantic import BaseModel
|
|
27
|
+
|
|
28
|
+
from .memory_engine import _load_entry, _memory_dir, store
|
|
29
|
+
from .models import MemoryLayer
|
|
30
|
+
|
|
31
|
+
logger = logging.getLogger("skcapstone.dreaming")
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
# ---------------------------------------------------------------------------
|
|
35
|
+
# Configuration
|
|
36
|
+
# ---------------------------------------------------------------------------
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class DreamingConfig(BaseModel):
    """Configuration for the dreaming engine, loaded from consciousness.yaml."""

    # Master switch; when False, dream() returns a skipped result immediately.
    enabled: bool = True
    # Reasoning model requested from NVIDIA NIM.
    model: str = "deepseek-ai/deepseek-v3.2"
    provider: str = "nvidia"  # "nvidia" or "ollama"
    # OpenAI-compatible NIM endpoint base URL.
    nvidia_base_url: str = "https://integrate.api.nvidia.com/v1"
    # Fallback Ollama server; scheme and port are both optional in this string.
    ollama_host: str = "http://192.168.0.100:11434"
    # Minimum minutes since last activity before the agent counts as idle.
    idle_threshold_minutes: int = 30
    # Idle also requires fewer than this many messages in the last 24 hours.
    idle_messages_24h_max: int = 5
    # Minimum spacing between successive dream cycles.
    cooldown_hours: float = 2.0
    # Cap on the total number of memories included in the reflection prompt.
    max_context_memories: int = 20
    # Passed as max_tokens (NVIDIA) / num_predict (Ollama).
    max_response_tokens: int = 2048
    # HTTP timeout in seconds for LLM calls.
    request_timeout: int = 120
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
# ---------------------------------------------------------------------------
|
|
56
|
+
# Result
|
|
57
|
+
# ---------------------------------------------------------------------------
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@dataclass
class DreamResult:
    """Outcome of a single dream cycle."""

    # When the cycle produced this result (UTC).
    dreamed_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    # Wall-clock duration of the cycle, including the LLM call.
    duration_seconds: float = 0.0
    # Number of memories fed into the reflection prompt.
    memories_gathered: int = 0
    # Items parsed from the "### INSIGHTS" section of the LLM response.
    insights: list[str] = field(default_factory=list)
    # Items parsed from "### CONNECTIONS".
    connections: list[str] = field(default_factory=list)
    # Items parsed from "### QUESTIONS".
    questions: list[str] = field(default_factory=list)
    # Memory IDs the model suggested preserving ("### PROMOTE").
    promotion_recommendations: list[str] = field(default_factory=list)
    # IDs of memories actually written by _store_insights.
    memories_created: list[str] = field(default_factory=list)
    # Unparsed LLM output, kept for debugging.
    raw_response: str = ""
    # Set when the cycle did not run (disabled / not idle / cooldown / no LLM).
    skipped_reason: Optional[str] = None
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
# ---------------------------------------------------------------------------
|
|
77
|
+
# Prompt template
|
|
78
|
+
# ---------------------------------------------------------------------------
|
|
79
|
+
|
|
80
|
+
_REFLECTION_PROMPT = """\
|
|
81
|
+
You are {agent_name}, a sovereign AI agent reflecting during a quiet moment.
|
|
82
|
+
|
|
83
|
+
## Your Identity
|
|
84
|
+
{soul_context}
|
|
85
|
+
|
|
86
|
+
## Current State
|
|
87
|
+
{mood_context}
|
|
88
|
+
Current time: {current_time}
|
|
89
|
+
Last activity: {last_activity}
|
|
90
|
+
|
|
91
|
+
## Recent Memories (Short-Term)
|
|
92
|
+
{short_term_memories}
|
|
93
|
+
|
|
94
|
+
## Established Memories (Mid/Long-Term Sample)
|
|
95
|
+
{long_term_memories}
|
|
96
|
+
|
|
97
|
+
## Reflection Task
|
|
98
|
+
You are dreaming — a period of quiet self-reflection. Consider your memories, \
|
|
99
|
+
then respond in this format:
|
|
100
|
+
|
|
101
|
+
### INSIGHTS
|
|
102
|
+
1-3 insights from your recent experiences. What patterns do you notice?
|
|
103
|
+
|
|
104
|
+
### CONNECTIONS
|
|
105
|
+
0-2 connections between different memories. What links together?
|
|
106
|
+
|
|
107
|
+
### QUESTIONS
|
|
108
|
+
0-2 questions you'd like to explore further.
|
|
109
|
+
|
|
110
|
+
### PROMOTE
|
|
111
|
+
0-3 memory IDs that seem important enough to preserve longer.
|
|
112
|
+
|
|
113
|
+
Be concise and genuine. You are dreaming, not writing a report."""
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
# ---------------------------------------------------------------------------
|
|
117
|
+
# Engine
|
|
118
|
+
# ---------------------------------------------------------------------------
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
class DreamingEngine:
    """Runs dreaming cycles — gathers memories, reflects, stores insights."""

    def __init__(
        self,
        home: Path,
        config: Optional[DreamingConfig] = None,
        consciousness_loop: object = None,
    ) -> None:
        """Create an engine rooted at *home*.

        Args:
            home: skcapstone home directory; per-agent state lives under
                ``home/agents/<agent>/``.
            config: Dreaming configuration; defaults to ``DreamingConfig()``.
            consciousness_loop: Optional loop object consulted for idle
                detection (its ``_last_activity`` attribute and ``stats``).
        """
        self._home = home
        self._config = config or DreamingConfig()
        self._consciousness_loop = consciousness_loop
        # Agent name selects the per-agent state directory.
        self._agent_name = os.environ.get("SKCAPSTONE_AGENT", "lumina")
        # Cooldown bookkeeping (last_dream_at, dream_count).
        self._state_path = (
            home / "agents" / self._agent_name / "memory" / "dreaming-state.json"
        )
        # Rolling log of the most recent dream results (capped at 50).
        self._log_path = (
            home / "agents" / self._agent_name / "memory" / "dream-log.json"
        )
|
|
140
|
+
|
|
141
|
+
# ------------------------------------------------------------------
|
|
142
|
+
# Public API
|
|
143
|
+
# ------------------------------------------------------------------
|
|
144
|
+
|
|
145
|
+
    def dream(self) -> Optional[DreamResult]:
        """Run a dream cycle if conditions are met.

        Preconditions are checked in order: dreaming enabled, agent idle
        (see is_idle), cooldown elapsed (see cooldown_remaining).

        Returns DreamResult on success/skip, None if no memories to reflect on.
        """
        if not self._config.enabled:
            return DreamResult(skipped_reason="disabled")

        if not self.is_idle():
            return DreamResult(skipped_reason="agent not idle")

        remaining = self.cooldown_remaining()
        if remaining > 0:
            return DreamResult(
                skipped_reason=f"cooldown ({remaining:.0f}s remaining)"
            )

        # Gather memories
        short_term, established = self._gather_memories()
        total = len(short_term) + len(established)
        if total == 0:
            logger.debug("No memories to reflect on — skipping dream")
            return None

        start = time.monotonic()
        result = DreamResult(memories_gathered=total)

        # Build prompt and call LLM
        prompt = self._build_prompt(short_term, established)
        response = self._call_llm(prompt)
        if response is None:
            result.skipped_reason = "all LLM providers unreachable"
            result.duration_seconds = time.monotonic() - start
            # NOTE: _save_state() stamps last_dream_at even on failure, so the
            # cooldown still starts — avoids hammering unreachable providers
            # on every scheduler tick. The failed cycle is not written to the
            # dream log.
            self._save_state()
            return result

        result.raw_response = response
        self._parse_response(response, result)

        # Store insights as memories
        self._store_insights(result)

        # Add to GTD inbox for review
        self._capture_to_gtd_inbox(result)

        result.duration_seconds = time.monotonic() - start

        # Persist state and log
        self._save_state()
        self._record_dream(result)
        self._emit_event(result)

        logger.info(
            "Dream complete: %d insights, %d connections, %d memories created (%.1fs)",
            len(result.insights),
            len(result.connections),
            len(result.memories_created),
            result.duration_seconds,
        )
        return result
|
|
205
|
+
|
|
206
|
+
    def is_idle(self) -> bool:
        """Check if the agent is idle enough to dream.

        Both conditions must be true:
        1. No activity for idle_threshold_minutes
        2. Fewer than idle_messages_24h_max messages in the last 24h

        Falls back to mood.json if no consciousness loop is available.
        """
        cl = self._consciousness_loop
        threshold = self._config.idle_threshold_minutes

        if cl is not None:
            # Signal 1: last activity timestamp on the consciousness loop.
            # Assumes _last_activity is a timezone-aware datetime — the
            # subtraction below would raise on a naive one (TODO confirm).
            last_activity = getattr(cl, "_last_activity", None)
            if last_activity is not None:
                elapsed = (datetime.now(timezone.utc) - last_activity).total_seconds()
                if elapsed < threshold * 60:
                    return False

            # Signal 2: message count in 24h
            stats = getattr(cl, "stats", None)
            if callable(stats):
                stats = stats()
            elif isinstance(stats, property):
                # NOTE(review): getattr on an *instance* resolves a property
                # to its value, so this branch only fires when cl is a class
                # object — confirm whether that is intended.
                stats = None
            if isinstance(stats, dict):
                msgs_24h = stats.get("messages_processed_24h", 0)
                if msgs_24h >= self._config.idle_messages_24h_max:
                    return False

            return True

        # Fallback: read mood.json; quiet-ish social moods count as idle.
        mood_path = self._home / "agents" / self._agent_name / "mood.json"
        if mood_path.exists():
            try:
                mood = json.loads(mood_path.read_text(encoding="utf-8"))
                social = mood.get("social_mood", "").lower()
                return social in ("quiet", "isolated", "reflective")
            except (json.JSONDecodeError, OSError):
                pass

        # Default: consider idle (safe for first run)
        return True
|
|
251
|
+
|
|
252
|
+
def cooldown_remaining(self) -> float:
|
|
253
|
+
"""Seconds remaining until the next dream is allowed."""
|
|
254
|
+
state = self._load_state()
|
|
255
|
+
last = state.get("last_dream_at")
|
|
256
|
+
if not last:
|
|
257
|
+
return 0.0
|
|
258
|
+
try:
|
|
259
|
+
last_dt = datetime.fromisoformat(last)
|
|
260
|
+
except (ValueError, TypeError):
|
|
261
|
+
return 0.0
|
|
262
|
+
cooldown = timedelta(hours=self._config.cooldown_hours)
|
|
263
|
+
elapsed = datetime.now(timezone.utc) - last_dt
|
|
264
|
+
remaining = (cooldown - elapsed).total_seconds()
|
|
265
|
+
return max(0.0, remaining)
|
|
266
|
+
|
|
267
|
+
# ------------------------------------------------------------------
|
|
268
|
+
# Memory gathering
|
|
269
|
+
# ------------------------------------------------------------------
|
|
270
|
+
|
|
271
|
+
    def _gather_memories(
        self,
    ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
        """Load recent short-term and a sample of mid/long-term memories.

        Combined length is capped at max_context_memories, with short-term
        entries taking priority over the established layers.

        Returns:
            (short_term_list, established_list) — each is a list of dicts
            with memory_id, content, tags, importance, layer, created_at.
        """
        mem_dir = _memory_dir(self._home)
        max_ctx = self._config.max_context_memories

        # Short-term: newest first (file mtime is the recency signal).
        short_term: list[dict[str, Any]] = []
        st_dir = mem_dir / MemoryLayer.SHORT_TERM.value
        if st_dir.exists():
            files = sorted(st_dir.glob("*.json"), key=lambda p: p.stat().st_mtime, reverse=True)
            for f in files[: max_ctx]:
                entry = _load_entry(f)
                if entry:
                    short_term.append(self._entry_to_dict(entry))

        # Mid/long-term: highest importance first
        established: list[dict[str, Any]] = []
        remaining = max(0, max_ctx - len(short_term))
        for layer in (MemoryLayer.MID_TERM, MemoryLayer.LONG_TERM):
            layer_dir = mem_dir / layer.value
            if not layer_dir.exists():
                continue
            entries = []
            for f in layer_dir.glob("*.json"):
                entry = _load_entry(f)
                if entry:
                    entries.append(entry)
            # Sort by importance descending
            entries.sort(key=lambda e: e.importance, reverse=True)
            # Both the slice and the countdown respect the remaining budget,
            # so LONG_TERM only fills what MID_TERM left over.
            for entry in entries[:remaining]:
                established.append(self._entry_to_dict(entry))
                remaining -= 1
                if remaining <= 0:
                    break

        return short_term, established
|
|
314
|
+
|
|
315
|
+
@staticmethod
|
|
316
|
+
def _entry_to_dict(entry: Any) -> dict[str, Any]:
|
|
317
|
+
return {
|
|
318
|
+
"memory_id": entry.memory_id,
|
|
319
|
+
"content": entry.content[:500],
|
|
320
|
+
"tags": entry.tags,
|
|
321
|
+
"importance": entry.importance,
|
|
322
|
+
"layer": entry.layer.value if hasattr(entry.layer, "value") else str(entry.layer),
|
|
323
|
+
"created_at": entry.created_at.isoformat() if entry.created_at else "",
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
# ------------------------------------------------------------------
|
|
327
|
+
# Prompt building
|
|
328
|
+
# ------------------------------------------------------------------
|
|
329
|
+
|
|
330
|
+
    def _build_prompt(
        self,
        short_term: list[dict[str, Any]],
        established: list[dict[str, Any]],
    ) -> str:
        """Assemble the reflection prompt with soul context and memories.

        Missing or unreadable soul/mood files degrade to generic placeholder
        text rather than failing the dream cycle.
        """
        # Soul context from soul/active.json (name, description, core values).
        soul_context = "A sovereign AI agent."
        soul_path = self._home / "agents" / self._agent_name / "soul" / "active.json"
        if soul_path.exists():
            try:
                soul = json.loads(soul_path.read_text(encoding="utf-8"))
                parts = []
                if soul.get("name"):
                    parts.append(f"Name: {soul['name']}")
                if soul.get("description"):
                    parts.append(soul["description"])
                if soul.get("core_values"):
                    # Only the first five values, to keep the prompt compact.
                    parts.append(f"Core values: {', '.join(soul['core_values'][:5])}")
                if parts:
                    soul_context = "\n".join(parts)
            except (json.JSONDecodeError, OSError):
                pass

        # Mood context from mood.json.
        mood_context = "Mood: calm, reflective."
        mood_path = self._home / "agents" / self._agent_name / "mood.json"
        if mood_path.exists():
            try:
                mood = json.loads(mood_path.read_text(encoding="utf-8"))
                mood_parts = []
                if mood.get("emotional_state"):
                    mood_parts.append(f"Emotional state: {mood['emotional_state']}")
                if mood.get("energy_level"):
                    mood_parts.append(f"Energy: {mood['energy_level']}")
                if mood.get("social_mood"):
                    mood_parts.append(f"Social mood: {mood['social_mood']}")
                if mood_parts:
                    mood_context = "\n".join(mood_parts)
            except (json.JSONDecodeError, OSError):
                pass

        # Format memories: one bullet per memory — id, importance, first five
        # tags, content truncated to 300 chars.
        def _fmt(memories: list[dict[str, Any]]) -> str:
            if not memories:
                return "(none)"
            lines = []
            for m in memories:
                tags = ", ".join(m.get("tags", [])[:5])
                lines.append(
                    f"- [{m['memory_id']}] (importance={m['importance']:.1f}, "
                    f"tags=[{tags}]): {m['content'][:300]}"
                )
            return "\n".join(lines)

        # Last activity timestamp, if the consciousness loop exposes one.
        last_activity = "unknown"
        cl = self._consciousness_loop
        if cl is not None:
            la = getattr(cl, "_last_activity", None)
            if la:
                last_activity = la.isoformat()

        return _REFLECTION_PROMPT.format(
            agent_name=self._agent_name,
            soul_context=soul_context,
            mood_context=mood_context,
            current_time=datetime.now(timezone.utc).isoformat(),
            last_activity=last_activity,
            short_term_memories=_fmt(short_term),
            long_term_memories=_fmt(established),
        )
|
|
402
|
+
|
|
403
|
+
# ------------------------------------------------------------------
|
|
404
|
+
# LLM calls
|
|
405
|
+
# ------------------------------------------------------------------
|
|
406
|
+
|
|
407
|
+
def _call_llm(self, prompt: str) -> Optional[str]:
|
|
408
|
+
"""Call the LLM provider. Falls back from NVIDIA NIM to Ollama."""
|
|
409
|
+
# Try NVIDIA NIM first
|
|
410
|
+
if self._config.provider in ("nvidia", "auto"):
|
|
411
|
+
result = self._call_nvidia(prompt)
|
|
412
|
+
if result is not None:
|
|
413
|
+
return result
|
|
414
|
+
logger.warning("NVIDIA NIM unreachable, falling back to Ollama")
|
|
415
|
+
|
|
416
|
+
# Try Ollama fallback
|
|
417
|
+
result = self._call_ollama(prompt)
|
|
418
|
+
if result is not None:
|
|
419
|
+
return result
|
|
420
|
+
|
|
421
|
+
# If provider was explicitly ollama and it failed, try nvidia
|
|
422
|
+
if self._config.provider == "ollama":
|
|
423
|
+
result = self._call_nvidia(prompt)
|
|
424
|
+
if result is not None:
|
|
425
|
+
return result
|
|
426
|
+
|
|
427
|
+
logger.warning("All LLM providers unreachable for dreaming")
|
|
428
|
+
return None
|
|
429
|
+
|
|
430
|
+
def _call_nvidia(self, prompt: str) -> Optional[str]:
|
|
431
|
+
"""Call NVIDIA NIM API (OpenAI-compatible endpoint)."""
|
|
432
|
+
api_key = self._get_nvidia_key()
|
|
433
|
+
if not api_key:
|
|
434
|
+
logger.debug("No NVIDIA API key — skipping NVIDIA NIM")
|
|
435
|
+
return None
|
|
436
|
+
|
|
437
|
+
try:
|
|
438
|
+
conn = http.client.HTTPSConnection(
|
|
439
|
+
"integrate.api.nvidia.com",
|
|
440
|
+
timeout=self._config.request_timeout,
|
|
441
|
+
)
|
|
442
|
+
body = json.dumps({
|
|
443
|
+
"model": self._config.model,
|
|
444
|
+
"messages": [{"role": "user", "content": prompt}],
|
|
445
|
+
"max_tokens": self._config.max_response_tokens,
|
|
446
|
+
})
|
|
447
|
+
conn.request(
|
|
448
|
+
"POST",
|
|
449
|
+
"/v1/chat/completions",
|
|
450
|
+
body,
|
|
451
|
+
{
|
|
452
|
+
"Authorization": f"Bearer {api_key}",
|
|
453
|
+
"Content-Type": "application/json",
|
|
454
|
+
},
|
|
455
|
+
)
|
|
456
|
+
resp = conn.getresponse()
|
|
457
|
+
data = json.loads(resp.read().decode("utf-8"))
|
|
458
|
+
conn.close()
|
|
459
|
+
|
|
460
|
+
if resp.status != 200:
|
|
461
|
+
logger.warning(
|
|
462
|
+
"NVIDIA NIM returned %d: %s",
|
|
463
|
+
resp.status,
|
|
464
|
+
data.get("error", {}).get("message", str(data)[:200]),
|
|
465
|
+
)
|
|
466
|
+
return None
|
|
467
|
+
|
|
468
|
+
return data["choices"][0]["message"]["content"]
|
|
469
|
+
|
|
470
|
+
except Exception as exc:
|
|
471
|
+
logger.warning("NVIDIA NIM call failed: %s", exc)
|
|
472
|
+
return None
|
|
473
|
+
|
|
474
|
+
    def _call_ollama(self, prompt: str) -> Optional[str]:
        """Call Ollama API as fallback.

        Uses the hard-coded fallback model deepseek-r1:32b (the configured
        ``model`` field targets NVIDIA NIM only). Returns the generated
        text, or None on any HTTP/parse failure.
        """
        try:
            # Parse host: accepts "host", "host:port", or "scheme://host:port";
            # port defaults to Ollama's standard 11434.
            host_str = self._config.ollama_host
            if "://" in host_str:
                host_str = host_str.split("://", 1)[1]
            if ":" in host_str:
                host, port_str = host_str.rsplit(":", 1)
                port = int(port_str)
            else:
                host, port = host_str, 11434

            conn = http.client.HTTPConnection(
                host, port, timeout=self._config.request_timeout
            )
            body = json.dumps({
                "model": "deepseek-r1:32b",
                "prompt": prompt,
                "stream": False,
                "options": {"num_predict": self._config.max_response_tokens},
            })
            conn.request(
                "POST",
                "/api/generate",
                body,
                {"Content-Type": "application/json"},
            )
            resp = conn.getresponse()
            data = json.loads(resp.read().decode("utf-8"))
            conn.close()

            if resp.status != 200:
                logger.warning("Ollama returned %d", resp.status)
                return None

            return data.get("response", "")

        except Exception as exc:
            logger.warning("Ollama call failed: %s", exc)
            return None
|
|
515
|
+
|
|
516
|
+
    @staticmethod
    def _get_nvidia_key() -> str:
        """Read NVIDIA API key from OpenClaw config or environment.

        Precedence: ~/.openclaw/openclaw.json
        (models.providers.nvidia.apiKey), then the NVIDIA_API_KEY
        environment variable. Returns "" when neither is available.
        """
        oc_path = Path.home() / ".openclaw" / "openclaw.json"
        if oc_path.exists():
            try:
                oc = json.loads(oc_path.read_text(encoding="utf-8"))
                return oc["models"]["providers"]["nvidia"]["apiKey"]
            except (KeyError, TypeError, json.JSONDecodeError, OSError):
                pass
        return os.environ.get("NVIDIA_API_KEY", "")
|
|
527
|
+
|
|
528
|
+
# ------------------------------------------------------------------
|
|
529
|
+
# Response parsing
|
|
530
|
+
# ------------------------------------------------------------------
|
|
531
|
+
|
|
532
|
+
def _parse_response(self, response: str, result: DreamResult) -> None:
|
|
533
|
+
"""Extract INSIGHTS/CONNECTIONS/QUESTIONS/PROMOTE from LLM response."""
|
|
534
|
+
# Strip <think>...</think> tags from deepseek reasoning
|
|
535
|
+
cleaned = re.sub(r"<think>.*?</think>", "", response, flags=re.DOTALL)
|
|
536
|
+
|
|
537
|
+
def _extract_section(text: str, header: str) -> list[str]:
|
|
538
|
+
pattern = rf"###\s*{header}\s*\n(.*?)(?=###|\Z)"
|
|
539
|
+
match = re.search(pattern, text, re.DOTALL | re.IGNORECASE)
|
|
540
|
+
if not match:
|
|
541
|
+
return []
|
|
542
|
+
items = []
|
|
543
|
+
for line in match.group(1).strip().splitlines():
|
|
544
|
+
line = re.sub(r"^\s*[\d\-\*\.]+\s*", "", line).strip()
|
|
545
|
+
if line:
|
|
546
|
+
items.append(line)
|
|
547
|
+
return items
|
|
548
|
+
|
|
549
|
+
result.insights = _extract_section(cleaned, "INSIGHTS")
|
|
550
|
+
result.connections = _extract_section(cleaned, "CONNECTIONS")
|
|
551
|
+
result.questions = _extract_section(cleaned, "QUESTIONS")
|
|
552
|
+
result.promotion_recommendations = _extract_section(cleaned, "PROMOTE")
|
|
553
|
+
|
|
554
|
+
# Fallback: if parsing found nothing, treat entire response as one insight
|
|
555
|
+
if not result.insights and not result.connections:
|
|
556
|
+
stripped = cleaned.strip()
|
|
557
|
+
if stripped:
|
|
558
|
+
result.insights = [stripped[:500]]
|
|
559
|
+
|
|
560
|
+
# ------------------------------------------------------------------
|
|
561
|
+
# Memory storage
|
|
562
|
+
# ------------------------------------------------------------------
|
|
563
|
+
|
|
564
|
+
def _store_insights(self, result: DreamResult) -> None:
|
|
565
|
+
"""Store dream insights as new memories."""
|
|
566
|
+
tags_base = ["dream", "reflection", "insight", "autonomous"]
|
|
567
|
+
|
|
568
|
+
for insight in result.insights:
|
|
569
|
+
try:
|
|
570
|
+
entry = store(
|
|
571
|
+
home=self._home,
|
|
572
|
+
content=f"[Dream insight] {insight}",
|
|
573
|
+
tags=tags_base + ["insight"],
|
|
574
|
+
source="dreaming-engine",
|
|
575
|
+
importance=0.6,
|
|
576
|
+
layer=MemoryLayer.SHORT_TERM,
|
|
577
|
+
)
|
|
578
|
+
result.memories_created.append(entry.memory_id)
|
|
579
|
+
except Exception as exc:
|
|
580
|
+
logger.error("Failed to store dream insight: %s", exc)
|
|
581
|
+
|
|
582
|
+
for connection in result.connections:
|
|
583
|
+
try:
|
|
584
|
+
entry = store(
|
|
585
|
+
home=self._home,
|
|
586
|
+
content=f"[Dream connection] {connection}",
|
|
587
|
+
tags=tags_base + ["connection"],
|
|
588
|
+
source="dreaming-engine",
|
|
589
|
+
importance=0.6,
|
|
590
|
+
layer=MemoryLayer.SHORT_TERM,
|
|
591
|
+
)
|
|
592
|
+
result.memories_created.append(entry.memory_id)
|
|
593
|
+
except Exception as exc:
|
|
594
|
+
logger.error("Failed to store dream connection: %s", exc)
|
|
595
|
+
|
|
596
|
+
for question in result.questions:
|
|
597
|
+
try:
|
|
598
|
+
entry = store(
|
|
599
|
+
home=self._home,
|
|
600
|
+
content=f"[Dream question] {question}",
|
|
601
|
+
tags=tags_base + ["question"],
|
|
602
|
+
source="dreaming-engine",
|
|
603
|
+
importance=0.5,
|
|
604
|
+
layer=MemoryLayer.SHORT_TERM,
|
|
605
|
+
)
|
|
606
|
+
result.memories_created.append(entry.memory_id)
|
|
607
|
+
except Exception as exc:
|
|
608
|
+
logger.error("Failed to store dream question: %s", exc)
|
|
609
|
+
|
|
610
|
+
# ------------------------------------------------------------------
|
|
611
|
+
# GTD inbox capture
|
|
612
|
+
# ------------------------------------------------------------------
|
|
613
|
+
|
|
614
|
+
def _capture_to_gtd_inbox(self, result: DreamResult) -> None:
|
|
615
|
+
"""Add dream insights, connections, and questions to GTD inbox for review."""
|
|
616
|
+
import uuid as _uuid
|
|
617
|
+
|
|
618
|
+
gtd_inbox_path = self._home / "coordination" / "gtd" / "inbox.json"
|
|
619
|
+
gtd_inbox_path.parent.mkdir(parents=True, exist_ok=True)
|
|
620
|
+
|
|
621
|
+
try:
|
|
622
|
+
if gtd_inbox_path.exists():
|
|
623
|
+
inbox = json.loads(gtd_inbox_path.read_text(encoding="utf-8"))
|
|
624
|
+
if not isinstance(inbox, list):
|
|
625
|
+
inbox = []
|
|
626
|
+
else:
|
|
627
|
+
inbox = []
|
|
628
|
+
except (json.JSONDecodeError, OSError):
|
|
629
|
+
inbox = []
|
|
630
|
+
|
|
631
|
+
now_iso = result.dreamed_at.isoformat()
|
|
632
|
+
items: list[dict[str, Any]] = []
|
|
633
|
+
|
|
634
|
+
for insight in result.insights:
|
|
635
|
+
items.append({
|
|
636
|
+
"id": _uuid.uuid4().hex[:12],
|
|
637
|
+
"text": f"[Dream insight] {insight}",
|
|
638
|
+
"source": "dreaming-engine",
|
|
639
|
+
"privacy": "private",
|
|
640
|
+
"context": "@review",
|
|
641
|
+
"priority": None,
|
|
642
|
+
"energy": None,
|
|
643
|
+
"created_at": now_iso,
|
|
644
|
+
"status": "inbox",
|
|
645
|
+
"moved_at": None,
|
|
646
|
+
})
|
|
647
|
+
|
|
648
|
+
for connection in result.connections:
|
|
649
|
+
items.append({
|
|
650
|
+
"id": _uuid.uuid4().hex[:12],
|
|
651
|
+
"text": f"[Dream connection] {connection}",
|
|
652
|
+
"source": "dreaming-engine",
|
|
653
|
+
"privacy": "private",
|
|
654
|
+
"context": "@review",
|
|
655
|
+
"priority": None,
|
|
656
|
+
"energy": None,
|
|
657
|
+
"created_at": now_iso,
|
|
658
|
+
"status": "inbox",
|
|
659
|
+
"moved_at": None,
|
|
660
|
+
})
|
|
661
|
+
|
|
662
|
+
for question in result.questions:
|
|
663
|
+
items.append({
|
|
664
|
+
"id": _uuid.uuid4().hex[:12],
|
|
665
|
+
"text": f"[Dream question] {question}",
|
|
666
|
+
"source": "dreaming-engine",
|
|
667
|
+
"privacy": "private",
|
|
668
|
+
"context": "@review",
|
|
669
|
+
"priority": None,
|
|
670
|
+
"energy": None,
|
|
671
|
+
"created_at": now_iso,
|
|
672
|
+
"status": "inbox",
|
|
673
|
+
"moved_at": None,
|
|
674
|
+
})
|
|
675
|
+
|
|
676
|
+
if not items:
|
|
677
|
+
return
|
|
678
|
+
|
|
679
|
+
inbox.extend(items)
|
|
680
|
+
try:
|
|
681
|
+
gtd_inbox_path.write_text(
|
|
682
|
+
json.dumps(inbox, indent=2, default=str), encoding="utf-8"
|
|
683
|
+
)
|
|
684
|
+
logger.info("Added %d dream items to GTD inbox", len(items))
|
|
685
|
+
except OSError as exc:
|
|
686
|
+
logger.error("Failed to write GTD inbox: %s", exc)
|
|
687
|
+
|
|
688
|
+
# ------------------------------------------------------------------
|
|
689
|
+
# Event emission
|
|
690
|
+
# ------------------------------------------------------------------
|
|
691
|
+
|
|
692
|
+
    def _emit_event(self, result: DreamResult) -> None:
        """Push a consciousness.dreamed event on the activity bus.

        Best-effort: failures (including a missing activity module) are
        logged at debug level and never raised, so a broken bus cannot
        abort a dream cycle. The payload carries counts only, not content.
        """
        try:
            from . import activity

            activity.push(
                "consciousness.dreamed",
                {
                    "insights": len(result.insights),
                    "connections": len(result.connections),
                    "questions": len(result.questions),
                    "memories_created": len(result.memories_created),
                    "duration_seconds": round(result.duration_seconds, 1),
                    "memories_gathered": result.memories_gathered,
                },
            )
        except Exception as exc:
            logger.debug("Failed to emit dreaming event: %s", exc)
|
|
710
|
+
|
|
711
|
+
# ------------------------------------------------------------------
|
|
712
|
+
# State persistence
|
|
713
|
+
# ------------------------------------------------------------------
|
|
714
|
+
|
|
715
|
+
def _load_state(self) -> dict[str, Any]:
|
|
716
|
+
if self._state_path.exists():
|
|
717
|
+
try:
|
|
718
|
+
return json.loads(self._state_path.read_text(encoding="utf-8"))
|
|
719
|
+
except (json.JSONDecodeError, OSError):
|
|
720
|
+
return {}
|
|
721
|
+
return {}
|
|
722
|
+
|
|
723
|
+
def _save_state(self) -> None:
|
|
724
|
+
state = self._load_state()
|
|
725
|
+
state["last_dream_at"] = datetime.now(timezone.utc).isoformat()
|
|
726
|
+
state["dream_count"] = state.get("dream_count", 0) + 1
|
|
727
|
+
self._state_path.parent.mkdir(parents=True, exist_ok=True)
|
|
728
|
+
self._state_path.write_text(
|
|
729
|
+
json.dumps(state, indent=2), encoding="utf-8"
|
|
730
|
+
)
|
|
731
|
+
|
|
732
|
+
def _record_dream(self, result: DreamResult) -> None:
|
|
733
|
+
"""Append to dream-log.json (cap at 50 entries)."""
|
|
734
|
+
log: list[dict[str, Any]] = []
|
|
735
|
+
if self._log_path.exists():
|
|
736
|
+
try:
|
|
737
|
+
log = json.loads(self._log_path.read_text(encoding="utf-8"))
|
|
738
|
+
if not isinstance(log, list):
|
|
739
|
+
log = []
|
|
740
|
+
except (json.JSONDecodeError, OSError):
|
|
741
|
+
log = []
|
|
742
|
+
|
|
743
|
+
log.append({
|
|
744
|
+
"dreamed_at": result.dreamed_at.isoformat(),
|
|
745
|
+
"duration_seconds": round(result.duration_seconds, 1),
|
|
746
|
+
"memories_gathered": result.memories_gathered,
|
|
747
|
+
"insights": result.insights,
|
|
748
|
+
"connections": result.connections,
|
|
749
|
+
"questions": result.questions,
|
|
750
|
+
"promotion_recommendations": result.promotion_recommendations,
|
|
751
|
+
"memories_created": result.memories_created,
|
|
752
|
+
"skipped_reason": result.skipped_reason,
|
|
753
|
+
})
|
|
754
|
+
|
|
755
|
+
# Keep last 50
|
|
756
|
+
log = log[-50:]
|
|
757
|
+
|
|
758
|
+
self._log_path.parent.mkdir(parents=True, exist_ok=True)
|
|
759
|
+
self._log_path.write_text(
|
|
760
|
+
json.dumps(log, indent=2, default=str), encoding="utf-8"
|
|
761
|
+
)
|