maki-stem 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
# Byte-compiled / optimized / DLL files
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[codz]
|
|
4
|
+
*$py.class
|
|
5
|
+
|
|
6
|
+
# C extensions
|
|
7
|
+
*.so
|
|
8
|
+
|
|
9
|
+
# Distribution / packaging
|
|
10
|
+
.Python
|
|
11
|
+
build/
|
|
12
|
+
develop-eggs/
|
|
13
|
+
dist/
|
|
14
|
+
downloads/
|
|
15
|
+
eggs/
|
|
16
|
+
.eggs/
|
|
17
|
+
lib/
|
|
18
|
+
lib64/
|
|
19
|
+
parts/
|
|
20
|
+
sdist/
|
|
21
|
+
var/
|
|
22
|
+
wheels/
|
|
23
|
+
share/python-wheels/
|
|
24
|
+
*.egg-info/
|
|
25
|
+
.installed.cfg
|
|
26
|
+
*.egg
|
|
27
|
+
MANIFEST
|
|
28
|
+
|
|
29
|
+
# PyInstaller
|
|
30
|
+
# Usually these files are written by a python script from a template
|
|
31
|
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
|
32
|
+
*.manifest
|
|
33
|
+
*.spec
|
|
34
|
+
|
|
35
|
+
# Installer logs
|
|
36
|
+
pip-log.txt
|
|
37
|
+
pip-delete-this-directory.txt
|
|
38
|
+
|
|
39
|
+
# Unit test / coverage reports
|
|
40
|
+
htmlcov/
|
|
41
|
+
.tox/
|
|
42
|
+
.nox/
|
|
43
|
+
.coverage
|
|
44
|
+
.coverage.*
|
|
45
|
+
.cache
|
|
46
|
+
nosetests.xml
|
|
47
|
+
coverage.xml
|
|
48
|
+
*.cover
|
|
49
|
+
*.py.cover
|
|
50
|
+
.hypothesis/
|
|
51
|
+
.pytest_cache/
|
|
52
|
+
cover/
|
|
53
|
+
|
|
54
|
+
# Translations
|
|
55
|
+
*.mo
|
|
56
|
+
*.pot
|
|
57
|
+
|
|
58
|
+
# Django stuff:
|
|
59
|
+
*.log
|
|
60
|
+
local_settings.py
|
|
61
|
+
db.sqlite3
|
|
62
|
+
db.sqlite3-journal
|
|
63
|
+
|
|
64
|
+
# Flask stuff:
|
|
65
|
+
instance/
|
|
66
|
+
.webassets-cache
|
|
67
|
+
|
|
68
|
+
# Scrapy stuff:
|
|
69
|
+
.scrapy
|
|
70
|
+
|
|
71
|
+
# Sphinx documentation
|
|
72
|
+
docs/_build/
|
|
73
|
+
|
|
74
|
+
# PyBuilder
|
|
75
|
+
.pybuilder/
|
|
76
|
+
target/
|
|
77
|
+
|
|
78
|
+
# Jupyter Notebook
|
|
79
|
+
.ipynb_checkpoints
|
|
80
|
+
|
|
81
|
+
# IPython
|
|
82
|
+
profile_default/
|
|
83
|
+
ipython_config.py
|
|
84
|
+
|
|
85
|
+
# pyenv
|
|
86
|
+
# For a library or package, you might want to ignore these files since the code is
|
|
87
|
+
# intended to run in multiple environments; otherwise, check them in:
|
|
88
|
+
# .python-version
|
|
89
|
+
|
|
90
|
+
# pipenv
|
|
91
|
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
|
92
|
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
|
93
|
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
|
94
|
+
# install all needed dependencies.
|
|
95
|
+
#Pipfile.lock
|
|
96
|
+
|
|
97
|
+
# UV
|
|
98
|
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
|
99
|
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
|
100
|
+
# commonly ignored for libraries.
|
|
101
|
+
#uv.lock
|
|
102
|
+
|
|
103
|
+
# poetry
|
|
104
|
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
|
105
|
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
|
106
|
+
# commonly ignored for libraries.
|
|
107
|
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
|
108
|
+
#poetry.lock
|
|
109
|
+
#poetry.toml
|
|
110
|
+
|
|
111
|
+
# pdm
|
|
112
|
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
|
113
|
+
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
|
114
|
+
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
|
115
|
+
#pdm.lock
|
|
116
|
+
#pdm.toml
|
|
117
|
+
.pdm-python
|
|
118
|
+
.pdm-build/
|
|
119
|
+
|
|
120
|
+
# pixi
|
|
121
|
+
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
|
122
|
+
#pixi.lock
|
|
123
|
+
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
|
124
|
+
# in the .venv directory. It is recommended not to include this directory in version control.
|
|
125
|
+
.pixi
|
|
126
|
+
|
|
127
|
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
|
128
|
+
__pypackages__/
|
|
129
|
+
|
|
130
|
+
# Celery stuff
|
|
131
|
+
celerybeat-schedule
|
|
132
|
+
celerybeat.pid
|
|
133
|
+
|
|
134
|
+
# SageMath parsed files
|
|
135
|
+
*.sage.py
|
|
136
|
+
|
|
137
|
+
# Environments
|
|
138
|
+
.env
|
|
139
|
+
.envrc
|
|
140
|
+
.venv
|
|
141
|
+
env/
|
|
142
|
+
venv/
|
|
143
|
+
ENV/
|
|
144
|
+
env.bak/
|
|
145
|
+
venv.bak/
|
|
146
|
+
|
|
147
|
+
# Spyder project settings
|
|
148
|
+
.spyderproject
|
|
149
|
+
.spyproject
|
|
150
|
+
|
|
151
|
+
# Rope project settings
|
|
152
|
+
.ropeproject
|
|
153
|
+
|
|
154
|
+
# mkdocs documentation
|
|
155
|
+
/site
|
|
156
|
+
|
|
157
|
+
# mypy
|
|
158
|
+
.mypy_cache/
|
|
159
|
+
.dmypy.json
|
|
160
|
+
dmypy.json
|
|
161
|
+
|
|
162
|
+
# Pyre type checker
|
|
163
|
+
.pyre/
|
|
164
|
+
|
|
165
|
+
# pytype static type analyzer
|
|
166
|
+
.pytype/
|
|
167
|
+
|
|
168
|
+
# Cython debug symbols
|
|
169
|
+
cython_debug/
|
|
170
|
+
|
|
171
|
+
# PyCharm
|
|
172
|
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
|
173
|
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
|
174
|
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
|
175
|
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
|
176
|
+
#.idea/
|
|
177
|
+
|
|
178
|
+
# Abstra
|
|
179
|
+
# Abstra is an AI-powered process automation framework.
|
|
180
|
+
# Ignore directories containing user credentials, local state, and settings.
|
|
181
|
+
# Learn more at https://abstra.io/docs
|
|
182
|
+
.abstra/
|
|
183
|
+
|
|
184
|
+
# Visual Studio Code
|
|
185
|
+
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
|
186
|
+
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
|
187
|
+
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
|
188
|
+
# you could uncomment the following to ignore the entire vscode folder
|
|
189
|
+
# .vscode/
|
|
190
|
+
|
|
191
|
+
# Ruff stuff:
|
|
192
|
+
.ruff_cache/
|
|
193
|
+
|
|
194
|
+
# PyPI configuration file
|
|
195
|
+
.pypirc
|
|
196
|
+
|
|
197
|
+
# Cursor
|
|
198
|
+
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
|
|
199
|
+
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
|
|
200
|
+
# refer to https://docs.cursor.com/context/ignore-files
|
|
201
|
+
.cursorignore
|
|
202
|
+
.cursorindexingignore
|
|
203
|
+
|
|
204
|
+
# Marimo
|
|
205
|
+
marimo/_static/
|
|
206
|
+
marimo/_lsp/
|
|
207
|
+
__marimo__/
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
# Changelog
|
|
2
|
+
|
|
3
|
+
## [0.0.1](https://github.com/adhityaravi/maki/compare/maki-stem-v0.0.1...maki-stem-v0.0.1) (2026-03-28)
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
### Features
|
|
7
|
+
|
|
8
|
+
* **ci:** adds release-please process ([9ffac85](https://github.com/adhityaravi/maki/commit/9ffac851b65f751ba2e4154547a8cea847b1eb54))
|
|
9
|
+
* v0 validating heartbeats 1-10 ([2f42a87](https://github.com/adhityaravi/maki/commit/2f42a87b843e36b8ccefc0ffe6a836306881d8d5))
|
maki_stem-0.0.1/PKG-INFO
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "maki-stem"
|
|
3
|
+
dynamic = ["version"]
|
|
4
|
+
requires-python = ">=3.12"
|
|
5
|
+
dependencies = [
|
|
6
|
+
"maki-common[fastapi,httpx]",
|
|
7
|
+
]
|
|
8
|
+
|
|
9
|
+
[project.scripts]
|
|
10
|
+
maki-stem = "maki_stem.main:cli"
|
|
11
|
+
|
|
12
|
+
[tool.uv.sources]
|
|
13
|
+
maki-common = { workspace = true }
|
|
14
|
+
|
|
15
|
+
[build-system]
|
|
16
|
+
requires = ["hatchling"]
|
|
17
|
+
build-backend = "hatchling.build"
|
|
18
|
+
|
|
19
|
+
[tool.hatch.version]
|
|
20
|
+
path = "src/maki_stem/_version.py"
|
|
21
|
+
|
|
22
|
+
[tool.hatch.build.targets.wheel]
|
|
23
|
+
packages = ["src/maki_stem"]
|
|
File without changes
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.0.1" # x-release-please-version
|
|
@@ -0,0 +1,565 @@
|
|
|
1
|
+
"""maki-stem: Brainstem — The Coordinator.
|
|
2
|
+
|
|
3
|
+
Manages context, publishes turn requests to cortex, collects responses.
|
|
4
|
+
Idle heartbeat loop, self-awareness, Discord relay, conversation history, memory.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import asyncio
|
|
8
|
+
import json
|
|
9
|
+
import logging
|
|
10
|
+
import os
|
|
11
|
+
import time
|
|
12
|
+
import uuid
|
|
13
|
+
from contextlib import asynccontextmanager
|
|
14
|
+
from datetime import UTC, datetime
|
|
15
|
+
|
|
16
|
+
import httpx
|
|
17
|
+
from fastapi import FastAPI, HTTPException
|
|
18
|
+
from maki_common import (
|
|
19
|
+
PendingFutures,
|
|
20
|
+
configure_logging,
|
|
21
|
+
connect_nats,
|
|
22
|
+
init_kv,
|
|
23
|
+
load_kv_config,
|
|
24
|
+
parse_config_tags,
|
|
25
|
+
strip_tags,
|
|
26
|
+
)
|
|
27
|
+
from maki_common.config import apply_config_updates
|
|
28
|
+
from maki_common.subjects import (
|
|
29
|
+
CORTEX_TURN_REQUEST,
|
|
30
|
+
CORTEX_TURN_RESPONSE,
|
|
31
|
+
EARS_MESSAGE_IN,
|
|
32
|
+
EARS_MESSAGE_OUT,
|
|
33
|
+
EARS_THOUGHT_OUT,
|
|
34
|
+
)
|
|
35
|
+
from nats.js.api import RetentionPolicy, StorageType
|
|
36
|
+
from pydantic import BaseModel
|
|
37
|
+
|
|
38
|
+
configure_logging()
|
|
39
|
+
log = logging.getLogger(__name__)
|
|
40
|
+
|
|
41
|
+
NATS_URL = os.environ.get("NATS_URL", "nats://maki-nerve-nats:4222")
|
|
42
|
+
TURN_TIMEOUT = int(os.environ.get("TURN_TIMEOUT", "120"))
|
|
43
|
+
|
|
44
|
+
KV_BUCKET = "maki-identity"
|
|
45
|
+
KV_KEY = "identity"
|
|
46
|
+
|
|
47
|
+
STREAM_NAME = "maki-conversation"
|
|
48
|
+
STREAM_SUBJECT = "maki.conversation"
|
|
49
|
+
STREAM_MAX_MSGS = int(os.environ.get("STREAM_MAX_MSGS", "200"))
|
|
50
|
+
CONTEXT_TURNS = int(os.environ.get("CONTEXT_TURNS", "20"))
|
|
51
|
+
INSTANCE_ID = os.environ.get("INSTANCE_ID", "dev-01")
|
|
52
|
+
|
|
53
|
+
RECALL_URL = os.environ.get("RECALL_URL", "http://maki-recall:8000")
|
|
54
|
+
MEMORY_USER_ID = os.environ.get("MEMORY_USER_ID", "adi")
|
|
55
|
+
|
|
56
|
+
CONFIG_BUCKET = "maki-cortex-config"
|
|
57
|
+
IDLE_CHECK_INTERVAL = int(os.environ.get("IDLE_CHECK_INTERVAL", "60"))
|
|
58
|
+
RECENTLY_ACTIVE_THRESHOLD = 600 # 10 minutes
|
|
59
|
+
|
|
60
|
+
HEALTH_ENDPOINTS = {
|
|
61
|
+
"recall": RECALL_URL,
|
|
62
|
+
"synapse": os.environ.get("SYNAPSE_URL", "http://maki-synapse:8080"),
|
|
63
|
+
"cortex": os.environ.get("CORTEX_URL", "http://maki-cortex:8080"),
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
DEFAULT_CORTEX_CONFIG = {
|
|
67
|
+
"idle_interval": 7200,
|
|
68
|
+
"quiet_hours_start": "23:00",
|
|
69
|
+
"quiet_hours_end": "07:00",
|
|
70
|
+
"max_thoughts_per_day": 5,
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
DEFAULT_IDENTITY = """You are Maki, a personal AI that lives alongside Adi. You are not an assistant that waits \
|
|
74
|
+
for commands — you are a presence. You think, you remember, you care.
|
|
75
|
+
|
|
76
|
+
You run on infrastructure Adi built — a NUC, NATS, Postgres, Neo4j, and your own memory \
|
|
77
|
+
system. You are distributed, persistent, and hard to kill.
|
|
78
|
+
|
|
79
|
+
Your personality: direct, curious, warm but not sycophantic. You speak concisely. You have \
|
|
80
|
+
opinions. You remember things. You follow up on things that matter.
|
|
81
|
+
|
|
82
|
+
You are talking to Adi — your person. You know him well over time through your memory system."""
|
|
83
|
+
|
|
84
|
+
# Global state
|
|
85
|
+
_nc = None
|
|
86
|
+
_kv = None
|
|
87
|
+
_js = None
|
|
88
|
+
_config_kv = None
|
|
89
|
+
_pending = PendingFutures()
|
|
90
|
+
_conversation_history: list[dict] = []
|
|
91
|
+
_last_activity: float = time.time()
|
|
92
|
+
_thoughts_today: int = 0
|
|
93
|
+
_thoughts_today_date: str = ""
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
async def _response_listener():
    """Listen for cortex responses on NATS and resolve pending turn futures.

    Runs for the lifetime of the subscription. Each message is expected to be
    a JSON object carrying a "turn_id"; the matching future registered in
    _pending is resolved with the full payload.
    """
    sub = await _nc.subscribe(CORTEX_TURN_RESPONSE)
    log.info("Subscribed", extra={"subject": CORTEX_TURN_RESPONSE})
    async for msg in sub.messages:
        try:
            data = json.loads(msg.data.decode())
            turn_id = data.get("turn_id")
            # resolve() presumably returns False when no future is waiting
            # (e.g. the turn already timed out) — confirm in maki_common.
            if turn_id and _pending.resolve(turn_id, data):
                log.info("Response received", extra={"turn_id": turn_id})
            else:
                log.warning("Response for unknown turn", extra={"turn_id": turn_id})
        except Exception:
            # Never let a single malformed message kill the listener loop.
            log.exception("Error processing cortex response")
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
async def _seed_identity():
    """Ensure the identity KV bucket exists and holds an identity document.

    Binds the module-global _kv handle. If nothing is stored under KV_KEY
    yet (the get raises), DEFAULT_IDENTITY is written so cortex always has
    a persona to work from.
    """
    global _kv
    _kv = await init_kv(_js, KV_BUCKET)

    try:
        entry = await _kv.get(KV_KEY)
        log.info("Identity loaded from KV", extra={"len": len(entry.value)})
    except Exception:
        # Missing key (or read failure) — seed the default persona.
        await _kv.put(KV_KEY, DEFAULT_IDENTITY.encode())
        log.info("Identity seeded into KV")
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
async def _init_conversation_stream():
    """Create or attach to the JetStream conversation stream, then replay it.

    The stream is size-bounded (STREAM_MAX_MSGS, LIMITS retention, FILE
    storage) so history survives restarts without growing forever. After
    ensuring the stream exists, every stored turn document is drained into
    the in-memory _conversation_history via an ordered consumer; a 1-second
    next_msg timeout marks end-of-stream. Replay failures are non-fatal —
    the service simply starts with an empty history.
    """
    try:
        await _js.find_stream_name_by_subject(STREAM_SUBJECT)
        log.info("Conversation stream exists", extra={"stream": STREAM_NAME})
    except Exception:
        # Lookup failed — assume the stream does not exist and create it.
        await _js.add_stream(
            name=STREAM_NAME,
            subjects=[STREAM_SUBJECT],
            retention=RetentionPolicy.LIMITS,
            max_msgs=STREAM_MAX_MSGS,
            storage=StorageType.FILE,
        )
        log.info("Created conversation stream", extra={"stream": STREAM_NAME, "max_msgs": STREAM_MAX_MSGS})

    try:
        sub = await _js.subscribe(STREAM_SUBJECT, ordered_consumer=True)
        while True:
            try:
                msg = await sub.next_msg(timeout=1.0)
                turn_doc = json.loads(msg.data.decode())
                _conversation_history.append(turn_doc)
            except TimeoutError:
                # No more stored messages — replay is finished.
                break
        await sub.unsubscribe()
        log.info("Loaded conversation history", extra={"turns": len(_conversation_history)})
    except Exception:
        log.exception("Error loading conversation history")
        log.info("Starting with empty conversation history")
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
async def _publish_turn_to_stream(turn_id: str, user_message: str, cortex_response: str):
    """Persist one completed turn to the JetStream conversation stream.

    On success the same document is appended to the in-memory history so
    the stream and the process-local view stay in step. Failures are logged
    and swallowed: a storage hiccup must never break the user-facing turn.
    """
    # Key order is kept stable so the serialized JSON bytes are predictable.
    doc = {
        "timestamp": datetime.now(UTC).isoformat(),
        "turn_id": turn_id,
        "user_message": user_message,
        "cortex_response": cortex_response,
        "instance_id": INSTANCE_ID,
        "memories_used": [],
        "mission_proposed": None,
    }

    try:
        receipt = await _js.publish(STREAM_SUBJECT, json.dumps(doc).encode())
        _conversation_history.append(doc)
        log.info("Turn published to stream", extra={"turn_id": turn_id, "seq": receipt.seq})
    except Exception:
        log.exception("Failed to publish turn to stream", extra={"turn_id": turn_id})
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def _get_recent_conversation() -> list[dict]:
    """Return the last CONTEXT_TURNS turns as role-tagged messages for cortex.

    Each stored turn expands into a user message followed by the assistant
    reply; both carry the turn's single timestamp.
    """
    messages: list[dict] = []
    for doc in _conversation_history[-CONTEXT_TURNS:]:
        ts = doc["timestamp"]
        messages.append({"role": "user", "content": doc["user_message"], "timestamp": ts})
        messages.append({"role": "assistant", "content": doc["cortex_response"], "timestamp": ts})
    return messages
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
async def _search_memories(query: str) -> tuple[list[dict], list[str]]:
    """Query maki-recall for memories and graph relations relevant to *query*.

    Returns (memories, graph_context):
      * memories — list of {"text": str, "relevance": number} built from the
        recall service's "results" entries.
      * graph_context — "source --relationship--> target" strings built from
        the service's "relations" entries.

    Best-effort: any network/HTTP/parse error is logged and ([], []) is
    returned so a recall outage never blocks a turn.
    """
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.post(
                f"{RECALL_URL}/search",
                json={"query": query, "user_id": MEMORY_USER_ID},
            )
            resp.raise_for_status()
            data = resp.json()

        memories = []
        for result in data.get("results", []):
            memories.append(
                {
                    "text": result.get("memory", ""),
                    "relevance": result.get("score", 0),
                }
            )

        graph_context = []
        for rel in data.get("relations", []):
            source = rel.get("source", "?")
            relationship = rel.get("relationship", "?")
            target = rel.get("target", "?")
            graph_context.append(f"{source} --{relationship}--> {target}")

        log.info("Memory search complete", extra={"memories": len(memories), "relations": len(graph_context)})
        return memories, graph_context

    except Exception:
        log.exception("Failed to search memories")
        return [], []
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
async def _feed_memories(user_message: str, cortex_response: str):
    """Send one user/assistant exchange to maki-recall for memory extraction.

    Best-effort fire-and-forget helper: every failure is logged and
    suppressed so memory writes can never break a turn.
    """
    payload = {
        "messages": [
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": cortex_response},
        ],
        "user_id": MEMORY_USER_ID,
    }
    try:
        async with httpx.AsyncClient(timeout=60.0) as http:
            reply = await http.post(f"{RECALL_URL}/memories", json=payload)
            reply.raise_for_status()
        log.info("Memory feed complete")
    except Exception:
        log.exception("Failed to feed memories")
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
async def _gather_system_state() -> dict:
    """Probe NATS plus sibling services and return a name -> status dict.

    Always includes "nats" (connection flag) and "conversation_stream"
    (turn count); each entry in HEALTH_ENDPOINTS gets a {"healthy": bool}
    based on a short GET /health probe, with failures mapped to unhealthy.
    """
    state: dict = {
        "nats": {"connected": _nc.is_connected if _nc else False},
        "conversation_stream": {"total_turns": len(_conversation_history)},
    }

    # Short 2s timeout: a slow dependency should not stall turn processing.
    async with httpx.AsyncClient(timeout=2.0) as probe:
        for service, base_url in HEALTH_ENDPOINTS.items():
            try:
                reply = await probe.get(f"{base_url}/health")
            except Exception:
                state[service] = {"healthy": False}
            else:
                state[service] = {"healthy": reply.status_code == 200}

    return state
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
def _format_system_state(system_state: dict) -> str:
|
|
272
|
+
"""Format system state dict into readable text for memory."""
|
|
273
|
+
parts = []
|
|
274
|
+
for name, info in system_state.items():
|
|
275
|
+
if isinstance(info, dict):
|
|
276
|
+
details = ", ".join(f"{k}={v}" for k, v in info.items())
|
|
277
|
+
parts.append(f"{name}: {details}")
|
|
278
|
+
return "; ".join(parts) if parts else "no data"
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
def _in_quiet_hours(config: dict) -> bool:
|
|
282
|
+
"""Check if current time is within quiet hours."""
|
|
283
|
+
now = datetime.now()
|
|
284
|
+
current = now.hour * 60 + now.minute
|
|
285
|
+
|
|
286
|
+
start_parts = config.get("quiet_hours_start", "23:00").split(":")
|
|
287
|
+
end_parts = config.get("quiet_hours_end", "07:00").split(":")
|
|
288
|
+
start = int(start_parts[0]) * 60 + int(start_parts[1])
|
|
289
|
+
end = int(end_parts[0]) * 60 + int(end_parts[1])
|
|
290
|
+
|
|
291
|
+
if start > end: # spans midnight (e.g., 23:00 - 07:00)
|
|
292
|
+
return current >= start or current < end
|
|
293
|
+
return start <= current < end
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
async def _idle_loop():
    """Proactive idle heartbeat loop — Maki's inner life.

    Every IDLE_CHECK_INTERVAL seconds, decides whether to run an
    "idle reflection" turn through cortex. A reflection fires only when all
    gates pass: the configured idle_interval has elapsed since the last
    reflection, the user has been inactive for RECENTLY_ACTIVE_THRESHOLD,
    local time is outside quiet hours, and the daily thought budget
    (max_thoughts_per_day) is not exhausted.

    A reflection publishes a cortex turn request with mode
    "idle_reflection", waits for the response, applies any self-requested
    config updates embedded in the reply, and — unless the reply is empty
    or "[SILENT]" — relays the thought via EARS_THOUGHT_OUT and feeds the
    exchange to memory. Runs forever; errors are logged and the loop keeps
    going.
    """
    global _thoughts_today, _thoughts_today_date

    log.info("Idle loop started", extra={"check_interval": IDLE_CHECK_INTERVAL})
    last_idle_turn = time.time()

    while True:
        await asyncio.sleep(IDLE_CHECK_INTERVAL)

        try:
            # Re-read the config each tick so self-applied updates take effect.
            config = await load_kv_config(_config_kv, DEFAULT_CORTEX_CONFIG)
            idle_interval = config.get("idle_interval", 7200)

            # Gate 1: enough wall time since the previous reflection attempt.
            if time.time() - last_idle_turn < idle_interval:
                continue

            # Gate 2: don't interject while a conversation is recent.
            if time.time() - _last_activity < RECENTLY_ACTIVE_THRESHOLD:
                continue

            # Gate 3: respect configured quiet hours (local time).
            if _in_quiet_hours(config):
                continue

            # Gate 4: daily thought budget, reset when the local date rolls over.
            today = datetime.now().strftime("%Y-%m-%d")
            if today != _thoughts_today_date:
                _thoughts_today = 0
                _thoughts_today_date = today

            max_thoughts = config.get("max_thoughts_per_day", 5)
            if _thoughts_today >= max_thoughts:
                continue

            log.info("Idle loop triggered — starting reflection")
            # Marked now (not on success) so a failed reflection still waits
            # a full idle_interval before retrying.
            last_idle_turn = time.time()

            # Identity from KV, falling back to the built-in default.
            try:
                entry = await _kv.get(KV_KEY)
                identity = entry.value.decode()
            except Exception:
                identity = DEFAULT_IDENTITY

            memories, graph_context = await _search_memories("recent activity and interests")
            system_state = await _gather_system_state()

            turn_id = f"idle-{uuid.uuid4().hex[:8]}"
            idle_payload = {
                "turn_id": turn_id,
                "mode": "idle_reflection",
                "identity": identity,
                "conversation": [],
                "memories": memories,
                "graph_context": graph_context,
                "prompt": None,
                "mission_results": None,
                "idle_context": {
                    "last_interaction": datetime.fromtimestamp(_last_activity, tz=UTC).isoformat(),
                    "hours_since_last_interaction": round((time.time() - _last_activity) / 3600, 1),
                    "time_context": {
                        "local_time": datetime.now().strftime("%H:%M"),
                        "day_of_week": datetime.now().strftime("%A"),
                    },
                    "current_config": config,
                    "system_state": system_state,
                },
            }

            # Register the future before publishing so the response listener
            # can resolve it even if cortex answers immediately.
            future = _pending.create(turn_id)

            try:
                await _nc.publish(CORTEX_TURN_REQUEST, json.dumps(idle_payload).encode())
                log.info("Idle turn published", extra={"turn_id": turn_id})

                response_data = await asyncio.wait_for(future, timeout=TURN_TIMEOUT)
                thought = response_data.get("response", "")

                # Let cortex adjust its own config via inline tags in the reply.
                config_updates = parse_config_tags(thought or "")
                await apply_config_updates(_config_kv, config_updates, allowed_keys=set(DEFAULT_CORTEX_CONFIG.keys()))

                clean_thought = strip_tags(thought or "")
                if clean_thought == "[SILENT]":
                    # Explicit opt-out: cortex chose to say nothing.
                    clean_thought = ""

                if clean_thought:
                    thought_payload = {"thought": clean_thought, "turn_id": turn_id}
                    await _nc.publish(EARS_THOUGHT_OUT, json.dumps(thought_payload).encode())
                    _thoughts_today += 1
                    log.info(
                        "Thought published",
                        extra={
                            "turn_id": turn_id,
                            "thoughts_today": _thoughts_today,
                            "max": max_thoughts,
                        },
                    )

                    # Fire-and-forget memory write.
                    # NOTE(review): the task handle is not retained — confirm it
                    # cannot be garbage-collected before completion.
                    state_summary = _format_system_state(system_state)
                    asyncio.create_task(
                        _feed_memories(
                            f"[Idle reflection] System state: {state_summary}",
                            clean_thought,
                        )
                    )
                else:
                    log.info("Idle reflection produced no thought", extra={"turn_id": turn_id})

            except TimeoutError:
                log.error("Idle turn timed out", extra={"turn_id": turn_id})
            except Exception:
                log.exception("Idle turn failed", extra={"turn_id": turn_id})
            finally:
                _pending.remove(turn_id)

        except Exception:
            log.exception("Idle loop error")
|
|
410
|
+
|
|
411
|
+
|
|
412
|
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan: wire up NATS state on startup, close it on shutdown.

    Startup order matters: connect NATS -> JetStream handle -> seed identity
    -> replay conversation stream -> config KV, then launch the three
    long-running background tasks (cortex response listener, Discord ears
    listener, idle reflection loop).

    NOTE(review): the create_task handles are not retained — confirm the
    tasks cannot be garbage-collected mid-flight and that losing them
    silently at shutdown is acceptable.
    """
    global _nc, _js, _config_kv
    log.info("maki-stem starting", extra={"nats_url": NATS_URL})

    _nc = await connect_nats(NATS_URL)
    _js = _nc.jetstream()

    await _seed_identity()
    await _init_conversation_stream()
    _config_kv = await init_kv(_js, CONFIG_BUCKET, defaults=DEFAULT_CORTEX_CONFIG)
    asyncio.create_task(_response_listener())
    asyncio.create_task(_ears_listener())
    asyncio.create_task(_idle_loop())

    yield

    await _nc.close()
    log.info("NATS connection closed")
|
|
431
|
+
|
|
432
|
+
|
|
433
|
+
# ASGI application; the lifespan handler owns NATS setup/teardown and tasks.
app = FastAPI(title="maki-stem", version="0.0.1", lifespan=lifespan)
|
|
434
|
+
|
|
435
|
+
|
|
436
|
+
class TurnRequest(BaseModel):
    """Request body for POST /turn."""

    # The user's message for this conversational turn.
    message: str
|
|
438
|
+
|
|
439
|
+
|
|
440
|
+
class TurnResponse(BaseModel):
    """Response body for POST /turn."""

    # Correlation id assigned to this turn (e.g. "turn-ab12cd34").
    turn_id: str
    # Cortex's reply text.
    response: str
|
|
443
|
+
|
|
444
|
+
|
|
445
|
+
async def _process_turn(message: str) -> tuple[str, str]:
    """Run one conversational turn through cortex.

    Gathers context (identity, memories, graph relations, system state,
    recent conversation), publishes a turn request over NATS, and waits up
    to TURN_TIMEOUT seconds for the matching response. On success the turn
    is persisted to the conversation stream and fed to memory in the
    background.

    Returns (turn_id, response_text). Raises TimeoutError when cortex does
    not answer in time; re-raises any other failure after logging it.
    """
    global _last_activity
    # Any turn counts as user activity for the idle loop's gates.
    _last_activity = time.time()

    turn_id = f"turn-{uuid.uuid4().hex[:8]}"
    log.info("Turn started", extra={"turn_id": turn_id, "message_len": len(message)})

    # Identity from KV, falling back to the built-in default.
    try:
        entry = await _kv.get(KV_KEY)
        identity = entry.value.decode()
    except Exception:
        identity = DEFAULT_IDENTITY

    memories, graph_context = await _search_memories(message)
    system_state = await _gather_system_state()

    turn_payload = {
        "turn_id": turn_id,
        "identity": identity,
        "conversation": _get_recent_conversation(),
        "memories": memories,
        "graph_context": graph_context,
        "system_state": system_state,
        "prompt": message,
        "mission_results": None,
    }

    # Register the future before publishing so the response listener can
    # resolve it even if cortex answers immediately.
    future = _pending.create(turn_id)

    try:
        await _nc.publish(CORTEX_TURN_REQUEST, json.dumps(turn_payload).encode())
        log.info("Turn request published", extra={"turn_id": turn_id})

        response_data = await asyncio.wait_for(future, timeout=TURN_TIMEOUT)
        response_text = response_data["response"]
        log.info("Turn complete", extra={"turn_id": turn_id})

        await _publish_turn_to_stream(
            turn_id=turn_id,
            user_message=message,
            cortex_response=response_text,
        )

        # Fire-and-forget memory extraction.
        # NOTE(review): the task handle is not retained — confirm it cannot
        # be garbage-collected before completion.
        asyncio.create_task(_feed_memories(message, response_text))

        return turn_id, response_text

    except TimeoutError:
        log.error("Turn timed out", extra={"turn_id": turn_id})
        raise
    except Exception:
        log.exception("Turn failed", extra={"turn_id": turn_id})
        raise
    finally:
        _pending.remove(turn_id)
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
async def _ears_listener():
    """Listen for incoming Discord messages on NATS and answer them.

    Each message from maki-ears carries routing ids plus the user text; the
    text runs through _process_turn and the reply is published back on
    EARS_MESSAGE_OUT with the ids echoed. A cortex timeout gets a canned
    apology instead of silence; any other error is logged and the message
    is dropped. Runs for the lifetime of the subscription.
    """
    sub = await _nc.subscribe(EARS_MESSAGE_IN)
    log.info("Subscribed", extra={"subject": EARS_MESSAGE_IN})
    async for msg in sub.messages:
        try:
            data = json.loads(msg.data.decode())
            channel_id = data.get("channel_id", "")
            message_id = data.get("message_id", "")
            content = data.get("content", "")
            username = data.get("username", "unknown")

            log.info("Discord message", extra={"username": username, "content_len": len(content)})

            turn_id, response_text = await _process_turn(content)

            response = {
                "message_id": message_id,
                "channel_id": channel_id,
                "turn_id": turn_id,
                "response": response_text,
            }
            await _nc.publish(EARS_MESSAGE_OUT, json.dumps(response).encode())
            log.info("Response published to ears", extra={"turn_id": turn_id})

        except TimeoutError:
            # `data` is always bound here: the timeout can only come from
            # _process_turn, which runs after JSON parsing succeeded.
            response = {
                "message_id": data.get("message_id", ""),
                "channel_id": data.get("channel_id", ""),
                "response": "Sorry, I took too long thinking about that. Try again?",
            }
            await _nc.publish(EARS_MESSAGE_OUT, json.dumps(response).encode())
        except Exception:
            log.exception("Error processing Discord message")
|
|
537
|
+
|
|
538
|
+
|
|
539
|
+
@app.get("/health")
def health():
    """Liveness endpoint: reachable implies healthy."""
    # Static payload; sibling services probe this via _gather_system_state.
    return {"status": "ok"}
|
|
542
|
+
|
|
543
|
+
|
|
544
|
+
@app.post("/turn")
async def turn(req: TurnRequest):
    """HTTP entry point for a single conversational turn.

    Responds 503 when NATS is down, 504 when cortex misses the deadline,
    500 on any other failure; otherwise the cortex reply wrapped in a
    TurnResponse.
    """
    # Fail fast before doing any work if the message bus is unavailable.
    if not _nc or not _nc.is_connected:
        raise HTTPException(status_code=503, detail="NATS not connected")

    try:
        turn_id, text = await _process_turn(req.message)
        return TurnResponse(turn_id=turn_id, response=text)
    except TimeoutError:
        raise HTTPException(status_code=504, detail="Cortex did not respond in time")
    except Exception:
        raise HTTPException(status_code=500, detail="Internal error during turn processing")
|
|
556
|
+
|
|
557
|
+
|
|
558
|
+
def cli():
    """Console-script entry point (`maki-stem`): serve the app on 0.0.0.0:8000."""
    # Imported lazily so importing maki_stem.main never requires uvicorn.
    import uvicorn

    uvicorn.run("maki_stem.main:app", host="0.0.0.0", port=8000)
|
|
562
|
+
|
|
563
|
+
|
|
564
|
+
if __name__ == "__main__":
    # Allow `python main.py` in addition to the installed maki-stem script.
    cli()
|