anima-engine 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- anima_engine-0.0.1/.gitignore +51 -0
- anima_engine-0.0.1/PKG-INFO +46 -0
- anima_engine-0.0.1/README.md +30 -0
- anima_engine-0.0.1/pyproject.toml +33 -0
- anima_engine-0.0.1/src/anima/__init__.py +7 -0
- anima_engine-0.0.1/src/anima/anima.py +313 -0
- anima_engine-0.0.1/src/anima/config.py +131 -0
- anima_engine-0.0.1/src/anima/extractors/__init__.py +6 -0
- anima_engine-0.0.1/src/anima/extractors/fast.py +335 -0
- anima_engine-0.0.1/src/anima/extractors/local.py +118 -0
- anima_engine-0.0.1/src/anima/transforms/__init__.py +6 -0
- anima_engine-0.0.1/src/anima/transforms/blend.py +103 -0
- anima_engine-0.0.1/src/anima/transforms/cinematic.py +232 -0
- anima_engine-0.0.1/src/anima/utils/nrc_vad_lexicon.py +83 -0
- anima_engine-0.0.1/src/anima/utils/protocols.py +42 -0
- anima_engine-0.0.1/src/anima/utils/time_source.py +55 -0
- anima_engine-0.0.1/tests/smoke.py +56 -0
- anima_engine-0.0.1/uv.lock +2077 -0
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
# ----------------------
|
|
2
|
+
# Python
|
|
3
|
+
# ----------------------
|
|
4
|
+
__pycache__/
|
|
5
|
+
*.py[cod]
|
|
6
|
+
*$py.class
|
|
7
|
+
|
|
8
|
+
# Virtual environments
|
|
9
|
+
.venv/
|
|
10
|
+
venv/
|
|
11
|
+
env/
|
|
12
|
+
ENV/
|
|
13
|
+
|
|
14
|
+
# Distribution / packaging
|
|
15
|
+
build/
|
|
16
|
+
dist/
|
|
17
|
+
*.egg-info/
|
|
18
|
+
.eggs/
|
|
19
|
+
|
|
20
|
+
# Installer logs
|
|
21
|
+
pip-log.txt
|
|
22
|
+
pip-delete-this-directory.txt
|
|
23
|
+
|
|
24
|
+
# Test / coverage
|
|
25
|
+
.coverage
|
|
26
|
+
.coverage.*
|
|
27
|
+
.pytest_cache/
|
|
28
|
+
htmlcov/
|
|
29
|
+
|
|
30
|
+
# Type checkers
|
|
31
|
+
.mypy_cache/
|
|
32
|
+
.pyright/
|
|
33
|
+
.ruff_cache/
|
|
34
|
+
|
|
35
|
+
# IDEs
|
|
36
|
+
.vscode/
|
|
37
|
+
.idea/
|
|
38
|
+
|
|
39
|
+
# OS
|
|
40
|
+
.DS_Store
|
|
41
|
+
Thumbs.db
|
|
42
|
+
|
|
43
|
+
# Local data (usually not tracked)
|
|
44
|
+
data/
|
|
45
|
+
|
|
46
|
+
# Environment variables
|
|
47
|
+
.env
|
|
48
|
+
.env.*
|
|
49
|
+
|
|
50
|
+
# uv lockfile (uncomment if you DON'T want it tracked)
|
|
51
|
+
# uv.lock
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: anima-engine
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: Real-time continuous affect engine
|
|
5
|
+
Requires-Python: >=3.12
|
|
6
|
+
Requires-Dist: datasets>=2.0.0
|
|
7
|
+
Requires-Dist: huggingface-hub>=1.4.0
|
|
8
|
+
Requires-Dist: nltk>=3.8.1
|
|
9
|
+
Requires-Dist: numpy>=2.2.0
|
|
10
|
+
Requires-Dist: sentence-transformers>=5.2.0
|
|
11
|
+
Requires-Dist: textstat>=0.7.3
|
|
12
|
+
Requires-Dist: transformers>=5.1.0
|
|
13
|
+
Provides-Extra: anchor
|
|
14
|
+
Requires-Dist: ollama>=0.1.7; extra == 'anchor'
|
|
15
|
+
Description-Content-Type: text/markdown
|
|
16
|
+
|
|
17
|
+
# Anima
|
|
18
|
+
|
|
19
|
+
```
|
|
20
|
+
anima /ˈæn.ɪ.mə/
|
|
21
|
+
|
|
22
|
+
the soul, especially the irrational part of the soul as distinguished from the rational mind.
|
|
23
|
+
```
|
|
24
|
+
Anima extracts a living emotional state from raw text, and outputs a vector that can be broadcast to any downstream consumer.
|
|
25
|
+
|
|
26
|
+
One example use is generating in-flight modifications to body language and changes in eye-color with the related [Reachy Mini Conversational App behavioral adapter](https://github.com/brainwavecollective/anima-reachy-conversation) for the [Reachy Mini](https://github.com/pollen-robotics/reachy_mini) robot. To get a better sense of how the affect extraction engine works you can check out this [writeup explaining the approach](https://github.com/brainwavecollective/affection/blob/main/EMOTION_ENGINE.md).
|
|
27
|
+
|
|
28
|
+
You can get a quick sense of how the engine converts text to an emotional vector by running:
|
|
29
|
+
`uv run tests/smoke.py`
|
|
30
|
+
|
|
31
|
+
## Installation
|
|
32
|
+
|
|
33
|
+
`pip install anima-engine@git+https://github.com/brainwavecollective/anima-engine.git`
|
|
34
|
+
|
|
35
|
+
## Acknowledgements
|
|
36
|
+
|
|
37
|
+
This project uses the NRC Valence, Arousal, and Dominance (VAD) Lexicon (v2.1) created by Saif M. Mohammad at the National Research Council Canada. Homepage: http://saifmohammad.com/WebPages/nrc-vad.html
|
|
38
|
+
|
|
39
|
+
If you use this project in academic work, please cite:
|
|
40
|
+
Mohammad, Saif M. (2025). NRC VAD Lexicon v2: Norms for Valence, Arousal, and Dominance for over 55k English Terms. arXiv:2503.23547.
|
|
41
|
+
|
|
42
|
+
## About the Author
|
|
43
|
+
|
|
44
|
+
Daniel Ritchie is an independent technologist and founder of the Brain Wave Collective.
|
|
45
|
+
[LinkedIn](https://linkedin.com/in/danielritchie123)
|
|
46
|
+
[Email](mailto:daniel@brainwavecollective.ai)
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# Anima
|
|
2
|
+
|
|
3
|
+
```
|
|
4
|
+
anima /ˈæn.ɪ.mə/
|
|
5
|
+
|
|
6
|
+
the soul, especially the irrational part of the soul as distinguished from the rational mind.
|
|
7
|
+
```
|
|
8
|
+
Anima extracts a living emotional state from raw text, and outputs a vector that can be broadcast to any downstream consumer.
|
|
9
|
+
|
|
10
|
+
One example use is generating in-flight modifications to body language and changes in eye-color with the related [Reachy Mini Conversational App behavioral adapter](https://github.com/brainwavecollective/anima-reachy-conversation) for the [Reachy Mini](https://github.com/pollen-robotics/reachy_mini) robot. To get a better sense of how the affect extraction engine works you can check out this [writeup explaining the approach](https://github.com/brainwavecollective/affection/blob/main/EMOTION_ENGINE.md).
|
|
11
|
+
|
|
12
|
+
You can get a quick sense of how the engine converts text to an emotional vector by running:
|
|
13
|
+
`uv run tests/smoke.py`
|
|
14
|
+
|
|
15
|
+
## Installation
|
|
16
|
+
|
|
17
|
+
`pip install anima-engine@git+https://github.com/brainwavecollective/anima-engine.git`
|
|
18
|
+
|
|
19
|
+
## Acknowledgements
|
|
20
|
+
|
|
21
|
+
This project uses the NRC Valence, Arousal, and Dominance (VAD) Lexicon (v2.1) created by Saif M. Mohammad at the National Research Council Canada. Homepage: http://saifmohammad.com/WebPages/nrc-vad.html
|
|
22
|
+
|
|
23
|
+
If you use this project in academic work, please cite:
|
|
24
|
+
Mohammad, Saif M. (2025). NRC VAD Lexicon v2: Norms for Valence, Arousal, and Dominance for over 55k English Terms. arXiv:2503.23547.
|
|
25
|
+
|
|
26
|
+
## About the Author
|
|
27
|
+
|
|
28
|
+
Daniel Ritchie is an independent technologist and founder of the Brain Wave Collective.
|
|
29
|
+
[LinkedIn](https://linkedin.com/in/danielritchie123)
|
|
30
|
+
[Email](mailto:daniel@brainwavecollective.ai)
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "anima-engine"
|
|
3
|
+
version = "0.0.1"
|
|
4
|
+
description = "Real-time continuous affect engine"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
requires-python = ">=3.12"
|
|
7
|
+
|
|
8
|
+
dependencies = [
|
|
9
|
+
"numpy>=2.2.0",
|
|
10
|
+
"sentence-transformers>=5.2.0",
|
|
11
|
+
"transformers>=5.1.0",
|
|
12
|
+
"huggingface-hub>=1.4.0",
|
|
13
|
+
"textstat>=0.7.3",
|
|
14
|
+
"nltk>=3.8.1",
|
|
15
|
+
"datasets>=2.0.0"
|
|
16
|
+
]
|
|
17
|
+
|
|
18
|
+
[project.optional-dependencies]
|
|
19
|
+
anchor = [
|
|
20
|
+
"ollama>=0.1.7"
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
[build-system]
|
|
24
|
+
requires = ["hatchling"]
|
|
25
|
+
build-backend = "hatchling.build"
|
|
26
|
+
|
|
27
|
+
[tool.hatch.build.targets.wheel]
|
|
28
|
+
packages = ["src/anima"]
|
|
29
|
+
|
|
30
|
+
[dependency-groups]
|
|
31
|
+
dev = [
|
|
32
|
+
"build>=1.4.0",
|
|
33
|
+
]
|
|
@@ -0,0 +1,313 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import inspect
|
|
3
|
+
import logging
|
|
4
|
+
import time
|
|
5
|
+
from collections import deque
|
|
6
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
7
|
+
from typing import Callable, List, Optional
|
|
8
|
+
|
|
9
|
+
import nltk
|
|
10
|
+
import numpy as np
|
|
11
|
+
|
|
12
|
+
from .transforms.cinematic import CinematicAmplifier
|
|
13
|
+
from .extractors.local import OLLAMA_AVAILABLE, LocalExtractor
|
|
14
|
+
from .transforms.blend import Blend
|
|
15
|
+
from .config import Config
|
|
16
|
+
from .extractors.fast import FastExtractor
|
|
17
|
+
from .utils.time_source import TimeSource
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class Anima:
    """
    Real-time affect generation engine.

    Produces continuous VADCC values from text input.
    Owns its own loop and broadcasts state at fixed tick rate.

    Usage: construct with a validated ``Config``, ``await engine.start()``,
    feed text via ``process_text``, and receive per-tick state through
    ``subscribe`` callbacks. ``await engine.stop()`` tears everything down.
    """

    def __init__(self, config: Config):
        self.config = config
        self.config.validate()

        self.logger = logging.getLogger("anima")
        self.logger.setLevel(logging.DEBUG if self.config.debug else logging.INFO)

        # Components (loaded in start())
        self.extractor: Optional[FastExtractor] = None
        self.amplifier: Optional[CinematicAmplifier] = None
        self.anchor: Optional[LocalExtractor] = None
        self.blend: Optional[Blend] = None

        # Create time_source
        self.clock = TimeSource()

        # Runtime
        self._running = False
        self._loop_task: Optional[asyncio.Task] = None
        self._anchor_executor: Optional[ThreadPoolExecutor] = None

        self._subscribers: List[Callable[[List[float]], None]] = []
        # Strong references to fire-and-forget tasks spawned for async
        # subscribers. Without these, the event loop holds only a weak
        # reference and a task may be garbage-collected before it finishes
        # (see the note on asyncio.create_task in the stdlib docs).
        self._subscriber_tasks: set = set()

        self._context_buffer = deque(maxlen=self.config.context_buffer_words)
        self._transcript_count = 0

        # --- Baseline coordination (race-free) ---
        # Event marks "new text arrived since the last baseline"; the lock
        # serializes baseline workers; the task handle coalesces requests.
        self._baseline_event = asyncio.Event()
        self._baseline_task: Optional[asyncio.Task] = None
        self._baseline_lock = asyncio.Lock()

    # ---------------------------------------------------------
    # Lifecycle
    # ---------------------------------------------------------

    async def start(self) -> None:
        """
        Load models, warm the extraction pipeline, optionally start the
        anchor, and launch the broadcast tick loop.
        """
        self.logger.info("Starting Anima...")

        try:
            nltk.data.find("tokenizers/punkt")
        except LookupError:
            # NOTE(review): newer NLTK releases tokenize with "punkt_tab";
            # if sent_tokenize raises LookupError at runtime, that resource
            # may need downloading as well — confirm against the pinned
            # nltk version.
            nltk.download("punkt", quiet=True)

        self.extractor = FastExtractor(
            lexicon_path=self.config.nrc_lexicon_path,
            model_name=self.config.sentence_model_name,
            debug=self.config.debug,
        )
        self.amplifier = CinematicAmplifier()
        self.blend = Blend(self.config, self.clock)

        # ---- FULL PIPELINE WARMUP (direct, no executor) ----
        # First call pays model-load / JIT costs so later calls are fast.
        self._extract_sentence_states_sync(
            "Warmup sentence to initialize extractor and embedder."
        )

        # Anchor (optional)
        if self.config.enable_anchor:
            if not OLLAMA_AVAILABLE:
                # Fixed: the package is published as "anima-engine",
                # so the install hint must name that distribution.
                raise RuntimeError(
                    "Anchor enabled but ollama is not installed. "
                    "Install anima-engine[anchor]"
                )
            self.logger.info("Anchor enabled")

            self.anchor = LocalExtractor(model=self.config.anchor_model_name)
            # Single worker: baseline extraction is serialized by design.
            self._anchor_executor = ThreadPoolExecutor(
                max_workers=1,
                thread_name_prefix="affect-anchor",
            )
        else:
            self.logger.info("Anchor disabled")
            self.anchor = None
            self._anchor_executor = None

        if self.config.warm_baseline_on_start:
            await self._warm_baseline()

        self._running = True
        self._loop_task = asyncio.create_task(self._loop())

        self.logger.info("Anima started")

    async def stop(self) -> None:
        """Cancel the tick loop and any in-flight baseline work."""
        self.logger.info("Stopping Anima...")

        self._running = False

        if self._loop_task:
            self._loop_task.cancel()
            try:
                await self._loop_task
            except asyncio.CancelledError:
                pass

        if self._baseline_task and not self._baseline_task.done():
            self._baseline_task.cancel()
            try:
                await self._baseline_task
            except asyncio.CancelledError:
                pass

        if self._anchor_executor:
            # wait=False: don't block shutdown on a slow LLM call.
            self._anchor_executor.shutdown(wait=False)

        self.logger.info("Anima stopped")

    def subscribe(self, callback: Callable[[List[float]], None]) -> None:
        """Register a callback invoked with the current vibe on every tick."""
        self._subscribers.append(callback)

    # ---------------------------------------------------------
    # Public API
    # ---------------------------------------------------------

    async def process_text(
        self,
        text: str,
        influence: Optional[float] = None,
    ) -> dict:
        """
        Process text and apply a single burst driven by the most
        emotionally dominant sentence.

        Args:
            text: Raw input text; empty/whitespace input is a no-op.
            influence: Optional burst strength override; None lets the
                blend choose its default.

        Returns:
            dict with the current "anima" vector and, when sentences were
            processed, "sentences_processed" and "transcript_count".

        Raises:
            RuntimeError: if called before ``start()``.
        """

        if not self.blend or not self.extractor or not self.amplifier:
            raise RuntimeError("Anima not started. Call `await engine.start()` first.")

        t0 = time.perf_counter()

        text = (text or "").strip()
        if not text:
            return {"anima": self.blend.current.tolist()}

        self._transcript_count += 1
        self._context_buffer.extend(text.split())

        sentence_states, sentences = self._extract_sentence_states_sync(text)

        if not sentence_states:
            return {
                "anima": self.blend.current.tolist(),
                "sentences_processed": 0,
                "transcript_count": self._transcript_count,
            }

        def _dominance_score(vibe: np.ndarray) -> float:
            # Distance from the neutral midpoint (0.5), with valence
            # double-weighted and low valence (negative emotion) boosted
            # further so distressing sentences win the burst.
            v, a, d = float(vibe[0]), float(vibe[1]), float(vibe[2])
            magnitude = abs(v - 0.5) * 2.0 + abs(a - 0.5) + abs(d - 0.5)
            negative_bias = max(0.0, 0.5 - v) * 3.5
            return magnitude + negative_bias

        # Burst = mostly the dominant sentence, softened by the mean.
        dominant = max(sentence_states, key=_dominance_score)
        mean_vibe = np.mean(sentence_states, axis=0)
        final_burst = np.clip(0.85 * dominant + 0.15 * mean_vibe, 0.0, 1.0)

        self.blend.apply_burst(final_burst, influence=influence)

        if self.anchor:
            # Signal the loop that fresh context is available for baselining.
            self._baseline_event.set()

        if self.logger.isEnabledFor(logging.DEBUG):
            self.logger.debug("ENGINE total dt=%.3fs", time.perf_counter() - t0)

        return {
            "anima": self.blend.current.tolist(),
            "sentences_processed": len(sentences),
            "transcript_count": self._transcript_count,
        }

    # ---------------------------------------------------------
    # Sync extraction (runs directly now)
    # ---------------------------------------------------------

    def _extract_sentence_states_sync(self, text: str):
        """
        Tokenize ``text`` into sentences and run each through the
        extractor → passion amplifier → drama snap pipeline.

        Returns:
            (sentence_states, sentences): per-sentence VADCC arrays and
            the tokenized sentence list.
        """
        sentences = nltk.sent_tokenize(text)
        sentence_states: List[np.ndarray] = []

        for sentence in sentences:
            natural = np.array(self.extractor.extract(sentence))
            post_passion = self.amplifier.amplify_passion(natural, self.config.passion)
            post_drama = self.amplifier.snap_drama(post_passion, self.config.drama)

            sentence_states.append(post_drama)

            if self.config.debug:
                self._log_telemetry(sentence, natural, post_passion, post_drama)

        return sentence_states, sentences

    # ---------------------------------------------------------
    # Internal loop
    # ---------------------------------------------------------

    async def _loop(self) -> None:
        """Fixed-rate tick loop: advance the blend and fan out to subscribers."""
        interval = 1.0 / self.config.tick_rate_hz

        while self._running:
            self.clock.tick()
            vibe = self.blend.tick()

            for sub in self._subscribers:
                try:
                    result = sub(vibe)
                    if inspect.isawaitable(result):
                        # Keep a strong reference so the task isn't
                        # garbage-collected before completion; drop it
                        # automatically when done.
                        task = asyncio.create_task(result)
                        self._subscriber_tasks.add(task)
                        task.add_done_callback(self._subscriber_tasks.discard)
                except Exception:
                    # One misbehaving subscriber must not kill the loop.
                    self.logger.exception("Subscriber error")

            # Only re-baseline when there is new text AND the blend has
            # settled back near baseline (avoids fighting an active burst).
            if self.anchor and self._baseline_event.is_set() and self._has_settled():
                self._ensure_baseline_task()

            await asyncio.sleep(interval)

    # ---------------------------------------------------------
    # Baseline (race-free, coalesced)
    # ---------------------------------------------------------

    def _ensure_baseline_task(self) -> None:
        """Start a baseline worker unless one is already running (coalesce)."""
        if self._baseline_task and not self._baseline_task.done():
            return
        self._baseline_task = asyncio.create_task(self._baseline_worker())

    async def _baseline_worker(self) -> None:
        """
        Extract a new emotional baseline from recent context via the anchor
        LLM (off-loop, in the single-thread executor) and apply it.
        """
        async with self._baseline_lock:
            # Another worker may have consumed the event while we waited.
            if not self._baseline_event.is_set():
                return

            self._baseline_event.clear()

            context_text = " ".join(
                list(self._context_buffer)[-self.config.baseline_window_words :]
            )
            if not context_text.strip():
                return

            if not self.anchor or not self._anchor_executor:
                return

            loop = asyncio.get_running_loop()
            baseline = await loop.run_in_executor(
                self._anchor_executor,
                self.anchor.extract_baseline,
                context_text,
            )

            if baseline:
                self.blend.apply_baseline(baseline)
                self.logger.info("New baseline applied: %s", baseline)

    async def _warm_baseline(self) -> None:
        """Synchronously compute an initial baseline during start()."""
        if not self.anchor:
            return

        self._baseline_event.set()
        self._ensure_baseline_task()

        if self._baseline_task:
            await self._baseline_task

    def _has_settled(self, threshold: float = 0.02) -> bool:
        """True when the current vibe is within ``threshold`` of baseline."""
        diff = np.abs(self.blend.current - self.blend.baseline)
        # bool() so the annotation holds (np.all returns np.bool_).
        return bool(np.all(diff < threshold))

    # ---------------------------------------------------------
    # Debug telemetry
    # ---------------------------------------------------------

    def _log_telemetry(
        self,
        sentence: str,
        natural: np.ndarray,
        post_passion: np.ndarray,
        post_drama: np.ndarray,
    ) -> None:
        """Log the per-sentence pipeline stages at DEBUG level."""
        if not self.logger.isEnabledFor(logging.DEBUG):
            return

        self.logger.debug("--------------------------------------------------")
        self.logger.debug("TEXT: %s", sentence)
        self.logger.debug("NATURAL: %s", natural.tolist())
        self.logger.debug("PASSION: %s", post_passion.tolist())
        self.logger.debug("DRAMA: %s", post_drama.tolist())
        self.logger.debug("BASELINE: %s", self.blend.baseline.tolist())
        self.logger.debug("--------------------------------------------------")
|
|
313
|
+
|
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import logging.config
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from .utils.nrc_vad_lexicon import ensure_nrc_lexicon
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
# ---------------------------------------------------------------------------
|
|
9
|
+
# Third-party loggers: name → the minimum verbosity level required to unlock.
|
|
10
|
+
# Ordered from least to most noisy — verbosity is cumulative (>= threshold).
|
|
11
|
+
# ---------------------------------------------------------------------------
|
|
12
|
+
_THIRD_PARTY_VERBOSITY: dict[str, int] = {
|
|
13
|
+
"httpx": 1, # -v: clean "HTTP Request: ..." summary lines
|
|
14
|
+
"fsspec": 2, # -vv: local file open/read events
|
|
15
|
+
"filelock": 2, # -vv: lock acquire/release events
|
|
16
|
+
"httpcore": 3, # -vvv: full HTTP lifecycle (very noisy)
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _apply_logging(debug: bool, verbosity: int) -> None:
|
|
21
|
+
"""
|
|
22
|
+
Configure logging for Anima.
|
|
23
|
+
|
|
24
|
+
debug=True → your code logs at DEBUG; third-party stays quiet by default.
|
|
25
|
+
verbosity → independently unlocks noisy third-party loggers:
|
|
26
|
+
0 all third-party quiet (default)
|
|
27
|
+
1 httpx INFO summaries
|
|
28
|
+
2 + fsspec, filelock DEBUG
|
|
29
|
+
3 + httpcore full HTTP lifecycle
|
|
30
|
+
"""
|
|
31
|
+
logging.config.dictConfig({
|
|
32
|
+
"version": 1,
|
|
33
|
+
"disable_existing_loggers": False,
|
|
34
|
+
"formatters": {
|
|
35
|
+
"standard": {
|
|
36
|
+
"format": "%(asctime)s %(levelname)-8s %(name)s | %(message)s",
|
|
37
|
+
"datefmt": "%Y-%m-%d %H:%M:%S",
|
|
38
|
+
},
|
|
39
|
+
},
|
|
40
|
+
"handlers": {
|
|
41
|
+
"console": {
|
|
42
|
+
"class": "logging.StreamHandler",
|
|
43
|
+
"formatter": "standard",
|
|
44
|
+
},
|
|
45
|
+
},
|
|
46
|
+
"root": {
|
|
47
|
+
"handlers": ["console"],
|
|
48
|
+
"level": logging.DEBUG if debug else logging.INFO,
|
|
49
|
+
},
|
|
50
|
+
})
|
|
51
|
+
|
|
52
|
+
# Third-party loggers stay quiet regardless of debug,
|
|
53
|
+
# unlocking only when verbosity meets their threshold.
|
|
54
|
+
for name, unlocks_at in _THIRD_PARTY_VERBOSITY.items():
|
|
55
|
+
level = logging.DEBUG if verbosity >= unlocks_at else logging.WARNING
|
|
56
|
+
logging.getLogger(name).setLevel(level)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
@dataclass
class Config:
    """
    Configuration for Anima.

    Every tuning knob lives on this dataclass. Adjust fields before
    calling ``engine.start()``; ``validate()`` enforces the legal ranges.
    """
    # ---------------------------------------------------------
    # Core Emotional Tuning
    # ---------------------------------------------------------
    passion: float = 2.25            # amplification strength, legal range [0, 5]
    drama: float = 0.65              # snap intensity, legal range [0, 1]
    default_influence: float = 0.22  # default burst weight
    baseline_influence: float = 0.70 # weight of anchor baselines
    dwell_seconds: float = 1.2       # time spent at burst peak
    hold_seconds: float = 0.8        # plateau before decay
    decay_rate: float = 0.06         # per-tick decay toward baseline
    tick_rate_hz: float = 10.0       # broadcast frequency (must be > 0)
    # ---------------------------------------------------------
    # Bounds
    # ---------------------------------------------------------
    min_vadcc: float = 0.0           # lower clamp, must stay < max_vadcc
    max_vadcc: float = 1.0           # upper clamp
    # ---------------------------------------------------------
    # NLP Models
    # ---------------------------------------------------------
    nrc_lexicon_path: str = "data/NRC-VAD-Lexicon-v2.1/NRC-VAD-Lexicon-v2.1.txt"
    sentence_model_name: str = "all-MiniLM-L6-v2"
    # ---------------------------------------------------------
    # Anchor (Slow Baseline)
    # ---------------------------------------------------------
    enable_anchor: bool = False
    anchor_model_name: str = "nemotron-mini:4b-instruct-q5_K_M"
    warm_baseline_on_start: bool = True
    baseline_window_words: int = 100
    # ---------------------------------------------------------
    # Context Tracking
    # ---------------------------------------------------------
    context_buffer_words: int = 200
    # ---------------------------------------------------------
    # Logging
    # ---------------------------------------------------------
    debug: bool = False
    log_verbosity: int = 0
    # log_verbosity gates third-party logger noise independently of debug;
    # third-party loggers stay quiet even when debug=True unless raised:
    #   0  quiet across the board (default)
    #   1  httpx request summaries (-v)
    #   2  + fsspec, filelock (-vv)
    #   3  + httpcore full lifecycle (-vvv, very noisy)
    # ---------------------------------------------------------
    # Validation
    # ---------------------------------------------------------
    def validate(self) -> None:
        """Raise ValueError/FileNotFoundError if any setting is out of range.

        Checks run in a fixed order; the first failing one raises. The
        lexicon is fetched (if needed) and its presence verified last.
        """
        # (ok-condition, message) pairs, evaluated in declaration order.
        range_checks = (
            (0.0 <= self.passion <= 5.0, "passion must be between 0 and 5"),
            (0.0 <= self.drama <= 1.0, "drama must be between 0 and 1"),
            (not (self.tick_rate_hz <= 0), "tick_rate_hz must be positive"),
            (not (self.min_vadcc >= self.max_vadcc), "min_vadcc must be < max_vadcc"),
            (0 <= self.log_verbosity <= 3, "log_verbosity must be between 0 and 3"),
        )
        for ok, message in range_checks:
            if not ok:
                raise ValueError(message)

        # Fetch the lexicon if missing, then confirm it actually landed.
        ensure_nrc_lexicon(self.nrc_lexicon_path)
        if not Path(self.nrc_lexicon_path).exists():
            raise FileNotFoundError(
                f"NRC lexicon not found at {self.nrc_lexicon_path}"
            )

    def apply_logging(self) -> None:
        """Configure logging based on this config. Call once at startup."""
        _apply_logging(self.debug, self.log_verbosity)
|