agethos 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. agethos-0.1.0/.env.example +24 -0
  2. agethos-0.1.0/.github/workflows/publish.yml +29 -0
  3. agethos-0.1.0/.gitignore +10 -0
  4. agethos-0.1.0/PKG-INFO +561 -0
  5. agethos-0.1.0/README.md +538 -0
  6. agethos-0.1.0/agethos/__init__.py +33 -0
  7. agethos-0.1.0/agethos/brain.py +327 -0
  8. agethos-0.1.0/agethos/cognition/__init__.py +6 -0
  9. agethos-0.1.0/agethos/cognition/perceive.py +53 -0
  10. agethos-0.1.0/agethos/cognition/plan.py +171 -0
  11. agethos-0.1.0/agethos/cognition/reflect.py +142 -0
  12. agethos-0.1.0/agethos/cognition/retrieve.py +42 -0
  13. agethos-0.1.0/agethos/embedding/__init__.py +3 -0
  14. agethos-0.1.0/agethos/embedding/base.py +20 -0
  15. agethos-0.1.0/agethos/embedding/openai.py +35 -0
  16. agethos-0.1.0/agethos/llm/__init__.py +3 -0
  17. agethos-0.1.0/agethos/llm/anthropic.py +50 -0
  18. agethos-0.1.0/agethos/llm/base.py +62 -0
  19. agethos-0.1.0/agethos/llm/openai.py +82 -0
  20. agethos-0.1.0/agethos/memory/__init__.py +4 -0
  21. agethos-0.1.0/agethos/memory/retrieval.py +103 -0
  22. agethos-0.1.0/agethos/memory/store.py +46 -0
  23. agethos-0.1.0/agethos/memory/stream.py +91 -0
  24. agethos-0.1.0/agethos/models.py +675 -0
  25. agethos-0.1.0/agethos/persona/__init__.py +4 -0
  26. agethos-0.1.0/agethos/persona/renderer.py +109 -0
  27. agethos-0.1.0/agethos/persona/spec.py +5 -0
  28. agethos-0.1.0/agethos/storage/__init__.py +3 -0
  29. agethos-0.1.0/agethos/storage/memory_store.py +40 -0
  30. agethos-0.1.0/examples/demo_persona.py +163 -0
  31. agethos-0.1.0/examples/personas/minsoo.yaml +30 -0
  32. agethos-0.1.0/examples/personas/yuna.yaml +26 -0
  33. agethos-0.1.0/pyproject.toml +25 -0
  34. agethos-0.1.0/tests/test_persona.py +503 -0
@@ -0,0 +1,24 @@
1
+ # ============================================
2
+ # Agethos — Environment Variables
3
+ # ============================================
4
+ # Copy this file to .env and fill in your keys:
5
+ # cp .env.example .env
6
+ #
7
+ # The library reads these automatically via the OpenAI/Anthropic SDKs.
8
+
9
+ # ── OpenAI ──
10
+ OPENAI_API_KEY=sk-your-key-here
11
+
12
+ # ── Anthropic ──
13
+ ANTHROPIC_API_KEY=sk-ant-your-key-here
14
+
15
+ # ── OpenAI-compatible providers (Qwen, Ollama, vLLM, Together, etc.) ──
16
+ # Set these when using Brain.build(llm="openai", base_url="...") or
17
+ # pass base_url directly to OpenAIAdapter.
18
+ #
19
+ # Examples:
20
+ # Ollama (local): http://localhost:11434/v1
21
+ # vLLM: http://localhost:8000/v1
22
+ # Qwen (DashScope): https://dashscope.aliyuncs.com/compatible-mode/v1
23
+ # Together AI: https://api.together.xyz/v1
24
+ # LM Studio: http://localhost:1234/v1
@@ -0,0 +1,29 @@
1
+ name: Publish to PyPI
2
+
3
+ on:
4
+ release:
5
+ types: [published]
6
+ workflow_dispatch:
7
+
8
+ permissions:
9
+ contents: read   # needed by actions/checkout once a permissions block is declared
+ id-token: write  # required for PyPI Trusted Publishing (OIDC)
10
+
11
+ jobs:
12
+ publish:
13
+ runs-on: ubuntu-latest
14
+ environment: pypi
15
+ steps:
16
+ - uses: actions/checkout@v4
17
+
18
+ - uses: actions/setup-python@v5
19
+ with:
20
+ python-version: "3.11"
21
+
22
+ - name: Install build tools
23
+ run: pip install build
24
+
25
+ - name: Build package
26
+ run: python -m build
27
+
28
+ - name: Publish to PyPI
29
+ uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1,10 @@
1
+ .claude/
2
+ .venv/
3
+ .env
4
+ blueprint.md
5
+ __pycache__/
6
+ *.pyc
7
+ *.egg-info/
8
+ dist/
9
+ build/
10
+ .eggs/
agethos-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,561 @@
1
+ Metadata-Version: 2.4
2
+ Name: agethos
3
+ Version: 0.1.0
4
+ Summary: A brain for AI agents — persona, memory, reflection, and planning
5
+ License-Expression: MIT
6
+ Requires-Python: >=3.11
7
+ Requires-Dist: pydantic>=2.0
8
+ Provides-Extra: all
9
+ Requires-Dist: anthropic>=0.30; extra == 'all'
10
+ Requires-Dist: openai>=1.0; extra == 'all'
11
+ Requires-Dist: pyyaml>=6.0; extra == 'all'
12
+ Provides-Extra: anthropic
13
+ Requires-Dist: anthropic>=0.30; extra == 'anthropic'
14
+ Provides-Extra: dev
15
+ Requires-Dist: pytest-asyncio>=0.23; extra == 'dev'
16
+ Requires-Dist: pytest>=8.0; extra == 'dev'
17
+ Requires-Dist: pyyaml>=6.0; extra == 'dev'
18
+ Provides-Extra: openai
19
+ Requires-Dist: openai>=1.0; extra == 'openai'
20
+ Provides-Extra: yaml
21
+ Requires-Dist: pyyaml>=6.0; extra == 'yaml'
22
+ Description-Content-Type: text/markdown
23
+
24
+ <h1 align="center">Agethos</h1>
25
+
26
+ <p align="center">A brain for AI agents — persona, memory, reflection, and planning in one library.</p>
27
+
28
+ <p align="center">Give any LLM agent a persistent identity with psychological grounding, long-term memory with retrieval scoring, dynamic emotional state, self-reflection, and daily planning.</p>
29
+
30
+ <p align="center">
31
+ <a href="https://www.python.org/"><img src="https://img.shields.io/badge/python-3.11+-blue.svg" alt="Python"></a>
32
+ <a href="LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue.svg" alt="License"></a>
33
+ </p>
34
+
35
+ ---
36
+
37
+ ## Why
38
+
39
+ LLM agents have **no identity.** Every conversation starts from zero — no personality continuity, no memory of past interactions, no emotional consistency.
40
+
41
+ System prompts give a shallow persona, but agents need more than a static instruction block — they need a **cognitive architecture**:
42
+
43
+ - "How should my personality shape my response to this event?"
44
+ - "What happened last time, and how should that change my behavior now?"
45
+ - "How does this event make me feel, and how does that affect my tone?"
46
+
47
+ Agethos borrows the answer from **cognitive science, personality psychology, and generative agent research.**
48
+
49
+ ---
50
+
51
+ ## Differentiators
52
+
53
+ | | Agethos | Generative Agents | CrewAI | Character Cards |
54
+ |---|---|---|---|---|
55
+ | **Personality model** | OCEAN (Big Five) numerical | ISS text only | role/goal/backstory | text traits |
56
+ | **Emotional state** | PAD 3-axis, OCEAN-coupled | None | None | None |
57
+ | **Memory + retrieval** | recency × importance × relevance | Same approach | None | None |
58
+ | **Reflection** | Importance threshold → focal points → insights | Same approach | None | None |
59
+ | **Persona evolution** | L2 dynamic + emotion drift | L2 daily update | Static | Static |
60
+ | **Character card formats** | W++, SBF, Tavern Card V2 | None | None | Native |
61
+ | **LLM-agnostic** | OpenAI, Anthropic, custom | OpenAI only | Various | N/A |
62
+
63
+ ---
64
+
65
+ ## Design Philosophy — Four Pillars
66
+
67
+ ### 1. Psychological Grounding — OCEAN + PAD
68
+
69
+ Personality isn't just adjectives. Agethos uses the **Big Five (OCEAN)** model with numerical trait scores:
70
+
71
+ OceanTraits(
72
+ openness=0.8, # Creative, curious → metaphorical language
73
+ conscientiousness=0.7, # Organized → structured responses
74
+ extraversion=0.3, # Reserved → concise, thoughtful
75
+ agreeableness=0.9, # Cooperative → empathetic, conflict-avoidant
76
+ neuroticism=0.2, # Stable → calm under pressure
77
+ )
78
+
79
+ OCEAN traits automatically derive a **PAD emotional baseline** via Mehrabian (1996):
80
+
81
+ P = 0.21·E + 0.59·A - 0.19·N → Pleasure baseline
82
+ A = 0.15·O + 0.30·N - 0.57·A → Arousal baseline
83
+ D = 0.25·O + 0.17·C + 0.60·E - 0.32·A → Dominance baseline
+ (Right-hand O/C/E/A/N are the OCEAN trait scores — note "A" there means Agreeableness; left-hand P/A/D are the PAD emotion axes.)
84
+
85
+ ### 2. Dynamic Emotion — Stimulus → Transition → Decay
86
+
87
+ Events shift the agent's emotional state. High Neuroticism = higher sensitivity:
88
+
89
+ Event: "user criticized my work"
90
+ → stimulus PAD: (-0.5, +0.4, -0.3)
91
+ → sensitivity: 0.15 + 0.35 × N (auto from personality)
92
+ → E(t+1) = E(t) + α·(stimulus - E(t)) + β·baseline
93
+ → closest_emotion() → "sadness" or "anger"
94
+
95
+ Over time, emotion decays back to personality baseline:
96
+ E(t) = baseline + (current - baseline) · (1 - rate)
97
+
98
+ ### 3. Layered Persona — Identity that evolves
99
+
100
+ Three identity layers from Generative Agents + six persona facets from system prompt analysis:
101
+
102
+ L0 (Innate) ← Core traits, personality, role. Never changes.
103
+ L1 (Learned) ← Skills, relationships, knowledge. Grows over time.
104
+ L2 (Situation) ← Current task, mood, location. Changes frequently.
105
+
106
+ + 6 Facets: identity, tone, values, boundaries, conversation_style, transparency
107
+ + Behavioral Rules: "When X happens, do Y" (more effective than adjectives)
108
+
109
+ ### 4. Memory Stream — Remember what matters
110
+
111
+ Retrieval scoring from the Generative Agents paper:
112
+
113
+ Score = w_r × recency + w_i × importance + w_v × relevance
114
+
115
+ recency: 0.995^(hours_since_access)
116
+ importance: LLM-judged 1-10 per observation
117
+ relevance: cosine similarity (query embedding ↔ memory embedding)
118
+
119
+ Reflection triggers when importance accumulates > 150:
120
+ → 3 focal points → retrieve related memories → synthesize insights → store as depth=2+ nodes
121
+
122
+ ---
123
+
124
+ ## Demo Results
125
+
126
+ Two agents with identical questions, different OCEAN profiles — tested with `gpt-4o-mini`:
127
+
128
+ | | **Minsoo** (Introvert Engineer) | **Yuna** (Extrovert Designer) |
129
+ |---|---|---|
130
+ | **OCEAN** | O=0.8 C=0.9 **E=0.2** A=0.6 **N=0.3** | O=0.9 C=0.4 **E=0.9** A=0.8 **N=0.6** |
131
+ | **Baseline emotion** | calm (P=+0.34) | pride (P=+0.55) |
132
+ | **Response style** | Numbered lists, structured, no emojis, short | Emojis, metaphors, exclamation marks, follow-up questions |
133
+ | **"AI replacing jobs?"** | "A balanced approach is essential to leverage AI's capabilities while ensuring job security..." | "It's like standing at a crossroads! On one hand AI can streamline tasks... What are your thoughts? 🚀✨" |
134
+ | **After criticism event** | calm → calm (P=+0.34→+0.13, small shift) | pride → pride (P=+0.55→+0.19, larger shift) |
135
+ | **Emotion decay (10 steps)** | P=+0.13 → +0.32 (recovers toward baseline) | P=+0.19 → +0.51 (recovers toward baseline) |
136
+
137
+ > **Key takeaway**: Same LLM, same question — personality shapes tone, structure, emotional reactivity, and recovery. High Neuroticism (N) amplifies emotional response to negative events.
138
+
139
+ ### Try it yourself
140
+
141
+ # Compare two agents side-by-side
142
+ python examples/demo_persona.py compare
143
+
144
+ # Interactive chat with a specific agent
145
+ python examples/demo_persona.py chat minsoo
146
+ python examples/demo_persona.py chat yuna
147
+
148
+ # In interactive mode:
149
+ # :emo -0.5 0.4 -0.3 → apply emotional event
150
+ # :decay → decay emotion toward baseline
151
+ # :q → quit
152
+
153
+ ---
154
+
155
+ ## Install
156
+
157
+ pip install agethos # Core (pydantic only)
158
+ pip install agethos[openai] # + OpenAI LLM & embeddings
159
+ pip install agethos[anthropic] # + Anthropic Claude
160
+ pip install agethos[all] # Everything
161
+
162
+ ## Quick Start
163
+
164
+ ### 1. One-liner with `Brain.build()`
165
+
166
+ ```python
167
+ from agethos import Brain
168
+
169
+ brain = Brain.build(
170
+ persona={
171
+ "name": "Minsoo",
172
+ "ocean": {"O": 0.8, "C": 0.9, "E": 0.2, "A": 0.6, "N": 0.3},
173
+ "innate": {"age": "28", "occupation": "Backend Engineer"},
174
+ "tone": "Concise and analytical",
175
+ "rules": ["Prefer data over opinions", "Keep responses structured"],
176
+ },
177
+ llm="openai", # or "anthropic"
178
+ )
179
+ reply = await brain.chat("How's the recommendation system going?")
180
+ ```
181
+
182
+ ### 2. From YAML file
183
+
184
+ ```yaml
185
+ # personas/minsoo.yaml
186
+ name: Minsoo
187
+ ocean: { O: 0.8, C: 0.9, E: 0.2, A: 0.6, N: 0.3 }
188
+ innate:
189
+ age: "28"
190
+ occupation: Backend Engineer
191
+ tone: Concise and analytical
192
+ rules:
193
+ - Prefer data over opinions
194
+ - Keep responses structured
195
+ ```
196
+
197
+ ```python
198
+ brain = Brain.build(persona="personas/minsoo.yaml", llm="openai")
199
+ ```
200
+
201
+ ### 3. Full control (traditional style)
202
+
203
+ ```python
204
+ from agethos import Brain, PersonaSpec, PersonaLayer, OceanTraits
205
+ from agethos.llm.openai import OpenAIAdapter
206
+
207
+ persona = PersonaSpec(
208
+ name="Minsoo",
209
+ ocean=OceanTraits(
210
+ openness=0.8,
211
+ conscientiousness=0.7,
212
+ extraversion=0.3,
213
+ agreeableness=0.9,
214
+ neuroticism=0.2,
215
+ ),
216
+ l0_innate=PersonaLayer(traits={
217
+ "age": "28",
218
+ "occupation": "Software Engineer",
219
+ }),
220
+ tone="Precise but warm, uses technical terms naturally",
221
+ values=["Code quality", "Knowledge sharing"],
222
+ behavioral_rules=[
223
+ "Include code examples for technical questions",
224
+ "Honestly say 'I don't know' when uncertain",
225
+ ],
226
+ )
227
+
228
+ brain = Brain(persona=persona, llm=OpenAIAdapter(), max_history=20)
229
+ reply = await brain.chat("How's the recommendation system going?")
230
+ # Multi-turn: brain remembers conversation history automatically
231
+ reply2 = await brain.chat("Can you elaborate on the caching part?")
232
+ ```
233
+
234
+ ### 4. Emotional Events
235
+
236
+ ```python
237
+ # Apply an event that triggers emotion
238
+ brain.apply_event_emotion((-0.5, 0.4, -0.3)) # criticism → sadness/anger
239
+ print(brain.emotion.closest_emotion()) # "sadness"
240
+
241
+ # Emotion decays back to OCEAN baseline over time
242
+ brain.decay_emotion(rate=0.1)
243
+ ```
244
+
245
+ ### 5. Random Persona Generation
246
+
247
+ ```python
248
+ from agethos import PersonaSpec, OceanTraits
249
+
250
+ # Fully random persona
251
+ spec = PersonaSpec.random()
252
+
253
+ # Pin what you want, randomize the rest
254
+ spec = PersonaSpec.random(name="Minsoo", ocean={"E": 0.2, "N": 0.8})
255
+
256
+ # Random OCEAN only
257
+ ocean = OceanTraits.random()
258
+ ocean = OceanTraits.random(E=0.2) # pin extraversion, randomize rest
259
+
260
+ # Random persona → Brain in one line
261
+ brain = Brain.build(persona=PersonaSpec.random(), llm="openai")
262
+ ```
263
+
264
+ ### 6. Character Card Import (W++ / SBF / Tavern Card)
265
+
266
+ ```python
267
+ from agethos import CharacterCard
268
+
269
+ card = CharacterCard.from_wpp('''
270
+ [character("Luna")
271
+ {
272
+ Personality("analytical" + "curious" + "dry humor")
273
+ Age("25")
274
+ Occupation("AI Researcher")
275
+ }]
276
+ ''')
277
+ brain = Brain.build(persona=card.to_persona_spec(), llm="openai")
278
+ ```
279
+
280
+ ---
281
+
282
+ ## Usage Recipes
283
+
284
+ ### Customer Support Bot with Personality
285
+
286
+ ```python
287
+ brain = Brain.build(
288
+ persona={
289
+ "name": "Hana",
290
+ "ocean": {"O": 0.5, "C": 0.9, "E": 0.7, "A": 0.95, "N": 0.1},
291
+ "innate": {"role": "Customer Support Agent"},
292
+ "tone": "Friendly, patient, solution-oriented",
293
+ "values": ["Customer satisfaction", "Clear communication"],
294
+ "rules": [
295
+ "Always acknowledge the customer's frustration first",
296
+ "Provide step-by-step solutions",
297
+ "Escalate if unable to resolve in 3 exchanges",
298
+ ],
299
+ "boundaries": ["Never share internal system details", "Never make promises about timelines"],
300
+ },
301
+ llm="openai",
302
+ )
303
+
304
+ reply = await brain.chat("My order has been stuck for 3 days!")
305
+ # Hana responds with high agreeableness + low neuroticism → calm, empathetic, structured
306
+ ```
307
+
308
+ ### NPC in a Game — Emotional Reactions
309
+
310
+ ```python
311
+ npc = Brain.build(
312
+ persona={
313
+ "name": "Gareth",
314
+ "ocean": {"O": 0.3, "C": 0.8, "E": 0.4, "A": 0.3, "N": 0.7},
315
+ "innate": {"role": "Town Guard", "age": "42"},
316
+ "tone": "Gruff, suspicious, speaks in short sentences",
317
+ "rules": ["Never reveal patrol routes", "Distrust strangers by default"],
318
+ },
319
+ llm="openai",
320
+ )
321
+
322
+ reply = await npc.chat("I need to enter the castle.")
323
+ # Low A + high N → suspicious, terse response
324
+
325
+ # Player does something threatening
326
+ npc.apply_event_emotion((-0.6, 0.7, 0.3)) # anger + high arousal
327
+ reply = await npc.chat("I said let me through!")
328
+ # Now responding with anger-influenced tone
329
+
330
+ # After time passes, Gareth calms down
331
+ for _ in range(5):
332
+ npc.decay_emotion(rate=0.2)
333
+ ```
334
+
335
+ ### Multi-Agent Conversation
336
+
337
+ ```python
338
+ agents = {
339
+ "pm": Brain.build(
340
+ persona={"name": "Sara", "ocean": {"O": 0.7, "C": 0.8, "E": 0.8, "A": 0.7, "N": 0.3},
341
+ "innate": {"role": "Product Manager"}, "tone": "Big-picture, decisive"},
342
+ llm="openai",
343
+ ),
344
+ "eng": Brain.build(
345
+ persona={"name": "Jin", "ocean": {"O": 0.6, "C": 0.9, "E": 0.2, "A": 0.5, "N": 0.2},
346
+ "innate": {"role": "Staff Engineer"}, "tone": "Technical, cautious about scope"},
347
+ llm="openai",
348
+ ),
349
+ }
350
+
351
+ # Simulate a discussion
352
+ topic = "Should we rewrite the auth system before launch?"
353
+ pm_reply = await agents["pm"].chat(topic)
354
+ eng_reply = await agents["eng"].chat(f"Sara (PM) said: {pm_reply}\n\nWhat do you think?")
355
+ ```
356
+
357
+ ### Bulk Random Agents for Simulation
358
+
359
+ ```python
360
+ # Spawn 10 random agents for a social simulation
361
+ agents = [
362
+ Brain.build(persona=PersonaSpec.random(), llm="openai")
363
+ for _ in range(10)
364
+ ]
365
+
366
+ # Each has unique personality, tone, values, and emotional baseline
367
+ for agent in agents:
368
+ p = agent.persona
369
+ print(f"{p.name} | E={p.ocean.extraversion:.2f} N={p.ocean.neuroticism:.2f} | {p.tone}")
370
+ ```
371
+
372
+ ### Situation-Aware Responses
373
+
374
+ ```python
375
+ brain = Brain.build(
376
+ persona={"name": "Alex", "ocean": {"O": 0.7, "C": 0.6, "E": 0.5, "A": 0.7, "N": 0.4}},
377
+ llm="openai",
378
+ )
379
+
380
+ # Update L2 situation layer dynamically
381
+ brain.update_situation(location="job interview", mood="nervous")
382
+ reply = await brain.chat("Tell me about yourself.")
383
+ # Response shaped by interview context
384
+
385
+ brain.update_situation(location="bar with friends", mood="relaxed")
386
+ reply = await brain.chat("Tell me about yourself.")
387
+ # Same question, completely different tone and content
388
+ ```
389
+
390
+ ### Memory + Reflection in Long Conversations
391
+
392
+ ```python
393
+ brain = Brain.build(
394
+ persona={"name": "Dr. Lee", "ocean": {"O": 0.8, "C": 0.7, "E": 0.5, "A": 0.8, "N": 0.3},
395
+ "innate": {"role": "Therapist"}},
396
+ llm="openai",
397
+ )
398
+
399
+ # Session 1: patient shares concerns
400
+ await brain.observe("Patient expressed anxiety about upcoming presentation")
401
+ await brain.observe("Patient mentioned difficulty sleeping for the past week")
402
+ await brain.observe("Patient has a history of public speaking fear since college")
403
+
404
+ # Automatic reflection triggers when importance accumulates > 150
405
+ # Brain synthesizes: "Patient's sleep issues may be linked to presentation anxiety,
406
+ # rooted in long-standing public speaking fear"
407
+
408
+ # Later: memories inform future responses
409
+ reply = await brain.chat("I have another presentation next month.")
410
+ # Dr. Lee's response draws on stored memories and reflections
411
+ ```
412
+
413
+ ---
414
+
415
+ ## Architecture
416
+
417
+ Brain (Facade)
418
+
419
+ ├── PersonaRenderer ──── PersonaSpec → system prompt
420
+ │ ├── PersonaSpec ── L0/L1/L2 + 6 facets + behavioral rules
421
+ │ ├── OceanTraits ── Big Five numerical scores → prompt text
422
+ │ └── EmotionalState PAD 3-axis → closest emotion → prompt text
423
+
424
+ ├── MemoryStream ─────── Append, retrieve, importance tracking
425
+ │ ├── Retrieval ──── recency × importance × relevance scoring
426
+ │ └── StorageBackend (ABC) ── InMemoryStore / custom
427
+
428
+ ├── Cognition
429
+ │ ├── Perceiver ──── Observation → MemoryNode (LLM importance 1-10)
430
+ │ ├── Retriever ──── Query memory with composite scoring
431
+ │ ├── Reflector ──── Importance > 150 → focal points → insights
432
+ │ └── Planner ────── Recursive plan decomposition
433
+
434
+ ├── Character Cards ──── W++ / SBF / Tavern Card V2 → PersonaSpec
435
+
436
+ └── Adapters
437
+ ├── LLMAdapter (ABC) ── OpenAI / Anthropic / custom
438
+ └── EmbeddingAdapter (ABC) ── OpenAI / custom
439
+
440
+ ## Cognitive Loop
441
+
442
+ Every `brain.chat()` call:
443
+
444
+ User Message
445
+ → [Perceive] Store as MemoryNode, LLM judges importance (1-10)
446
+ → [Retrieve] Score all memories: recency + importance + relevance → top-k
447
+ → [Render] Persona ISS + OCEAN + emotion + memories + plan → system prompt
448
+ → [Generate] LLM produces response (personality-shaped)
449
+ → [Store] Own response saved as MemoryNode
450
+ → [Reflect?] If importance sum > 150 → generate insights automatically
451
+
452
+ ## Personality Pipeline
453
+
454
+ OCEAN Traits (static)
455
+ → PAD baseline (Mehrabian formula)
456
+ → Event stimulus shifts PAD
457
+ → closest_emotion() labels the state
458
+ → Emotion injected into system prompt
459
+ → LLM response shaped by personality + emotion
460
+ → Over time, decay() returns to baseline
461
+
462
+ ## Core API
463
+
464
+ | Method | Description |
465
+ |--------|-------------|
466
+ | `Brain.build(persona, llm)` | Factory — create Brain from dict/yaml/string |
467
+ | `brain.chat(message)` | Full cognitive loop — perceive, retrieve, render, generate, reflect |
468
+ | `brain.observe(text)` | Record external event, auto-reflect if threshold exceeded |
469
+ | `brain.plan_day(date)` | Generate daily plan from persona and memories |
470
+ | `brain.reflect()` | Manual reflection — focal points → insights |
471
+ | `brain.recall(query)` | Search memories by composite score |
472
+ | `brain.apply_event_emotion(pad)` | Shift emotional state by event PAD values |
473
+ | `brain.decay_emotion(rate)` | Decay emotion toward personality baseline |
474
+ | `brain.update_situation(**traits)` | Update L2 situation layer dynamically |
475
+ | `brain.clear_history()` | Clear multi-turn conversation history |
476
+ | `PersonaSpec.random(**pins)` | Generate random persona, pin specific fields |
477
+ | `OceanTraits.random(**pins)` | Generate random OCEAN, pin specific traits |
478
+ | `PersonaSpec.from_dict(d)` | Create persona from dict (shorthand keys supported) |
479
+ | `PersonaSpec.from_yaml(path)` | Load persona from YAML file |
480
+
481
+ ## Data Models
482
+
483
+ | Model | Description |
484
+ |-------|-------------|
485
+ | `PersonaSpec` | 3-layer identity + 6 facets + OCEAN + PAD emotion + rules |
486
+ | `OceanTraits` | Big Five: O/C/E/A/N scores (0.0-1.0) with auto prompt generation |
487
+ | `EmotionalState` | PAD 3-axis (-1~+1), stimulus transition, decay, closest emotion |
488
+ | `CharacterCard` | Tavern Card V2 compatible, parsers for W++ and SBF formats |
489
+ | `MemoryNode` | SPO triple, importance, embedding, evidence pointers |
490
+ | `DailyPlan` | Recursive PlanItems with time ranges and status |
491
+ | `RetrievalResult` | Node + score breakdown (recency, importance, relevance) |
492
+
493
+ ## Algorithms
494
+
495
+ | Algorithm | Source | Implementation |
496
+ |-----------|--------|----------------|
497
+ | Memory retrieval scoring | Generative Agents (Park 2023) | `memory/retrieval.py` |
498
+ | Reflection (focal points → insights) | Generative Agents (Park 2023) | `cognition/reflect.py` |
499
+ | OCEAN → PAD conversion | Mehrabian (1996) | `models.py:EmotionalState.from_ocean()` |
500
+ | Emotion transition | PAD stimulus model | `models.py:EmotionalState.apply_stimulus()` |
501
+ | Emotion decay | Exponential return to baseline | `models.py:EmotionalState.decay()` |
502
+ | Personality-sensitivity coupling | N → α mapping | `models.py:PersonaSpec.apply_event()` |
503
+ | W++ parsing | Community standard | `models.py:CharacterCard.from_wpp()` |
504
+ | SBF parsing | Community standard | `models.py:CharacterCard.from_sbf()` |
505
+
506
+ ---
507
+
508
+ ## References
509
+
510
+ - [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) — Memory stream, reflection, planning
511
+ - [Mehrabian PAD Model (1996)](https://en.wikipedia.org/wiki/PAD_emotional_state_model) — Pleasure-Arousal-Dominance emotional space
512
+ - [Big Five / OCEAN](https://en.wikipedia.org/wiki/Big_Five_personality_traits) — Five-factor personality model
513
+ - [BIG5-CHAT (2024)](https://openreview.net/pdf?id=TqwTzLjzGS) — Big Five personality in LLM conversations
514
+ - [Machine Mindset (MBTI)](https://arxiv.org/html/2312.12999v3) — MBTI-based LLM personality tuning
515
+ - [JPAF: Evolving Personality](https://github.com/agent-topia/evolving_personality) — Jung function weights for dynamic personality
516
+ - [Character Card V2 Spec](https://github.com/malfoyslastname/character-card-spec-v2) — Tavern Card standard
517
+ - [Leaked System Prompts](https://github.com/jujumilk3/leaked-system-prompts) — Real-world persona patterns
518
+
519
+ ## Project Status (v0.1.0)
520
+
521
+ > **Phase: Core Architecture Complete — Pre-release**
522
+
523
+ ### Implemented
524
+
525
+ | Module | Status | Files |
526
+ |--------|--------|-------|
527
+ | **Data Models** | Done | `models.py` — OceanTraits, EmotionalState, PersonaSpec, PersonaLayer, CharacterCard, MemoryNode, PlanItem, DailyPlan, RetrievalResult |
528
+ | **Brain Facade** | Done | `brain.py` — chat, observe, plan_day, reflect, recall, emotion control |
529
+ | **Persona Renderer** | Done | `persona/renderer.py` — ISS + OCEAN + emotion + memories + plan → system prompt |
530
+ | **Memory Stream** | Done | `memory/stream.py` — append, retrieve (composite scoring), get_recent, importance tracking |
531
+ | **Retrieval Scoring** | Done | `memory/retrieval.py` — recency × importance × relevance, min-max normalization, cosine similarity |
532
+ | **Storage Backend** | Done | `memory/store.py` (ABC) + `storage/memory_store.py` (InMemoryStore) |
533
+ | **Cognition: Perceive** | Done | `cognition/perceive.py` — observation → MemoryNode (LLM importance 1-10, SPO triple extraction) |
534
+ | **Cognition: Retrieve** | Done | `cognition/retrieve.py` — composite scoring wrapper, reflection-specific retrieval |
535
+ | **Cognition: Reflect** | Done | `cognition/reflect.py` — importance threshold → focal points → insights → depth=2+ nodes |
536
+ | **Cognition: Plan** | Done | `cognition/plan.py` — daily plan, recursive decompose, replan on new observations |
537
+ | **LLM Adapters** | Done | `llm/openai.py` (OpenAI), `llm/anthropic.py` (Anthropic Claude) |
538
+ | **Embedding Adapter** | Done | `embedding/openai.py` (text-embedding-3-small/large/ada-002) |
539
+ | **Character Cards** | Done | `models.py` — W++ parser, SBF parser, Tavern Card V2 → PersonaSpec conversion |
540
+ | **Multi-turn Chat** | Done | `brain.py` — sliding window conversation history (max_history) |
542
+ | **Factory Methods** | Done | `Brain.build()` from dict/yaml/string, `PersonaSpec.from_dict()`, `from_yaml()` |
543
+ | **Random Generation** | Done | `OceanTraits.random()`, `PersonaSpec.random()` with partial pinning |
544
+ | **YAML Personas** | Done | `examples/personas/` — load persona from YAML file |
545
+
546
+ ### Not Yet Implemented
547
+
548
+ | Item | Notes |
549
+ |------|-------|
550
+ | Persistent storage backend | SQLite, Redis, etc. — currently InMemory only |
551
+ | Anthropic embedding adapter | Only OpenAI embeddings available |
552
+ | PyPI publish | Package configured (`pyproject.toml`) but not yet published |
553
+ | CI for tests | Only a publish workflow exists (`.github/workflows/publish.yml`) — no test/lint CI yet |
554
+ | Tavern Card V2 export | Import only, no export to card format |
555
+ | L1/L2 persona auto-evolution | Layers exist but no automatic update logic from interactions |
556
+
557
+ ---
558
+
559
+ ## License
560
+
561
+ MIT