htm 0.0.14 → 0.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +33 -0
  3. data/README.md +269 -79
  4. data/db/migrate/00003_create_file_sources.rb +5 -0
  5. data/db/migrate/00004_create_nodes.rb +17 -0
  6. data/db/migrate/00005_create_tags.rb +7 -0
  7. data/db/migrate/00006_create_node_tags.rb +2 -0
  8. data/db/migrate/00007_create_robot_nodes.rb +7 -0
  9. data/db/schema.sql +41 -29
  10. data/docs/api/yard/HTM/Configuration.md +54 -0
  11. data/docs/api/yard/HTM/Database.md +13 -10
  12. data/docs/api/yard/HTM/EmbeddingService.md +5 -1
  13. data/docs/api/yard/HTM/LongTermMemory.md +18 -277
  14. data/docs/api/yard/HTM/PropositionError.md +18 -0
  15. data/docs/api/yard/HTM/PropositionService.md +66 -0
  16. data/docs/api/yard/HTM/QueryCache.md +88 -0
  17. data/docs/api/yard/HTM/RobotGroup.md +481 -0
  18. data/docs/api/yard/HTM/SqlBuilder.md +108 -0
  19. data/docs/api/yard/HTM/TagService.md +4 -0
  20. data/docs/api/yard/HTM/Telemetry/NullInstrument.md +13 -0
  21. data/docs/api/yard/HTM/Telemetry/NullMeter.md +15 -0
  22. data/docs/api/yard/HTM/Telemetry.md +109 -0
  23. data/docs/api/yard/HTM/WorkingMemoryChannel.md +176 -0
  24. data/docs/api/yard/HTM.md +11 -23
  25. data/docs/api/yard/index.csv +102 -25
  26. data/docs/api/yard-reference.md +8 -0
  27. data/docs/assets/images/multi-provider-failover.svg +51 -0
  28. data/docs/assets/images/robot-group-architecture.svg +65 -0
  29. data/docs/database/README.md +3 -3
  30. data/docs/database/public.file_sources.svg +29 -21
  31. data/docs/database/public.node_tags.md +2 -0
  32. data/docs/database/public.node_tags.svg +53 -41
  33. data/docs/database/public.nodes.md +2 -0
  34. data/docs/database/public.nodes.svg +52 -40
  35. data/docs/database/public.robot_nodes.md +2 -0
  36. data/docs/database/public.robot_nodes.svg +30 -22
  37. data/docs/database/public.robots.svg +16 -12
  38. data/docs/database/public.tags.md +3 -0
  39. data/docs/database/public.tags.svg +41 -33
  40. data/docs/database/schema.json +66 -0
  41. data/docs/database/schema.svg +60 -48
  42. data/docs/development/index.md +13 -0
  43. data/docs/development/rake-tasks.md +1068 -0
  44. data/docs/getting-started/quick-start.md +144 -155
  45. data/docs/guides/adding-memories.md +2 -3
  46. data/docs/guides/context-assembly.md +185 -184
  47. data/docs/guides/getting-started.md +154 -148
  48. data/docs/guides/index.md +7 -0
  49. data/docs/guides/long-term-memory.md +60 -92
  50. data/docs/guides/mcp-server.md +617 -0
  51. data/docs/guides/multi-robot.md +249 -345
  52. data/docs/guides/recalling-memories.md +153 -163
  53. data/docs/guides/robot-groups.md +604 -0
  54. data/docs/guides/search-strategies.md +61 -58
  55. data/docs/guides/working-memory.md +103 -136
  56. data/docs/index.md +30 -26
  57. data/examples/robot_groups/robot_worker.rb +1 -2
  58. data/examples/robot_groups/same_process.rb +1 -4
  59. data/lib/htm/robot_group.rb +721 -0
  60. data/lib/htm/version.rb +1 -1
  61. data/lib/htm/working_memory_channel.rb +250 -0
  62. data/lib/htm.rb +2 -0
  63. data/mkdocs.yml +2 -0
  64. metadata +18 -9
  65. data/db/migrate/00009_add_working_memory_to_robot_nodes.rb +0 -12
  66. data/db/migrate/00010_add_soft_delete_to_associations.rb +0 -29
  67. data/db/migrate/00011_add_performance_indexes.rb +0 -21
  68. data/db/migrate/00012_add_tags_trigram_index.rb +0 -18
  69. data/db/migrate/00013_enable_lz4_compression.rb +0 -43
  70. data/examples/robot_groups/lib/robot_group.rb +0 -419
  71. data/examples/robot_groups/lib/working_memory_channel.rb +0 -140
@@ -23,7 +23,7 @@ Get started with HTM in just 5 minutes! This guide will walk you through buildin
23
23
  <rect x="270" y="70" width="180" height="100" fill="rgba(33, 150, 243, 0.2)" stroke="#2196F3" stroke-width="3" rx="5"/>
24
24
  <text x="360" y="95" text-anchor="middle" fill="#2196F3" font-size="16" font-weight="bold">Step 2</text>
25
25
  <text x="360" y="115" text-anchor="middle" fill="#E0E0E0" font-size="14" font-weight="bold">Add Memories</text>
26
- <text x="360" y="140" text-anchor="middle" fill="#B0B0B0" font-size="11">add_node()</text>
26
+ <text x="360" y="140" text-anchor="middle" fill="#B0B0B0" font-size="11">remember()</text>
27
27
  <text x="360" y="160" text-anchor="middle" fill="#B0B0B0" font-size="10">Store knowledge</text>
28
28
 
29
29
  <!-- Arrow 2 to 3 -->
@@ -43,7 +43,7 @@ Get started with HTM in just 5 minutes! This guide will walk you through buildin
43
43
  <rect x="710" y="70" width="180" height="100" fill="rgba(255, 152, 0, 0.2)" stroke="#FF9800" stroke-width="3" rx="5"/>
44
44
  <text x="800" y="95" text-anchor="middle" fill="#FF9800" font-size="16" font-weight="bold">Step 4</text>
45
45
  <text x="800" y="115" text-anchor="middle" fill="#E0E0E0" font-size="14" font-weight="bold">Use Context</text>
46
- <text x="800" y="140" text-anchor="middle" fill="#B0B0B0" font-size="11">create_context()</text>
46
+ <text x="800" y="140" text-anchor="middle" fill="#B0B0B0" font-size="11">assemble_context()</text>
47
47
  <text x="800" y="160" text-anchor="middle" fill="#B0B0B0" font-size="10">For LLM prompts</text>
48
48
 
49
49
  <!-- Memory Layers Visualization -->
@@ -79,8 +79,8 @@ Get started with HTM in just 5 minutes! This guide will walk you through buildin
79
79
  <rect x="50" y="490" width="800" height="90" fill="rgba(76, 175, 80, 0.1)" stroke="#4CAF50" stroke-width="2" rx="5"/>
80
80
  <text x="450" y="515" text-anchor="middle" fill="#4CAF50" font-size="13" font-weight="bold">Quick Example Code:</text>
81
81
  <text x="70" y="540" fill="#B0B0B0" font-family="monospace" font-size="10">htm = HTM.new(robot_name: "My Assistant")</text>
82
- <text x="70" y="555" fill="#B0B0B0" font-family="monospace" font-size="10">htm.add_node("key1", "Remember this fact", type: :fact)</text>
83
- <text x="70" y="570" fill="#B0B0B0" font-family="monospace" font-size="10">memories = htm.recall(timeframe: "today", topic: "fact")</text>
82
+ <text x="70" y="555" fill="#B0B0B0" font-family="monospace" font-size="10">htm.remember("Remember this fact", tags: ["fact"])</text>
83
+ <text x="70" y="570" fill="#B0B0B0" font-family="monospace" font-size="10">memories = htm.recall("fact", timeframe: "today")</text>
84
84
 
85
85
  <!-- Arrow markers -->
86
86
  <defs>
@@ -123,12 +123,18 @@ puts "=" * 60
123
123
  Create an HTM instance for your robot:
124
124
 
125
125
  ```ruby
126
+ # Configure HTM globally (optional - uses Ollama by default)
127
+ HTM.configure do |config|
128
+ config.embedding_provider = :ollama
129
+ config.embedding_model = 'nomic-embed-text:latest'
130
+ config.tag_provider = :ollama
131
+ config.tag_model = 'gemma3:latest'
132
+ end
133
+
126
134
  # Initialize HTM with a robot name
127
135
  htm = HTM.new(
128
136
  robot_name: "Code Helper",
129
- working_memory_size: 128_000, # 128k tokens
130
- embedding_service: :ollama, # Use Ollama for embeddings
131
- embedding_model: 'gpt-oss' # Default embedding model
137
+ working_memory_size: 128_000 # 128k tokens
132
138
  )
133
139
 
134
140
  puts "✓ HTM initialized for '#{htm.robot_name}'"
@@ -140,11 +146,10 @@ puts " Working Memory: #{htm.working_memory.max_tokens} tokens"
140
146
 
141
147
  - `robot_name`: A human-readable name for your AI robot
142
148
  - `working_memory_size`: Maximum tokens for active context (128k is typical)
143
- - `embedding_service`: Service to generate vector embeddings (`:ollama` is default)
144
- - `embedding_model`: Which model to use for embeddings (`gpt-oss` is default)
149
+ - Configuration is set globally via `HTM.configure` block
145
150
 
146
151
  !!! tip "Robot Identity"
147
- Each HTM instance represents one robot. The `robot_id` is automatically generated (UUID) and used to track which robot created each memory.
152
+ Each HTM instance represents one robot. The `robot_id` is an integer database ID used to track which robot created each memory.
148
153
 
149
154
  ### Step 3: Add Your First Memory
150
155
 
@@ -153,31 +158,25 @@ Add a project decision to HTM's memory:
153
158
  ```ruby
154
159
  puts "\n1. Adding a project decision..."
155
160
 
156
- htm.add_node(
157
- "decision_001", # Unique key
161
+ node_id = htm.remember(
158
162
  "We decided to use PostgreSQL for the database " \
159
163
  "because it provides excellent time-series optimization and " \
160
164
  "native vector search with pgvector.",
161
- type: :decision, # Memory type
162
- category: "architecture", # Optional category
163
- importance: 9.0, # Importance score (0-10)
164
- tags: ["database", "architecture"] # Searchable tags
165
+ tags: ["database:postgresql", "architecture:decisions"],
166
+ metadata: { category: "architecture", priority: "high" }
165
167
  )
166
168
 
167
- puts "✓ Decision added to memory"
169
+ puts "✓ Decision added to memory (node #{node_id})"
168
170
  ```
169
171
 
170
172
  **Memory Components:**
171
173
 
172
- - **Key**: Unique identifier (e.g., `"decision_001"`)
173
- - **Value**: The actual content/memory text
174
- - **Type**: Category of memory (`:decision`, `:fact`, `:code`, `:preference`, etc.)
175
- - **Category**: Optional grouping
176
- - **Importance**: Score from 0.0 to 10.0 (affects recall priority)
177
- - **Tags**: Searchable keywords for organization
174
+ - **Content**: The actual memory text (first argument)
175
+ - **Tags**: Hierarchical tags for categorization (e.g., `"database:postgresql"`)
176
+ - **Metadata**: Arbitrary key-value data stored as JSONB
178
177
 
179
178
  !!! note "Automatic Embeddings"
180
- HTM automatically generates vector embeddings for the memory content using Ollama. You don't need to handle embeddings yourself!
179
+ HTM automatically generates vector embeddings for the memory content in the background. You don't need to handle embeddings yourself!
181
180
 
182
181
  ### Step 4: Add More Memories
183
182
 
@@ -186,51 +185,44 @@ Let's add a few more memories:
186
185
  ```ruby
187
186
  puts "\n2. Adding user preferences..."
188
187
 
189
- htm.add_node(
190
- "pref_001",
188
+ htm.remember(
191
189
  "User prefers using the debug_me gem for debugging instead of puts statements.",
192
- type: :preference,
193
- category: "coding_style",
194
- importance: 7.0,
195
- tags: ["debugging", "ruby", "preferences"]
190
+ tags: ["debugging:ruby", "preferences:coding-style"],
191
+ metadata: { category: "preference" }
196
192
  )
197
193
 
198
194
  puts "✓ Preference added"
199
195
 
200
196
  puts "\n3. Adding a code pattern..."
201
197
 
202
- htm.add_node(
203
- "code_001",
198
+ htm.remember(
204
199
  "For database queries, use connection pooling with the connection_pool gem " \
205
200
  "to handle concurrent requests efficiently.",
206
- type: :code,
207
- category: "patterns",
208
- importance: 8.0,
209
- tags: ["database", "performance", "ruby"],
210
- related_to: ["decision_001"] # Link to related memory
201
+ tags: ["database:performance", "ruby:patterns"],
202
+ metadata: { category: "code-pattern" }
211
203
  )
212
204
 
213
- puts "✓ Code pattern added (linked to decision_001)"
205
+ puts "✓ Code pattern added"
214
206
  ```
215
207
 
216
- **Notice the `related_to` parameter?** This creates a relationship in the knowledge graph, linking related memories together.
208
+ **Tags create relationships** - use hierarchical tags to build a navigable knowledge graph. Tags like `database:postgresql` and `database:performance` are connected through their shared `database` prefix.
217
209
 
218
- ### Step 5: Retrieve a Specific Memory
210
+ ### Step 5: Look Up a Specific Memory
219
211
 
220
- Retrieve a memory by its key:
212
+ Look up a memory by its node ID:
221
213
 
222
214
  ```ruby
223
- puts "\n4. Retrieving specific memory..."
215
+ puts "\n4. Looking up specific memory..."
224
216
 
225
- memory = htm.retrieve("decision_001")
217
+ # Use the node_id returned from remember()
218
+ node = HTM::Models::Node.find_by(id: node_id)
226
219
 
227
- if memory
220
+ if node
228
221
  puts "✓ Found memory:"
229
- puts " Key: #{memory['key']}"
230
- puts " Type: #{memory['type']}"
231
- puts " Content: #{memory['value'][0..100]}..."
232
- puts " Importance: #{memory['importance']}"
233
- puts " Created: #{memory['created_at']}"
222
+ puts " ID: #{node.id}"
223
+ puts " Content: #{node.content[0..100]}..."
224
+ puts " Tags: #{node.tags.pluck(:name).join(', ')}"
225
+ puts " Created: #{node.created_at}"
234
226
  else
235
227
  puts "✗ Memory not found"
236
228
  end
@@ -244,22 +236,23 @@ Use HTM's powerful recall feature to find relevant memories:
244
236
  puts "\n5. Recalling memories about 'database'..."
245
237
 
246
238
  memories = htm.recall(
239
+ "database", # Topic (first positional argument)
247
240
  timeframe: "last week", # Natural language time filter
248
- topic: "database", # What to search for
249
241
  limit: 10, # Max results
250
- strategy: :hybrid # Search strategy (vector + full-text)
242
+ strategy: :hybrid, # Search strategy (vector + full-text)
243
+ raw: true # Return full node data
251
244
  )
252
245
 
253
246
  puts "✓ Found #{memories.length} relevant memories:"
254
247
  memories.each_with_index do |mem, idx|
255
- puts " #{idx + 1}. [#{mem['type']}] #{mem['value'][0..60]}..."
248
+ puts " #{idx + 1}. #{mem['content'][0..60]}..."
256
249
  end
257
250
  ```
258
251
 
259
252
  **Search Strategies:**
260
253
 
261
254
  - **`:vector`**: Semantic similarity search using embeddings
262
- - **`:fulltext`**: Keyword-based PostgreSQL full-text search
255
+ - **`:fulltext`**: Keyword-based PostgreSQL full-text search (default)
263
256
  - **`:hybrid`**: Combines both for best results (recommended)
264
257
 
265
258
  **Timeframe Options:**
@@ -268,7 +261,7 @@ end
268
261
  - `"yesterday"` - Previous day
269
262
  - `"last 30 days"` - Last month
270
263
  - `"this month"` - Current calendar month
271
- - Date ranges: `(Time.now - 7.days)..Time.now`
264
+ - Date ranges: `7.days.ago..Time.now`
272
265
 
273
266
  ### Step 7: Create Context for Your LLM
274
267
 
@@ -277,8 +270,8 @@ Generate a context string optimized for LLM consumption:
277
270
  ```ruby
278
271
  puts "\n6. Creating context for LLM..."
279
272
 
280
- context = htm.create_context(
281
- strategy: :balanced, # Balance importance and recency
273
+ context = htm.working_memory.assemble_context(
274
+ strategy: :balanced, # Balance frequency and recency
282
275
  max_tokens: 50_000 # Optional token limit
283
276
  )
284
277
 
@@ -290,9 +283,9 @@ puts "..."
290
283
 
291
284
  **Context Strategies:**
292
285
 
293
- - **`:recent`**: Most recent memories first
294
- - **`:important`**: Highest importance scores first
295
- - **`:balanced`**: Combines importance × recency (recommended)
286
+ - **`:recent`**: Most recently accessed memories first (LRU)
287
+ - **`:frequent`**: Most frequently accessed memories first (LFU)
288
+ - **`:balanced`**: Combines frequency × recency (recommended)
296
289
 
297
290
  This context can be directly injected into your LLM prompt:
298
291
 
@@ -317,14 +310,18 @@ View statistics about your memory usage:
317
310
  ```ruby
318
311
  puts "\n7. Memory Statistics:"
319
312
 
320
- stats = htm.memory_stats
321
-
322
- puts " Total nodes in long-term memory: #{stats[:total_nodes]}"
323
- puts " Active robots: #{stats[:active_robots]}"
324
- puts " Working memory usage: #{stats[:working_memory][:current_tokens]} / " \
325
- "#{stats[:working_memory][:max_tokens]} tokens " \
326
- "(#{stats[:working_memory][:utilization].round(2)}%)"
327
- puts " Database size: #{(stats[:database_size] / (1024.0 ** 2)).round(2)} MB"
313
+ # Working memory stats
314
+ wm = htm.working_memory
315
+ puts " Working memory:"
316
+ puts " Nodes: #{wm.node_count}"
317
+ puts " Tokens: #{wm.token_count} / #{wm.max_tokens}"
318
+ puts " Utilization: #{wm.utilization_percentage}%"
319
+
320
+ # Long-term memory stats via models
321
+ puts " Long-term memory:"
322
+ puts " Total nodes: #{HTM::Models::Node.count}"
323
+ puts " Total tags: #{HTM::Models::Tag.count}"
324
+ puts " Active robots: #{HTM::Models::Robot.count}"
328
325
  ```
329
326
 
330
327
  ### Complete Example
@@ -339,52 +336,51 @@ require 'htm'
339
336
  puts "My First HTM Application"
340
337
  puts "=" * 60
341
338
 
342
- # Step 1: Initialize HTM
339
+ # Step 1: Configure and initialize HTM
340
+ HTM.configure do |config|
341
+ config.embedding_provider = :ollama
342
+ config.embedding_model = 'nomic-embed-text:latest'
343
+ config.tag_provider = :ollama
344
+ config.tag_model = 'gemma3:latest'
345
+ end
346
+
343
347
  htm = HTM.new(
344
348
  robot_name: "Code Helper",
345
- working_memory_size: 128_000,
346
- embedding_service: :ollama,
347
- embedding_model: 'gpt-oss'
349
+ working_memory_size: 128_000
348
350
  )
349
351
 
350
352
  puts "✓ HTM initialized for '#{htm.robot_name}'"
351
353
 
352
354
  # Step 2: Add memories
353
- htm.add_node(
354
- "decision_001",
355
+ htm.remember(
355
356
  "We decided to use PostgreSQL for the database.",
356
- type: :decision,
357
- category: "architecture",
358
- importance: 9.0,
359
- tags: ["database", "architecture"]
357
+ tags: ["database:postgresql", "architecture:decisions"],
358
+ metadata: { priority: "high" }
360
359
  )
361
360
 
362
- htm.add_node(
363
- "pref_001",
361
+ htm.remember(
364
362
  "User prefers using the debug_me gem for debugging.",
365
- type: :preference,
366
- importance: 7.0,
367
- tags: ["debugging", "ruby"]
363
+ tags: ["debugging:ruby", "preferences"],
364
+ metadata: { category: "preference" }
368
365
  )
369
366
 
370
367
  puts "✓ Memories added"
371
368
 
372
369
  # Step 3: Recall memories
373
370
  memories = htm.recall(
371
+ "database",
374
372
  timeframe: "last week",
375
- topic: "database",
376
373
  strategy: :hybrid
377
374
  )
378
375
 
379
376
  puts "✓ Found #{memories.length} memories about 'database'"
380
377
 
381
378
  # Step 4: Create context
382
- context = htm.create_context(strategy: :balanced)
379
+ context = htm.working_memory.assemble_context(strategy: :balanced)
383
380
  puts "✓ Context created: #{context.length} characters"
384
381
 
385
382
  # Step 5: View statistics
386
- stats = htm.memory_stats
387
- puts "✓ Total nodes: #{stats[:total_nodes]}"
383
+ puts "✓ Total nodes: #{HTM::Models::Node.count}"
388
384
 
389
385
  puts "\n" + "=" * 60
390
386
  puts "Success! Your first HTM application is working."
@@ -408,31 +404,32 @@ robot_a = HTM.new(robot_name: "Code Assistant")
408
404
  robot_b = HTM.new(robot_name: "Documentation Writer")
409
405
 
410
406
  # Robot A adds a memory
411
- robot_a.add_node(
412
- "shared_001",
407
+ robot_a.remember(
413
408
  "The API documentation is stored in the docs/ directory.",
414
- type: :fact,
415
- importance: 8.0
409
+ tags: ["docs:api", "project:structure"]
416
410
  )
417
411
 
418
412
  puts "Robot A added memory"
419
413
 
420
414
  # Robot B can access the same memory!
421
415
  memories = robot_b.recall(
416
+ "documentation",
422
417
  timeframe: "last week",
423
- topic: "documentation",
424
418
  strategy: :hybrid
425
419
  )
426
420
 
427
421
  puts "Robot B found #{memories.length} memories"
428
422
  # Robot B sees Robot A's memory!
429
423
 
430
- # Track which robot said what
431
- breakdown = robot_b.which_robot_said("documentation")
432
- puts "Who mentioned 'documentation':"
433
- breakdown.each do |robot_id, count|
434
- puts " #{robot_id}: #{count} times"
435
- end
424
+ # Query which robots have accessed which nodes
425
+ HTM::Models::RobotNode.includes(:robot, :node)
426
+ .where(nodes: { content: 'documentation' })
427
+ .group(:robot_id)
428
+ .count
429
+ .each do |robot_id, count|
430
+ robot = HTM::Models::Robot.find(robot_id)
431
+ puts " #{robot.name}: #{count} memories"
432
+ end
436
433
  ```
437
434
 
438
435
  **Use cases for multi-robot:**
@@ -444,54 +441,56 @@ end
444
441
 
445
442
  ## Working with Relationships
446
443
 
447
- Build a knowledge graph by linking related memories:
444
+ Build a knowledge graph using hierarchical tags:
448
445
 
449
446
  ```ruby
450
447
  # Add parent concept
451
- htm.add_node(
452
- "concept_databases",
448
+ htm.remember(
453
449
  "Databases store and organize data persistently.",
454
- type: :fact,
455
- importance: 5.0
450
+ tags: ["knowledge:databases"]
456
451
  )
457
452
 
458
- # Add child concept with relationship
459
- htm.add_node(
460
- "concept_postgresql",
453
+ # Add child concept with shared tag hierarchy
454
+ htm.remember(
461
455
  "PostgreSQL is a powerful open-source relational database.",
462
- type: :fact,
463
- importance: 7.0,
464
- related_to: ["concept_databases"] # Links to parent
456
+ tags: ["knowledge:databases:postgresql", "tech:database"]
465
457
  )
466
458
 
467
459
  # Add another related concept
468
- htm.add_node(
469
- "concept_postgresql",
460
+ htm.remember(
470
461
  "PostgreSQL provides robust relational database capabilities.",
471
- type: :fact,
472
- importance: 8.0,
473
- related_to: ["concept_postgresql", "concept_databases"]
462
+ tags: ["knowledge:databases:postgresql:features", "tech:database"]
474
463
  )
475
464
 
476
- # Now you have a knowledge graph:
477
- # concept_databases
478
- # ├── concept_postgresql
479
- # └── concept_postgresql
465
+ # View tag hierarchy
466
+ puts HTM::Models::Tag.tree_string
467
+ # knowledge
468
+ # └── databases
469
+ # └── postgresql
470
+ # └── features
471
+
472
+ # Find all memories under a tag prefix
473
+ nodes = HTM::Models::Tag.find_by(name: 'knowledge:databases')&.nodes
480
474
  ```
481
475
 
482
476
  ## Forget (Explicit Deletion)
483
477
 
484
- HTM follows a "never forget" philosophy, but you can explicitly delete memories:
478
+ HTM follows a "never forget" philosophy with soft delete by default:
485
479
 
486
480
  ```ruby
487
- # Deletion requires confirmation
488
- htm.forget("old_decision", confirm: :confirmed)
481
+ # Soft delete (recoverable) - default behavior
482
+ node_id = htm.remember("Temporary note")
483
+ htm.forget(node_id) # Soft delete
484
+ htm.restore(node_id) # Restore it!
485
+
486
+ # Permanent delete requires confirmation
487
+ htm.forget(node_id, soft: false, confirm: :confirmed)
489
488
 
490
- puts "✓ Memory deleted"
489
+ puts "✓ Memory permanently deleted"
491
490
  ```
492
491
 
493
- !!! warning "Deletion is Permanent"
494
- The `forget()` method permanently deletes data. This is the **only** way to delete memories in HTM. Working memory evictions move data to long-term storage, they don't delete it.
492
+ !!! info "Soft Delete by Default"
493
+ The `forget()` method performs a soft delete by default (sets `deleted_at` timestamp). The memory can be restored with `restore()`. Permanent deletion requires `soft: false, confirm: :confirmed`. Working memory evictions move data to long-term storage, they don't delete it.
495
494
 
496
495
  ## Next Steps
497
496
 
@@ -521,17 +520,16 @@ htm = HTM.new(
521
520
  working_memory_size: 256_000 # 256k tokens
522
521
  )
523
522
 
524
- # Try different embedding models
525
- htm = HTM.new(
526
- robot_name: "Custom Embeddings",
527
- embedding_service: :ollama,
528
- embedding_model: 'llama2' # Use Llama2 instead of gpt-oss
529
- )
523
+ # Try different embedding models via configure
524
+ HTM.configure do |config|
525
+ config.embedding_provider = :ollama
526
+ config.embedding_model = 'llama3:latest' # Use Llama3
527
+ end
530
528
 
531
529
  # Try different recall strategies
532
530
  memories = htm.recall(
531
+ "important decisions",
533
532
  timeframe: "last month",
534
- topic: "important decisions",
535
533
  strategy: :vector # Pure semantic search
536
534
  )
537
535
  ```
@@ -558,21 +556,17 @@ For production applications:
558
556
 
559
557
  ```ruby
560
558
  # Store user messages
561
- htm.add_node(
562
- "msg_#{Time.now.to_i}",
559
+ htm.remember(
563
560
  "User: How do I optimize database queries?",
564
- type: :context,
565
- importance: 6.0,
566
- tags: ["conversation", "question"]
561
+ tags: ["conversation:question"],
562
+ metadata: { role: "user", timestamp: Time.now.to_i }
567
563
  )
568
564
 
569
565
  # Store assistant responses
570
- htm.add_node(
571
- "response_#{Time.now.to_i}",
566
+ htm.remember(
572
567
  "Assistant: Use indexes and connection pooling.",
573
- type: :context,
574
- importance: 6.0,
575
- tags: ["conversation", "answer"]
568
+ tags: ["conversation:answer"],
569
+ metadata: { role: "assistant", timestamp: Time.now.to_i }
576
570
  )
577
571
  ```
578
572
 
@@ -580,12 +574,10 @@ htm.add_node(
580
574
 
581
575
  ```ruby
582
576
  # Extract patterns from code reviews
583
- htm.add_node(
584
- "pattern_#{SecureRandom.hex(4)}",
577
+ htm.remember(
585
578
  "Always validate user input before database queries.",
586
- type: :code,
587
- importance: 9.0,
588
- tags: ["security", "validation", "best-practice"]
579
+ tags: ["security:validation", "patterns:best-practice"],
580
+ metadata: { source: "code-review" }
589
581
  )
590
582
  ```
591
583
 
@@ -593,14 +585,11 @@ htm.add_node(
593
585
 
594
586
  ```ruby
595
587
  # Document architectural decisions
596
- htm.add_node(
597
- "adr_001",
588
+ htm.remember(
598
589
  "Decision: Use microservices architecture. " \
599
590
  "Reasoning: Better scalability and independent deployment.",
600
- type: :decision,
601
- category: "architecture",
602
- importance: 10.0,
603
- tags: ["adr", "architecture", "microservices"]
591
+ tags: ["adr", "architecture:microservices"],
592
+ metadata: { category: "architecture", priority: "critical" }
604
593
  )
605
594
  ```
606
595
 
@@ -629,8 +618,8 @@ echo $HTM_DBURL
629
618
  **Solution**: Check Ollama's status and ensure the model is downloaded:
630
619
 
631
620
  ```bash
632
- ollama list | grep gpt-oss
633
- # Should show gpt-oss model
621
+ ollama list | grep nomic-embed-text
622
+ # Should show nomic-embed-text model
634
623
  ```
635
624
 
636
625
  ### Issue: Memory not found during recall
@@ -640,8 +629,8 @@ ollama list | grep gpt-oss
640
629
  ```ruby
641
630
  # Instead of "last week", use:
642
631
  memories = htm.recall(
643
- timeframe: (Time.now - 3600)..Time.now, # Last hour
644
- topic: "your topic"
632
+ "your topic",
633
+ timeframe: (Time.now - 3600)..Time.now # Last hour
645
634
  )
646
635
  ```
647
636
 
@@ -367,9 +367,8 @@ When you `remember()`, the node is automatically added to working memory:
367
367
  htm.remember("Important fact")
368
368
 
369
369
  # Check working memory
370
- stats = htm.working_memory.stats
371
- puts "Nodes in WM: #{stats[:node_count]}"
372
- puts "Token usage: #{stats[:utilization]}%"
370
+ puts "Nodes in WM: #{htm.working_memory.node_count}"
371
+ puts "Token usage: #{htm.working_memory.utilization_percentage}%"
373
372
  ```
374
373
 
375
374
  ### Eviction