@sesamespace/hivemind 0.8.13 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. package/README.md +2 -1
  2. package/dist/{chunk-MLY4VFOO.js → chunk-BHCDOHSK.js} +3 -3
  3. package/dist/{chunk-PFZO67E2.js → chunk-DPLCEMEC.js} +2 -2
  4. package/dist/{chunk-HTLHMXAL.js → chunk-FBQBBAPZ.js} +2 -2
  5. package/dist/{chunk-NSTTILSN.js → chunk-FK6WYXRM.js} +79 -2
  6. package/dist/chunk-FK6WYXRM.js.map +1 -0
  7. package/dist/{chunk-LJHJGDKY.js → chunk-ICSJNKI6.js} +62 -2
  8. package/dist/chunk-ICSJNKI6.js.map +1 -0
  9. package/dist/{chunk-4Y7A25UG.js → chunk-IXBIAX76.js} +2 -2
  10. package/dist/{chunk-ZM7RK5YV.js → chunk-M3A2WRXM.js} +560 -37
  11. package/dist/chunk-M3A2WRXM.js.map +1 -0
  12. package/dist/commands/fleet.js +3 -3
  13. package/dist/commands/init.js +3 -3
  14. package/dist/commands/start.js +3 -3
  15. package/dist/commands/upgrade.js +1 -1
  16. package/dist/commands/watchdog.js +3 -3
  17. package/dist/dashboard.html +873 -131
  18. package/dist/index.js +2 -2
  19. package/dist/main.js +375 -7
  20. package/dist/main.js.map +1 -1
  21. package/dist/start.js +1 -1
  22. package/install.sh +162 -0
  23. package/package.json +24 -23
  24. package/packages/memory/Cargo.lock +6480 -0
  25. package/packages/memory/Cargo.toml +21 -0
  26. package/packages/memory/src/src/context.rs +179 -0
  27. package/packages/memory/src/src/embeddings.rs +51 -0
  28. package/packages/memory/src/src/main.rs +887 -0
  29. package/packages/memory/src/src/promotion.rs +808 -0
  30. package/packages/memory/src/src/scoring.rs +142 -0
  31. package/packages/memory/src/src/store.rs +460 -0
  32. package/packages/memory/src/src/tasks.rs +321 -0
  33. package/.pnpmrc.json +0 -1
  34. package/AUTO-DEBUG-DESIGN.md +0 -267
  35. package/DASHBOARD-PLAN.md +0 -206
  36. package/MEMORY-ENHANCEMENT-PLAN.md +0 -211
  37. package/TOOL-USE-DESIGN.md +0 -173
  38. package/dist/chunk-LJHJGDKY.js.map +0 -1
  39. package/dist/chunk-NSTTILSN.js.map +0 -1
  40. package/dist/chunk-ZM7RK5YV.js.map +0 -1
  41. package/docs/TOOL-PARITY-PLAN.md +0 -191
  42. package/src/memory/dashboard-integration.ts +0 -295
  43. package/src/memory/index.ts +0 -187
  44. package/src/memory/performance-test.ts +0 -208
  45. package/src/memory/processors/agent-sync.ts +0 -312
  46. package/src/memory/processors/command-learner.ts +0 -298
  47. package/src/memory/processors/memory-api-client.ts +0 -105
  48. package/src/memory/processors/message-flow-integration.ts +0 -168
  49. package/src/memory/processors/research-digester.ts +0 -204
  50. package/test-caitlin-access.md +0 -11
  51. /package/dist/{chunk-MLY4VFOO.js.map → chunk-BHCDOHSK.js.map} +0 -0
  52. /package/dist/{chunk-PFZO67E2.js.map → chunk-DPLCEMEC.js.map} +0 -0
  53. /package/dist/{chunk-HTLHMXAL.js.map → chunk-FBQBBAPZ.js.map} +0 -0
  54. /package/dist/{chunk-4Y7A25UG.js.map → chunk-IXBIAX76.js.map} +0 -0
@@ -0,0 +1,887 @@
1
+ mod context;
2
+ mod embeddings;
3
+ mod promotion;
4
+ mod scoring;
5
+ mod store;
6
+ mod tasks;
7
+
8
+ use axum::{
9
+ extract::{Path, Query, State},
10
+ http::StatusCode,
11
+ routing::{delete, get, patch, post},
12
+ Json, Router,
13
+ };
14
+ use serde::{Deserialize, Serialize};
15
+ use std::sync::Arc;
16
+ use tower_http::trace::TraceLayer;
17
+ use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
18
+
19
+ use crate::context::ContextStore;
20
+ use crate::embeddings::OllamaClient;
21
+ use crate::promotion::PromotionEngine;
22
+ use crate::store::{Episode, EpisodeInput, MemoryStore};
23
+ use crate::tasks::{TaskInput, TaskRecord, TaskStore, TaskUpdate};
24
+
25
+ use crate::scoring::ScoringConfig;
26
+ use std::collections::HashMap;
27
+
28
/// Shared application state handed to every handler by axum.
///
/// Each field is `Arc`-wrapped, so the derived `Clone` (one clone per
/// request) is only reference-count bumps, never a data copy.
#[derive(Clone)]
struct AppState {
    store: Arc<MemoryStore>,                // episode + vector storage
    ollama: Arc<OllamaClient>,              // embedding backend client
    context_store: Arc<ContextStore>,       // context metadata
    promotion_engine: Arc<PromotionEngine>, // access tracking and promotion logic
    task_store: Arc<TaskStore>,             // task queue
    scoring_config: Arc<ScoringConfig>,     // per-context recency half-lives
}
37
+
38
/// Query parameters for GET /search and GET /search/cross-context.
#[derive(Deserialize)]
struct SearchQuery {
    q: String,               // query text to embed
    context: Option<String>, // context name; handlers default to "global"
    limit: Option<usize>,    // max hits; handlers default to 10
}

/// Response body for GET /search.
#[derive(Serialize)]
struct SearchResult {
    episodes: Vec<ScoredEpisode>,
}

/// An episode plus its combined relevance score. The episode's own
/// fields are flattened into this object in the JSON output.
#[derive(Serialize)]
struct ScoredEpisode {
    #[serde(flatten)]
    episode: Episode,
    score: f64, // combined distance + recency score (see scoring module)
    #[serde(skip_serializing_if = "Option::is_none")]
    source_context: Option<String>, // set only in cross-context search results
}

/// Request body for POST /contexts.
#[derive(Deserialize)]
struct CreateContextRequest {
    name: String,
    #[serde(default)]
    description: String, // optional; empty string when omitted
}

/// Response body for GET /health.
#[derive(Serialize)]
struct HealthResponse {
    status: String,  // always "ok" when the handler runs
    version: String, // crate version baked in at compile time
}

/// One context entry in the GET /contexts listing.
#[derive(Serialize)]
struct ContextInfoResponse {
    name: String,
    description: String,
    created_at: String,
    episode_count: usize, // best-effort count; 0 if the count query fails
}

/// Response body for GET /contexts.
#[derive(Serialize)]
struct ListContextsResponse {
    contexts: Vec<ContextInfoResponse>,
}

/// Hits from a single context in a cross-context search.
#[derive(Serialize)]
struct CrossContextResult {
    context: String,
    episodes: Vec<ScoredEpisode>,
}

/// Response body for GET /search/cross-context.
#[derive(Serialize)]
struct CrossContextResponse {
    results: Vec<CrossContextResult>,
}

/// Request body for POST /episodes/{id}/share.
#[derive(Deserialize)]
struct ShareEpisodeRequest {
    target_context: String, // context the episode is copied into
}

/// Request body for POST /episodes/co-access.
#[derive(Deserialize)]
struct CoAccessRequest {
    episode_ids: Vec<String>, // episodes that were accessed together
}

/// Response body for POST /promotion/run.
#[derive(Serialize)]
struct PromotionResponse {
    promoted_count: usize,
    episode_ids: Vec<String>, // ids promoted in this run
}

/// Query parameters for POST /promotion/run.
#[derive(Deserialize)]
struct PromotionQuery {
    context: Option<String>, // restrict the run to one context when set
}

/// Query parameters for GET /tasks.
#[derive(Deserialize)]
struct TaskQuery {
    context: String,
    status: Option<String>, // optional status filter
}

/// Query parameters for GET /tasks/next.
#[derive(Deserialize)]
struct NextTaskQuery {
    context: String,
}

/// Response body for GET /tasks.
#[derive(Serialize)]
struct ListTasksResponse {
    tasks: Vec<TaskRecord>,
}
132
+
133
/// Entry point for the memory daemon: configures logging, opens the
/// stores, wires the HTTP routes, and serves until terminated.
///
/// Environment variables (all optional):
/// - `OLLAMA_URL`      — embedding server base URL (default `http://localhost:11434`)
/// - `EMBEDDING_MODEL` — Ollama embedding model name (default `nomic-embed-text`)
/// - `DB_PATH`         — database directory (default `./data/lancedb`)
/// - `PORT`            — TCP port to listen on (default `3434`)
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Logging: RUST_LOG takes precedence; otherwise default to debug
    // for this crate and tower_http.
    tracing_subscriber::registry()
        .with(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| "hivemind_memory=debug,tower_http=debug".into()),
        )
        .with(tracing_subscriber::fmt::layer())
        .init();

    let ollama_url =
        std::env::var("OLLAMA_URL").unwrap_or_else(|_| "http://localhost:11434".to_string());
    let embedding_model =
        std::env::var("EMBEDDING_MODEL").unwrap_or_else(|_| "nomic-embed-text".to_string());
    let db_path = std::env::var("DB_PATH").unwrap_or_else(|_| "./data/lancedb".to_string());
    let port: u16 = std::env::var("PORT")
        .unwrap_or_else(|_| "3434".to_string())
        .parse()?;

    // All secondary stores share the primary store's DB connection.
    let store = MemoryStore::new(&db_path).await?;
    let ollama = OllamaClient::new(&ollama_url, &embedding_model);
    let context_store = ContextStore::new(store.get_connection()).await?;
    let promotion_engine = PromotionEngine::new(store.get_connection()).await?;
    let task_store = TaskStore::new(store.get_connection()).await?;

    let scoring_config = ScoringConfig::new();

    let state = AppState {
        store: Arc::new(store),
        ollama: Arc::new(ollama),
        context_store: Arc::new(context_store),
        promotion_engine: Arc::new(promotion_engine),
        task_store: Arc::new(task_store),
        scoring_config: Arc::new(scoring_config),
    };

    // Route table. Static segments (e.g. /access/top, /tasks/next) are
    // registered before their dynamic {param} siblings.
    let app = Router::new()
        // Health
        .route("/health", get(health))
        // Stats
        .route("/stats", get(get_stats))
        // Episodes
        .route("/episodes", post(create_episode))
        .route("/episodes/{id}", get(get_episode))
        .route("/episodes/{id}/access", post(record_access))
        .route("/episodes/{id}/share", post(share_episode))
        .route("/episodes/co-access", post(record_co_access))
        // Search
        .route("/search", get(search_episodes))
        .route("/search/cross-context", get(search_cross_context))
        // Contexts
        .route("/contexts", get(list_contexts))
        .route("/contexts", post(create_context))
        .route("/contexts/{name}", get(get_context_episodes))
        .route("/contexts/{name}", delete(delete_context))
        // Scoring config
        .route("/contexts/{name}/scoring", post(set_context_scoring))
        .route("/scoring/{context}", get(get_scoring_config))
        // Promotion
        .route("/promotion/run", post(run_promotion))
        .route("/promotion/l3", get(get_l3_knowledge))
        .route("/promotion/l3/{id}", delete(delete_l3))
        .route("/promotion/l3/{id}", patch(update_l3))
        // Access tracking (static route before dynamic)
        .route("/access/top", get(get_access_top))
        .route("/access/{episode_id}", get(get_access))
        // Cooccurrence
        .route("/cooccurrence/{episode_id}", get(get_cooccurrence))
        // Tasks
        .route("/tasks", get(list_tasks))
        .route("/tasks", post(create_task))
        .route("/tasks/next", get(get_next_task))
        .route("/tasks/{id}", patch(update_task))
        .layer(TraceLayer::new_for_http())
        .with_state(state);

    // Bind on all interfaces and serve until the process is killed.
    let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", port)).await?;
    tracing::info!("Memory daemon listening on port {}", port);
    axum::serve(listener, app).await?;

    Ok(())
}
215
+
216
+ async fn health() -> Json<HealthResponse> {
217
+ Json(HealthResponse {
218
+ status: "ok".to_string(),
219
+ version: env!("CARGO_PKG_VERSION").to_string(),
220
+ })
221
+ }
222
+
223
+ // === Episode endpoints ===
224
+
225
/// POST /episodes — embed the content via Ollama and persist the episode.
///
/// Returns the stored `Episode` on success. As a side effect, when the
/// episode's context is not "global", a detached background task checks
/// whether similar content already appears in 3 or more distinct contexts
/// and, if so, inserts a copy into the "global" context. The task is
/// best-effort: its failures are logged, never returned to the caller.
async fn create_episode(
    State(state): State<AppState>,
    Json(input): Json<EpisodeInput>,
) -> Result<Json<Episode>, (StatusCode, String)> {
    // Clone what the background check needs before `input` is moved below.
    let content_for_check = input.content.clone();
    let context_for_check = input.context_name.clone().unwrap_or_else(|| "global".to_string());

    let embedding = state
        .ollama
        .embed(&input.content)
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Embedding failed: {e}")))?;

    let episode = state
        .store
        .insert_episode(input, embedding.clone())
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Store failed: {e}")))?;

    // Check for auto-promotion to Global: if similar content exists in 3+ contexts
    if context_for_check != "global" {
        let state_clone = state.clone();
        let content = content_for_check;
        let embed = embedding; // reuse the embedding computed above; no second Ollama call
        tokio::spawn(async move {
            if let Ok(contexts) = state_clone
                .promotion_engine
                .check_cross_context_promotion(&content)
                .await
            {
                // Deduplicate: the engine may report the same context more than once.
                let unique_contexts: std::collections::HashSet<&str> =
                    contexts.iter().map(|s| s.as_str()).collect();
                if unique_contexts.len() >= 3 {
                    // Auto-promote to global context
                    let global_input = EpisodeInput {
                        context_name: Some("global".to_string()),
                        role: "system".to_string(),
                        content: content.clone(),
                    };
                    if let Err(e) = state_clone.store.insert_episode(global_input, embed).await {
                        tracing::warn!("Auto-promotion to global failed: {}", e);
                    } else {
                        tracing::info!(
                            "Auto-promoted episode to global (referenced in {} contexts)",
                            unique_contexts.len()
                        );
                    }
                }
            }
        });
    }

    Ok(Json(episode))
}
279
+
280
+ async fn record_access(
281
+ State(state): State<AppState>,
282
+ Path(id): Path<String>,
283
+ ) -> Result<StatusCode, (StatusCode, String)> {
284
+ state
285
+ .promotion_engine
286
+ .record_access(&id)
287
+ .await
288
+ .map_err(|e| {
289
+ (
290
+ StatusCode::INTERNAL_SERVER_ERROR,
291
+ format!("Record access failed: {e}"),
292
+ )
293
+ })?;
294
+
295
+ Ok(StatusCode::NO_CONTENT)
296
+ }
297
+
298
+ async fn share_episode(
299
+ State(state): State<AppState>,
300
+ Path(id): Path<String>,
301
+ Json(req): Json<ShareEpisodeRequest>,
302
+ ) -> Result<StatusCode, (StatusCode, String)> {
303
+ // Get the original episode
304
+ let episode = state
305
+ .store
306
+ .get_by_id(&id)
307
+ .await
308
+ .map_err(|e| {
309
+ (
310
+ StatusCode::INTERNAL_SERVER_ERROR,
311
+ format!("Get episode failed: {e}"),
312
+ )
313
+ })?
314
+ .ok_or_else(|| (StatusCode::NOT_FOUND, "Episode not found".to_string()))?;
315
+
316
+ // Re-embed and store in target context
317
+ let embedding = state
318
+ .ollama
319
+ .embed(&episode.content)
320
+ .await
321
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Embedding failed: {e}")))?;
322
+
323
+ let input = EpisodeInput {
324
+ context_name: Some(req.target_context),
325
+ role: episode.role,
326
+ content: episode.content,
327
+ };
328
+
329
+ state
330
+ .store
331
+ .insert_episode(input, embedding)
332
+ .await
333
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Store failed: {e}")))?;
334
+
335
+ Ok(StatusCode::CREATED)
336
+ }
337
+
338
+ async fn record_co_access(
339
+ State(state): State<AppState>,
340
+ Json(req): Json<CoAccessRequest>,
341
+ ) -> Result<StatusCode, (StatusCode, String)> {
342
+ state
343
+ .promotion_engine
344
+ .record_co_access(&req.episode_ids)
345
+ .await
346
+ .map_err(|e| {
347
+ (
348
+ StatusCode::INTERNAL_SERVER_ERROR,
349
+ format!("Co-access failed: {e}"),
350
+ )
351
+ })?;
352
+
353
+ Ok(StatusCode::NO_CONTENT)
354
+ }
355
+
356
+ // === Search endpoints ===
357
+
358
/// GET /search — vector search within a single context.
///
/// Params: `q` (text, embedded via Ollama), `context` (default "global"),
/// `limit` (default 10). Each hit's score combines its vector distance
/// with recency decay using the context's configured half-life.
async fn search_episodes(
    State(state): State<AppState>,
    Query(query): Query<SearchQuery>,
) -> Result<Json<SearchResult>, (StatusCode, String)> {
    let embedding = state
        .ollama
        .embed(&query.q)
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Embedding failed: {e}")))?;

    let limit = query.limit.unwrap_or(10);
    let context = query.context.as_deref().unwrap_or("global");

    let results = state
        .store
        .search(embedding, context, limit)
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Search failed: {e}")))?;

    // Re-rank raw (episode, distance) pairs with the context's recency decay.
    let half_life = state.scoring_config.get_half_life(context);
    let scored: Vec<ScoredEpisode> = results
        .into_iter()
        .map(|(episode, distance)| {
            let score = scoring::combined_score(&episode.timestamp, distance, half_life);
            ScoredEpisode {
                episode,
                score,
                source_context: None, // only populated by cross-context search
            }
        })
        .collect();

    Ok(Json(SearchResult { episodes: scored }))
}
392
+
393
+ async fn search_cross_context(
394
+ State(state): State<AppState>,
395
+ Query(query): Query<SearchQuery>,
396
+ ) -> Result<Json<CrossContextResponse>, (StatusCode, String)> {
397
+ let embedding = state
398
+ .ollama
399
+ .embed(&query.q)
400
+ .await
401
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Embedding failed: {e}")))?;
402
+
403
+ let limit = query.limit.unwrap_or(10);
404
+
405
+ // Search across all contexts
406
+ let results = state
407
+ .store
408
+ .search_all(embedding, limit)
409
+ .await
410
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Search failed: {e}")))?;
411
+
412
+ // Group by context
413
+ let mut by_context: std::collections::HashMap<String, Vec<ScoredEpisode>> =
414
+ std::collections::HashMap::new();
415
+
416
+ for (episode, distance) in results {
417
+ let ctx = episode.context_name.clone();
418
+ let half_life = state.scoring_config.get_half_life(&ctx);
419
+ let score = scoring::combined_score(&episode.timestamp, distance, half_life);
420
+ by_context
421
+ .entry(ctx.clone())
422
+ .or_default()
423
+ .push(ScoredEpisode {
424
+ episode,
425
+ score,
426
+ source_context: Some(ctx),
427
+ });
428
+ }
429
+
430
+ let results: Vec<CrossContextResult> = by_context
431
+ .into_iter()
432
+ .map(|(context, episodes)| CrossContextResult { context, episodes })
433
+ .collect();
434
+
435
+ Ok(Json(CrossContextResponse { results }))
436
+ }
437
+
438
+ // === Context endpoints ===
439
+
440
+ async fn list_contexts(
441
+ State(state): State<AppState>,
442
+ ) -> Result<Json<ListContextsResponse>, (StatusCode, String)> {
443
+ let contexts = state
444
+ .context_store
445
+ .list_contexts()
446
+ .await
447
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("List failed: {e}")))?;
448
+
449
+ let mut infos = Vec::new();
450
+ for ctx in contexts {
451
+ let count = state
452
+ .store
453
+ .count_by_context(&ctx.name)
454
+ .await
455
+ .unwrap_or(0);
456
+ infos.push(ContextInfoResponse {
457
+ name: ctx.name,
458
+ description: ctx.description,
459
+ created_at: ctx.created_at,
460
+ episode_count: count,
461
+ });
462
+ }
463
+
464
+ Ok(Json(ListContextsResponse { contexts: infos }))
465
+ }
466
+
467
+ async fn create_context(
468
+ State(state): State<AppState>,
469
+ Json(req): Json<CreateContextRequest>,
470
+ ) -> Result<(StatusCode, Json<serde_json::Value>), (StatusCode, String)> {
471
+ state
472
+ .context_store
473
+ .create_context(&req.name, &req.description)
474
+ .await
475
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Failed: {e}")))?;
476
+
477
+ Ok((
478
+ StatusCode::CREATED,
479
+ Json(serde_json::json!({ "name": req.name, "created": true })),
480
+ ))
481
+ }
482
+
483
+ async fn get_context_episodes(
484
+ State(state): State<AppState>,
485
+ Path(name): Path<String>,
486
+ ) -> Result<Json<Vec<Episode>>, (StatusCode, String)> {
487
+ let episodes = state
488
+ .store
489
+ .get_by_context(&name)
490
+ .await
491
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Query failed: {e}")))?;
492
+
493
+ Ok(Json(episodes))
494
+ }
495
+
496
/// DELETE /contexts/{name} — delete a context and its episodes.
///
/// NOTE(review): the context row and its episodes are removed in two
/// separate, non-atomic steps; if the second delete fails, the context is
/// already gone and its episodes are orphaned — confirm whether that is
/// acceptable or cleaned up elsewhere.
async fn delete_context(
    State(state): State<AppState>,
    Path(name): Path<String>,
) -> Result<StatusCode, (StatusCode, String)> {
    state
        .context_store
        .delete_context(&name)
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Delete failed: {e}")))?;

    // Also delete episodes in that context
    state
        .store
        .delete_by_context(&name)
        .await
        .map_err(|e| {
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Delete episodes failed: {e}"),
            )
        })?;

    Ok(StatusCode::NO_CONTENT)
}
520
+
521
+ // === Scoring config endpoint ===
522
+
523
/// Request body for POST /contexts/{name}/scoring.
#[derive(Deserialize)]
struct ScoringConfigRequest {
    half_life_hours: f64, // recency half-life applied to that context's scores
}

/// Response body for GET /stats.
#[derive(Serialize)]
struct StatsResponse {
    total_episodes: HashMap<String, usize>, // episode count keyed by context name
    total_l3_entries: usize,
    total_access_records: usize,
    promotion_thresholds: PromotionThresholds,
    embedding_model: String, // from EMBEDDING_MODEL env (default nomic-embed-text)
    default_half_life_hours: f64,
}

/// Promotion thresholds reported by /stats.
/// NOTE(review): the stats handler hardcodes these values (5 / 3) —
/// confirm they match the promotion engine's actual configuration.
#[derive(Serialize)]
struct PromotionThresholds {
    access: u64,
    cooccurrence: u64,
}

/// Query parameters for GET /access/top.
#[derive(Deserialize)]
struct AccessTopQuery {
    limit: Option<usize>, // max records; handler defaults to 50
}

/// Response body for GET /access/top.
#[derive(Serialize)]
struct AccessTopResponse {
    records: Vec<AccessRecordWithDensity>,
}

/// One enriched access record in GET /access/top.
#[derive(Serialize)]
struct AccessRecordWithDensity {
    episode_id: String,
    access_count: u64,
    last_accessed: String,
    connection_density: u64, // sum of co-occurrence counts for this episode
    is_promoted: bool,       // true when an L3 entry references this episode
    context_name: Option<String>,    // None when the episode lookup fails
    content_preview: Option<String>, // truncated content; None when lookup fails
}

/// Response body for GET /cooccurrence/{episode_id}.
#[derive(Serialize)]
struct CooccurrenceResponse {
    pairs: Vec<promotion::CooccurrenceRecord>,
}

/// Response body for GET /scoring/{context}.
#[derive(Serialize)]
struct ScoringResponse {
    context: String,
    half_life_hours: f64, // effective half-life for this context
    is_default: bool,     // true when no per-context override exists
    all_half_lives: HashMap<String, f64>, // every explicit per-context override
    default_half_life_hours: f64,
}

/// Request body for PATCH /promotion/l3/{id}.
#[derive(Deserialize)]
struct L3UpdateRequest {
    content: String, // replacement content for the L3 entry
}
583
+
584
+ async fn set_context_scoring(
585
+ State(state): State<AppState>,
586
+ Path(name): Path<String>,
587
+ Json(req): Json<ScoringConfigRequest>,
588
+ ) -> StatusCode {
589
+ state.scoring_config.set_half_life(&name, req.half_life_hours);
590
+ tracing::info!("Set half-life for context '{}' to {} hours", name, req.half_life_hours);
591
+ StatusCode::NO_CONTENT
592
+ }
593
+
594
+ // === Promotion endpoint ===
595
+
596
+ async fn run_promotion(
597
+ State(state): State<AppState>,
598
+ Query(query): Query<PromotionQuery>,
599
+ ) -> Result<Json<PromotionResponse>, (StatusCode, String)> {
600
+ let promoted = state
601
+ .promotion_engine
602
+ .run_promotion(query.context.as_deref())
603
+ .await
604
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Promotion failed: {e}")))?;
605
+
606
+ Ok(Json(PromotionResponse {
607
+ promoted_count: promoted.len(),
608
+ episode_ids: promoted,
609
+ }))
610
+ }
611
+
612
/// Query parameters for GET /promotion/l3.
#[derive(Deserialize)]
struct L3Query {
    context: String, // context whose L3 entries are listed
}

/// Response body for GET /promotion/l3.
#[derive(Serialize)]
struct L3Response {
    entries: Vec<promotion::L3Entry>,
}
621
+
622
+ async fn get_l3_knowledge(
623
+ State(state): State<AppState>,
624
+ Query(query): Query<L3Query>,
625
+ ) -> Result<Json<L3Response>, (StatusCode, String)> {
626
+ let entries = state
627
+ .promotion_engine
628
+ .get_l3_entries(&query.context)
629
+ .await
630
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("L3 query failed: {e}")))?;
631
+
632
+ Ok(Json(L3Response { entries }))
633
+ }
634
+
635
+ // === Stats endpoint ===
636
+
637
/// GET /stats — aggregate statistics across the whole daemon.
///
/// Collects per-context episode counts (best-effort: a failed count is
/// reported as 0), total L3 entries, total access records, and the
/// current embedding/scoring configuration.
async fn get_stats(
    State(state): State<AppState>,
) -> Result<Json<StatsResponse>, (StatusCode, String)> {
    let contexts = state
        .context_store
        .list_contexts()
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("List failed: {e}")))?;

    let mut episode_counts = HashMap::new();
    for ctx in &contexts {
        let count = state.store.count_by_context(&ctx.name).await.unwrap_or(0);
        episode_counts.insert(ctx.name.clone(), count);
    }

    let all_l3 = state
        .promotion_engine
        .get_all_l3_entries()
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("L3 query failed: {e}")))?;

    let all_access = state
        .promotion_engine
        .get_all_access_records()
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Access query failed: {e}")))?;

    // Re-read the env var rather than threading the value through AppState.
    let embedding_model =
        std::env::var("EMBEDDING_MODEL").unwrap_or_else(|_| "nomic-embed-text".to_string());

    Ok(Json(StatsResponse {
        total_episodes: episode_counts,
        total_l3_entries: all_l3.len(),
        total_access_records: all_access.len(),
        // NOTE(review): thresholds are hardcoded here — confirm they match
        // the promotion engine's actual configuration.
        promotion_thresholds: PromotionThresholds {
            access: 5,
            cooccurrence: 3,
        },
        embedding_model,
        default_half_life_hours: ScoringConfig::get_default_half_life(),
    }))
}
679
+
680
+ // === Single episode endpoint ===
681
+
682
+ async fn get_episode(
683
+ State(state): State<AppState>,
684
+ Path(id): Path<String>,
685
+ ) -> Result<Json<Episode>, (StatusCode, String)> {
686
+ let episode = state
687
+ .store
688
+ .get_by_id(&id)
689
+ .await
690
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Query failed: {e}")))?
691
+ .ok_or_else(|| (StatusCode::NOT_FOUND, "Episode not found".to_string()))?;
692
+ Ok(Json(episode))
693
+ }
694
+
695
+ // === Access & Cooccurrence endpoints ===
696
+
697
/// GET /access/{episode_id} — access statistics for one episode.
///
/// Fetches the full access-record list and scans it linearly for the id.
/// Returns a zeroed record (count 0, empty timestamp) instead of 404 when
/// the episode has never been accessed.
/// NOTE(review): the linear scan is O(total records) per request — fine
/// for small stores, worth a keyed lookup on the engine if this grows.
async fn get_access(
    State(state): State<AppState>,
    Path(episode_id): Path<String>,
) -> Result<Json<promotion::AccessRecord>, (StatusCode, String)> {
    let record = state
        .promotion_engine
        .get_all_access_records()
        .await
        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Query failed: {e}")))?
        .into_iter()
        .find(|r| r.episode_id == episode_id);

    match record {
        Some(r) => Ok(Json(r)),
        // Never-accessed episodes get a synthetic empty record.
        None => Ok(Json(promotion::AccessRecord {
            episode_id,
            access_count: 0,
            last_accessed: String::new(),
        })),
    }
}
718
+
719
+ async fn get_access_top(
720
+ State(state): State<AppState>,
721
+ Query(query): Query<AccessTopQuery>,
722
+ ) -> Result<Json<AccessTopResponse>, (StatusCode, String)> {
723
+ let limit = query.limit.unwrap_or(50);
724
+ let records = state
725
+ .promotion_engine
726
+ .get_all_access_records()
727
+ .await
728
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Query failed: {e}")))?;
729
+
730
+ let mut enriched = Vec::new();
731
+ for record in records.into_iter().take(limit) {
732
+ let density = state
733
+ .promotion_engine
734
+ .get_cooccurrence_pairs(&record.episode_id)
735
+ .await
736
+ .map(|pairs| pairs.iter().map(|p| p.count).sum::<u64>())
737
+ .unwrap_or(0);
738
+ let is_promoted = state
739
+ .promotion_engine
740
+ .get_all_l3_entries()
741
+ .await
742
+ .map(|entries| entries.iter().any(|e| e.source_episode_id == record.episode_id))
743
+ .unwrap_or(false);
744
+ let context_name = state.store.get_by_id(&record.episode_id).await.ok().flatten().map(|e| e.context_name);
745
+ let content_preview = state.store.get_by_id(&record.episode_id).await.ok().flatten().map(|e| {
746
+ if e.content.len() > 200 { format!("{}...", &e.content[..200]) } else { e.content }
747
+ });
748
+
749
+ enriched.push(AccessRecordWithDensity {
750
+ episode_id: record.episode_id,
751
+ access_count: record.access_count,
752
+ last_accessed: record.last_accessed,
753
+ connection_density: density,
754
+ is_promoted,
755
+ context_name,
756
+ content_preview,
757
+ });
758
+ }
759
+
760
+ Ok(Json(AccessTopResponse { records: enriched }))
761
+ }
762
+
763
+ async fn get_cooccurrence(
764
+ State(state): State<AppState>,
765
+ Path(episode_id): Path<String>,
766
+ ) -> Result<Json<CooccurrenceResponse>, (StatusCode, String)> {
767
+ let pairs = state
768
+ .promotion_engine
769
+ .get_cooccurrence_pairs(&episode_id)
770
+ .await
771
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Query failed: {e}")))?;
772
+ Ok(Json(CooccurrenceResponse { pairs }))
773
+ }
774
+
775
+ // === Scoring config GET endpoint ===
776
+
777
+ async fn get_scoring_config(
778
+ State(state): State<AppState>,
779
+ Path(context): Path<String>,
780
+ ) -> Json<ScoringResponse> {
781
+ let half_life = state.scoring_config.get_half_life(&context);
782
+ let all = state.scoring_config.get_all_half_lives();
783
+ let is_default = !all.contains_key(&context);
784
+ Json(ScoringResponse {
785
+ context,
786
+ half_life_hours: half_life,
787
+ is_default,
788
+ all_half_lives: all,
789
+ default_half_life_hours: ScoringConfig::get_default_half_life(),
790
+ })
791
+ }
792
+
793
+ // === L3 Delete & Update endpoints ===
794
+
795
+ async fn delete_l3(
796
+ State(state): State<AppState>,
797
+ Path(id): Path<String>,
798
+ ) -> Result<StatusCode, (StatusCode, String)> {
799
+ let deleted = state
800
+ .promotion_engine
801
+ .delete_l3_entry(&id)
802
+ .await
803
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Delete failed: {e}")))?;
804
+ if deleted {
805
+ Ok(StatusCode::NO_CONTENT)
806
+ } else {
807
+ Err((StatusCode::NOT_FOUND, "L3 entry not found".to_string()))
808
+ }
809
+ }
810
+
811
+ async fn update_l3(
812
+ State(state): State<AppState>,
813
+ Path(id): Path<String>,
814
+ Json(req): Json<L3UpdateRequest>,
815
+ ) -> Result<StatusCode, (StatusCode, String)> {
816
+ let updated = state
817
+ .promotion_engine
818
+ .update_l3_content(&id, &req.content)
819
+ .await
820
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Update failed: {e}")))?;
821
+ if updated {
822
+ Ok(StatusCode::NO_CONTENT)
823
+ } else {
824
+ Err((StatusCode::NOT_FOUND, "L3 entry not found".to_string()))
825
+ }
826
+ }
827
+
828
+ // === Task endpoints ===
829
+
830
+ async fn create_task(
831
+ State(state): State<AppState>,
832
+ Json(input): Json<TaskInput>,
833
+ ) -> Result<(StatusCode, Json<TaskRecord>), (StatusCode, String)> {
834
+ let task = state
835
+ .task_store
836
+ .create_task(input)
837
+ .await
838
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Create task failed: {e}")))?;
839
+
840
+ Ok((StatusCode::CREATED, Json(task)))
841
+ }
842
+
843
+ async fn list_tasks(
844
+ State(state): State<AppState>,
845
+ Query(query): Query<TaskQuery>,
846
+ ) -> Result<Json<ListTasksResponse>, (StatusCode, String)> {
847
+ let tasks = state
848
+ .task_store
849
+ .list_tasks(&query.context, query.status.as_deref())
850
+ .await
851
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("List tasks failed: {e}")))?;
852
+
853
+ Ok(Json(ListTasksResponse { tasks }))
854
+ }
855
+
856
+ async fn get_next_task(
857
+ State(state): State<AppState>,
858
+ Query(query): Query<NextTaskQuery>,
859
+ ) -> Result<Json<TaskRecord>, (StatusCode, String)> {
860
+ let task = state
861
+ .task_store
862
+ .get_next_task(&query.context)
863
+ .await
864
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Get next task failed: {e}")))?;
865
+
866
+ match task {
867
+ Some(t) => Ok(Json(t)),
868
+ None => Err((StatusCode::NOT_FOUND, "No available tasks".to_string())),
869
+ }
870
+ }
871
+
872
+ async fn update_task(
873
+ State(state): State<AppState>,
874
+ Path(id): Path<String>,
875
+ Json(update): Json<TaskUpdate>,
876
+ ) -> Result<Json<TaskRecord>, (StatusCode, String)> {
877
+ let task = state
878
+ .task_store
879
+ .update_task(&id, update)
880
+ .await
881
+ .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Update task failed: {e}")))?;
882
+
883
+ match task {
884
+ Some(t) => Ok(Json(t)),
885
+ None => Err((StatusCode::NOT_FOUND, "Task not found".to_string())),
886
+ }
887
+ }