remdb 0.3.181-py3-none-any.whl → 0.3.202-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of remdb might be problematic.

@@ -94,14 +94,14 @@ def generate_table_schema(
     # Always add id as primary key
     columns.append("id UUID PRIMARY KEY DEFAULT uuid_generate_v4()")
 
-    # Add tenant_id if tenant scoped
+    # Add tenant_id if tenant scoped (nullable - NULL means public/shared)
     if tenant_scoped:
-        columns.append("tenant_id VARCHAR(100) NOT NULL")
-        indexes.append(f"CREATE INDEX idx_{table_name}_tenant ON {table_name} (tenant_id);")
+        columns.append("tenant_id VARCHAR(100)")
+        indexes.append(f"CREATE INDEX IF NOT EXISTS idx_{table_name}_tenant ON {table_name} (tenant_id);")
 
     # Add user_id (owner field)
     columns.append("user_id VARCHAR(256)")
-    indexes.append(f"CREATE INDEX idx_{table_name}_user ON {table_name} (user_id);")
+    indexes.append(f"CREATE INDEX IF NOT EXISTS idx_{table_name}_user ON {table_name} (user_id);")
 
     # Process Pydantic fields (skip system fields)
     for field_name, field_info in model.model_fields.items():
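
For orientation, a minimal sketch of what this generator now emits for a hypothetical tenant-scoped `document` model (the table name and the elided columns are illustrative, not taken from the package):

```python
# Illustrative output of generate_table_schema; "document" and the elided
# Pydantic-derived columns are hypothetical.
expected_ddl = """
CREATE TABLE IF NOT EXISTS document (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    tenant_id VARCHAR(100),  -- nullable as of this release: NULL = public/shared
    user_id VARCHAR(256)
    -- ... columns derived from the Pydantic model, plus graph_edges/metadata/tags ...
);
"""
expected_indexes = [
    # IF NOT EXISTS makes repeated schema generation idempotent
    "CREATE INDEX IF NOT EXISTS idx_document_tenant ON document (tenant_id);",
    "CREATE INDEX IF NOT EXISTS idx_document_user ON document (user_id);",
]
```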
@@ -125,19 +125,19 @@ def generate_table_schema(
     # Add graph_edges JSONB field
     columns.append("graph_edges JSONB DEFAULT '[]'::jsonb")
     indexes.append(
-        f"CREATE INDEX idx_{table_name}_graph_edges ON {table_name} USING GIN (graph_edges);"
+        f"CREATE INDEX IF NOT EXISTS idx_{table_name}_graph_edges ON {table_name} USING GIN (graph_edges);"
     )
 
     # Add metadata JSONB field
     columns.append("metadata JSONB DEFAULT '{}'::jsonb")
     indexes.append(
-        f"CREATE INDEX idx_{table_name}_metadata ON {table_name} USING GIN (metadata);"
+        f"CREATE INDEX IF NOT EXISTS idx_{table_name}_metadata ON {table_name} USING GIN (metadata);"
     )
 
     # Add tags field (TEXT[] for list[str])
     columns.append("tags TEXT[] DEFAULT ARRAY[]::TEXT[]")
     indexes.append(
-        f"CREATE INDEX idx_{table_name}_tags ON {table_name} USING GIN (tags);"
+        f"CREATE INDEX IF NOT EXISTS idx_{table_name}_tags ON {table_name} USING GIN (tags);"
     )
 
     # Generate CREATE TABLE statement
@@ -202,10 +202,10 @@ CREATE TABLE IF NOT EXISTS {embeddings_table} (
 );
 
 -- Index for entity lookup (get all embeddings for entity)
-CREATE INDEX idx_{embeddings_table}_entity ON {embeddings_table} (entity_id);
+CREATE INDEX IF NOT EXISTS idx_{embeddings_table}_entity ON {embeddings_table} (entity_id);
 
 -- Index for field + provider lookup
-CREATE INDEX idx_{embeddings_table}_field_provider ON {embeddings_table} (field_name, provider);
+CREATE INDEX IF NOT EXISTS idx_{embeddings_table}_field_provider ON {embeddings_table} (field_name, provider);
 
 -- HNSW index for vector similarity search (created in background)
 -- Note: This will be created by background thread after data load
@@ -258,6 +258,7 @@ BEGIN
         RETURN OLD;
     ELSIF (TG_OP = 'INSERT' OR TG_OP = 'UPDATE') THEN
         -- Upsert to KV_STORE (O(1) lookup by entity_key)
+        -- tenant_id can be NULL (meaning public/shared data)
         INSERT INTO kv_store (
             entity_key,
             entity_type,
@@ -277,7 +278,7 @@ BEGIN
            COALESCE(NEW.graph_edges, '[]'::jsonb),
            CURRENT_TIMESTAMP
        )
-       ON CONFLICT (tenant_id, entity_key)
+       ON CONFLICT (COALESCE(tenant_id, ''), entity_key)
        DO UPDATE SET
            entity_id = EXCLUDED.entity_id,
            user_id = EXCLUDED.user_id,
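
The new conflict target only works because the kv_store migration below replaces the composite primary key with a unique expression index on `(COALESCE(tenant_id, ''), entity_key)`; Postgres resolves an `ON CONFLICT` target by matching it against a unique index expression, so the two must be written identically. A self-contained sketch of the mechanism, assuming asyncpg and a throwaway temp table (neither comes from the package):

```python
import asyncio
import asyncpg  # assumption: any Postgres driver behaves the same way here

async def demo(dsn: str) -> None:
    conn = await asyncpg.connect(dsn)
    # Same shape as the kv_store change: nullable tenant_id plus a unique
    # expression index that treats NULL tenant_id as the empty string.
    await conn.execute("""
        CREATE TEMP TABLE kv_demo (tenant_id VARCHAR(100), entity_key VARCHAR(255), n INT);
        CREATE UNIQUE INDEX ON kv_demo (COALESCE(tenant_id, ''), entity_key);
    """)
    # Two upserts with NULL tenant_id and the same key hit the same index entry,
    # so the second one updates instead of inserting a duplicate.
    for _ in range(2):
        await conn.execute("""
            INSERT INTO kv_demo VALUES (NULL, 'shared-key', 1)
            ON CONFLICT (COALESCE(tenant_id, ''), entity_key)
            DO UPDATE SET n = kv_demo.n + 1;
        """)
    print(await conn.fetchval("SELECT n FROM kv_demo"))  # -> 2
    await conn.close()

asyncio.run(demo("postgresql://localhost/postgres"))
```

With a plain `(tenant_id, entity_key)` target, rows with NULL tenant_id would never conflict at all, since SQL NULLs compare unequal in unique indexes.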
@@ -1,6 +1,12 @@
 """Session management services for conversation persistence and compression."""
 
 from .compression import MessageCompressor, SessionMessageStore
+from .pydantic_messages import session_to_pydantic_messages
 from .reload import reload_session
 
-__all__ = ["MessageCompressor", "SessionMessageStore", "reload_session"]
+__all__ = [
+    "MessageCompressor",
+    "SessionMessageStore",
+    "reload_session",
+    "session_to_pydantic_messages",
+]
@@ -65,7 +65,7 @@ def truncate_key(key: str, max_length: int = MAX_ENTITY_KEY_LENGTH) -> str:
         logger.warning(f"Truncated key from {len(key)} to {len(truncated)} chars: {key[:50]}...")
     return truncated
 
-from rem.models.entities import Message
+from rem.models.entities import Message, Session
 from rem.services.postgres import PostgresService, Repository
 from rem.settings import settings
 
@@ -177,6 +177,41 @@ class SessionMessageStore:
         self.user_id = user_id
         self.compressor = compressor or MessageCompressor()
         self.repo = Repository(Message)
+        self._session_repo = Repository(Session, table_name="sessions")
+
+    async def _ensure_session_exists(
+        self,
+        session_id: str,
+        user_id: str | None = None,
+    ) -> None:
+        """
+        Ensure session exists, creating it if necessary.
+
+        Args:
+            session_id: Session identifier (maps to Session.name)
+            user_id: Optional user identifier
+        """
+        try:
+            # Check if session already exists by name
+            existing = await self._session_repo.find(
+                filters={"name": session_id},
+                limit=1,
+            )
+            if existing:
+                return  # Session already exists
+
+            # Create new session
+            session = Session(
+                name=session_id,
+                user_id=user_id or self.user_id,
+                tenant_id=self.user_id,  # tenant_id set to user_id for scoping
+            )
+            await self._session_repo.upsert(session)
+            logger.info(f"Created session {session_id} for user {user_id or self.user_id}")
+
+        except Exception as e:
+            # Log but don't fail - session creation is best-effort
+            logger.warning(f"Failed to ensure session exists: {e}")
 
     async def store_message(
         self,
@@ -283,8 +318,10 @@ class SessionMessageStore:
         """
         Store all session messages and return compressed versions.
 
+        Ensures session exists before storing messages.
+
         Args:
-            session_id: Session identifier
+            session_id: Session identifier (maps to Session.name)
             messages: List of messages to store
             user_id: Optional user identifier
             compress: Whether to compress messages (default: True)
@@ -296,6 +333,9 @@ class SessionMessageStore:
             logger.debug("Postgres disabled, returning messages uncompressed")
             return messages
 
+        # Ensure session exists before storing messages
+        await self._ensure_session_exists(session_id, user_id)
+
         compressed_messages = []
 
         for idx, message in enumerate(messages):
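
Taken together, the two hunks above mean a caller no longer has to create the Session row itself. A usage sketch (the constructor arguments are inferred from the hunks, not a confirmed API):

```python
import asyncio
from rem.services.session import SessionMessageStore

async def main() -> None:
    store = SessionMessageStore(user_id="user-123")
    # store_session_messages() now upserts the Session row first (best-effort:
    # failures are logged, not raised), so a brand-new session_id no longer
    # leaves messages without a parent session.
    compressed = await store.store_session_messages(
        session_id="support-chat-42",
        messages=[{"role": "user", "content": "hello"}],
    )
    print(f"stored {len(compressed)} messages")

asyncio.run(main())
```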
@@ -0,0 +1,210 @@
+"""Convert stored session messages to pydantic-ai native message format.
+
+This module enables proper conversation history replay by converting our simplified
+storage format into pydantic-ai's native ModelRequest/ModelResponse types.
+
+Key insight: When we store tool results, we only store the result (ToolReturnPart).
+But LLM APIs require a matching ToolCallPart for each ToolReturnPart. So we synthesize
+the ToolCallPart from stored metadata (tool_name, tool_call_id, tool_arguments).
+
+Storage format (our simplified format):
+    {"role": "user", "content": "..."}
+    {"role": "assistant", "content": "..."}
+    {"role": "tool", "content": "{...}", "tool_name": "...", "tool_call_id": "...", "tool_arguments": {...}}
+
+Pydantic-ai format (what the LLM expects):
+    ModelRequest(parts=[UserPromptPart(content="...")])
+    ModelResponse(parts=[TextPart(content="..."), ToolCallPart(...)])  # Call
+    ModelRequest(parts=[ToolReturnPart(...)])  # Result
+
+Example usage:
+    from rem.services.session.pydantic_messages import session_to_pydantic_messages
+
+    # Load session history
+    session_history = await store.load_session_messages(session_id)
+
+    # Convert to pydantic-ai format
+    message_history = session_to_pydantic_messages(session_history)
+
+    # Use with agent.run()
+    result = await agent.run(user_prompt, message_history=message_history)
+"""
+
+import json
+from typing import Any
+
+from loguru import logger
+from pydantic_ai.messages import (
+    ModelMessage,
+    ModelRequest,
+    ModelResponse,
+    SystemPromptPart,
+    TextPart,
+    ToolCallPart,
+    ToolReturnPart,
+    UserPromptPart,
+)
+
+
+def session_to_pydantic_messages(
+    session_history: list[dict[str, Any]],
+    system_prompt: str | None = None,
+) -> list[ModelMessage]:
+    """Convert stored session messages to pydantic-ai ModelMessage format.
+
+    Handles the conversion of our simplified storage format to pydantic-ai's
+    native message types, including synthesizing ToolCallPart for tool results.
+
+    IMPORTANT: pydantic-ai only auto-adds system prompts when message_history is empty.
+    When passing message_history to agent.run(), you MUST include the system prompt
+    via the system_prompt parameter here.
+
+    Args:
+        session_history: List of message dicts from SessionMessageStore.load_session_messages().
+            Each dict has: role, content, and optionally tool_name, tool_call_id, tool_arguments.
+        system_prompt: The agent's system prompt (from schema description). This is REQUIRED
+            for proper agent behavior on subsequent turns, as pydantic-ai won't add it
+            automatically when message_history is provided.
+
+    Returns:
+        List of ModelMessage (ModelRequest | ModelResponse) ready for agent.run(message_history=...)
+
+    Note:
+        - System prompts ARE included as SystemPromptPart when system_prompt is provided
+        - Tool results require a synthesized ToolCallPart to satisfy LLM API requirements
+        - The first message in session_history should be "user" role (from context builder)
+    """
+    messages: list[ModelMessage] = []
+
+    # CRITICAL: Prepend agent's system prompt if provided.
+    # This ensures the agent's instructions are present on every turn;
+    # pydantic-ai only auto-adds system prompts when message_history is empty.
+    if system_prompt:
+        messages.append(ModelRequest(parts=[SystemPromptPart(content=system_prompt)]))
+        logger.debug(f"Prepended agent system prompt ({len(system_prompt)} chars) to message history")
+
+    # Track pending tool results to batch them with assistant responses.
+    # When we see a tool message, we need to:
+    # 1. Add a ModelResponse with ToolCallPart (synthesized)
+    # 2. Add a ModelRequest with ToolReturnPart (actual result)
+
+    i = 0
+    while i < len(session_history):
+        msg = session_history[i]
+        role = msg.get("role", "")
+        content = msg.get("content", "")
+
+        if role == "user":
+            # User messages become ModelRequest with UserPromptPart
+            messages.append(ModelRequest(parts=[UserPromptPart(content=content)]))
+
+        elif role == "assistant":
+            # Assistant text becomes ModelResponse with TextPart.
+            # Check if there are following tool messages that should be grouped.
+            tool_calls = []
+            tool_returns = []
+
+            # Look ahead for tool messages that follow this assistant message
+            j = i + 1
+            while j < len(session_history) and session_history[j].get("role") == "tool":
+                tool_msg = session_history[j]
+                tool_name = tool_msg.get("tool_name", "unknown_tool")
+                tool_call_id = tool_msg.get("tool_call_id", f"call_{j}")
+                tool_arguments = tool_msg.get("tool_arguments", {})
+                tool_content = tool_msg.get("content", "{}")
+
+                # Parse tool content if it's a JSON string
+                if isinstance(tool_content, str):
+                    try:
+                        tool_result = json.loads(tool_content)
+                    except json.JSONDecodeError:
+                        tool_result = {"raw": tool_content}
+                else:
+                    tool_result = tool_content
+
+                # Synthesize ToolCallPart (what the model "called")
+                tool_calls.append(ToolCallPart(
+                    tool_name=tool_name,
+                    args=tool_arguments if tool_arguments else {},
+                    tool_call_id=tool_call_id,
+                ))
+
+                # Create ToolReturnPart (the actual result)
+                tool_returns.append(ToolReturnPart(
+                    tool_name=tool_name,
+                    content=tool_result,
+                    tool_call_id=tool_call_id,
+                ))
+
+                j += 1
+
+            # Build the assistant's ModelResponse
+            response_parts = []
+
+            # Add tool calls first (if any)
+            response_parts.extend(tool_calls)
+
+            # Add text content (if any)
+            if content:
+                response_parts.append(TextPart(content=content))
+
+            # Only add a ModelResponse if we have parts
+            if response_parts:
+                messages.append(ModelResponse(
+                    parts=response_parts,
+                    model_name="recovered",  # We don't store the model name
+                ))
+
+            # Add tool returns as ModelRequest (required by LLM API)
+            if tool_returns:
+                messages.append(ModelRequest(parts=tool_returns))
+
+            # Skip the tool messages we just processed
+            i = j - 1
+
+        elif role == "tool":
+            # Orphan tool message (no preceding assistant) - synthesize both parts
+            tool_name = msg.get("tool_name", "unknown_tool")
+            tool_call_id = msg.get("tool_call_id", f"call_{i}")
+            tool_arguments = msg.get("tool_arguments", {})
+            tool_content = msg.get("content", "{}")
+
+            # Parse tool content
+            if isinstance(tool_content, str):
+                try:
+                    tool_result = json.loads(tool_content)
+                except json.JSONDecodeError:
+                    tool_result = {"raw": tool_content}
+            else:
+                tool_result = tool_content
+
+            # Synthesize the tool call (ModelResponse with ToolCallPart)
+            messages.append(ModelResponse(
+                parts=[ToolCallPart(
+                    tool_name=tool_name,
+                    args=tool_arguments if tool_arguments else {},
+                    tool_call_id=tool_call_id,
+                )],
+                model_name="recovered",
+            ))
+
+            # Add the tool return (ModelRequest with ToolReturnPart)
+            messages.append(ModelRequest(
+                parts=[ToolReturnPart(
+                    tool_name=tool_name,
+                    content=tool_result,
+                    tool_call_id=tool_call_id,
+                )]
+            ))
+
+        elif role == "system":
+            # Skip system messages - pydantic-ai handles these via Agent.system_prompt
+            logger.debug("Skipping system message in session history (handled by Agent)")
+
+        else:
+            logger.warning(f"Unknown message role in session history: {role}")
+
+        i += 1
+
+    logger.debug(f"Converted {len(session_history)} stored messages to {len(messages)} pydantic-ai messages")
+    return messages
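
To make the ToolCallPart synthesis concrete, a small hypothetical round trip through the converter (the stored dicts are illustrative):

```python
from rem.services.session import session_to_pydantic_messages

stored = [
    {"role": "user", "content": "What's the weather in Oslo?"},
    {"role": "assistant", "content": "Let me check."},
    {
        "role": "tool",
        "content": '{"temp_c": 4}',
        "tool_name": "get_weather",
        "tool_call_id": "call_1",
        "tool_arguments": {"city": "Oslo"},
    },
]

history = session_to_pydantic_messages(stored, system_prompt="You are a weather bot.")
# Resulting sequence:
#   ModelRequest([SystemPromptPart])          <- prepended system prompt
#   ModelRequest([UserPromptPart])            <- user turn
#   ModelResponse([ToolCallPart, TextPart])   <- call synthesized from metadata
#   ModelRequest([ToolReturnPart])            <- the stored result
```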
@@ -121,18 +121,18 @@ CREATE UNLOGGED TABLE IF NOT EXISTS kv_store (
     entity_key VARCHAR(255) NOT NULL,
     entity_type VARCHAR(100) NOT NULL,
     entity_id UUID NOT NULL,
-    tenant_id VARCHAR(100) NOT NULL,
+    tenant_id VARCHAR(100), -- NULL = public/shared data
     user_id VARCHAR(100),
     content_summary TEXT,
     metadata JSONB DEFAULT '{}',
     graph_edges JSONB DEFAULT '[]'::jsonb, -- Cached edges for fast graph traversal
     created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-
-    -- Composite primary key: entity_key unique per tenant
-    PRIMARY KEY (tenant_id, entity_key)
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
 );
 
+-- Unique constraint on (tenant_id, entity_key) using COALESCE to handle NULL tenant_id
+CREATE UNIQUE INDEX IF NOT EXISTS idx_kv_store_tenant_key ON kv_store (COALESCE(tenant_id, ''), entity_key);
+
 -- Index for user-scoped lookups (when user_id IS NOT NULL)
 CREATE INDEX IF NOT EXISTS idx_kv_store_user ON kv_store (tenant_id, user_id)
     WHERE user_id IS NOT NULL;
@@ -173,7 +173,7 @@ COMMENT ON COLUMN kv_store.entity_id IS
 'UUID from primary table for reverse lookup';
 
 COMMENT ON COLUMN kv_store.tenant_id IS
-'Tenant identifier for multi-tenancy isolation';
+'Tenant identifier for multi-tenancy isolation. NULL = public/shared data visible to all.';
 
 COMMENT ON COLUMN kv_store.user_id IS
 'Optional user scoping. NULL = system-level entity, visible to all users in tenant';
@@ -271,8 +271,12 @@ BEGIN
       AND kv.entity_key = normalize_key(p_entity_key)
     LIMIT 1;
 
-    -- If not found, return empty
+    -- If not found, check if cache is empty and maybe trigger rebuild
     IF entity_table IS NULL THEN
+        -- SELF-HEALING: Check if this is because cache is empty
+        IF rem_kv_store_empty(effective_user_id) THEN
+            PERFORM maybe_trigger_kv_rebuild(effective_user_id, 'rem_lookup');
+        END IF;
         RETURN;
     END IF;
 
@@ -357,6 +361,7 @@ DECLARE
     entities_by_table JSONB := '{}'::jsonb;
     table_keys JSONB;
     effective_user_id VARCHAR(100);
+    v_found_any BOOLEAN := FALSE;
 BEGIN
     effective_user_id := COALESCE(p_user_id, p_tenant_id);
 
@@ -373,6 +378,7 @@ BEGIN
         ORDER BY sim_score DESC
         LIMIT p_limit
     LOOP
+        v_found_any := TRUE;
         -- Build JSONB mapping {table: [keys]}
         IF entities_by_table ? kv_matches.entity_type THEN
             table_keys := entities_by_table->kv_matches.entity_type;
@@ -390,6 +396,11 @@ BEGIN
         END IF;
     END LOOP;
 
+    -- SELF-HEALING: If no matches and cache is empty, trigger rebuild
+    IF NOT v_found_any AND rem_kv_store_empty(effective_user_id) THEN
+        PERFORM maybe_trigger_kv_rebuild(effective_user_id, 'rem_fuzzy');
+    END IF;
+
     -- Fetch full records using rem_fetch (which now supports NULL user_id)
     RETURN QUERY
     SELECT
@@ -436,9 +447,25 @@ DECLARE
     entities_by_table JSONB := '{}'::jsonb;
     table_keys JSONB;
     effective_user_id VARCHAR(100);
+    v_found_start BOOLEAN := FALSE;
 BEGIN
     effective_user_id := COALESCE(p_user_id, p_tenant_id);
 
+    -- Check if start entity exists in kv_store
+    SELECT TRUE INTO v_found_start
+    FROM kv_store kv
+    WHERE (kv.user_id = effective_user_id OR kv.user_id IS NULL)
+      AND kv.entity_key = normalize_key(p_entity_key)
+    LIMIT 1;
+
+    -- SELF-HEALING: If start not found and cache is empty, trigger rebuild
+    IF NOT COALESCE(v_found_start, FALSE) THEN
+        IF rem_kv_store_empty(effective_user_id) THEN
+            PERFORM maybe_trigger_kv_rebuild(effective_user_id, 'rem_traverse');
+        END IF;
+        RETURN;
+    END IF;
+
     FOR graph_keys IN
         WITH RECURSIVE graph_traversal AS (
             -- Base case: Find starting entity (user-owned OR public)
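
All three read paths (`rem_lookup`, the fuzzy search, and `rem_traverse`) now share the same self-healing pattern: on a miss, check whether the UNLOGGED cache is empty (UNLOGGED tables are truncated by crash recovery) and, if so, fire `maybe_trigger_kv_rebuild` and return nothing; `rem_kv_store_empty` and `maybe_trigger_kv_rebuild` are defined elsewhere in the package. Since the rebuild runs in the background, a caller may want to retry, sketched here with asyncpg and an assumed `rem_lookup(key, tenant_id)` parameter order:

```python
import asyncio
import asyncpg

async def lookup_with_retry(conn: asyncpg.Connection, key: str, tenant: str,
                            attempts: int = 3) -> list:
    """Retry wrapper for the self-healing cache; signatures are assumptions."""
    for _ in range(attempts):
        rows = await conn.fetch("SELECT * FROM rem_lookup($1, $2)", key, tenant)
        if rows:
            return list(rows)
        # A miss on an empty cache has already queued a rebuild server-side;
        # give the background repopulation a moment before retrying.
        await asyncio.sleep(0.5)
    return []
```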
@@ -789,6 +816,97 @@ $$ LANGUAGE plpgsql STABLE;
 COMMENT ON FUNCTION fn_get_shared_messages IS
 'Get messages from sessions shared by a specific user with the recipient.';
 
+-- ============================================================================
+-- SESSIONS WITH USER INFO
+-- ============================================================================
+-- Function to list sessions with user details (name, email) for admin views
+
+-- List sessions with user info, CTE pagination
+-- Note: messages.session_id stores the session name (not UUID), so we join on sessions.name
+CREATE OR REPLACE FUNCTION fn_list_sessions_with_user(
+    p_user_id VARCHAR(256) DEFAULT NULL,     -- Filter by user_id (NULL = all users, admin only)
+    p_user_name VARCHAR(256) DEFAULT NULL,   -- Filter by user name (partial match, admin only)
+    p_user_email VARCHAR(256) DEFAULT NULL,  -- Filter by user email (partial match, admin only)
+    p_mode VARCHAR(50) DEFAULT NULL,         -- Filter by session mode
+    p_page INTEGER DEFAULT 1,
+    p_page_size INTEGER DEFAULT 50
+)
+RETURNS TABLE(
+    id UUID,
+    name VARCHAR(256),
+    mode TEXT,
+    description TEXT,
+    user_id VARCHAR(256),
+    user_name VARCHAR(256),
+    user_email VARCHAR(256),
+    message_count INTEGER,
+    total_tokens INTEGER,
+    created_at TIMESTAMP,
+    updated_at TIMESTAMP,
+    metadata JSONB,
+    total_count BIGINT
+) AS $$
+BEGIN
+    RETURN QUERY
+    WITH session_msg_counts AS (
+        -- Count messages per session (joining on session name since messages.session_id = sessions.name)
+        SELECT
+            m.session_id AS session_name,
+            COUNT(*)::INTEGER AS actual_message_count
+        FROM messages m
+        GROUP BY m.session_id
+    ),
+    filtered_sessions AS (
+        SELECT
+            s.id,
+            s.name,
+            s.mode,
+            s.description,
+            s.user_id,
+            COALESCE(u.name, s.user_id)::VARCHAR(256) AS user_name,
+            u.email::VARCHAR(256) AS user_email,
+            COALESCE(mc.actual_message_count, 0) AS message_count,
+            s.total_tokens,
+            s.created_at,
+            s.updated_at,
+            s.metadata
+        FROM sessions s
+        LEFT JOIN users u ON u.id::text = s.user_id
+        LEFT JOIN session_msg_counts mc ON mc.session_name = s.name
+        WHERE s.deleted_at IS NULL
+          AND (p_user_id IS NULL OR s.user_id = p_user_id)
+          AND (p_user_name IS NULL OR u.name ILIKE '%' || p_user_name || '%')
+          AND (p_user_email IS NULL OR u.email ILIKE '%' || p_user_email || '%')
+          AND (p_mode IS NULL OR s.mode = p_mode)
+    ),
+    counted AS (
+        SELECT *, COUNT(*) OVER () AS total_count
+        FROM filtered_sessions
+    )
+    SELECT
+        c.id,
+        c.name,
+        c.mode,
+        c.description,
+        c.user_id,
+        c.user_name,
+        c.user_email,
+        c.message_count,
+        c.total_tokens,
+        c.created_at,
+        c.updated_at,
+        c.metadata,
+        c.total_count
+    FROM counted c
+    ORDER BY c.created_at DESC
+    LIMIT p_page_size
+    OFFSET (p_page - 1) * p_page_size;
+END;
+$$ LANGUAGE plpgsql STABLE;
+
+COMMENT ON FUNCTION fn_list_sessions_with_user IS
+'List sessions with user details and computed message counts. Joins messages on session name.';
+
 -- ============================================================================
 -- RECORD INSTALLATION
 -- ============================================================================
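
A hedged usage sketch for the new admin listing (asyncpg assumed; `=>` is PostgreSQL's standard named-argument notation):

```python
# Hypothetical admin-side call; only the function itself comes from this diff.
rows = await conn.fetch(
    """
    SELECT * FROM fn_list_sessions_with_user(
        p_user_email => $1,  -- partial match via ILIKE
        p_page       => $2,
        p_page_size  => $3
    )
    """,
    "@example.com", 1, 20,
)
# Every row carries the same total_count (a window COUNT over the filtered set),
# so total pages can be derived from any single row.
if rows:
    total_pages = -(-rows[0]["total_count"] // 20)  # ceiling division
```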