letta-nightly 0.11.7.dev20250909104137__py3-none-any.whl → 0.11.7.dev20250910104051__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. letta/adapters/letta_llm_adapter.py +81 -0
  2. letta/adapters/letta_llm_request_adapter.py +111 -0
  3. letta/adapters/letta_llm_stream_adapter.py +169 -0
  4. letta/agents/base_agent.py +4 -1
  5. letta/agents/base_agent_v2.py +68 -0
  6. letta/agents/helpers.py +3 -5
  7. letta/agents/letta_agent.py +23 -12
  8. letta/agents/letta_agent_v2.py +1220 -0
  9. letta/agents/voice_agent.py +2 -1
  10. letta/constants.py +1 -1
  11. letta/errors.py +12 -0
  12. letta/functions/function_sets/base.py +53 -12
  13. letta/functions/schema_generator.py +1 -1
  14. letta/groups/sleeptime_multi_agent_v3.py +231 -0
  15. letta/helpers/tool_rule_solver.py +4 -0
  16. letta/helpers/tpuf_client.py +607 -34
  17. letta/interfaces/anthropic_streaming_interface.py +64 -24
  18. letta/interfaces/openai_streaming_interface.py +80 -37
  19. letta/llm_api/openai_client.py +45 -4
  20. letta/orm/block.py +1 -0
  21. letta/orm/group.py +1 -0
  22. letta/orm/source.py +8 -1
  23. letta/orm/step_metrics.py +10 -0
  24. letta/schemas/block.py +4 -0
  25. letta/schemas/enums.py +1 -0
  26. letta/schemas/group.py +8 -0
  27. letta/schemas/letta_message.py +1 -1
  28. letta/schemas/letta_request.py +2 -2
  29. letta/schemas/mcp.py +9 -1
  30. letta/schemas/message.py +23 -0
  31. letta/schemas/providers/ollama.py +1 -1
  32. letta/schemas/providers.py +1 -2
  33. letta/schemas/source.py +6 -0
  34. letta/schemas/step_metrics.py +2 -0
  35. letta/server/rest_api/routers/v1/__init__.py +2 -0
  36. letta/server/rest_api/routers/v1/agents.py +100 -5
  37. letta/server/rest_api/routers/v1/blocks.py +6 -0
  38. letta/server/rest_api/routers/v1/folders.py +23 -5
  39. letta/server/rest_api/routers/v1/groups.py +6 -0
  40. letta/server/rest_api/routers/v1/internal_templates.py +218 -12
  41. letta/server/rest_api/routers/v1/messages.py +14 -19
  42. letta/server/rest_api/routers/v1/runs.py +43 -28
  43. letta/server/rest_api/routers/v1/sources.py +23 -5
  44. letta/server/rest_api/routers/v1/tools.py +42 -0
  45. letta/server/rest_api/streaming_response.py +9 -1
  46. letta/server/server.py +2 -1
  47. letta/services/agent_manager.py +39 -59
  48. letta/services/agent_serialization_manager.py +22 -8
  49. letta/services/archive_manager.py +60 -9
  50. letta/services/block_manager.py +5 -0
  51. letta/services/file_processor/embedder/base_embedder.py +5 -0
  52. letta/services/file_processor/embedder/openai_embedder.py +4 -0
  53. letta/services/file_processor/embedder/pinecone_embedder.py +5 -1
  54. letta/services/file_processor/embedder/turbopuffer_embedder.py +71 -0
  55. letta/services/file_processor/file_processor.py +9 -7
  56. letta/services/group_manager.py +74 -11
  57. letta/services/mcp_manager.py +132 -26
  58. letta/services/message_manager.py +229 -125
  59. letta/services/passage_manager.py +2 -1
  60. letta/services/source_manager.py +23 -1
  61. letta/services/summarizer/summarizer.py +2 -0
  62. letta/services/tool_executor/core_tool_executor.py +2 -120
  63. letta/services/tool_executor/files_tool_executor.py +133 -8
  64. letta/settings.py +6 -0
  65. letta/utils.py +34 -1
  66. {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/METADATA +2 -2
  67. {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/RECORD +70 -63
  68. {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/WHEEL +0 -0
  69. {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/entry_points.txt +0 -0
  70. {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/licenses/LICENSE +0 -0
letta/services/archive_manager.py

@@ -5,6 +5,7 @@ from sqlalchemy import select
  from letta.helpers.tpuf_client import should_use_tpuf
  from letta.log import get_logger
  from letta.orm import ArchivalPassage, Archive as ArchiveModel, ArchivesAgents
+ from letta.otel.tracing import trace_method
  from letta.schemas.archive import Archive as PydanticArchive
  from letta.schemas.enums import VectorDBProvider
  from letta.schemas.user import User as PydanticUser
@@ -19,6 +20,7 @@ class ArchiveManager:
      """Manager class to handle business logic related to Archives."""

      @enforce_types
+     @trace_method
      def create_archive(
          self,
          name: str,
@@ -44,6 +46,7 @@ class ArchiveManager:
              raise

      @enforce_types
+     @trace_method
      async def create_archive_async(
          self,
          name: str,
@@ -69,6 +72,7 @@ class ArchiveManager:
              raise

      @enforce_types
+     @trace_method
      async def get_archive_by_id_async(
          self,
          archive_id: str,
@@ -84,6 +88,7 @@ class ArchiveManager:
              return archive.to_pydantic()

      @enforce_types
+     @trace_method
      def attach_agent_to_archive(
          self,
          agent_id: str,
@@ -113,6 +118,7 @@ class ArchiveManager:
              session.commit()

      @enforce_types
+     @trace_method
      async def attach_agent_to_archive_async(
          self,
          agent_id: str,
@@ -148,6 +154,7 @@ class ArchiveManager:
              await session.commit()

      @enforce_types
+     @trace_method
      async def get_default_archive_for_agent_async(
          self,
          agent_id: str,
@@ -179,6 +186,24 @@ class ArchiveManager:
          return None

      @enforce_types
+     @trace_method
+     async def delete_archive_async(
+         self,
+         archive_id: str,
+         actor: PydanticUser = None,
+     ) -> None:
+         """Delete an archive permanently."""
+         async with db_registry.async_session() as session:
+             archive_model = await ArchiveModel.read_async(
+                 db_session=session,
+                 identifier=archive_id,
+                 actor=actor,
+             )
+             await archive_model.hard_delete_async(session, actor=actor)
+             logger.info(f"Deleted archive {archive_id}")
+
+     @enforce_types
+     @trace_method
      async def get_or_create_default_archive_for_agent_async(
          self,
          agent_id: str,
@@ -187,6 +212,8 @@ class ArchiveManager:
      ) -> PydanticArchive:
          """Get the agent's default archive, creating one if it doesn't exist."""
          # First check if agent has any archives
+         from sqlalchemy.exc import IntegrityError
+
          from letta.services.agent_manager import AgentManager

          agent_manager = AgentManager()
@@ -215,17 +242,38 @@ class ArchiveManager:
              actor=actor,
          )

-         # Attach the agent to the archive as owner
-         await self.attach_agent_to_archive_async(
-             agent_id=agent_id,
-             archive_id=archive.id,
-             is_owner=True,
-             actor=actor,
-         )
-
-         return archive
+         try:
+             # Attach the agent to the archive as owner
+             await self.attach_agent_to_archive_async(
+                 agent_id=agent_id,
+                 archive_id=archive.id,
+                 is_owner=True,
+                 actor=actor,
+             )
+             return archive
+         except IntegrityError:
+             # race condition: another concurrent request already created and attached an archive
+             # clean up the orphaned archive we just created
+             logger.info(f"Race condition detected for agent {agent_id}, cleaning up orphaned archive {archive.id}")
+             await self.delete_archive_async(archive_id=archive.id, actor=actor)
+
+             # fetch the existing archive that was created by the concurrent request
+             archive_ids = await agent_manager.get_agent_archive_ids_async(
+                 agent_id=agent_id,
+                 actor=actor,
+             )
+             if archive_ids:
+                 archive = await self.get_archive_by_id_async(
+                     archive_id=archive_ids[0],
+                     actor=actor,
+                 )
+                 return archive
+             else:
+                 # this shouldn't happen, but if it does, re-raise
+                 raise

      @enforce_types
+     @trace_method
      def get_or_create_default_archive_for_agent(
          self,
          agent_id: str,
@@ -269,6 +317,7 @@ class ArchiveManager:
              return archive_model.to_pydantic()

      @enforce_types
+     @trace_method
      async def get_agents_for_archive_async(
          self,
          archive_id: str,
@@ -280,6 +329,7 @@ class ArchiveManager:
              return [row[0] for row in result.fetchall()]

      @enforce_types
+     @trace_method
      async def get_agent_from_passage_async(
          self,
          passage_id: str,
@@ -309,6 +359,7 @@ class ArchiveManager:
          return agent_ids[0]

      @enforce_types
+     @trace_method
      async def get_or_set_vector_db_namespace_async(
          self,
          archive_id: str,
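
Note on the get_or_create_default_archive_for_agent_async change: this is a create-then-attach race recovery. The unique constraint on the agent/archive attachment picks a single winner; a losing request deletes the orphaned archive it just created and adopts the winner's. A minimal standalone sketch of the pattern, with illustrative names and a simulated constraint (not letta code):

    import asyncio

    class IntegrityError(Exception):
        """Stands in for sqlalchemy.exc.IntegrityError in this sketch."""

    attachments: dict[str, str] = {}  # agent_id -> archive_id, unique per agent
    archives: set[str] = set()

    async def attach(agent_id: str, archive_id: str) -> None:
        if agent_id in attachments:  # simulated unique-constraint violation
            raise IntegrityError(agent_id)
        attachments[agent_id] = archive_id

    async def get_or_create_archive(agent_id: str, n: int) -> str:
        archive_id = f"archive-{n}"
        archives.add(archive_id)  # create first...
        try:
            await attach(agent_id, archive_id)  # ...then attach as owner
            return archive_id
        except IntegrityError:
            archives.discard(archive_id)  # clean up the orphaned archive
            return attachments[agent_id]  # adopt the concurrent winner's archive

    async def main() -> None:
        results = await asyncio.gather(*(get_or_create_archive("agent-1", n) for n in range(3)))
        print(results)  # all three callers converge on 'archive-0'

    asyncio.run(main())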
letta/services/block_manager.py

@@ -188,6 +188,7 @@ class BlockManager:
          connected_to_agents_count_lt: Optional[int] = None,
          connected_to_agents_count_eq: Optional[List[int]] = None,
          ascending: bool = True,
+         show_hidden_blocks: Optional[bool] = None,
      ) -> List[PydanticBlock]:
          """Async version of get_blocks method. Retrieve blocks based on various optional filters."""
          from sqlalchemy import select
@@ -228,6 +229,10 @@ class BlockManager:
          if value_search:
              query = query.where(BlockModel.value.ilike(f"%{value_search}%"))

+         # Apply hidden filter
+         if not show_hidden_blocks:
+             query = query.where((BlockModel.hidden.is_(None)) | (BlockModel.hidden == False))
+
          needs_distinct = False

          needs_agent_count_join = any(
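
The (hidden IS NULL) OR (hidden = false) shape of the new filter matters because of SQL three-valued logic: a plain hidden = false comparison silently drops rows where hidden is NULL, i.e. every block created before the column existed. A quick standalone demonstration using sqlite3 (not letta code):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE block (label TEXT, hidden BOOLEAN)")
    con.executemany("INSERT INTO block VALUES (?, ?)", [("a", None), ("b", 0), ("c", 1)])

    only_false = con.execute("SELECT label FROM block WHERE hidden = 0").fetchall()
    null_or_false = con.execute("SELECT label FROM block WHERE hidden IS NULL OR hidden = 0").fetchall()
    print(only_false)     # [('b',)] -- the NULL row is silently excluded
    print(null_or_false)  # [('a',), ('b',)]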
letta/services/file_processor/embedder/base_embedder.py

@@ -2,6 +2,7 @@ from abc import ABC, abstractmethod
  from typing import List

  from letta.log import get_logger
+ from letta.schemas.enums import VectorDBProvider
  from letta.schemas.passage import Passage
  from letta.schemas.user import User

@@ -11,6 +12,10 @@ logger = get_logger(__name__)
  class BaseEmbedder(ABC):
      """Abstract base class for embedding generation"""

+     def __init__(self):
+         # Default to NATIVE, subclasses will override this
+         self.vector_db_type = VectorDBProvider.NATIVE
+
      @abstractmethod
      async def generate_embedded_passages(self, file_id: str, source_id: str, chunks: List[str], actor: User) -> List[Passage]:
          """Generate embeddings for chunks with batching and concurrent processing"""
letta/services/file_processor/embedder/openai_embedder.py

@@ -19,6 +19,10 @@ class OpenAIEmbedder(BaseEmbedder):
      """OpenAI-based embedding generation"""

      def __init__(self, embedding_config: Optional[EmbeddingConfig] = None):
+         super().__init__()
+         # OpenAI embedder uses the native vector db (PostgreSQL)
+         # self.vector_db_type already set to VectorDBProvider.NATIVE by parent
+
          self.default_embedding_config = (
              EmbeddingConfig.default_config(model_name="text-embedding-3-small", provider="openai")
              if model_settings.openai_api_key
letta/services/file_processor/embedder/pinecone_embedder.py

@@ -4,6 +4,7 @@ from letta.helpers.pinecone_utils import upsert_file_records_to_pinecone_index
  from letta.log import get_logger
  from letta.otel.tracing import log_event, trace_method
  from letta.schemas.embedding_config import EmbeddingConfig
+ from letta.schemas.enums import VectorDBProvider
  from letta.schemas.passage import Passage
  from letta.schemas.user import User
  from letta.services.file_processor.embedder.base_embedder import BaseEmbedder
@@ -20,6 +21,10 @@ class PineconeEmbedder(BaseEmbedder):
      """Pinecone-based embedding generation"""

      def __init__(self, embedding_config: Optional[EmbeddingConfig] = None):
+         super().__init__()
+         # set the vector db type for pinecone
+         self.vector_db_type = VectorDBProvider.PINECONE
+
          if not PINECONE_AVAILABLE:
              raise ImportError("Pinecone package is not installed. Install it with: pip install pinecone")

@@ -28,7 +33,6 @@ class PineconeEmbedder(BaseEmbedder):
              embedding_config = EmbeddingConfig.default_config(provider="pinecone")

          self.embedding_config = embedding_config
-         super().__init__()

      @trace_method
      async def generate_embedded_passages(self, file_id: str, source_id: str, chunks: List[str], actor: User) -> List[Passage]:
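
The two coordinated edits in PineconeEmbedder.__init__ are deliberate: super().__init__() moves to the top, and the PINECONE override is assigned immediately after it. If the parent constructor ran last, its NATIVE default would clobber the subclass's value. A minimal sketch of the ordering (the enum mirrors VectorDBProvider; its string values are assumed):

    from enum import Enum

    class VectorDBProvider(str, Enum):
        NATIVE = "native"
        PINECONE = "pinecone"

    class BaseEmbedder:
        def __init__(self):
            self.vector_db_type = VectorDBProvider.NATIVE  # parent default

    class PineconeEmbedder(BaseEmbedder):
        def __init__(self):
            super().__init__()  # set the default first...
            self.vector_db_type = VectorDBProvider.PINECONE  # ...then override it

    print(PineconeEmbedder().vector_db_type)  # VectorDBProvider.PINECONE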
letta/services/file_processor/embedder/turbopuffer_embedder.py

@@ -0,0 +1,71 @@
+ from typing import List, Optional
+
+ from letta.helpers.tpuf_client import TurbopufferClient
+ from letta.log import get_logger
+ from letta.otel.tracing import log_event, trace_method
+ from letta.schemas.embedding_config import EmbeddingConfig
+ from letta.schemas.enums import VectorDBProvider
+ from letta.schemas.passage import Passage
+ from letta.schemas.user import User
+ from letta.services.file_processor.embedder.base_embedder import BaseEmbedder
+
+ logger = get_logger(__name__)
+
+
+ class TurbopufferEmbedder(BaseEmbedder):
+     """Turbopuffer-based embedding generation and storage"""
+
+     def __init__(self, embedding_config: Optional[EmbeddingConfig] = None):
+         super().__init__()
+         # set the vector db type for turbopuffer
+         self.vector_db_type = VectorDBProvider.TPUF
+         # use the default embedding config from TurbopufferClient if not provided
+         self.embedding_config = embedding_config or TurbopufferClient.default_embedding_config
+         self.tpuf_client = TurbopufferClient()
+
+     @trace_method
+     async def generate_embedded_passages(self, file_id: str, source_id: str, chunks: List[str], actor: User) -> List[Passage]:
+         """Generate embeddings and store in Turbopuffer, then return Passage objects"""
+         if not chunks:
+             return []
+
+         logger.info(f"Generating embeddings for {len(chunks)} chunks using Turbopuffer")
+         log_event(
+             "turbopuffer_embedder.generation_started",
+             {
+                 "total_chunks": len(chunks),
+                 "file_id": file_id,
+                 "source_id": source_id,
+                 "embedding_model": self.embedding_config.embedding_model,
+             },
+         )
+
+         try:
+             # insert passages to Turbopuffer - it will handle embedding generation internally
+             passages = await self.tpuf_client.insert_file_passages(
+                 source_id=source_id,
+                 file_id=file_id,
+                 text_chunks=chunks,
+                 organization_id=actor.organization_id,
+                 actor=actor,
+             )
+
+             logger.info(f"Successfully generated and stored {len(passages)} passages in Turbopuffer")
+             log_event(
+                 "turbopuffer_embedder.generation_completed",
+                 {
+                     "passages_created": len(passages),
+                     "total_chunks_processed": len(chunks),
+                     "file_id": file_id,
+                     "source_id": source_id,
+                 },
+             )
+             return passages
+
+         except Exception as e:
+             logger.error(f"Failed to generate embeddings with Turbopuffer: {str(e)}")
+             log_event(
+                 "turbopuffer_embedder.generation_failed",
+                 {"error": str(e), "error_type": type(e).__name__, "file_id": file_id, "source_id": source_id},
+             )
+             raise
letta/services/file_processor/file_processor.py

@@ -6,7 +6,7 @@ from letta.log import get_logger
  from letta.otel.context import get_ctx_attributes
  from letta.otel.tracing import log_event, trace_method
  from letta.schemas.agent import AgentState
- from letta.schemas.enums import FileProcessingStatus
+ from letta.schemas.enums import FileProcessingStatus, VectorDBProvider
  from letta.schemas.file import FileMetadata
  from letta.schemas.passage import Passage
  from letta.schemas.user import User
@@ -30,7 +30,6 @@ class FileProcessor:
          file_parser: FileParser,
          embedder: BaseEmbedder,
          actor: User,
-         using_pinecone: bool,
          max_file_size: int = 50 * 1024 * 1024,  # 50MB default
      ):
          self.file_parser = file_parser
@@ -42,7 +41,8 @@ class FileProcessor:
          self.job_manager = JobManager()
          self.agent_manager = AgentManager()
          self.actor = actor
-         self.using_pinecone = using_pinecone
+         # get vector db type from the embedder
+         self.vector_db_type = embedder.vector_db_type

      async def _chunk_and_embed_with_fallback(self, file_metadata: FileMetadata, ocr_response, source_id: str) -> List:
          """Chunk text and generate embeddings with fallback to default chunker if needed"""
@@ -218,7 +218,7 @@ class FileProcessor:
                  source_id=source_id,
              )

-             if not self.using_pinecone:
+             if self.vector_db_type == VectorDBProvider.NATIVE:
                  all_passages = await self.passage_manager.create_many_source_passages_async(
                      passages=all_passages,
                      file_metadata=file_metadata,
@@ -241,7 +241,8 @@ class FileProcessor:
              )

              # update job status
-             if not self.using_pinecone:
+             # pinecone completes slowly, so gets updated later
+             if self.vector_db_type != VectorDBProvider.PINECONE:
                  await self.file_manager.update_file_status(
                      file_id=file_metadata.id,
                      actor=self.actor,
@@ -317,14 +318,15 @@ class FileProcessor:
          )

          # Create passages in database (unless using Pinecone)
-         if not self.using_pinecone:
+         if self.vector_db_type == VectorDBProvider.NATIVE:
              all_passages = await self.passage_manager.create_many_source_passages_async(
                  passages=all_passages, file_metadata=file_metadata, actor=self.actor
              )
              log_event("file_processor.import_passages_created", {"filename": filename, "total_passages": len(all_passages)})

          # Update file status to completed (valid transition from EMBEDDING)
-         if not self.using_pinecone:
+         # pinecone completes slowly, so gets updated later
+         if self.vector_db_type != VectorDBProvider.PINECONE:
              await self.file_manager.update_file_status(
                  file_id=file_metadata.id, actor=self.actor, processing_status=FileProcessingStatus.COMPLETED
              )
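
The two replacement conditions in FileProcessor are intentionally different: source passages are written to the local database only for the NATIVE provider, while the completed-status update is skipped only for PINECONE, whose indexing finishes asynchronously and is marked done later; Turbopuffer inserts are awaited inline, so those files complete immediately. A sketch of the resulting behavior per provider (enum string values assumed):

    from enum import Enum

    class VectorDBProvider(str, Enum):  # mirrors letta.schemas.enums
        NATIVE = "native"
        PINECONE = "pinecone"
        TPUF = "tpuf"

    for provider in VectorDBProvider:
        stores_passages_in_db = provider == VectorDBProvider.NATIVE
        completes_inline = provider != VectorDBProvider.PINECONE
        print(f"{provider.value:<10} db_passages={stores_passages_in_db} complete_inline={completes_inline}")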
letta/services/group_manager.py

@@ -1,6 +1,7 @@
+ from datetime import datetime
  from typing import List, Optional, Union

- from sqlalchemy import delete, select
+ from sqlalchemy import and_, asc, delete, desc, or_, select
  from sqlalchemy.orm import Session

  from letta.orm.agent import Agent as AgentModel
@@ -13,6 +14,7 @@ from letta.schemas.letta_message import LettaMessage
  from letta.schemas.message import Message as PydanticMessage
  from letta.schemas.user import User as PydanticUser
  from letta.server.db import db_registry
+ from letta.settings import DatabaseChoice, settings
  from letta.utils import enforce_types


@@ -27,20 +29,34 @@ class GroupManager:
          before: Optional[str] = None,
          after: Optional[str] = None,
          limit: Optional[int] = 50,
+         show_hidden_groups: Optional[bool] = None,
      ) -> list[PydanticGroup]:
          async with db_registry.async_session() as session:
-             filters = {"organization_id": actor.organization_id}
+             from sqlalchemy import select
+
+             from letta.orm.sqlalchemy_base import AccessType
+
+             query = select(GroupModel)
+             query = GroupModel.apply_access_predicate(query, actor, ["read"], AccessType.ORGANIZATION)
+
+             # Apply filters
              if project_id:
-                 filters["project_id"] = project_id
+                 query = query.where(GroupModel.project_id == project_id)
              if manager_type:
-                 filters["manager_type"] = manager_type
-             groups = await GroupModel.list_async(
-                 db_session=session,
-                 before=before,
-                 after=after,
-                 limit=limit,
-                 **filters,
-             )
+                 query = query.where(GroupModel.manager_type == manager_type)
+
+             # Apply hidden filter
+             if not show_hidden_groups:
+                 query = query.where((GroupModel.hidden.is_(None)) | (GroupModel.hidden == False))
+
+             # Apply pagination
+             query = await _apply_group_pagination_async(query, before, after, session, ascending=True)
+
+             if limit:
+                 query = query.limit(limit)
+
+             result = await session.execute(query)
+             groups = result.scalars().all()
              return [group.to_pydantic() for group in groups]

      @enforce_types
@@ -561,3 +577,50 @@ class GroupManager:
          # 3) ordering
          if max_value <= min_value:
              raise ValueError(f"'{max_name}' must be greater than '{min_name}' (got {max_name}={max_value} <= {min_name}={min_value})")
+
+
+ def _cursor_filter(sort_col, id_col, ref_sort_col, ref_id, forward: bool):
+     """
+     Returns a SQLAlchemy filter expression for cursor-based pagination for groups.
+
+     If `forward` is True, returns records after the reference.
+     If `forward` is False, returns records before the reference.
+     """
+     if forward:
+         return or_(
+             sort_col > ref_sort_col,
+             and_(sort_col == ref_sort_col, id_col > ref_id),
+         )
+     else:
+         return or_(
+             sort_col < ref_sort_col,
+             and_(sort_col == ref_sort_col, id_col < ref_id),
+         )
+
+
+ async def _apply_group_pagination_async(query, before: Optional[str], after: Optional[str], session, ascending: bool = True) -> any:
+     """Apply cursor-based pagination to group queries."""
+     sort_column = GroupModel.created_at
+
+     if after:
+         result = (await session.execute(select(sort_column, GroupModel.id).where(GroupModel.id == after))).first()
+         if result:
+             after_sort_value, after_id = result
+             # SQLite does not support as granular timestamping, so we need to round the timestamp
+             if settings.database_engine is DatabaseChoice.SQLITE and isinstance(after_sort_value, datetime):
+                 after_sort_value = after_sort_value.strftime("%Y-%m-%d %H:%M:%S")
+             query = query.where(_cursor_filter(sort_column, GroupModel.id, after_sort_value, after_id, forward=ascending))
+
+     if before:
+         result = (await session.execute(select(sort_column, GroupModel.id).where(GroupModel.id == before))).first()
+         if result:
+             before_sort_value, before_id = result
+             # SQLite does not support as granular timestamping, so we need to round the timestamp
+             if settings.database_engine is DatabaseChoice.SQLITE and isinstance(before_sort_value, datetime):
+                 before_sort_value = before_sort_value.strftime("%Y-%m-%d %H:%M:%S")
+             query = query.where(_cursor_filter(sort_column, GroupModel.id, before_sort_value, before_id, forward=not ascending))
+
+     # Apply ordering
+     order_fn = asc if ascending else desc
+     query = query.order_by(order_fn(sort_column), order_fn(GroupModel.id))
+     return query
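
_cursor_filter implements standard keyset (cursor) pagination over the compound sort key (created_at, id): the id column breaks ties between rows that share a timestamp, which the SQLite second-granularity workaround above makes more likely. The predicate is exactly a lexicographic tuple comparison; a pure-Python rendering with illustrative data (not letta code):

    from datetime import datetime

    def after_cursor(row: dict, ref: dict) -> bool:
        """True if row sorts strictly after ref under ORDER BY created_at, id."""
        return (row["created_at"], row["id"]) > (ref["created_at"], ref["id"])

    t = datetime(2025, 9, 10, 12, 0, 0)
    rows = [
        {"id": "group-a", "created_at": t},
        {"id": "group-b", "created_at": t},  # same timestamp, tie broken by id
        {"id": "group-c", "created_at": datetime(2025, 9, 10, 12, 0, 1)},
    ]
    ref = rows[0]
    print([r["id"] for r in rows if after_cursor(r, ref)])  # ['group-b', 'group-c']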