hindsight-api 0.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. hindsight_api/__init__.py +38 -0
  2. hindsight_api/api/__init__.py +105 -0
  3. hindsight_api/api/http.py +1872 -0
  4. hindsight_api/api/mcp.py +157 -0
  5. hindsight_api/engine/__init__.py +47 -0
  6. hindsight_api/engine/cross_encoder.py +97 -0
  7. hindsight_api/engine/db_utils.py +93 -0
  8. hindsight_api/engine/embeddings.py +113 -0
  9. hindsight_api/engine/entity_resolver.py +575 -0
  10. hindsight_api/engine/llm_wrapper.py +269 -0
  11. hindsight_api/engine/memory_engine.py +3095 -0
  12. hindsight_api/engine/query_analyzer.py +519 -0
  13. hindsight_api/engine/response_models.py +222 -0
  14. hindsight_api/engine/retain/__init__.py +50 -0
  15. hindsight_api/engine/retain/bank_utils.py +423 -0
  16. hindsight_api/engine/retain/chunk_storage.py +82 -0
  17. hindsight_api/engine/retain/deduplication.py +104 -0
  18. hindsight_api/engine/retain/embedding_processing.py +62 -0
  19. hindsight_api/engine/retain/embedding_utils.py +54 -0
  20. hindsight_api/engine/retain/entity_processing.py +90 -0
  21. hindsight_api/engine/retain/fact_extraction.py +1027 -0
  22. hindsight_api/engine/retain/fact_storage.py +176 -0
  23. hindsight_api/engine/retain/link_creation.py +121 -0
  24. hindsight_api/engine/retain/link_utils.py +651 -0
  25. hindsight_api/engine/retain/orchestrator.py +405 -0
  26. hindsight_api/engine/retain/types.py +206 -0
  27. hindsight_api/engine/search/__init__.py +15 -0
  28. hindsight_api/engine/search/fusion.py +122 -0
  29. hindsight_api/engine/search/observation_utils.py +132 -0
  30. hindsight_api/engine/search/reranking.py +103 -0
  31. hindsight_api/engine/search/retrieval.py +503 -0
  32. hindsight_api/engine/search/scoring.py +161 -0
  33. hindsight_api/engine/search/temporal_extraction.py +64 -0
  34. hindsight_api/engine/search/think_utils.py +255 -0
  35. hindsight_api/engine/search/trace.py +215 -0
  36. hindsight_api/engine/search/tracer.py +447 -0
  37. hindsight_api/engine/search/types.py +160 -0
  38. hindsight_api/engine/task_backend.py +223 -0
  39. hindsight_api/engine/utils.py +203 -0
  40. hindsight_api/metrics.py +227 -0
  41. hindsight_api/migrations.py +163 -0
  42. hindsight_api/models.py +309 -0
  43. hindsight_api/pg0.py +425 -0
  44. hindsight_api/web/__init__.py +12 -0
  45. hindsight_api/web/server.py +143 -0
  46. hindsight_api-0.0.13.dist-info/METADATA +41 -0
  47. hindsight_api-0.0.13.dist-info/RECORD +48 -0
  48. hindsight_api-0.0.13.dist-info/WHEEL +4 -0
@@ -0,0 +1,163 @@
1
+ """
2
+ Database migration management using Alembic.
3
+
4
+ This module provides programmatic access to run database migrations
5
+ on application startup. It is designed to be safe for concurrent
6
+ execution - Alembic uses PostgreSQL transactions to prevent
7
+ conflicts when multiple instances start simultaneously.
8
+
9
+ Important: All migrations must be backward-compatible to allow
10
+ safe rolling deployments.
11
+
12
+ No alembic.ini required - all configuration is done programmatically.
13
+ """
14
+ import logging
15
+ import os
16
+ import shutil
17
+ from pathlib import Path
18
+ from typing import Optional
19
+
20
+ from alembic import command
21
+ from alembic.config import Config
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
+
27
def _redact_db_url(url: str) -> str:
    """Return *url* with any password replaced by '***' for safe logging.

    Never logs raw credentials: if the URL cannot be parsed, a placeholder
    string is returned instead of the original value.
    """
    # Local import keeps the helper self-contained; urlsplit handles standard
    # SQLAlchemy URLs such as postgresql://user:pass@host/db.
    from urllib.parse import urlsplit, urlunsplit

    try:
        parts = urlsplit(url)
        if parts.password:
            netloc = parts.netloc.replace(f":{parts.password}@", ":***@", 1)
            return urlunsplit(parts._replace(netloc=netloc))
        return url
    except (ValueError, AttributeError):
        return "<unparseable database URL>"


def run_migrations(database_url: str, script_location: Optional[str] = None) -> None:
    """
    Run database migrations to the latest version using programmatic Alembic configuration.

    This function is safe to call on every application startup:
    - Alembic checks the current schema version in the database
    - Only missing migrations are applied
    - PostgreSQL transactions prevent concurrent migration conflicts
    - If schema is already up-to-date, this is a fast no-op

    Args:
        database_url: SQLAlchemy database URL (e.g., "postgresql://user:pass@host/db")
        script_location: Path to alembic migrations directory (e.g., "/path/to/alembic").
            If None, defaults to hindsight-api/alembic directory.

    Raises:
        RuntimeError: If migrations fail to complete
        FileNotFoundError: If script_location doesn't exist

    Example:
        # Using default location (hindsight_api package)
        run_migrations("postgresql://user:pass@host/db")

        # Using custom location (when importing from another project)
        run_migrations(
            "postgresql://user:pass@host/db",
            script_location="/path/to/copied/_alembic"
        )
    """
    try:
        # Determine script location
        if script_location is None:
            # Default: use the alembic directory in the hindsight_api package
            # This file is in: hindsight-api/hindsight_api/migrations.py
            # Default location is: hindsight-api/alembic
            package_root = Path(__file__).parent.parent
            script_location = str(package_root / "alembic")

        script_path = Path(script_location)
        if not script_path.exists():
            raise FileNotFoundError(
                f"Alembic script location not found at {script_location}. "
                "Database migrations cannot be run."
            )

        # Lazy %-style args avoid string formatting when INFO is disabled,
        # and the URL is redacted so credentials never reach the logs.
        logger.info("Running database migrations to head...")
        logger.info("Database URL: %s", _redact_db_url(database_url))
        logger.info("Script location: %s", script_location)

        # Create Alembic configuration programmatically (no alembic.ini needed)
        alembic_cfg = Config()

        # Set the script location (where alembic versions are stored)
        alembic_cfg.set_main_option("script_location", script_location)

        # Set the database URL (the raw, unredacted URL goes to Alembic)
        alembic_cfg.set_main_option("sqlalchemy.url", database_url)

        # Configure logging (optional, but helps with debugging)
        # Uses Python's logging system instead of alembic.ini
        alembic_cfg.set_main_option("prepend_sys_path", ".")

        # Set path_separator to avoid deprecation warning
        alembic_cfg.set_main_option("path_separator", "os")

        # Run migrations to head (latest version)
        # Note: Alembic may call sys.exit() on errors instead of raising exceptions
        # We rely on the outer try/except and logging to catch issues
        command.upgrade(alembic_cfg, "head")

        logger.info("Database migrations completed successfully")

    except FileNotFoundError:
        logger.error("Alembic script location not found at %s", script_location)
        raise
    except SystemExit as e:
        # Catch sys.exit() calls from Alembic
        logger.error("Alembic called sys.exit() with code: %s", e.code, exc_info=True)
        raise RuntimeError(f"Database migration failed with exit code {e.code}") from e
    except Exception as e:
        logger.error("Failed to run database migrations: %s", e, exc_info=True)
        raise RuntimeError("Database migration failed") from e
109
+
110
+
111
def check_migration_status(database_url: Optional[str] = None, script_location: Optional[str] = None) -> tuple[str | None, str | None]:
    """
    Check current database schema version and latest available version.

    Args:
        database_url: SQLAlchemy database URL. If None, uses HINDSIGHT_API_DATABASE_URL env var.
        script_location: Path to alembic migrations directory. If None, uses default location.

    Returns:
        Tuple of (current_revision, head_revision)
        Returns (None, None) if unable to determine versions
    """
    try:
        from alembic.runtime.migration import MigrationContext
        from alembic.script import ScriptDirectory
        from sqlalchemy import create_engine

        # Get database URL
        if database_url is None:
            database_url = os.getenv("HINDSIGHT_API_DATABASE_URL")
            if not database_url:
                logger.warning("Database URL not provided and HINDSIGHT_API_DATABASE_URL not set, cannot check migration status")
                return None, None

        # Get current revision from database.
        # The engine is disposed in `finally` so each status check releases its
        # connection pool instead of leaking connections.
        engine = create_engine(database_url)
        try:
            with engine.connect() as connection:
                context = MigrationContext.configure(connection)
                current_rev = context.get_current_revision()
        finally:
            engine.dispose()

        # Get head revision from migration scripts
        if script_location is None:
            package_root = Path(__file__).parent.parent
            script_location = str(package_root / "alembic")

        script_path = Path(script_location)
        if not script_path.exists():
            logger.warning("Script location not found at %s", script_location)
            return current_rev, None

        # Create config programmatically
        alembic_cfg = Config()
        alembic_cfg.set_main_option("script_location", script_location)
        alembic_cfg.set_main_option("path_separator", "os")

        script = ScriptDirectory.from_config(alembic_cfg)
        head_rev = script.get_current_head()

        return current_rev, head_rev

    except Exception as e:
        logger.warning("Unable to check migration status: %s", e)
        return None, None
@@ -0,0 +1,309 @@
1
+ """
2
+ SQLAlchemy models for the memory system.
3
+ """
4
+ from datetime import datetime
5
+ from typing import Optional
6
+ from uuid import UUID as PyUUID, uuid4
7
+
8
+ from sqlalchemy import (
9
+ CheckConstraint,
10
+ Column,
11
+ Float,
12
+ ForeignKey,
13
+ ForeignKeyConstraint,
14
+ Index,
15
+ Integer,
16
+ PrimaryKeyConstraint,
17
+ Text,
18
+ func,
19
+ text as sql_text,
20
+ )
21
+ from sqlalchemy.dialects.postgresql import JSONB, TIMESTAMP, UUID
22
+ from sqlalchemy.ext.asyncio import AsyncAttrs
23
+ from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship
24
+ from pgvector.sqlalchemy import Vector
25
+
26
+
27
class Base(AsyncAttrs, DeclarativeBase):
    """Declarative base shared by all ORM models in this module.

    AsyncAttrs adds the ``awaitable_attrs`` accessor so lazily-loaded
    attributes can be awaited when using SQLAlchemy's asyncio extension.
    """
    pass
30
+
31
+
32
class Document(Base):
    """Source documents for memory units.

    Composite primary key (id, bank_id): the same document id may exist
    independently in different banks.
    """
    __tablename__ = "documents"

    id: Mapped[str] = mapped_column(Text, primary_key=True)
    bank_id: Mapped[str] = mapped_column(Text, primary_key=True)
    original_text: Mapped[Optional[str]] = mapped_column(Text)
    # Indexed below; presumably used for deduplicating re-ingested content —
    # confirm against the retain/deduplication code.
    content_hash: Mapped[Optional[str]] = mapped_column(Text)
    # DB column is named "metadata"; the Python attribute is renamed because
    # "metadata" is reserved on DeclarativeBase.
    doc_metadata: Mapped[dict] = mapped_column("metadata", JSONB, server_default=sql_text("'{}'::jsonb"))
    created_at: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )
    # NOTE(review): no onupdate here — updated_at is presumably maintained by
    # application code; confirm.
    updated_at: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )

    # Relationships
    # Deleting a document deletes its memory units (delete-orphan cascade).
    memory_units = relationship("MemoryUnit", back_populates="document", cascade="all, delete-orphan")

    __table_args__ = (
        Index("idx_documents_bank_id", "bank_id"),
        Index("idx_documents_content_hash", "content_hash"),
    )
+ )
55
+
56
+
57
class MemoryUnit(Base):
    """Individual sentence-level memories.

    Each unit belongs to a bank and optionally to a source Document
    (composite FK on (document_id, bank_id)). Units carry a 384-dimension
    pgvector embedding indexed with HNSW for cosine similarity search.
    """
    __tablename__ = "memory_units"

    id: Mapped[PyUUID] = mapped_column(
        UUID(as_uuid=True), primary_key=True, server_default=sql_text("uuid_generate_v4()")
    )
    bank_id: Mapped[str] = mapped_column(Text, nullable=False)
    document_id: Mapped[Optional[str]] = mapped_column(Text)
    text: Mapped[str] = mapped_column(Text, nullable=False)
    embedding = mapped_column(Vector(384))  # pgvector type; 384 dims
    context: Mapped[Optional[str]] = mapped_column(Text)
    event_date: Mapped[datetime] = mapped_column(TIMESTAMP(timezone=True), nullable=False)  # Kept for backward compatibility
    occurred_start: Mapped[Optional[datetime]] = mapped_column(TIMESTAMP(timezone=True))  # When fact occurred (range start)
    occurred_end: Mapped[Optional[datetime]] = mapped_column(TIMESTAMP(timezone=True))  # When fact occurred (range end)
    mentioned_at: Mapped[Optional[datetime]] = mapped_column(TIMESTAMP(timezone=True))  # When fact was mentioned
    # One of 'world' | 'bank' | 'opinion' | 'observation' (enforced by check constraint below).
    fact_type: Mapped[str] = mapped_column(Text, nullable=False, server_default="world")
    # Required for opinions, optional for observations, forbidden otherwise —
    # see confidence_score_fact_type_check below.
    confidence_score: Mapped[Optional[float]] = mapped_column(Float)
    access_count: Mapped[int] = mapped_column(Integer, server_default="0")
    unit_metadata: Mapped[dict] = mapped_column("metadata", JSONB, server_default=sql_text("'{}'::jsonb"))  # User-defined metadata (str->str)
    created_at: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )
    # NOTE(review): no onupdate — presumably maintained by application code; confirm.
    updated_at: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )

    # Relationships
    document = relationship("Document", back_populates="memory_units")
    unit_entities = relationship("UnitEntity", back_populates="memory_unit", cascade="all, delete-orphan")
    # Links are directional; a unit's outgoing and incoming links are distinct
    # relationships disambiguated by foreign_keys.
    outgoing_links = relationship(
        "MemoryLink",
        foreign_keys="MemoryLink.from_unit_id",
        back_populates="from_unit",
        cascade="all, delete-orphan"
    )
    incoming_links = relationship(
        "MemoryLink",
        foreign_keys="MemoryLink.to_unit_id",
        back_populates="to_unit",
        cascade="all, delete-orphan"
    )

    __table_args__ = (
        # Composite FK: a unit's document must live in the same bank.
        ForeignKeyConstraint(
            ["document_id", "bank_id"],
            ["documents.id", "documents.bank_id"],
            name="memory_units_document_fkey",
            ondelete="CASCADE",
        ),
        CheckConstraint("fact_type IN ('world', 'bank', 'opinion', 'observation')"),
        CheckConstraint("confidence_score IS NULL OR (confidence_score >= 0.0 AND confidence_score <= 1.0)"),
        # Opinions must have a confidence score; observations may; other fact
        # types must not.
        CheckConstraint(
            "(fact_type = 'opinion' AND confidence_score IS NOT NULL) OR "
            "(fact_type = 'observation') OR "
            "(fact_type NOT IN ('opinion', 'observation') AND confidence_score IS NULL)",
            name="confidence_score_fact_type_check"
        ),
        Index("idx_memory_units_bank_id", "bank_id"),
        Index("idx_memory_units_document_id", "document_id"),
        Index("idx_memory_units_event_date", "event_date", postgresql_ops={"event_date": "DESC"}),
        Index("idx_memory_units_bank_date", "bank_id", "event_date", postgresql_ops={"event_date": "DESC"}),
        Index("idx_memory_units_access_count", "access_count", postgresql_ops={"access_count": "DESC"}),
        Index("idx_memory_units_fact_type", "fact_type"),
        Index("idx_memory_units_bank_fact_type", "bank_id", "fact_type"),
        Index("idx_memory_units_bank_type_date", "bank_id", "fact_type", "event_date", postgresql_ops={"event_date": "DESC"}),
        # Partial indexes: restricted to a single fact_type to keep them small.
        Index(
            "idx_memory_units_opinion_confidence",
            "bank_id",
            "confidence_score",
            postgresql_where=sql_text("fact_type = 'opinion'"),
            postgresql_ops={"confidence_score": "DESC"}
        ),
        Index(
            "idx_memory_units_opinion_date",
            "bank_id",
            "event_date",
            postgresql_where=sql_text("fact_type = 'opinion'"),
            postgresql_ops={"event_date": "DESC"}
        ),
        Index(
            "idx_memory_units_observation_date",
            "bank_id",
            "event_date",
            postgresql_where=sql_text("fact_type = 'observation'"),
            postgresql_ops={"event_date": "DESC"}
        ),
        # Approximate nearest-neighbor index over embeddings (cosine distance).
        Index(
            "idx_memory_units_embedding",
            "embedding",
            postgresql_using="hnsw",
            postgresql_ops={"embedding": "vector_cosine_ops"}
        ),
    )
151
+
152
+
153
class Entity(Base):
    """Resolved entities (people, organizations, locations, etc.).

    Entities are scoped per bank; the same canonical name may exist in
    multiple banks as separate rows.
    """
    __tablename__ = "entities"

    id: Mapped[PyUUID] = mapped_column(
        UUID(as_uuid=True), primary_key=True, server_default=sql_text("uuid_generate_v4()")
    )
    canonical_name: Mapped[str] = mapped_column(Text, nullable=False)
    bank_id: Mapped[str] = mapped_column(Text, nullable=False)
    # DB column is named "metadata"; attribute renamed because "metadata" is
    # reserved on DeclarativeBase.
    entity_metadata: Mapped[dict] = mapped_column("metadata", JSONB, server_default=sql_text("'{}'::jsonb"))
    first_seen: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )
    last_seen: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )
    # Starts at 1: an entity row exists only once it has been mentioned.
    mention_count: Mapped[int] = mapped_column(Integer, server_default="1")

    # Relationships
    unit_entities = relationship("UnitEntity", back_populates="entity", cascade="all, delete-orphan")
    memory_links = relationship("MemoryLink", back_populates="entity", cascade="all, delete-orphan")
    # Co-occurrence rows store the pair in canonical order (id_1 < id_2), so an
    # entity can appear on either side; hence two relationships.
    cooccurrences_1 = relationship(
        "EntityCooccurrence",
        foreign_keys="EntityCooccurrence.entity_id_1",
        back_populates="entity_1",
        cascade="all, delete-orphan"
    )
    cooccurrences_2 = relationship(
        "EntityCooccurrence",
        foreign_keys="EntityCooccurrence.entity_id_2",
        back_populates="entity_2",
        cascade="all, delete-orphan"
    )

    __table_args__ = (
        Index("idx_entities_bank_id", "bank_id"),
        Index("idx_entities_canonical_name", "canonical_name"),
        Index("idx_entities_bank_name", "bank_id", "canonical_name"),
    )
192
+
193
+
194
class UnitEntity(Base):
    """Association between memory units and entities.

    Pure many-to-many join table; the composite primary key
    (unit_id, entity_id) prevents duplicate associations.
    """
    __tablename__ = "unit_entities"

    unit_id: Mapped[PyUUID] = mapped_column(
        UUID(as_uuid=True), ForeignKey("memory_units.id", ondelete="CASCADE"), primary_key=True
    )
    entity_id: Mapped[PyUUID] = mapped_column(
        UUID(as_uuid=True), ForeignKey("entities.id", ondelete="CASCADE"), primary_key=True
    )

    # Relationships
    memory_unit = relationship("MemoryUnit", back_populates="unit_entities")
    entity = relationship("Entity", back_populates="unit_entities")

    __table_args__ = (
        Index("idx_unit_entities_unit", "unit_id"),
        Index("idx_unit_entities_entity", "entity_id"),
    )
213
+
214
+
215
class EntityCooccurrence(Base):
    """Materialized cache of entity co-occurrences.

    Each unordered entity pair is stored once in canonical order
    (entity_id_1 < entity_id_2, enforced by the check constraint below).
    """
    __tablename__ = "entity_cooccurrences"

    entity_id_1: Mapped[PyUUID] = mapped_column(
        UUID(as_uuid=True), ForeignKey("entities.id", ondelete="CASCADE"), primary_key=True
    )
    entity_id_2: Mapped[PyUUID] = mapped_column(
        UUID(as_uuid=True), ForeignKey("entities.id", ondelete="CASCADE"), primary_key=True
    )
    # Starts at 1: a row exists only once the pair has co-occurred.
    cooccurrence_count: Mapped[int] = mapped_column(Integer, server_default="1")
    last_cooccurred: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )

    # Relationships
    entity_1 = relationship("Entity", foreign_keys=[entity_id_1], back_populates="cooccurrences_1")
    entity_2 = relationship("Entity", foreign_keys=[entity_id_2], back_populates="cooccurrences_2")

    __table_args__ = (
        # Canonical pair ordering: prevents storing (A, B) and (B, A) twice.
        CheckConstraint("entity_id_1 < entity_id_2", name="entity_cooccurrence_order_check"),
        Index("idx_entity_cooccurrences_entity1", "entity_id_1"),
        Index("idx_entity_cooccurrences_entity2", "entity_id_2"),
        Index("idx_entity_cooccurrences_count", "cooccurrence_count", postgresql_ops={"cooccurrence_count": "DESC"}),
    )
240
+
241
+
242
class MemoryLink(Base):
    """Links between memory units (temporal, semantic, entity).

    Directional edge from one memory unit to another; the primary key is
    (from_unit_id, to_unit_id, link_type, entity_id), so the same pair of
    units may be linked multiple times with different types/entities.
    """
    __tablename__ = "memory_links"

    from_unit_id: Mapped[PyUUID] = mapped_column(
        UUID(as_uuid=True), ForeignKey("memory_units.id", ondelete="CASCADE"), primary_key=True
    )
    to_unit_id: Mapped[PyUUID] = mapped_column(
        UUID(as_uuid=True), ForeignKey("memory_units.id", ondelete="CASCADE"), primary_key=True
    )
    link_type: Mapped[str] = mapped_column(Text, primary_key=True)
    # NOTE(review): annotated Optional but part of the primary key — SQLAlchemy
    # forces primary-key columns NOT NULL, and PostgreSQL PKs cannot contain
    # NULLs, so non-entity links presumably use a sentinel value; confirm
    # against the link-creation code.
    entity_id: Mapped[Optional[PyUUID]] = mapped_column(
        UUID(as_uuid=True), ForeignKey("entities.id", ondelete="CASCADE"), primary_key=True
    )
    # Edge strength in [0.0, 1.0] (enforced by memory_links_weight_check).
    weight: Mapped[float] = mapped_column(Float, nullable=False, server_default="1.0")
    created_at: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )

    # Relationships
    from_unit = relationship("MemoryUnit", foreign_keys=[from_unit_id], back_populates="outgoing_links")
    to_unit = relationship("MemoryUnit", foreign_keys=[to_unit_id], back_populates="incoming_links")
    entity = relationship("Entity", back_populates="memory_links")

    __table_args__ = (
        CheckConstraint(
            "link_type IN ('temporal', 'semantic', 'entity', 'causes', 'caused_by', 'enables', 'prevents')",
            name="memory_links_link_type_check"
        ),
        CheckConstraint("weight >= 0.0 AND weight <= 1.0", name="memory_links_weight_check"),
        Index("idx_memory_links_from", "from_unit_id"),
        Index("idx_memory_links_to", "to_unit_id"),
        Index("idx_memory_links_type", "link_type"),
        Index("idx_memory_links_entity", "entity_id", postgresql_where=sql_text("entity_id IS NOT NULL")),
        # Partial index for traversal queries that skip near-zero-weight edges.
        Index(
            "idx_memory_links_from_weight",
            "from_unit_id",
            "weight",
            postgresql_where=sql_text("weight >= 0.1"),
            postgresql_ops={"weight": "DESC"}
        ),
    )
284
+
285
+
286
class Bank(Base):
    """Memory bank profiles with personality traits and background."""
    __tablename__ = "banks"

    bank_id: Mapped[str] = mapped_column(Text, primary_key=True)
    # Big Five trait scores plus bias_strength, each defaulting to 0.5.
    personality: Mapped[dict] = mapped_column(
        JSONB,
        nullable=False,
        server_default=sql_text(
            '\'{"openness": 0.5, "conscientiousness": 0.5, "extraversion": 0.5, '
            '"agreeableness": 0.5, "neuroticism": 0.5, "bias_strength": 0.5}\'::jsonb'
        )
    )
    background: Mapped[str] = mapped_column(Text, nullable=False, server_default="")
    created_at: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )
    # NOTE(review): no onupdate — presumably maintained by application code; confirm.
    updated_at: Mapped[datetime] = mapped_column(
        TIMESTAMP(timezone=True), server_default=func.now()
    )

    __table_args__ = (
        # NOTE(review): redundant with the primary-key index on bank_id;
        # harmless, but could be dropped in a future migration.
        Index("idx_banks_bank_id", "bank_id"),
    )