agno 2.3.2-py3-none-any.whl → 2.3.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +513 -185
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +176 -0
- agno/db/dynamo/dynamo.py +11 -0
- agno/db/firestore/firestore.py +5 -1
- agno/db/gcs_json/gcs_json_db.py +5 -2
- agno/db/in_memory/in_memory_db.py +5 -2
- agno/db/json/json_db.py +5 -1
- agno/db/migrations/manager.py +4 -4
- agno/db/mongo/async_mongo.py +158 -34
- agno/db/mongo/mongo.py +6 -2
- agno/db/mysql/mysql.py +48 -54
- agno/db/postgres/async_postgres.py +66 -52
- agno/db/postgres/postgres.py +42 -50
- agno/db/redis/redis.py +5 -0
- agno/db/redis/utils.py +5 -5
- agno/db/singlestore/singlestore.py +99 -108
- agno/db/sqlite/async_sqlite.py +29 -27
- agno/db/sqlite/sqlite.py +30 -26
- agno/knowledge/reader/pdf_reader.py +2 -2
- agno/knowledge/reader/tavily_reader.py +0 -1
- agno/memory/__init__.py +14 -1
- agno/memory/manager.py +217 -4
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +67 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/aimlapi/aimlapi.py +18 -0
- agno/models/anthropic/claude.py +87 -81
- agno/models/aws/bedrock.py +38 -16
- agno/models/aws/claude.py +97 -277
- agno/models/azure/ai_foundry.py +8 -4
- agno/models/base.py +101 -14
- agno/models/cerebras/cerebras.py +25 -9
- agno/models/cerebras/cerebras_openai.py +22 -2
- agno/models/cohere/chat.py +18 -6
- agno/models/cometapi/cometapi.py +19 -1
- agno/models/deepinfra/deepinfra.py +19 -1
- agno/models/fireworks/fireworks.py +19 -1
- agno/models/google/gemini.py +583 -21
- agno/models/groq/groq.py +23 -6
- agno/models/huggingface/huggingface.py +22 -7
- agno/models/ibm/watsonx.py +21 -7
- agno/models/internlm/internlm.py +19 -1
- agno/models/langdb/langdb.py +10 -0
- agno/models/litellm/chat.py +17 -7
- agno/models/litellm/litellm_openai.py +19 -1
- agno/models/message.py +19 -5
- agno/models/meta/llama.py +25 -5
- agno/models/meta/llama_openai.py +18 -0
- agno/models/mistral/mistral.py +13 -5
- agno/models/nvidia/nvidia.py +19 -1
- agno/models/ollama/chat.py +17 -6
- agno/models/openai/chat.py +22 -7
- agno/models/openai/responses.py +28 -10
- agno/models/openrouter/openrouter.py +20 -0
- agno/models/perplexity/perplexity.py +17 -0
- agno/models/requesty/requesty.py +18 -0
- agno/models/sambanova/sambanova.py +19 -1
- agno/models/siliconflow/siliconflow.py +19 -1
- agno/models/together/together.py +19 -1
- agno/models/vercel/v0.py +19 -1
- agno/models/vertexai/claude.py +99 -5
- agno/models/xai/xai.py +18 -0
- agno/os/interfaces/agui/router.py +1 -0
- agno/os/interfaces/agui/utils.py +97 -57
- agno/os/router.py +16 -0
- agno/os/routers/memory/memory.py +143 -0
- agno/os/routers/memory/schemas.py +26 -0
- agno/os/schema.py +33 -6
- agno/os/utils.py +134 -10
- agno/run/base.py +2 -1
- agno/run/workflow.py +1 -1
- agno/team/team.py +566 -219
- agno/tools/mcp/mcp.py +1 -1
- agno/utils/agent.py +119 -1
- agno/utils/models/ai_foundry.py +9 -2
- agno/utils/models/claude.py +12 -5
- agno/utils/models/cohere.py +9 -2
- agno/utils/models/llama.py +9 -2
- agno/utils/models/mistral.py +4 -2
- agno/utils/print_response/agent.py +37 -2
- agno/utils/print_response/team.py +52 -0
- agno/utils/tokens.py +41 -0
- agno/workflow/types.py +2 -2
- {agno-2.3.2.dist-info → agno-2.3.4.dist-info}/METADATA +45 -40
- {agno-2.3.2.dist-info → agno-2.3.4.dist-info}/RECORD +90 -83
- {agno-2.3.2.dist-info → agno-2.3.4.dist-info}/WHEEL +0 -0
- {agno-2.3.2.dist-info → agno-2.3.4.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.2.dist-info → agno-2.3.4.dist-info}/top_level.txt +0 -0
@@ -109,7 +109,7 @@ class SingleStoreDb(BaseDb):
         self.db_url: Optional[str] = db_url
         self.db_engine: Engine = _engine
         self.db_schema: Optional[str] = db_schema
-        self.metadata: MetaData = MetaData()
+        self.metadata: MetaData = MetaData(schema=self.db_schema)

         # Initialize database session
         self.Session: scoped_session = scoped_session(sessionmaker(bind=self.db_engine))
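For context, a minimal standalone sketch (not taken from the package) of what the `MetaData(schema=...)` change buys: a schema-bound `MetaData` stamps its schema onto every `Table` built against it, which is why the explicit `db_schema` argument disappears from the helpers further down. The schema and table names below are placeholders.

```python
# Illustrative sketch only: schema-bound MetaData in SQLAlchemy.
from sqlalchemy import Column, MetaData, String, Table

metadata = MetaData(schema="agno")  # placeholder schema name; None would mean "no schema"
sessions = Table(
    "agno_sessions",  # placeholder table name
    metadata,
    Column("session_id", String(128), primary_key=True),
)

print(sessions.schema)    # "agno" -- inherited from the MetaData
print(sessions.fullname)  # "agno.agno_sessions"
```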
@@ -127,7 +127,7 @@ class SingleStoreDb(BaseDb):
         with self.Session() as sess:
             return is_table_available(session=sess, table_name=table_name, db_schema=self.db_schema)

-    def _create_table_structure_only(self, table_name: str, table_type: str, db_schema: Optional[str]) -> Table:
+    def _create_table_structure_only(self, table_name: str, table_type: str) -> Table:
         """
         Create a table structure definition without actually creating the table in the database.
         Used to avoid autoload issues with SingleStore JSON types.
@@ -135,7 +135,6 @@ class SingleStoreDb(BaseDb):
         Args:
             table_name (str): Name of the table
             table_type (str): Type of table (used to get schema definition)
-            db_schema (Optional[str]): Database schema name

         Returns:
             Table: SQLAlchemy Table object with column definitions
@@ -161,13 +160,12 @@ class SingleStoreDb(BaseDb):
                 columns.append(Column(*column_args, **column_kwargs))

             # Create the table object without constraints to avoid autoload issues
-
-            table = Table(table_name, table_metadata, *columns, schema=db_schema)
+            table = Table(table_name, self.metadata, *columns, schema=self.db_schema)

             return table

         except Exception as e:
-            table_ref = f"{db_schema}.{table_name}" if db_schema else table_name
+            table_ref = f"{self.db_schema}.{table_name}" if self.db_schema else table_name
             log_error(f"Could not create table structure for {table_ref}: {e}")
             raise

@@ -183,31 +181,23 @@ class SingleStoreDb(BaseDb):
         ]

         for table_name, table_type in tables_to_create:
-
-            # Also store the schema version for the created table
-            latest_schema_version = MigrationManager(self).latest_schema_version
-            self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)
+            self._get_or_create_table(table_name=table_name, table_type=table_type, create_table_if_not_found=True)

-
-
-    def _create_table(self, table_name: str, table_type: str, db_schema: Optional[str]) -> Table:
+    def _create_table(self, table_name: str, table_type: str) -> Table:
         """
         Create a table with the appropriate schema based on the table type.

         Args:
             table_name (str): Name of the table to create
             table_type (str): Type of table (used to get schema definition)
-            db_schema (Optional[str]): Database schema name

         Returns:
             Table: SQLAlchemy Table object
         """
-        table_ref = f"{db_schema}.{table_name}" if db_schema else table_name
+        table_ref = f"{self.db_schema}.{table_name}" if self.db_schema else table_name
         try:
             table_schema = get_table_schema_definition(table_type)

-            log_debug(f"Creating table {table_ref}")
-
             columns: List[Column] = []
             indexes: List[str] = []
             unique_constraints: List[str] = []
@@ -229,8 +219,7 @@ class SingleStoreDb(BaseDb):
                 columns.append(Column(*column_args, **column_kwargs))

             # Create the table object
-
-            table = Table(table_name, table_metadata, *columns, schema=db_schema)
+            table = Table(table_name, self.metadata, *columns, schema=self.db_schema)

             # Add multi-column unique constraints with table-specific names
             for constraint in schema_unique_constraints:
@@ -244,48 +233,52 @@ class SingleStoreDb(BaseDb):
                 table.append_constraint(Index(idx_name, idx_col))

             # Create schema if one is specified
-            if db_schema is not None:
+            if self.db_schema is not None:
                 with self.Session() as sess, sess.begin():
-                    create_schema(session=sess, db_schema=db_schema)
+                    create_schema(session=sess, db_schema=self.db_schema)

             # SingleStore has a limitation on the number of unique multi-field constraints per table.
             # We need to work around that limitation for the sessions table.
-
-
-
-
-
-
-
-            col_sql
-
-
-
-
-
-
-
-
-
-
-
+            table_created = False
+            if not self.table_exists(table_name):
+                if table_type == "sessions":
+                    with self.Session() as sess, sess.begin():
+                        # Build column definitions
+                        columns_sql = []
+                        for col in table.columns:
+                            col_sql = f"{col.name} {col.type.compile(self.db_engine.dialect)}"
+                            if not col.nullable:
+                                col_sql += " NOT NULL"
+                            columns_sql.append(col_sql)
+
+                        columns_def = ", ".join(columns_sql)
+
+                        # Add shard key and single unique constraint
+                        table_sql = f"""CREATE TABLE IF NOT EXISTS {table_ref} (
+                            {columns_def},
+                            SHARD KEY (session_id),
+                            UNIQUE KEY uq_session_type (session_id, session_type)
+                        )"""
+
+                        sess.execute(text(table_sql))
+                else:
+                    table.create(self.db_engine, checkfirst=True)
+                log_debug(f"Successfully created table '{table_ref}'")
+                table_created = True
             else:
-
+                log_debug(f"Table '{table_ref}' already exists, skipping creation")

             # Create indexes
             for idx in table.indexes:
                 try:
-                    log_debug(f"Creating index: {idx.name}")
-
                     # Check if index already exists
                     with self.Session() as sess:
-                        if db_schema is not None:
+                        if self.db_schema is not None:
                             exists_query = text(
                                 "SELECT 1 FROM information_schema.statistics WHERE table_schema = :schema AND index_name = :index_name"
                             )
                             exists = (
-                                sess.execute(exists_query, {"schema": db_schema, "index_name": idx.name}).scalar()
+                                sess.execute(exists_query, {"schema": self.db_schema, "index_name": idx.name}).scalar()
                                 is not None
                             )
                         else:
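The `sessions` branch above works around SingleStore's limit on multi-column unique constraints by hand-writing the DDL with a single `SHARD KEY` plus one composite `UNIQUE KEY`. Below is a rough sketch of the statement it ends up issuing; the column definitions and the schema-qualified name are placeholders, since the real list is compiled from the table-schema definition at runtime.

```python
# Illustrative sketch only: the approximate shape of the generated sessions DDL.
columns_sql = [
    "session_id VARCHAR(128) NOT NULL",   # placeholder column definitions
    "session_type VARCHAR(32) NOT NULL",
    "session_data JSON",
]
table_ref = "agno.agno_sessions"  # assumed schema-qualified table reference

table_sql = f"""CREATE TABLE IF NOT EXISTS {table_ref} (
    {", ".join(columns_sql)},
    SHARD KEY (session_id),
    UNIQUE KEY uq_session_type (session_id, session_type)
)"""
print(table_sql)
```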
@@ -299,10 +292,15 @@ class SingleStoreDb(BaseDb):

                     idx.create(self.db_engine)

+                    log_debug(f"Created index: {idx.name} for table {table_ref}")
             except Exception as e:
                 log_error(f"Error creating index {idx.name}: {e}")

-
+            # Store the schema version for the created table
+            if table_name != self.versions_table_name and table_created:
+                latest_schema_version = MigrationManager(self).latest_schema_version
+                self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)
+
             return table

         except Exception as e:
@@ -314,7 +312,6 @@ class SingleStoreDb(BaseDb):
             self.session_table = self._get_or_create_table(
                 table_name=self.session_table_name,
                 table_type="sessions",
-                db_schema=self.db_schema,
                 create_table_if_not_found=create_table_if_not_found,
             )
             return self.session_table
@@ -323,7 +320,6 @@ class SingleStoreDb(BaseDb):
             self.memory_table = self._get_or_create_table(
                 table_name=self.memory_table_name,
                 table_type="memories",
-                db_schema=self.db_schema,
                 create_table_if_not_found=create_table_if_not_found,
             )
             return self.memory_table
@@ -332,7 +328,6 @@ class SingleStoreDb(BaseDb):
             self.metrics_table = self._get_or_create_table(
                 table_name=self.metrics_table_name,
                 table_type="metrics",
-                db_schema=self.db_schema,
                 create_table_if_not_found=create_table_if_not_found,
             )
             return self.metrics_table
@@ -341,7 +336,6 @@ class SingleStoreDb(BaseDb):
             self.eval_table = self._get_or_create_table(
                 table_name=self.eval_table_name,
                 table_type="evals",
-                db_schema=self.db_schema,
                 create_table_if_not_found=create_table_if_not_found,
            )
             return self.eval_table
@@ -350,7 +344,6 @@ class SingleStoreDb(BaseDb):
             self.knowledge_table = self._get_or_create_table(
                 table_name=self.knowledge_table_name,
                 table_type="knowledge",
-                db_schema=self.db_schema,
                 create_table_if_not_found=create_table_if_not_found,
             )
             return self.knowledge_table
@@ -359,7 +352,6 @@ class SingleStoreDb(BaseDb):
             self.culture_table = self._get_or_create_table(
                 table_name=self.culture_table_name,
                 table_type="culture",
-                db_schema=self.db_schema,
                 create_table_if_not_found=create_table_if_not_found,
             )
             return self.culture_table
@@ -368,54 +360,16 @@ class SingleStoreDb(BaseDb):
             self.versions_table = self._get_or_create_table(
                 table_name=self.versions_table_name,
                 table_type="versions",
-                db_schema=self.db_schema,
                 create_table_if_not_found=create_table_if_not_found,
             )
             return self.versions_table

         raise ValueError(f"Unknown table type: {table_type}")

-    def get_latest_schema_version(self, table_name: str) -> str:
-        """Get the latest version of the database schema."""
-        table = self._get_table(table_type="versions", create_table_if_not_found=True)
-        if table is None:
-            return "2.0.0"
-        with self.Session() as sess:
-            stmt = select(table)
-            # Latest version for the given table
-            stmt = stmt.where(table.c.table_name == table_name)
-            stmt = stmt.order_by(table.c.version.desc()).limit(1)
-            result = sess.execute(stmt).fetchone()
-            if result is None:
-                return "2.0.0"
-            version_dict = dict(result._mapping)
-            return version_dict.get("version") or "2.0.0"
-
-    def upsert_schema_version(self, table_name: str, version: str) -> None:
-        """Upsert the schema version into the database."""
-        table = self._get_table(table_type="versions", create_table_if_not_found=True)
-        if table is None:
-            return
-        current_datetime = datetime.now().isoformat()
-        with self.Session() as sess, sess.begin():
-            stmt = mysql.insert(table).values(
-                table_name=table_name,
-                version=version,
-                created_at=current_datetime,  # Store as ISO format string
-                updated_at=current_datetime,
-            )
-            # Update version if table_name already exists
-            stmt = stmt.on_duplicate_key_update(
-                version=version,
-                updated_at=current_datetime,
-            )
-            sess.execute(stmt)
-
     def _get_or_create_table(
         self,
         table_name: str,
         table_type: str,
-        db_schema: Optional[str],
         create_table_if_not_found: Optional[bool] = False,
     ) -> Optional[Table]:
         """
@@ -424,14 +378,13 @@ class SingleStoreDb(BaseDb):
         Args:
             table_name (str): Name of the table to get or create
             table_type (str): Type of table (used to get schema definition)
-            db_schema (Optional[str]): Database schema name

         Returns:
             Table: SQLAlchemy Table object representing the schema.
         """

         with self.Session() as sess, sess.begin():
-            table_is_available = is_table_available(session=sess, table_name=table_name, db_schema=db_schema)
+            table_is_available = is_table_available(session=sess, table_name=table_name, db_schema=self.db_schema)

         if not table_is_available:
             if not create_table_if_not_found:
@@ -442,25 +395,61 @@ class SingleStoreDb(BaseDb):
                 latest_schema_version = MigrationManager(self).latest_schema_version
                 self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)

-                return self._create_table(table_name=table_name, table_type=table_type, db_schema=db_schema)
+                return self._create_table(table_name=table_name, table_type=table_type)

         if not is_valid_table(
             db_engine=self.db_engine,
             table_name=table_name,
             table_type=table_type,
-            db_schema=db_schema,
+            db_schema=self.db_schema,
         ):
-            table_ref = f"{db_schema}.{table_name}" if db_schema else table_name
+            table_ref = f"{self.db_schema}.{table_name}" if self.db_schema else table_name
             raise ValueError(f"Table {table_ref} has an invalid schema")

         try:
-            return self._create_table_structure_only(table_name=table_name, table_type=table_type, db_schema=db_schema)
+            return self._create_table_structure_only(table_name=table_name, table_type=table_type)

         except Exception as e:
-            table_ref = f"{db_schema}.{table_name}" if db_schema else table_name
+            table_ref = f"{self.db_schema}.{table_name}" if self.db_schema else table_name
             log_error(f"Error loading existing table {table_ref}: {e}")
             raise

+    def get_latest_schema_version(self, table_name: str) -> str:
+        """Get the latest version of the database schema."""
+        table = self._get_table(table_type="versions", create_table_if_not_found=True)
+        if table is None:
+            return "2.0.0"
+        with self.Session() as sess:
+            stmt = select(table)
+            # Latest version for the given table
+            stmt = stmt.where(table.c.table_name == table_name)
+            stmt = stmt.order_by(table.c.version.desc()).limit(1)
+            result = sess.execute(stmt).fetchone()
+            if result is None:
+                return "2.0.0"
+            version_dict = dict(result._mapping)
+            return version_dict.get("version") or "2.0.0"
+
+    def upsert_schema_version(self, table_name: str, version: str) -> None:
+        """Upsert the schema version into the database."""
+        table = self._get_table(table_type="versions", create_table_if_not_found=True)
+        if table is None:
+            return
+        current_datetime = datetime.now().isoformat()
+        with self.Session() as sess, sess.begin():
+            stmt = mysql.insert(table).values(
+                table_name=table_name,
+                version=version,
+                created_at=current_datetime,  # Store as ISO format string
+                updated_at=current_datetime,
+            )
+            # Update version if table_name already exists
+            stmt = stmt.on_duplicate_key_update(
+                version=version,
+                updated_at=current_datetime,
+            )
+            sess.execute(stmt)
+
     # -- Session methods --
     def delete_session(self, session_id: str) -> bool:
         """
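The relocated `upsert_schema_version` relies on SQLAlchemy's MySQL dialect (which SingleStore speaks) to turn an insert into an upsert. A small standalone sketch, using a simplified stand-in for the versions table and placeholder values, of how `on_duplicate_key_update` renders:

```python
# Illustrative sketch only: the INSERT ... ON DUPLICATE KEY UPDATE pattern used above.
from sqlalchemy import Column, MetaData, String, Table
from sqlalchemy.dialects import mysql

metadata = MetaData()
versions = Table(  # simplified stand-in for the real versions table
    "agno_schema_versions",
    metadata,
    Column("table_name", String(255), primary_key=True),
    Column("version", String(32)),
    Column("updated_at", String(64)),
)

stmt = mysql.insert(versions).values(
    table_name="agno_sessions", version="2.0.0", updated_at="1970-01-01T00:00:00"
)
# If a row for this table_name already exists, update it instead of failing on the key conflict.
stmt = stmt.on_duplicate_key_update(version="2.0.0", updated_at="1970-01-01T00:00:00")

print(stmt.compile(dialect=mysql.dialect()))
# Renders roughly to:
#   INSERT INTO agno_schema_versions (table_name, version, updated_at) VALUES (%s, %s, %s)
#   ON DUPLICATE KEY UPDATE version = %s, updated_at = %s
```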
@@ -1307,13 +1296,14 @@ class SingleStoreDb(BaseDb):
             raise e

     def get_user_memory_stats(
-        self, limit: Optional[int] = None, page: Optional[int] = None
+        self, limit: Optional[int] = None, page: Optional[int] = None, user_id: Optional[str] = None
     ) -> Tuple[List[Dict[str, Any]], int]:
         """Get user memories stats.

         Args:
             limit (Optional[int]): The maximum number of user stats to return.
             page (Optional[int]): The page number.
+            user_id (Optional[str]): User ID for filtering.

         Returns:
             Tuple[List[Dict[str, Any]], int]: A list of dictionaries containing user stats and total count.
@@ -1336,16 +1326,17 @@ class SingleStoreDb(BaseDb):
             return [], 0

         with self.Session() as sess, sess.begin():
-            stmt = (
-                select(
-                    table.c.user_id,
-                    func.count(table.c.memory_id).label("total_memories"),
-                    func.max(table.c.updated_at).label("last_memory_updated_at"),
-                )
-                .where(table.c.user_id.is_not(None))
-                .group_by(table.c.user_id)
-                .order_by(func.max(table.c.updated_at).desc())
+            stmt = select(
+                table.c.user_id,
+                func.count(table.c.memory_id).label("total_memories"),
+                func.max(table.c.updated_at).label("last_memory_updated_at"),
             )
+            if user_id is not None:
+                stmt = stmt.where(table.c.user_id == user_id)
+            else:
+                stmt = stmt.where(table.c.user_id.is_not(None))
+            stmt = stmt.group_by(table.c.user_id)
+            stmt = stmt.order_by(func.max(table.c.updated_at).desc())

             count_stmt = select(func.count()).select_from(stmt.alias())
             total_count = sess.execute(count_stmt).scalar()
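Callers of `get_user_memory_stats` are not shown in this diff; the sketch below only illustrates the new optional `user_id` filter against any of the updated Db classes. The `db` argument is a hypothetical instance, and the result-dict keys follow the labels used in the query above.

```python
# Illustrative sketch only: using the new user_id filter on get_user_memory_stats.
from typing import Any, Dict, List, Optional


def print_memory_stats(db: Any, user_id: Optional[str] = None) -> None:
    # db is assumed to be a SingleStoreDb / SqliteDb instance from this release.
    stats: List[Dict[str, Any]]
    stats, total = db.get_user_memory_stats(limit=20, page=1, user_id=user_id)
    for row in stats:
        print(row["user_id"], row["total_memories"], row["last_memory_updated_at"])
    print(f"matching users: {total}")
```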
agno/db/sqlite/async_sqlite.py
CHANGED
@@ -140,12 +140,7 @@ class AsyncSqliteDb(AsyncBaseDb):
         ]

         for table_name, table_type in tables_to_create:
-
-            # Also store the schema version for the created table
-            latest_schema_version = MigrationManager(self).latest_schema_version
-            await self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)
-
-            await self._create_table(table_name=table_name, table_type=table_type)
+            await self._get_or_create_table(table_name=table_name, table_type=table_type)

     async def _create_table(self, table_name: str, table_type: str) -> Table:
         """
@@ -160,7 +155,6 @@ class AsyncSqliteDb(AsyncBaseDb):
         """
         try:
             table_schema = get_table_schema_definition(table_type)
-            log_debug(f"Creating table {table_name}")

             columns: List[Column] = []
             indexes: List[str] = []
@@ -185,8 +179,7 @@ class AsyncSqliteDb(AsyncBaseDb):
                 columns.append(Column(*column_args, **column_kwargs))  # type: ignore

             # Create the table object
-
-            table = Table(table_name, table_metadata, *columns)
+            table = Table(table_name, self.metadata, *columns)

             # Add multi-column unique constraints with table-specific names
             for constraint in schema_unique_constraints:
@@ -200,13 +193,18 @@ class AsyncSqliteDb(AsyncBaseDb):
                 table.append_constraint(Index(idx_name, idx_col))

             # Create table
-            async with self.db_engine.begin() as conn:
-                await conn.run_sync(table.create, checkfirst=True)
+            table_created = False
+            if not await self.table_exists(table_name):
+                async with self.db_engine.begin() as conn:
+                    await conn.run_sync(table.create, checkfirst=True)
+                log_debug(f"Successfully created table '{table_name}'")
+                table_created = True
+            else:
+                log_debug(f"Table {table_name} already exists, skipping creation")

             # Create indexes
             for idx in table.indexes:
                 try:
-                    log_debug(f"Creating index: {idx.name}")
                     # Check if index already exists
                     async with self.async_session_factory() as sess:
                         exists_query = text("SELECT 1 FROM sqlite_master WHERE type = 'index' AND name = :index_name")
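For readers less familiar with SQLAlchemy's asyncio layer, here is a minimal, self-contained sketch of the `run_sync` pattern the guarded branch above uses to create a table on an async engine. It assumes the `aiosqlite` driver is installed; the table and columns are placeholders.

```python
# Illustrative sketch only: creating a Table through an AsyncEngine via run_sync.
import asyncio

from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.ext.asyncio import create_async_engine

metadata = MetaData()
demo = Table(
    "demo_memories",  # placeholder table
    metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", String(64)),
)


async def main() -> None:
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")
    async with engine.begin() as conn:
        # run_sync hands the underlying sync connection to Table.create
        await conn.run_sync(demo.create, checkfirst=True)
    await engine.dispose()


asyncio.run(main())
```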
@@ -218,11 +216,16 @@ class AsyncSqliteDb(AsyncBaseDb):

                     async with self.db_engine.begin() as conn:
                         await conn.run_sync(idx.create)
+                    log_debug(f"Created index: {idx.name} for table {table_name}")

                 except Exception as e:
                     log_warning(f"Error creating index {idx.name}: {e}")

-
+            # Store the schema version for the created table
+            if table_name != self.versions_table_name and table_created:
+                latest_schema_version = MigrationManager(self).latest_schema_version
+                await self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)
+
             return table

         except Exception as e:
@@ -308,11 +311,6 @@ class AsyncSqliteDb(AsyncBaseDb):
             table_is_available = await ais_table_available(session=sess, table_name=table_name)

         if not table_is_available:
-            if table_name != self.versions_table_name:
-                # Also store the schema version for the created table
-                latest_schema_version = MigrationManager(self).latest_schema_version
-                await self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)
-
             return await self._create_table(table_name=table_name, table_type=table_type)

         # SQLite version of table validation (no schema)
@@ -1218,12 +1216,14 @@ class AsyncSqliteDb(AsyncBaseDb):
         self,
         limit: Optional[int] = None,
         page: Optional[int] = None,
+        user_id: Optional[str] = None,
     ) -> Tuple[List[Dict[str, Any]], int]:
         """Get user memories stats.

         Args:
             limit (Optional[int]): The maximum number of user stats to return.
             page (Optional[int]): The page number.
+            user_id (Optional[str]): User ID for filtering.

         Returns:
             Tuple[List[Dict[str, Any]], int]: A list of dictionaries containing user stats and total count.
@@ -1246,17 +1246,19 @@ class AsyncSqliteDb(AsyncBaseDb):
             return [], 0

         async with self.async_session_factory() as sess, sess.begin():
-            stmt = (
-                select(
-                    table.c.user_id,
-                    func.count(table.c.memory_id).label("total_memories"),
-                    func.max(table.c.updated_at).label("last_memory_updated_at"),
-                )
-                .where(table.c.user_id.is_not(None))
-                .group_by(table.c.user_id)
-                .order_by(func.max(table.c.updated_at).desc())
+            stmt = select(
+                table.c.user_id,
+                func.count(table.c.memory_id).label("total_memories"),
+                func.max(table.c.updated_at).label("last_memory_updated_at"),
             )

+            if user_id is not None:
+                stmt = stmt.where(table.c.user_id == user_id)
+            else:
+                stmt = stmt.where(table.c.user_id.is_not(None))
+            stmt = stmt.group_by(table.c.user_id)
+            stmt = stmt.order_by(func.max(table.c.updated_at).desc())
+
             count_stmt = select(func.count()).select_from(stmt.alias())
             total_count = (await sess.execute(count_stmt)).scalar() or 0

agno/db/sqlite/sqlite.py
CHANGED
@@ -141,12 +141,7 @@ class SqliteDb(BaseDb):
         ]

         for table_name, table_type in tables_to_create:
-
-            # Also store the schema version for the created table
-            latest_schema_version = MigrationManager(self).latest_schema_version
-            self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)
-
-            self._create_table(table_name=table_name, table_type=table_type)
+            self._get_or_create_table(table_name=table_name, table_type=table_type, create_table_if_not_found=True)

     def _create_table(self, table_name: str, table_type: str) -> Table:
         """
@@ -161,7 +156,6 @@ class SqliteDb(BaseDb):
         """
         try:
             table_schema = get_table_schema_definition(table_type)
-            log_debug(f"Creating table {table_name}")

             columns: List[Column] = []
             indexes: List[str] = []
@@ -186,8 +180,7 @@ class SqliteDb(BaseDb):
                 columns.append(Column(*column_args, **column_kwargs))  # type: ignore

             # Create the table object
-
-            table = Table(table_name, table_metadata, *columns)
+            table = Table(table_name, self.metadata, *columns)

             # Add multi-column unique constraints with table-specific names
             for constraint in schema_unique_constraints:
@@ -201,12 +194,17 @@ class SqliteDb(BaseDb):
                 table.append_constraint(Index(idx_name, idx_col))

             # Create table
-            table.create(self.db_engine, checkfirst=True)
+            table_created = False
+            if not self.table_exists(table_name):
+                table.create(self.db_engine, checkfirst=True)
+                log_debug(f"Successfully created table '{table_name}'")
+                table_created = True
+            else:
+                log_debug(f"Table '{table_name}' already exists, skipping creation")

             # Create indexes
             for idx in table.indexes:
                 try:
-                    log_debug(f"Creating index: {idx.name}")
                     # Check if index already exists
                     with self.Session() as sess:
                         exists_query = text("SELECT 1 FROM sqlite_master WHERE type = 'index' AND name = :index_name")
@@ -217,13 +215,21 @@ class SqliteDb(BaseDb):

                     idx.create(self.db_engine)

+                    log_debug(f"Created index: {idx.name} for table {table_name}")
                 except Exception as e:
                     log_warning(f"Error creating index {idx.name}: {e}")

-
+            # Store the schema version for the created table
+            if table_name != self.versions_table_name and table_created:
+                latest_schema_version = MigrationManager(self).latest_schema_version
+                self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)
+
             return table

         except Exception as e:
+            from traceback import format_exc
+
+            print(format_exc())
             log_error(f"Could not create table '{table_name}': {e}")
             raise e

@@ -311,11 +317,6 @@ class SqliteDb(BaseDb):
             if not create_table_if_not_found:
                 return None

-            if table_name != self.versions_table_name:
-                # Also store the schema version for the created table
-                latest_schema_version = MigrationManager(self).latest_schema_version
-                self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)
-
             return self._create_table(table_name=table_name, table_type=table_type)

         # SQLite version of table validation (no schema)
@@ -1212,12 +1213,14 @@ class SqliteDb(BaseDb):
         self,
         limit: Optional[int] = None,
         page: Optional[int] = None,
+        user_id: Optional[str] = None,
     ) -> Tuple[List[Dict[str, Any]], int]:
         """Get user memories stats.

         Args:
             limit (Optional[int]): The maximum number of user stats to return.
             page (Optional[int]): The page number.
+            user_id (Optional[str]): User ID for filtering.

         Returns:
             Tuple[List[Dict[str, Any]], int]: A list of dictionaries containing user stats and total count.
@@ -1240,16 +1243,17 @@ class SqliteDb(BaseDb):
             return [], 0

         with self.Session() as sess, sess.begin():
-            stmt = (
-                select(
-                    table.c.user_id,
-                    func.count(table.c.memory_id).label("total_memories"),
-                    func.max(table.c.updated_at).label("last_memory_updated_at"),
-                )
-                .where(table.c.user_id.is_not(None))
-                .group_by(table.c.user_id)
-                .order_by(func.max(table.c.updated_at).desc())
+            stmt = select(
+                table.c.user_id,
+                func.count(table.c.memory_id).label("total_memories"),
+                func.max(table.c.updated_at).label("last_memory_updated_at"),
             )
+            if user_id is not None:
+                stmt = stmt.where(table.c.user_id == user_id)
+            else:
+                stmt = stmt.where(table.c.user_id.is_not(None))
+            stmt = stmt.group_by(table.c.user_id)
+            stmt = stmt.order_by(func.max(table.c.updated_at).desc())

             count_stmt = select(func.count()).select_from(stmt.alias())
             total_count = sess.execute(count_stmt).scalar() or 0