@mastra/libsql 0.13.4 → 0.13.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +1 -1
- package/CHANGELOG.md +22 -0
- package/dist/index.cjs +679 -8
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +680 -9
- package/dist/index.js.map +1 -1
- package/dist/storage/domains/memory/index.d.ts.map +1 -1
- package/dist/storage/domains/observability/index.d.ts +34 -0
- package/dist/storage/domains/observability/index.d.ts.map +1 -0
- package/dist/storage/domains/operations/index.d.ts +50 -1
- package/dist/storage/domains/operations/index.d.ts.map +1 -1
- package/dist/storage/domains/utils.d.ts +45 -1
- package/dist/storage/domains/utils.d.ts.map +1 -1
- package/dist/storage/domains/workflows/index.d.ts +27 -2
- package/dist/storage/domains/workflows/index.d.ts.map +1 -1
- package/dist/storage/index.d.ts +42 -2
- package/dist/storage/index.d.ts.map +1 -1
- package/package.json +5 -5
- package/src/storage/domains/memory/index.ts +1 -1
- package/src/storage/domains/observability/index.ts +237 -0
- package/src/storage/domains/operations/index.ts +213 -3
- package/src/storage/domains/utils.ts +207 -2
- package/src/storage/domains/workflows/index.ts +225 -2
- package/src/storage/index.ts +74 -1
package/dist/index.cjs
CHANGED
```diff
@@ -6,7 +6,6 @@ var utils = require('@mastra/core/utils');
 var vector = require('@mastra/core/vector');
 var filter = require('@mastra/core/vector/filter');
 var storage = require('@mastra/core/storage');
-var core = require('@mastra/core');
 var agent = require('@mastra/core/agent');
 
 // src/vector/index.ts
```
```diff
@@ -1392,14 +1391,14 @@ var MemoryLibSQL = class extends storage.MemoryStorage {
           }
         } : {}
       };
-      setClauses.push(`${
+      setClauses.push(`${utils.parseSqlIdentifier("content", "column name")} = ?`);
       args.push(JSON.stringify(newContent));
       delete updatableFields.content;
     }
     for (const key in updatableFields) {
       if (Object.prototype.hasOwnProperty.call(updatableFields, key)) {
         const dbKey = columnMapping[key] || key;
-        setClauses.push(`${
+        setClauses.push(`${utils.parseSqlIdentifier(dbKey, "column name")} = ?`);
         let value = updatableFields[key];
         if (typeof value === "object" && value !== null) {
           value = JSON.stringify(value);
```
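Both rewritten lines route the dynamic column name through `parseSqlIdentifier` before interpolation, while values stay as bound `?` placeholders. A minimal sketch of the pattern, assuming only that `parseSqlIdentifier` rejects anything that is not a plain identifier (the field map below is illustrative):

```js
const { parseSqlIdentifier } = require('@mastra/core/utils');

// Illustrative field map; the real caller derives this from the message update.
const updatableFields = { role: 'assistant', type: 'text' };
const setClauses = [];
const args = [];
for (const key of Object.keys(updatableFields)) {
  // Throws on non-identifier input, so column names cannot smuggle SQL in.
  setClauses.push(`${parseSqlIdentifier(key, 'column name')} = ?`);
  args.push(updatableFields[key]);
}
console.log(setClauses.join(', ')); // role = ?, type = ?
```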
```diff
@@ -1801,7 +1800,7 @@ function prepareStatement({ tableName, record }) {
   const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
   const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
   const values = Object.values(record).map((v) => {
-    if (typeof v === `undefined`) {
+    if (typeof v === `undefined` || v === null) {
       return null;
     }
     if (v instanceof Date) {
```
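The widened guard means an explicit `null` in a record now short-circuits to SQL NULL instead of falling through to the `Date`/JSON branches. A one-liner showing the effect (a sketch of the new guard only):

```js
const toSqlValue = (v) => (typeof v === 'undefined' || v === null ? null : v);
console.log([undefined, null, 'x'].map(toSqlValue)); // [ null, null, 'x' ]
```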
```diff
@@ -1815,8 +1814,338 @@ function prepareStatement({ tableName, record }) {
     args: values
   };
 }
+function prepareUpdateStatement({
+  tableName,
+  updates,
+  keys
+}) {
+  const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+  const schema = storage.TABLE_SCHEMAS[tableName];
+  const updateColumns = Object.keys(updates).map((col) => utils.parseSqlIdentifier(col, "column name"));
+  const updateValues = Object.values(updates).map(transformToSqlValue);
+  const setClause = updateColumns.map((col) => `${col} = ?`).join(", ");
+  const whereClause = prepareWhereClause(keys, schema);
+  return {
+    sql: `UPDATE ${parsedTableName} SET ${setClause}${whereClause.sql}`,
+    args: [...updateValues, ...whereClause.args]
+  };
+}
+function transformToSqlValue(value) {
+  if (typeof value === "undefined" || value === null) {
+    return null;
+  }
+  if (value instanceof Date) {
+    return value.toISOString();
+  }
+  return typeof value === "object" ? JSON.stringify(value) : value;
+}
+function prepareDeleteStatement({ tableName, keys }) {
+  const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+  const whereClause = prepareWhereClause(keys, storage.TABLE_SCHEMAS[tableName]);
+  return {
+    sql: `DELETE FROM ${parsedTableName}${whereClause.sql}`,
+    args: whereClause.args
+  };
+}
+function prepareWhereClause(filters, schema) {
+  const conditions = [];
+  const args = [];
+  for (const [columnName, filterValue] of Object.entries(filters)) {
+    const column = schema[columnName];
+    if (!column) {
+      throw new Error(`Unknown column: ${columnName}`);
+    }
+    const parsedColumn = utils.parseSqlIdentifier(columnName, "column name");
+    const result = buildCondition2(parsedColumn, filterValue);
+    conditions.push(result.condition);
+    args.push(...result.args);
+  }
+  return {
+    sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
+    args
+  };
+}
+function buildCondition2(columnName, filterValue) {
+  if (filterValue === null) {
+    return { condition: `${columnName} IS NULL`, args: [] };
+  }
+  if (typeof filterValue === "object" && filterValue !== null && ("startAt" in filterValue || "endAt" in filterValue)) {
+    return buildDateRangeCondition(columnName, filterValue);
+  }
+  return {
+    condition: `${columnName} = ?`,
+    args: [transformToSqlValue(filterValue)]
+  };
+}
+function buildDateRangeCondition(columnName, range) {
+  const conditions = [];
+  const args = [];
+  if (range.startAt !== void 0) {
+    conditions.push(`${columnName} >= ?`);
+    args.push(transformToSqlValue(range.startAt));
+  }
+  if (range.endAt !== void 0) {
+    conditions.push(`${columnName} <= ?`);
+    args.push(transformToSqlValue(range.endAt));
+  }
+  if (conditions.length === 0) {
+    throw new Error("Date range must specify at least startAt or endAt");
+  }
+  return {
+    condition: conditions.join(" AND "),
+    args
+  };
+}
+function buildDateRangeFilter(dateRange, columnName = "createdAt") {
+  if (!dateRange?.start && !dateRange?.end) {
+    return {};
+  }
+  const filter = {};
+  if (dateRange.start) {
+    filter.startAt = new Date(dateRange.start).toISOString();
+  }
+  if (dateRange.end) {
+    filter.endAt = new Date(dateRange.end).toISOString();
+  }
+  return { [columnName]: filter };
+}
+function transformFromSqlRow({
+  tableName,
+  sqlRow
+}) {
+  const result = {};
+  const jsonColumns = new Set(
+    Object.keys(storage.TABLE_SCHEMAS[tableName]).filter((key) => storage.TABLE_SCHEMAS[tableName][key].type === "jsonb").map((key) => key)
+  );
+  const dateColumns = new Set(
+    Object.keys(storage.TABLE_SCHEMAS[tableName]).filter((key) => storage.TABLE_SCHEMAS[tableName][key].type === "timestamp").map((key) => key)
+  );
+  for (const [key, value] of Object.entries(sqlRow)) {
+    if (value === null || value === void 0) {
+      result[key] = value;
+      continue;
+    }
+    if (dateColumns.has(key) && typeof value === "string") {
+      result[key] = new Date(value);
+      continue;
+    }
+    if (jsonColumns.has(key) && typeof value === "string") {
+      result[key] = storage.safelyParseJSON(value);
+      continue;
+    }
+    result[key] = value;
+  }
+  return result;
+}
 
-// src/storage/domains/
+// src/storage/domains/observability/index.ts
+var ObservabilityLibSQL = class extends storage.ObservabilityStorage {
+  operations;
+  constructor({ operations }) {
+    super();
+    this.operations = operations;
+  }
+  async createAISpan(span) {
+    try {
+      return this.operations.insert({ tableName: storage.TABLE_AI_SPANS, record: span });
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_CREATE_AI_SPAN_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          details: {
+            spanId: span.spanId,
+            traceId: span.traceId,
+            spanType: span.spanType,
+            spanName: span.name
+          }
+        },
+        error$1
+      );
+    }
+  }
+  async getAITrace(traceId) {
+    try {
+      const spans = await this.operations.loadMany({
+        tableName: storage.TABLE_AI_SPANS,
+        whereClause: { sql: " WHERE traceId = ?", args: [traceId] },
+        orderBy: "startedAt DESC"
+      });
+      if (!spans || spans.length === 0) {
+        return null;
+      }
+      return {
+        traceId,
+        spans: spans.map((span) => transformFromSqlRow({ tableName: storage.TABLE_AI_SPANS, sqlRow: span }))
+      };
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_GET_AI_TRACE_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          details: {
+            traceId
+          }
+        },
+        error$1
+      );
+    }
+  }
+  async updateAISpan({
+    spanId,
+    traceId,
+    updates
+  }) {
+    try {
+      await this.operations.update({
+        tableName: storage.TABLE_AI_SPANS,
+        keys: { spanId, traceId },
+        data: { ...updates, updatedAt: (/* @__PURE__ */ new Date()).toISOString() }
+      });
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_UPDATE_AI_SPAN_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          details: {
+            spanId,
+            traceId
+          }
+        },
+        error$1
+      );
+    }
+  }
+  async getAITracesPaginated({
+    filters,
+    pagination
+  }) {
+    const page = pagination?.page ?? 0;
+    const perPage = pagination?.perPage ?? 10;
+    const filtersWithDateRange = {
+      ...filters,
+      ...buildDateRangeFilter(pagination?.dateRange, "startedAt")
+    };
+    const whereClause = prepareWhereClause(filtersWithDateRange, storage.AI_SPAN_SCHEMA);
+    const orderBy = "startedAt DESC";
+    let count = 0;
+    try {
+      count = await this.operations.loadTotalCount({
+        tableName: storage.TABLE_AI_SPANS,
+        whereClause: { sql: whereClause.sql, args: whereClause.args }
+      });
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_GET_AI_TRACES_PAGINATED_COUNT_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER
+        },
+        error$1
+      );
+    }
+    if (count === 0) {
+      return {
+        pagination: {
+          total: 0,
+          page,
+          perPage,
+          hasMore: false
+        },
+        spans: []
+      };
+    }
+    try {
+      const spans = await this.operations.loadMany({
+        tableName: storage.TABLE_AI_SPANS,
+        whereClause,
+        orderBy,
+        offset: page * perPage,
+        limit: perPage
+      });
+      return {
+        pagination: {
+          total: count,
+          page,
+          perPage,
+          hasMore: spans.length === perPage
+        },
+        spans: spans.map((span) => transformFromSqlRow({ tableName: storage.TABLE_AI_SPANS, sqlRow: span }))
+      };
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_GET_AI_TRACES_PAGINATED_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER
+        },
+        error$1
+      );
+    }
+  }
+  async batchCreateAISpans(args) {
+    try {
+      return this.operations.batchInsert({
+        tableName: storage.TABLE_AI_SPANS,
+        records: args.records.map((record) => ({
+          ...record,
+          createdAt: (/* @__PURE__ */ new Date()).toISOString(),
+          updatedAt: (/* @__PURE__ */ new Date()).toISOString()
+        }))
+      });
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_BATCH_CREATE_AI_SPANS_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER
+        },
+        error$1
+      );
+    }
+  }
+  async batchUpdateAISpans(args) {
+    try {
+      return this.operations.batchUpdate({
+        tableName: storage.TABLE_AI_SPANS,
+        updates: args.records.map((record) => ({
+          keys: { spanId: record.spanId, traceId: record.traceId },
+          data: { ...record.updates, updatedAt: (/* @__PURE__ */ new Date()).toISOString() }
+        }))
+      });
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_BATCH_UPDATE_AI_SPANS_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER
+        },
+        error$1
+      );
+    }
+  }
+  async batchDeleteAITraces(args) {
+    try {
+      const keys = args.traceIds.map((traceId) => ({ traceId }));
+      return this.operations.batchDelete({
+        tableName: storage.TABLE_AI_SPANS,
+        keys
+      });
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_BATCH_DELETE_AI_TRACES_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER
+        },
+        error$1
+      );
+    }
+  }
+};
 var StoreOperationsLibSQL = class extends storage.StoreOperations {
   client;
   /**
```
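The new `prepareWhereClause`/`buildCondition2` pair maps plain filter objects onto parameterized WHERE fragments: `null` becomes `IS NULL`, an object carrying `startAt`/`endAt` becomes range comparisons, and anything else becomes an equality check. A standalone sketch of that mapping (schema validation and value coercion omitted; the filter values are illustrative):

```js
function whereFor(filters) {
  const conditions = [];
  const args = [];
  for (const [col, v] of Object.entries(filters)) {
    if (v === null) {
      conditions.push(`${col} IS NULL`);
    } else if (typeof v === 'object' && ('startAt' in v || 'endAt' in v)) {
      if (v.startAt !== undefined) { conditions.push(`${col} >= ?`); args.push(v.startAt); }
      if (v.endAt !== undefined) { conditions.push(`${col} <= ?`); args.push(v.endAt); }
    } else {
      conditions.push(`${col} = ?`);
      args.push(v);
    }
  }
  return { sql: conditions.length ? ` WHERE ${conditions.join(' AND ')}` : '', args };
}

console.log(whereFor({ traceId: 't-1', parentSpanId: null, startedAt: { startAt: '2024-01-01T00:00:00.000Z' } }));
// { sql: ' WHERE traceId = ? AND parentSpanId IS NULL AND startedAt >= ?',
//   args: [ 't-1', '2024-01-01T00:00:00.000Z' ] }
```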
```diff
@@ -1864,6 +2193,13 @@ var StoreOperationsLibSQL = class extends storage.StoreOperations {
       )`;
       return stmnt;
     }
+    if (tableName === storage.TABLE_AI_SPANS) {
+      const stmnt = `CREATE TABLE IF NOT EXISTS ${parsedTableName} (
+        ${columns.join(",\n")},
+        PRIMARY KEY (traceId, spanId)
+      )`;
+      return stmnt;
+    }
     return `CREATE TABLE IF NOT EXISTS ${parsedTableName} (${columns.join(", ")})`;
   }
   async createTable({
```
```diff
@@ -1943,6 +2279,64 @@ var StoreOperationsLibSQL = class extends storage.StoreOperations {
     );
     return parsed;
   }
+  async loadMany({
+    tableName,
+    whereClause,
+    orderBy,
+    offset,
+    limit,
+    args
+  }) {
+    const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+    let statement = `SELECT * FROM ${parsedTableName}`;
+    if (whereClause?.sql) {
+      statement += `${whereClause.sql}`;
+    }
+    if (orderBy) {
+      statement += ` ORDER BY ${orderBy}`;
+    }
+    if (limit) {
+      statement += ` LIMIT ${limit}`;
+    }
+    if (offset) {
+      statement += ` OFFSET ${offset}`;
+    }
+    const result = await this.client.execute({
+      sql: statement,
+      args: [...whereClause?.args ?? [], ...args ?? []]
+    });
+    return result.rows;
+  }
+  async loadTotalCount({
+    tableName,
+    whereClause
+  }) {
+    const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+    const statement = `SELECT COUNT(*) as count FROM ${parsedTableName} ${whereClause ? `${whereClause.sql}` : ""}`;
+    const result = await this.client.execute({
+      sql: statement,
+      args: whereClause?.args ?? []
+    });
+    if (!result.rows || result.rows.length === 0) {
+      return 0;
+    }
+    return result.rows[0]?.count ?? 0;
+  }
+  update(args) {
+    const executeWriteOperationWithRetry = createExecuteWriteOperationWithRetry({
+      logger: this.logger,
+      maxRetries: this.maxRetries,
+      initialBackoffMs: this.initialBackoffMs
+    });
+    return executeWriteOperationWithRetry(() => this.executeUpdate(args), `update table ${args.tableName}`);
+  }
+  async executeUpdate({
+    tableName,
+    keys,
+    data
+  }) {
+    await this.client.execute(prepareUpdateStatement({ tableName, updates: data, keys }));
+  }
   async doBatchInsert({
     tableName,
     records
```
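`loadMany` assembles its SELECT from optional pieces in a fixed order (WHERE, ORDER BY, LIMIT, OFFSET); note that a numeric `offset` of 0 is falsy, so the first page simply omits the OFFSET clause. A sketch of the assembly for page 2 of a 10-per-page query (the table name is illustrative):

```js
const whereClause = { sql: ' WHERE traceId = ?', args: ['t-1'] };
let statement = 'SELECT * FROM mastra_ai_spans';
if (whereClause.sql) statement += whereClause.sql;
statement += ' ORDER BY startedAt DESC';
statement += ' LIMIT 10';
statement += ' OFFSET 20';
console.log(statement);
// SELECT * FROM mastra_ai_spans WHERE traceId = ? ORDER BY startedAt DESC LIMIT 10 OFFSET 20
```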
```diff
@@ -1974,6 +2368,91 @@ var StoreOperationsLibSQL = class extends storage.StoreOperations {
       );
     });
   }
+  /**
+   * Public batch update method with retry logic
+   */
+  batchUpdate(args) {
+    const executeWriteOperationWithRetry = createExecuteWriteOperationWithRetry({
+      logger: this.logger,
+      maxRetries: this.maxRetries,
+      initialBackoffMs: this.initialBackoffMs
+    });
+    return executeWriteOperationWithRetry(
+      () => this.executeBatchUpdate(args),
+      `batch update in table ${args.tableName}`
+    ).catch((error$1) => {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_BATCH_UPDATE_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY,
+          details: {
+            tableName: args.tableName
+          }
+        },
+        error$1
+      );
+    });
+  }
+  /**
+   * Updates multiple records in batch. Each record can be updated based on single or composite keys.
+   */
+  async executeBatchUpdate({
+    tableName,
+    updates
+  }) {
+    if (updates.length === 0) return;
+    const batchStatements = updates.map(
+      ({ keys, data }) => prepareUpdateStatement({
+        tableName,
+        updates: data,
+        keys
+      })
+    );
+    await this.client.batch(batchStatements, "write");
+  }
+  /**
+   * Public batch delete method with retry logic
+   */
+  batchDelete({ tableName, keys }) {
+    const executeWriteOperationWithRetry = createExecuteWriteOperationWithRetry({
+      logger: this.logger,
+      maxRetries: this.maxRetries,
+      initialBackoffMs: this.initialBackoffMs
+    });
+    return executeWriteOperationWithRetry(
+      () => this.executeBatchDelete({ tableName, keys }),
+      `batch delete from table ${tableName}`
+    ).catch((error$1) => {
+      throw new error.MastraError(
+        {
+          id: "LIBSQL_STORE_BATCH_DELETE_FAILED",
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY,
+          details: {
+            tableName
+          }
+        },
+        error$1
+      );
+    });
+  }
+  /**
+   * Deletes multiple records in batch. Each record can be deleted based on single or composite keys.
+   */
+  async executeBatchDelete({
+    tableName,
+    keys
+  }) {
+    if (keys.length === 0) return;
+    const batchStatements = keys.map(
+      (keyObj) => prepareDeleteStatement({
+        tableName,
+        keys: keyObj
+      })
+    );
+    await this.client.batch(batchStatements, "write");
+  }
   /**
    * Alters table schema to add columns if they don't exist
    * @param tableName Name of the table
```
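Both batch paths translate each entry into one parameterized statement and submit the whole list through a single `client.batch(statements, "write")` call. The key objects may name single or composite keys; their shapes, sketched with illustrative values:

```js
// batchDelete: one DELETE per key object.
const deleteKeys = [
  { traceId: 't-1' },                // DELETE ... WHERE traceId = ?
  { spanId: 's-9', traceId: 't-2' }  // DELETE ... WHERE spanId = ? AND traceId = ?
];
// batchUpdate: one UPDATE per { keys, data } pair.
const updates = [
  { keys: { spanId: 's-1', traceId: 't-1' }, data: { endedAt: new Date() } }
];
```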
```diff
@@ -2393,10 +2872,165 @@ function parseWorkflowRun(row) {
 var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
   operations;
   client;
-  constructor({ operations, client }) {
+  maxRetries;
+  initialBackoffMs;
+  constructor({
+    operations,
+    client,
+    maxRetries = 5,
+    initialBackoffMs = 500
+  }) {
     super();
     this.operations = operations;
     this.client = client;
+    this.maxRetries = maxRetries;
+    this.initialBackoffMs = initialBackoffMs;
+    this.setupPragmaSettings().catch(
+      (err) => this.logger.warn("LibSQL Workflows: Failed to setup PRAGMA settings.", err)
+    );
+  }
+  async setupPragmaSettings() {
+    try {
+      await this.client.execute("PRAGMA busy_timeout = 10000;");
+      this.logger.debug("LibSQL Workflows: PRAGMA busy_timeout=10000 set.");
+      try {
+        await this.client.execute("PRAGMA journal_mode = WAL;");
+        this.logger.debug("LibSQL Workflows: PRAGMA journal_mode=WAL set.");
+      } catch {
+        this.logger.debug("LibSQL Workflows: WAL mode not supported, using default journal mode.");
+      }
+      try {
+        await this.client.execute("PRAGMA synchronous = NORMAL;");
+        this.logger.debug("LibSQL Workflows: PRAGMA synchronous=NORMAL set.");
+      } catch {
+        this.logger.debug("LibSQL Workflows: Failed to set synchronous mode.");
+      }
+    } catch (err) {
+      this.logger.warn("LibSQL Workflows: Failed to set PRAGMA settings.", err);
+    }
+  }
+  async executeWithRetry(operation) {
+    let attempts = 0;
+    let backoff = this.initialBackoffMs;
+    while (attempts < this.maxRetries) {
+      try {
+        return await operation();
+      } catch (error) {
+        this.logger.debug("LibSQL Workflows: Error caught in retry loop", {
+          errorType: error.constructor.name,
+          errorCode: error.code,
+          errorMessage: error.message,
+          attempts,
+          maxRetries: this.maxRetries
+        });
+        const isLockError = error.code === "SQLITE_BUSY" || error.code === "SQLITE_LOCKED" || error.message?.toLowerCase().includes("database is locked") || error.message?.toLowerCase().includes("database table is locked") || error.message?.toLowerCase().includes("table is locked") || error.constructor.name === "SqliteError" && error.message?.toLowerCase().includes("locked");
+        if (isLockError) {
+          attempts++;
+          if (attempts >= this.maxRetries) {
+            this.logger.error(
+              `LibSQL Workflows: Operation failed after ${this.maxRetries} attempts due to database lock: ${error.message}`,
+              { error, attempts, maxRetries: this.maxRetries }
+            );
+            throw error;
+          }
+          this.logger.warn(
+            `LibSQL Workflows: Attempt ${attempts} failed due to database lock. Retrying in ${backoff}ms...`,
+            { errorMessage: error.message, attempts, backoff, maxRetries: this.maxRetries }
+          );
+          await new Promise((resolve) => setTimeout(resolve, backoff));
+          backoff *= 2;
+        } else {
+          this.logger.error("LibSQL Workflows: Non-lock error occurred, not retrying", { error });
+          throw error;
+        }
+      }
+    }
+    throw new Error("LibSQL Workflows: Max retries reached, but no error was re-thrown from the loop.");
+  }
+  async updateWorkflowResults({
+    workflowName,
+    runId,
+    stepId,
+    result,
+    runtimeContext
+  }) {
+    return this.executeWithRetry(async () => {
+      const tx = await this.client.transaction("write");
+      try {
+        const existingSnapshotResult = await tx.execute({
+          sql: `SELECT snapshot FROM ${storage.TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
+          args: [workflowName, runId]
+        });
+        let snapshot;
+        if (!existingSnapshotResult.rows?.[0]) {
+          snapshot = {
+            context: {},
+            activePaths: [],
+            timestamp: Date.now(),
+            suspendedPaths: {},
+            serializedStepGraph: [],
+            value: {},
+            waitingPaths: {},
+            status: "pending",
+            runId,
+            runtimeContext: {}
+          };
+        } else {
+          const existingSnapshot = existingSnapshotResult.rows[0].snapshot;
+          snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
+        }
+        snapshot.context[stepId] = result;
+        snapshot.runtimeContext = { ...snapshot.runtimeContext, ...runtimeContext };
+        await tx.execute({
+          sql: `UPDATE ${storage.TABLE_WORKFLOW_SNAPSHOT} SET snapshot = ? WHERE workflow_name = ? AND run_id = ?`,
+          args: [JSON.stringify(snapshot), workflowName, runId]
+        });
+        await tx.commit();
+        return snapshot.context;
+      } catch (error) {
+        if (!tx.closed) {
+          await tx.rollback();
+        }
+        throw error;
+      }
+    });
+  }
+  async updateWorkflowState({
+    workflowName,
+    runId,
+    opts
+  }) {
+    return this.executeWithRetry(async () => {
+      const tx = await this.client.transaction("write");
+      try {
+        const existingSnapshotResult = await tx.execute({
+          sql: `SELECT snapshot FROM ${storage.TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
+          args: [workflowName, runId]
+        });
+        if (!existingSnapshotResult.rows?.[0]) {
+          await tx.rollback();
+          return void 0;
+        }
+        const existingSnapshot = existingSnapshotResult.rows[0].snapshot;
+        const snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
+        if (!snapshot || !snapshot?.context) {
+          await tx.rollback();
+          throw new Error(`Snapshot not found for runId ${runId}`);
+        }
+        const updatedSnapshot = { ...snapshot, ...opts };
+        await tx.execute({
+          sql: `UPDATE ${storage.TABLE_WORKFLOW_SNAPSHOT} SET snapshot = ? WHERE workflow_name = ? AND run_id = ?`,
+          args: [JSON.stringify(updatedSnapshot), workflowName, runId]
+        });
+        await tx.commit();
+        return updatedSnapshot;
+      } catch (error) {
+        if (!tx.closed) {
+          await tx.rollback();
+        }
+        throw error;
+      }
+    });
   }
   async persistWorkflowSnapshot({
     workflowName,
```
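`executeWithRetry` retries only lock errors (`SQLITE_BUSY`, `SQLITE_LOCKED`, or messages containing "locked") and doubles the backoff after each failed attempt. With the defaults (`maxRetries = 5`, `initialBackoffMs = 500`), the wait schedule before attempts 2 through 5 works out as follows:

```js
let backoff = 500;
const delays = [];
for (let attempt = 1; attempt < 5; attempt++) {
  delays.push(backoff);
  backoff *= 2;
}
console.log(delays); // [ 500, 1000, 2000, 4000 ] (up to 7.5s of waiting in total)
```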
```diff
@@ -2557,13 +3191,15 @@ var LibSQLStore = class extends storage.MastraStorage {
     const workflows = new WorkflowsLibSQL({ client: this.client, operations });
     const memory = new MemoryLibSQL({ client: this.client, operations });
     const legacyEvals = new LegacyEvalsLibSQL({ client: this.client });
+    const observability = new ObservabilityLibSQL({ operations });
     this.stores = {
       operations,
       scores,
       traces,
       workflows,
       memory,
-      legacyEvals
+      legacyEvals,
+      observability
     };
   }
   get supports() {
```
```diff
@@ -2572,7 +3208,8 @@ var LibSQLStore = class extends storage.MastraStorage {
       resourceWorkingMemory: true,
       hasColumn: true,
       createTable: true,
-      deleteMessages: true
+      deleteMessages: true,
+      aiTracing: true
     };
   }
   async createTable({
```
```diff
@@ -2714,6 +3351,22 @@ var LibSQLStore = class extends storage.MastraStorage {
   /**
    * WORKFLOWS
    */
+  async updateWorkflowResults({
+    workflowName,
+    runId,
+    stepId,
+    result,
+    runtimeContext
+  }) {
+    return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, runtimeContext });
+  }
+  async updateWorkflowState({
+    workflowName,
+    runId,
+    opts
+  }) {
+    return this.stores.workflows.updateWorkflowState({ workflowName, runId, opts });
+  }
   async persistWorkflowSnapshot({
     workflowName,
     runId,
```
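`LibSQLStore` now forwards both calls to the workflows domain store. A hypothetical invocation, with argument shapes inferred from the `updateWorkflowResults` implementation shown earlier (`store` is assumed to be an initialized `LibSQLStore`, called from an async context):

```js
// Merges the step result into the stored snapshot: it lands at
// snapshot.context[stepId], and runtimeContext is shallow-merged.
const stepResults = await store.updateWorkflowResults({
  workflowName: 'my-workflow',
  runId: 'run-123',
  stepId: 'step-1',
  result: { status: 'success', output: { ok: true } },
  runtimeContext: { userId: 'u-1' }
});
```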
```diff
@@ -2756,6 +3409,24 @@ var LibSQLStore = class extends storage.MastraStorage {
   }) {
     return this.stores.memory.updateResource({ resourceId, workingMemory, metadata });
   }
+  async createAISpan(span) {
+    return this.stores.observability.createAISpan(span);
+  }
+  async updateAISpan(params) {
+    return this.stores.observability.updateAISpan(params);
+  }
+  async getAITrace(traceId) {
+    return this.stores.observability.getAITrace(traceId);
+  }
+  async getAITracesPaginated(args) {
+    return this.stores.observability.getAITracesPaginated(args);
+  }
+  async batchCreateAISpans(args) {
+    return this.stores.observability.batchCreateAISpans(args);
+  }
+  async batchUpdateAISpans(args) {
+    return this.stores.observability.batchUpdateAISpans(args);
+  }
 };
 
 // src/vector/prompt.ts
```
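Taken together, 0.13.5 wires AI-tracing persistence through `LibSQLStore` (note the new `aiTracing: true` capability flag above). A hypothetical end-to-end sketch; the constructor config follows the package's documented `{ url }` shape, while the method argument shapes are inferred from this diff:

```js
const { LibSQLStore } = require('@mastra/libsql');

async function main() {
  const store = new LibSQLStore({ url: 'file:mastra.db' });

  // One trace with all of its spans (newest first), or null if unknown.
  const trace = await store.getAITrace('some-trace-id');

  // Paginated listing; dateRange filters on the startedAt column.
  const { pagination, spans } = await store.getAITracesPaginated({
    filters: { name: 'my-agent-run' }, // illustrative filter column
    pagination: { page: 0, perPage: 10, dateRange: { start: new Date('2024-01-01') } }
  });
  console.log(trace, pagination.total, spans.length);
}

main().catch(console.error);
```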