@mastra/libsql 1.0.0-beta.1 → 1.0.0-beta.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +418 -0
- package/README.md +3 -0
- package/dist/index.cjs +2166 -1582
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +2161 -1582
- package/dist/index.js.map +1 -1
- package/dist/storage/db/index.d.ts +264 -0
- package/dist/storage/db/index.d.ts.map +1 -0
- package/dist/storage/{domains → db}/utils.d.ts +5 -12
- package/dist/storage/db/utils.d.ts.map +1 -0
- package/dist/storage/domains/agents/index.d.ts +23 -0
- package/dist/storage/domains/agents/index.d.ts.map +1 -0
- package/dist/storage/domains/memory/index.d.ts +5 -8
- package/dist/storage/domains/memory/index.d.ts.map +1 -1
- package/dist/storage/domains/observability/index.d.ts +19 -27
- package/dist/storage/domains/observability/index.d.ts.map +1 -1
- package/dist/storage/domains/scores/index.d.ts +16 -27
- package/dist/storage/domains/scores/index.d.ts.map +1 -1
- package/dist/storage/domains/workflows/index.d.ts +15 -22
- package/dist/storage/domains/workflows/index.d.ts.map +1 -1
- package/dist/storage/index.d.ts +56 -196
- package/dist/storage/index.d.ts.map +1 -1
- package/dist/vector/index.d.ts +4 -2
- package/dist/vector/index.d.ts.map +1 -1
- package/dist/vector/sql-builder.d.ts.map +1 -1
- package/package.json +6 -7
- package/dist/storage/domains/operations/index.d.ts +0 -110
- package/dist/storage/domains/operations/index.d.ts.map +0 -1
- package/dist/storage/domains/utils.d.ts.map +0 -1
package/dist/index.cjs
CHANGED
@@ -2,10 +2,11 @@
 
 var client = require('@libsql/client');
 var error = require('@mastra/core/error');
+var storage = require('@mastra/core/storage');
 var utils = require('@mastra/core/utils');
 var vector = require('@mastra/core/vector');
 var filter = require('@mastra/core/vector/filter');
-var
+var base = require('@mastra/core/base');
 var agent = require('@mastra/core/agent');
 var evals = require('@mastra/core/evals');
 

@@ -92,12 +93,20 @@ var createBasicOperator = (symbol) => {
 };
 };
 var createNumericOperator = (symbol) => {
-return (key) => {
+return (key, value) => {
 const jsonPath = getJsonPath(key);
-
-
-
-
+const isNumeric = typeof value === "number" || typeof value === "string" && !isNaN(Number(value)) && value.trim() !== "";
+if (isNumeric) {
+return {
+sql: `CAST(json_extract(metadata, ${jsonPath}) AS NUMERIC) ${symbol} ?`,
+needsValue: true
+};
+} else {
+return {
+sql: `CAST(json_extract(metadata, ${jsonPath}) AS TEXT) ${symbol} ?`,
+needsValue: true
+};
+}
 };
 };
 var validateJsonArray = (key) => {

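The hunk above makes the numeric comparison operators value-aware: numeric filter values are compared with a NUMERIC cast, everything else falls back to a TEXT cast. A minimal standalone sketch of that behaviour (illustrative only, not code from the package; getJsonPath is replaced here by an assumed inline JSON path):

// Illustrative sketch of the new value-aware cast selection.
const createNumericOperator = (symbol) => (key, value) => {
  const jsonPath = `'$."${key}"'`; // assumed stand-in for getJsonPath(key)
  const isNumeric =
    typeof value === "number" ||
    (typeof value === "string" && !isNaN(Number(value)) && value.trim() !== "");
  const cast = isNumeric ? "NUMERIC" : "TEXT";
  return { sql: `CAST(json_extract(metadata, ${jsonPath}) AS ${cast}) ${symbol} ?`, needsValue: true };
};

// A numeric value now compares numerically rather than lexicographically:
console.log(createNumericOperator(">")("price", 100).sql);
// CAST(json_extract(metadata, '$."price"') AS NUMERIC) > ?
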
@@ -575,7 +584,7 @@ var LibSQLVector = class extends vector.MastraVector {
 } catch (error$1) {
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "QUERY", "INVALID_ARGS"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.USER
 },

@@ -617,7 +626,7 @@ var LibSQLVector = class extends vector.MastraVector {
 } catch (error$1) {
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "QUERY", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY
 },

@@ -631,7 +640,7 @@ var LibSQLVector = class extends vector.MastraVector {
 } catch (error$1) {
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "UPSERT", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY
 },

@@ -685,7 +694,7 @@ var LibSQLVector = class extends vector.MastraVector {
 } catch (error$1) {
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "CREATE_INDEX", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY,
 details: { indexName: args.indexName, dimension: args.dimension }

@@ -724,7 +733,7 @@ var LibSQLVector = class extends vector.MastraVector {
 } catch (error$1) {
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "DELETE_INDEX", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY,
 details: { indexName: args.indexName }

@@ -755,7 +764,7 @@ var LibSQLVector = class extends vector.MastraVector {
 } catch (error$1) {
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "LIST_INDEXES", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY
 },

@@ -803,7 +812,7 @@ var LibSQLVector = class extends vector.MastraVector {
 } catch (e) {
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "DESCRIBE_INDEX", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY,
 details: { indexName }

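The hunks above replace hand-written error-id string literals with storage.createVectorErrorId("LIBSQL", <operation>, <status>) from @mastra/core/storage. A hedged sketch of what that looks like from the caller's side (the query arguments and the exact generated id format are assumptions, not defined in this diff):

// Illustrative only; assumes a LibSQLVector instance `vectorStore` and that
// the thrown MastraError exposes the generated id.
const embedding = new Array(1536).fill(0);
try {
  await vectorStore.query({ indexName: "docs", queryVector: embedding, topK: 5 });
} catch (err) {
  // err.id is now produced by createVectorErrorId("LIBSQL", "QUERY", "FAILED")
  // rather than a hard-coded string literal.
  console.error(err.id, err.domain, err.category);
}
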
@@ -826,8 +835,27 @@ var LibSQLVector = class extends vector.MastraVector {
 updateVector(args) {
 return this.executeWriteOperationWithRetry(() => this.doUpdateVector(args));
 }
-async doUpdateVector(
+async doUpdateVector(params) {
+const { indexName, update } = params;
 const parsedIndexName = utils.parseSqlIdentifier(indexName, "index name");
+if ("id" in params && params.id && "filter" in params && params.filter) {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "UPDATE_VECTOR", "MUTUALLY_EXCLUSIVE"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName },
+text: "id and filter are mutually exclusive - provide only one"
+});
+}
+if (!update.vector && !update.metadata) {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "UPDATE_VECTOR", "NO_PAYLOAD"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName },
+text: "No updates provided"
+});
+}
 const updates = [];
 const args = [];
 if (update.vector) {

@@ -839,32 +867,81 @@ var LibSQLVector = class extends vector.MastraVector {
 args.push(JSON.stringify(update.metadata));
 }
 if (updates.length === 0) {
+return;
+}
+let whereClause;
+let whereValues;
+if ("id" in params && params.id) {
+whereClause = "vector_id = ?";
+whereValues = [params.id];
+} else if ("filter" in params && params.filter) {
+const filter = params.filter;
+if (!filter || Object.keys(filter).length === 0) {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "UPDATE_VECTOR", "EMPTY_FILTER"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName },
+text: "Cannot update with empty filter"
+});
+}
+const translatedFilter = this.transformFilter(filter);
+const { sql: filterSql, values: filterValues } = buildFilterQuery(translatedFilter);
+if (!filterSql || filterSql.trim() === "") {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "UPDATE_VECTOR", "INVALID_FILTER"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName },
+text: "Filter produced empty WHERE clause"
+});
+}
+const normalizedCondition = filterSql.replace(/^\s*WHERE\s+/i, "").trim().toLowerCase();
+const matchAllPatterns = ["true", "1 = 1", "1=1"];
+if (matchAllPatterns.includes(normalizedCondition)) {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "UPDATE_VECTOR", "MATCH_ALL_FILTER"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName, filterSql: normalizedCondition },
+text: "Filter matches all vectors. Provide a specific filter to update targeted vectors."
+});
+}
+whereClause = filterSql.replace(/^WHERE\s+/i, "");
+whereValues = filterValues;
+} else {
 throw new error.MastraError({
-id: "
+id: storage.createVectorErrorId("LIBSQL", "UPDATE_VECTOR", "NO_TARGET"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.USER,
-details: { indexName
-text: "
+details: { indexName },
+text: "Either id or filter must be provided"
 });
 }
-args.push(id);
 const query = `
-
-
-
-
+UPDATE ${parsedIndexName}
+SET ${updates.join(", ")}
+WHERE ${whereClause};
+`;
 try {
 await this.turso.execute({
 sql: query,
-args
+args: [...args, ...whereValues]
 });
 } catch (error$1) {
+const errorDetails = { indexName };
+if ("id" in params && params.id) {
+errorDetails.id = params.id;
+}
+if ("filter" in params && params.filter) {
+errorDetails.filter = JSON.stringify(params.filter);
+}
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "UPDATE_VECTOR", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY,
-details:
+details: errorDetails
 },
 error$1
 );

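For reference, a sketch of calling the reworked updateVector shown above; the call shapes mirror the parameters visible in this diff, and store is an assumed LibSQLVector instance:

// Update a single vector's metadata by id.
await store.updateVector({
  indexName: "docs",
  id: "vec_123",
  update: { metadata: { reviewed: true } },
});

// Or target vectors by metadata filter; id and filter are now mutually
// exclusive, and empty or match-all filters are rejected with USER errors.
await store.updateVector({
  indexName: "docs",
  filter: { topic: "billing" },
  update: { metadata: { archived: true } },
});
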
@@ -883,10 +960,13 @@ var LibSQLVector = class extends vector.MastraVector {
 } catch (error$1) {
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "DELETE_VECTOR", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY,
-details: {
+details: {
+indexName: args.indexName,
+...args.id && { id: args.id }
+}
 },
 error$1
 );

@@ -899,13 +979,107 @@ var LibSQLVector = class extends vector.MastraVector {
 args: [id]
 });
 }
+deleteVectors(args) {
+return this.executeWriteOperationWithRetry(() => this.doDeleteVectors(args));
+}
+async doDeleteVectors({ indexName, filter, ids }) {
+const parsedIndexName = utils.parseSqlIdentifier(indexName, "index name");
+if (!filter && !ids) {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "DELETE_VECTORS", "NO_TARGET"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName },
+text: "Either filter or ids must be provided"
+});
+}
+if (filter && ids) {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "DELETE_VECTORS", "MUTUALLY_EXCLUSIVE"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName },
+text: "Cannot provide both filter and ids - they are mutually exclusive"
+});
+}
+let query;
+let values;
+if (ids) {
+if (ids.length === 0) {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "DELETE_VECTORS", "EMPTY_IDS"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName },
+text: "Cannot delete with empty ids array"
+});
+}
+const placeholders = ids.map(() => "?").join(", ");
+query = `DELETE FROM ${parsedIndexName} WHERE vector_id IN (${placeholders})`;
+values = ids;
+} else {
+if (!filter || Object.keys(filter).length === 0) {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "DELETE_VECTORS", "EMPTY_FILTER"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName },
+text: "Cannot delete with empty filter. Use deleteIndex to delete all vectors."
+});
+}
+const translatedFilter = this.transformFilter(filter);
+const { sql: filterSql, values: filterValues } = buildFilterQuery(translatedFilter);
+if (!filterSql || filterSql.trim() === "") {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "DELETE_VECTORS", "INVALID_FILTER"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName },
+text: "Filter produced empty WHERE clause"
+});
+}
+const normalizedCondition = filterSql.replace(/^\s*WHERE\s+/i, "").trim().toLowerCase();
+const matchAllPatterns = ["true", "1 = 1", "1=1"];
+if (matchAllPatterns.includes(normalizedCondition)) {
+throw new error.MastraError({
+id: storage.createVectorErrorId("LIBSQL", "DELETE_VECTORS", "MATCH_ALL_FILTER"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.USER,
+details: { indexName, filterSql: normalizedCondition },
+text: "Filter matches all vectors. Use deleteIndex to delete all vectors from an index."
+});
+}
+query = `DELETE FROM ${parsedIndexName} ${filterSql}`;
+values = filterValues;
+}
+try {
+await this.turso.execute({
+sql: query,
+args: values
+});
+} catch (error$1) {
+throw new error.MastraError(
+{
+id: storage.createVectorErrorId("LIBSQL", "DELETE_VECTORS", "FAILED"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.THIRD_PARTY,
+details: {
+indexName,
+...filter && { filter: JSON.stringify(filter) },
+...ids && { idsCount: ids.length }
+}
+},
+error$1
+);
+}
+}
 truncateIndex(args) {
 try {
 return this.executeWriteOperationWithRetry(() => this._doTruncateIndex(args));
 } catch (error$1) {
 throw new error.MastraError(
 {
-id: "
+id: storage.createVectorErrorId("LIBSQL", "TRUNCATE_INDEX", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY,
 details: { indexName: args.indexName }

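A short sketch of the new deleteVectors operation added above (store is again an assumed LibSQLVector instance); either ids or filter must be supplied, never both, and match-all filters are rejected so an index cannot be emptied by accident:

// Delete specific vectors by id.
await store.deleteVectors({ indexName: "docs", ids: ["vec_1", "vec_2"] });

// Or delete by metadata filter.
await store.deleteVectors({ indexName: "docs", filter: { source: "import-2023" } });

// To remove every vector in an index, use truncateIndex / deleteIndex instead.
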
@@ -921,1512 +1095,2134 @@ var LibSQLVector = class extends vector.MastraVector {
 });
 }
 };
-
-
-
-
-
-
-
-
-
-let
-
-
-
+function isLockError(error) {
+return error.code === "SQLITE_BUSY" || error.code === "SQLITE_LOCKED" || error.message?.toLowerCase().includes("database is locked") || error.message?.toLowerCase().includes("database table is locked") || error.message?.toLowerCase().includes("table is locked") || error.constructor.name === "SqliteError" && error.message?.toLowerCase().includes("locked");
+}
+function createExecuteWriteOperationWithRetry({
+logger,
+maxRetries,
+initialBackoffMs
+}) {
+return async function executeWriteOperationWithRetry(operationFn, operationDescription) {
+let attempts = 0;
+let backoff = initialBackoffMs;
+while (attempts < maxRetries) {
+try {
+return await operationFn();
+} catch (error) {
+logger.debug(`LibSQLStore: Error caught in retry loop for ${operationDescription}`, {
+errorType: error.constructor.name,
+errorCode: error.code,
+errorMessage: error.message,
+attempts,
+maxRetries
+});
+if (isLockError(error)) {
+attempts++;
+if (attempts >= maxRetries) {
+logger.error(
+`LibSQLStore: Operation failed after ${maxRetries} attempts due to database lock: ${error.message}`,
+{ error, attempts, maxRetries }
+);
+throw error;
+}
+logger.warn(
+`LibSQLStore: Attempt ${attempts} failed due to database lock during ${operationDescription}. Retrying in ${backoff}ms...`,
+{ errorMessage: error.message, attempts, backoff, maxRetries }
+);
+await new Promise((resolve) => setTimeout(resolve, backoff));
+backoff *= 2;
+} else {
+logger.error(`LibSQLStore: Non-lock error during ${operationDescription}, not retrying`, { error });
+throw error;
+}
+}
 }
-
-
-
-
-
-
-
-
-
-return result;
-}
-async _getIncludedMessages({
-threadId,
-include
-}) {
-if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
-if (!include) return null;
-const unionQueries = [];
-const params = [];
-for (const inc of include) {
-const { id, withPreviousMessages = 0, withNextMessages = 0 } = inc;
-const searchId = inc.threadId || threadId;
-unionQueries.push(
-`
-SELECT * FROM (
-WITH numbered_messages AS (
-SELECT
-id, content, role, type, "createdAt", thread_id, "resourceId",
-ROW_NUMBER() OVER (ORDER BY "createdAt" ASC) as row_num
-FROM "${storage.TABLE_MESSAGES}"
-WHERE thread_id = ?
-),
-target_positions AS (
-SELECT row_num as target_pos
-FROM numbered_messages
-WHERE id = ?
-)
-SELECT DISTINCT m.*
-FROM numbered_messages m
-CROSS JOIN target_positions t
-WHERE m.row_num BETWEEN (t.target_pos - ?) AND (t.target_pos + ?)
-)
-`
-// Keep ASC for final sorting after fetching context
-);
-params.push(searchId, id, withPreviousMessages, withNextMessages);
+throw new Error(`LibSQLStore: Unexpected exit from retry loop for ${operationDescription}`);
+};
+}
+function prepareStatement({ tableName, record }) {
+const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
+const values = Object.values(record).map((v) => {
+if (typeof v === `undefined` || v === null) {
+return null;
 }
-
-
-const includedRows = includedResult.rows?.map((row) => this.parseRow(row));
-const seen = /* @__PURE__ */ new Set();
-const dedupedRows = includedRows.filter((row) => {
-if (seen.has(row.id)) return false;
-seen.add(row.id);
-return true;
-});
-return dedupedRows;
-}
-async listMessagesById({ messageIds }) {
-if (messageIds.length === 0) return { messages: [] };
-try {
-const sql = `
-SELECT
-id,
-content,
-role,
-type,
-"createdAt",
-thread_id,
-"resourceId"
-FROM "${storage.TABLE_MESSAGES}"
-WHERE id IN (${messageIds.map(() => "?").join(", ")})
-ORDER BY "createdAt" DESC
-`;
-const result = await this.client.execute({ sql, args: messageIds });
-if (!result.rows) return { messages: [] };
-const list = new agent.MessageList().add(result.rows.map(this.parseRow), "memory");
-return { messages: list.get.all.db() };
-} catch (error$1) {
-throw new error.MastraError(
-{
-id: "LIBSQL_STORE_LIST_MESSAGES_BY_ID_FAILED",
-domain: error.ErrorDomain.STORAGE,
-category: error.ErrorCategory.THIRD_PARTY,
-details: { messageIds: JSON.stringify(messageIds) }
-},
-error$1
-);
+if (v instanceof Date) {
+return v.toISOString();
 }
+return typeof v === "object" ? JSON.stringify(v) : v;
+});
+const placeholders = values.map(() => "?").join(", ");
+return {
+sql: `INSERT OR REPLACE INTO ${parsedTableName} (${columns.join(", ")}) VALUES (${placeholders})`,
+args: values
+};
+}
+function prepareUpdateStatement({
+tableName,
+updates,
+keys
+}) {
+const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+const schema = storage.TABLE_SCHEMAS[tableName];
+const updateColumns = Object.keys(updates).map((col) => utils.parseSqlIdentifier(col, "column name"));
+const updateValues = Object.values(updates).map(transformToSqlValue);
+const setClause = updateColumns.map((col) => `${col} = ?`).join(", ");
+const whereClause = prepareWhereClause(keys, schema);
+return {
+sql: `UPDATE ${parsedTableName} SET ${setClause}${whereClause.sql}`,
+args: [...updateValues, ...whereClause.args]
+};
+}
+function transformToSqlValue(value) {
+if (typeof value === "undefined" || value === null) {
+return null;
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-},
-new Error("page must be >= 0")
-);
+if (value instanceof Date) {
+return value.toISOString();
+}
+return typeof value === "object" ? JSON.stringify(value) : value;
+}
+function prepareDeleteStatement({ tableName, keys }) {
+const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+const whereClause = prepareWhereClause(keys, storage.TABLE_SCHEMAS[tableName]);
+return {
+sql: `DELETE FROM ${parsedTableName}${whereClause.sql}`,
+args: whereClause.args
+};
+}
+function prepareWhereClause(filters, schema) {
+const conditions = [];
+const args = [];
+for (const [columnName, filterValue] of Object.entries(filters)) {
+const column = schema[columnName];
+if (!column) {
+throw new Error(`Unknown column: ${columnName}`);
 }
-const
-const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
-
+const parsedColumn = utils.parseSqlIdentifier(columnName, "column name");
+const result = buildCondition2(parsedColumn, filterValue);
+conditions.push(result.condition);
+args.push(...result.args);
+}
+return {
+sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
+args
+};
+}
+function buildCondition2(columnName, filterValue) {
+if (filterValue === null) {
+return { condition: `${columnName} IS NULL`, args: [] };
+}
+if (typeof filterValue === "object" && filterValue !== null && ("startAt" in filterValue || "endAt" in filterValue)) {
+return buildDateRangeCondition(columnName, filterValue);
+}
+return {
+condition: `${columnName} = ?`,
+args: [transformToSqlValue(filterValue)]
+};
+}
+function buildDateRangeCondition(columnName, range) {
+const conditions = [];
+const args = [];
+if (range.startAt !== void 0) {
+conditions.push(`${columnName} >= ?`);
+args.push(transformToSqlValue(range.startAt));
+}
+if (range.endAt !== void 0) {
+conditions.push(`${columnName} <= ?`);
+args.push(transformToSqlValue(range.endAt));
+}
+if (conditions.length === 0) {
+throw new Error("Date range must specify at least startAt or endAt");
+}
+return {
+condition: conditions.join(" AND "),
+args
+};
+}
+function transformFromSqlRow({
+tableName,
+sqlRow
+}) {
+const result = {};
+const jsonColumns = new Set(
+Object.keys(storage.TABLE_SCHEMAS[tableName]).filter((key) => storage.TABLE_SCHEMAS[tableName][key].type === "jsonb").map((key) => key)
+);
+const dateColumns = new Set(
+Object.keys(storage.TABLE_SCHEMAS[tableName]).filter((key) => storage.TABLE_SCHEMAS[tableName][key].type === "timestamp").map((key) => key)
+);
+for (const [key, value] of Object.entries(sqlRow)) {
+if (value === null || value === void 0) {
+result[key] = value;
+continue;
+}
+if (dateColumns.has(key) && typeof value === "string") {
+result[key] = new Date(value);
+continue;
+}
+if (jsonColumns.has(key) && typeof value === "string") {
+result[key] = storage.safelyParseJSON(value);
+continue;
+}
+result[key] = value;
+}
+return result;
+}
+
+// src/storage/db/index.ts
+function resolveClient(config) {
+if ("client" in config) {
+return config.client;
+}
+return client.createClient({
+url: config.url,
+...config.authToken ? { authToken: config.authToken } : {}
+});
+}
+var LibSQLDB = class extends base.MastraBase {
+client;
+maxRetries;
+initialBackoffMs;
+executeWriteOperationWithRetry;
+constructor({
+client,
+maxRetries,
+initialBackoffMs
+}) {
+super({
+component: "STORAGE",
+name: "LIBSQL_DB_LAYER"
+});
+this.client = client;
+this.maxRetries = maxRetries ?? 5;
+this.initialBackoffMs = initialBackoffMs ?? 100;
+this.executeWriteOperationWithRetry = createExecuteWriteOperationWithRetry({
+logger: this.logger,
+maxRetries: this.maxRetries,
+initialBackoffMs: this.initialBackoffMs
+});
+}
+/**
+ * Checks if a column exists in the specified table.
+ *
+ * @param table - The name of the table to check
+ * @param column - The name of the column to look for
+ * @returns `true` if the column exists in the table, `false` otherwise
+ */
+async hasColumn(table, column) {
+const sanitizedTable = utils.parseSqlIdentifier(table, "table name");
+const result = await this.client.execute({
+sql: `PRAGMA table_info("${sanitizedTable}")`
+});
+return result.rows?.some((row) => row.name === column);
+}
+/**
+ * Internal insert implementation without retry logic.
+ */
+async doInsert({
+tableName,
+record
+}) {
+await this.client.execute(
+prepareStatement({
+tableName,
+record
+})
+);
+}
+/**
+ * Inserts or replaces a record in the specified table with automatic retry on lock errors.
+ *
+ * @param args - The insert arguments
+ * @param args.tableName - The name of the table to insert into
+ * @param args.record - The record to insert (key-value pairs)
+ */
+insert(args) {
+return this.executeWriteOperationWithRetry(() => this.doInsert(args), `insert into table ${args.tableName}`);
+}
+/**
+ * Internal update implementation without retry logic.
+ */
+async doUpdate({
+tableName,
+keys,
+data
+}) {
+await this.client.execute(prepareUpdateStatement({ tableName, updates: data, keys }));
+}
+/**
+ * Updates a record in the specified table with automatic retry on lock errors.
+ *
+ * @param args - The update arguments
+ * @param args.tableName - The name of the table to update
+ * @param args.keys - The key(s) identifying the record to update
+ * @param args.data - The fields to update (key-value pairs)
+ */
+update(args) {
+return this.executeWriteOperationWithRetry(() => this.doUpdate(args), `update table ${args.tableName}`);
+}
+/**
+ * Internal batch insert implementation without retry logic.
+ */
+async doBatchInsert({
+tableName,
+records
+}) {
+if (records.length === 0) return;
+const batchStatements = records.map((r) => prepareStatement({ tableName, record: r }));
+await this.client.batch(batchStatements, "write");
+}
+/**
+ * Inserts multiple records in a single batch transaction with automatic retry on lock errors.
+ *
+ * @param args - The batch insert arguments
+ * @param args.tableName - The name of the table to insert into
+ * @param args.records - Array of records to insert
+ * @throws {MastraError} When the batch insert fails after retries
+ */
+async batchInsert(args) {
+return this.executeWriteOperationWithRetry(
+() => this.doBatchInsert(args),
+`batch insert into table ${args.tableName}`
+).catch((error$1) => {
+throw new error.MastraError(
 {
-id: "
+id: storage.createStorageErrorId("LIBSQL", "BATCH_INSERT", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY,
 details: {
-
-resourceId: resourceId ?? ""
+tableName: args.tableName
 }
 },
 error$1
 );
-
-this.logger?.trackException?.(mastraError);
-return {
-messages: [],
-total: 0,
-page,
-perPage: perPageForResponse,
-hasMore: false
-};
-}
+});
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-typeof message.content === "object" ? JSON.stringify(message.content) : message.content,
-message.role,
-message.type || "v2",
-time instanceof Date ? time.toISOString() : time,
-message.resourceId
-]
-};
-});
-const now = (/* @__PURE__ */ new Date()).toISOString();
-batchStatements.push({
-sql: `UPDATE "${storage.TABLE_THREADS}" SET "updatedAt" = ? WHERE id = ?`,
-args: [now, threadId]
-});
-const BATCH_SIZE = 50;
-const messageStatements = batchStatements.slice(0, -1);
-const threadUpdateStatement = batchStatements[batchStatements.length - 1];
-for (let i = 0; i < messageStatements.length; i += BATCH_SIZE) {
-const batch = messageStatements.slice(i, i + BATCH_SIZE);
-if (batch.length > 0) {
-await this.client.batch(batch, "write");
-}
-}
-if (threadUpdateStatement) {
-await this.client.execute(threadUpdateStatement);
-}
-const list = new agent.MessageList().add(messages, "memory");
-return { messages: list.get.all.db() };
-} catch (error$1) {
+/**
+ * Internal batch update implementation without retry logic.
+ * Each record can be updated based on single or composite keys.
+ */
+async doBatchUpdate({
+tableName,
+updates
+}) {
+if (updates.length === 0) return;
+const batchStatements = updates.map(
+({ keys, data }) => prepareUpdateStatement({
+tableName,
+updates: data,
+keys
+})
+);
+await this.client.batch(batchStatements, "write");
+}
+/**
+ * Updates multiple records in a single batch transaction with automatic retry on lock errors.
+ * Each record can be updated based on single or composite keys.
+ *
+ * @param args - The batch update arguments
+ * @param args.tableName - The name of the table to update
+ * @param args.updates - Array of update operations, each containing keys and data
+ * @throws {MastraError} When the batch update fails after retries
+ */
+async batchUpdate(args) {
+return this.executeWriteOperationWithRetry(
+() => this.doBatchUpdate(args),
+`batch update in table ${args.tableName}`
+).catch((error$1) => {
 throw new error.MastraError(
 {
-id: "
+id: storage.createStorageErrorId("LIBSQL", "BATCH_UPDATE", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
-category: error.ErrorCategory.THIRD_PARTY
+category: error.ErrorCategory.THIRD_PARTY,
+details: {
+tableName: args.tableName
+}
 },
 error$1
 );
-}
+});
 }
-
-
+/**
+ * Internal batch delete implementation without retry logic.
+ * Each record can be deleted based on single or composite keys.
+ */
+async doBatchDelete({
+tableName,
+keys
 }) {
-if (
-
-
-
-
-
-
-const existingMessages = existingResult.rows.map((row) => this.parseRow(row));
-if (existingMessages.length === 0) {
-return [];
-}
-const batchStatements = [];
-const threadIdsToUpdate = /* @__PURE__ */ new Set();
-const columnMapping = {
-threadId: "thread_id"
-};
-for (const existingMessage of existingMessages) {
-const updatePayload = messages.find((m) => m.id === existingMessage.id);
-if (!updatePayload) continue;
-const { id, ...fieldsToUpdate } = updatePayload;
-if (Object.keys(fieldsToUpdate).length === 0) continue;
-threadIdsToUpdate.add(existingMessage.threadId);
-if (updatePayload.threadId && updatePayload.threadId !== existingMessage.threadId) {
-threadIdsToUpdate.add(updatePayload.threadId);
-}
-const setClauses = [];
-const args = [];
-const updatableFields = { ...fieldsToUpdate };
-if (updatableFields.content) {
-const newContent = {
-...existingMessage.content,
-...updatableFields.content,
-// Deep merge metadata if it exists on both
-...existingMessage.content?.metadata && updatableFields.content.metadata ? {
-metadata: {
-...existingMessage.content.metadata,
-...updatableFields.content.metadata
-}
-} : {}
-};
-setClauses.push(`${utils.parseSqlIdentifier("content", "column name")} = ?`);
-args.push(JSON.stringify(newContent));
-delete updatableFields.content;
-}
-for (const key in updatableFields) {
-if (Object.prototype.hasOwnProperty.call(updatableFields, key)) {
-const dbKey = columnMapping[key] || key;
-setClauses.push(`${utils.parseSqlIdentifier(dbKey, "column name")} = ?`);
-let value = updatableFields[key];
-if (typeof value === "object" && value !== null) {
-value = JSON.stringify(value);
-}
-args.push(value);
-}
-}
-if (setClauses.length === 0) continue;
-args.push(id);
-const sql = `UPDATE ${storage.TABLE_MESSAGES} SET ${setClauses.join(", ")} WHERE id = ?`;
-batchStatements.push({ sql, args });
-}
-if (batchStatements.length === 0) {
-return existingMessages;
-}
-const now = (/* @__PURE__ */ new Date()).toISOString();
-for (const threadId of threadIdsToUpdate) {
-if (threadId) {
-batchStatements.push({
-sql: `UPDATE ${storage.TABLE_THREADS} SET updatedAt = ? WHERE id = ?`,
-args: [now, threadId]
-});
-}
-}
+if (keys.length === 0) return;
+const batchStatements = keys.map(
+(keyObj) => prepareDeleteStatement({
+tableName,
+keys: keyObj
+})
+);
 await this.client.batch(batchStatements, "write");
-const updatedResult = await this.client.execute({ sql: selectSql, args: messageIds });
-return updatedResult.rows.map((row) => this.parseRow(row));
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-if (row.thread_id) threadIds.add(row.thread_id);
-});
-await tx.execute({
-sql: `DELETE FROM "${storage.TABLE_MESSAGES}" WHERE id IN (${placeholders})`,
-args: batch
-});
-}
-if (threadIds.size > 0) {
-const now = (/* @__PURE__ */ new Date()).toISOString();
-for (const threadId of threadIds) {
-await tx.execute({
-sql: `UPDATE "${storage.TABLE_THREADS}" SET "updatedAt" = ? WHERE id = ?`,
-args: [now, threadId]
-});
-}
-}
-await tx.commit();
-} catch (error) {
-await tx.rollback();
-throw error;
-}
-} catch (error$1) {
+/**
+ * Deletes multiple records in a single batch transaction with automatic retry on lock errors.
+ * Each record can be deleted based on single or composite keys.
+ *
+ * @param args - The batch delete arguments
+ * @param args.tableName - The name of the table to delete from
+ * @param args.keys - Array of key objects identifying records to delete
+ * @throws {MastraError} When the batch delete fails after retries
+ */
+async batchDelete({
+tableName,
+keys
+}) {
+return this.executeWriteOperationWithRetry(
+() => this.doBatchDelete({ tableName, keys }),
+`batch delete from table ${tableName}`
+).catch((error$1) => {
 throw new error.MastraError(
 {
-id: "
+id: storage.createStorageErrorId("LIBSQL", "BATCH_DELETE", "FAILED"),
 domain: error.ErrorDomain.STORAGE,
 category: error.ErrorCategory.THIRD_PARTY,
-details: {
+details: {
+tableName
+}
 },
 error$1
 );
-}
-}
-async getResourceById({ resourceId }) {
-const result = await this.operations.load({
-tableName: storage.TABLE_RESOURCES,
-keys: { id: resourceId }
 });
-if (!result) {
-return null;
-}
-return {
-...result,
-// Ensure workingMemory is always returned as a string, even if auto-parsed as JSON
-workingMemory: result.workingMemory && typeof result.workingMemory === "object" ? JSON.stringify(result.workingMemory) : result.workingMemory,
-metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata,
-createdAt: new Date(result.createdAt),
-updatedAt: new Date(result.updatedAt)
-};
 }
-
-
-
-
-
-
+/**
+ * Internal single-record delete implementation without retry logic.
+ */
+async doDelete({ tableName, keys }) {
+await this.client.execute(prepareDeleteStatement({ tableName, keys }));
+}
+/**
+ * Deletes a single record from the specified table with automatic retry on lock errors.
+ *
+ * @param args - The delete arguments
+ * @param args.tableName - The name of the table to delete from
+ * @param args.keys - The key(s) identifying the record to delete
+ * @throws {MastraError} When the delete fails after retries
+ */
+async delete(args) {
+return this.executeWriteOperationWithRetry(() => this.doDelete(args), `delete from table ${args.tableName}`).catch(
+(error$1) => {
+throw new error.MastraError(
+{
+id: storage.createStorageErrorId("LIBSQL", "DELETE", "FAILED"),
+domain: error.ErrorDomain.STORAGE,
+category: error.ErrorCategory.THIRD_PARTY,
+details: {
+tableName: args.tableName
+}
+},
+error$1
+);
 }
+);
+}
+/**
+ * Selects a single record from the specified table by key(s).
+ * Returns the most recently created record if multiple matches exist.
+ * Automatically parses JSON string values back to objects/arrays.
+ *
+ * @typeParam R - The expected return type of the record
+ * @param args - The select arguments
+ * @param args.tableName - The name of the table to select from
+ * @param args.keys - The key(s) identifying the record to select
+ * @returns The matching record or `null` if not found
+ */
+async select({ tableName, keys }) {
+const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+const parsedKeys = Object.keys(keys).map((key) => utils.parseSqlIdentifier(key, "column name"));
+const conditions = parsedKeys.map((key) => `${key} = ?`).join(" AND ");
+const values = Object.values(keys);
+const result = await this.client.execute({
+sql: `SELECT * FROM ${parsedTableName} WHERE ${conditions} ORDER BY createdAt DESC LIMIT 1`,
+args: values
 });
-
+if (!result.rows || result.rows.length === 0) {
+return null;
+}
+const row = result.rows[0];
+const parsed = Object.fromEntries(
+Object.entries(row || {}).map(([k, v]) => {
+try {
+return [k, typeof v === "string" ? v.startsWith("{") || v.startsWith("[") ? JSON.parse(v) : v : v];
+} catch {
+return [k, v];
+}
+})
+);
+return parsed;
 }
-
-
-
-
+/**
+ * Selects multiple records from the specified table with optional filtering, ordering, and pagination.
+ *
+ * @typeParam R - The expected return type of each record
+ * @param args - The select arguments
+ * @param args.tableName - The name of the table to select from
+ * @param args.whereClause - Optional WHERE clause with SQL string and arguments
+ * @param args.orderBy - Optional ORDER BY clause (e.g., "createdAt DESC")
+ * @param args.offset - Optional offset for pagination
+ * @param args.limit - Optional limit for pagination
+ * @param args.args - Optional additional query arguments
+ * @returns Array of matching records
+ */
+async selectMany({
+tableName,
+whereClause,
+orderBy,
+offset,
+limit,
+args
 }) {
-const
-
-
-
-workingMemory,
-metadata: metadata || {},
-createdAt: /* @__PURE__ */ new Date(),
-updatedAt: /* @__PURE__ */ new Date()
-};
-return this.saveResource({ resource: newResource });
+const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+let statement = `SELECT * FROM ${parsedTableName}`;
+if (whereClause?.sql) {
+statement += ` ${whereClause.sql}`;
 }
-
-workingMemory: workingMemory !== void 0 ? workingMemory : existingResource.workingMemory,
-metadata: {
-...existingResource.metadata,
-...metadata
-},
-updatedAt: /* @__PURE__ */ new Date()
-};
-const updates = [];
-const values = [];
-if (workingMemory !== void 0) {
-updates.push("workingMemory = ?");
-values.push(workingMemory);
+if (orderBy) {
+statement += ` ORDER BY ${orderBy}`;
 }
-if (
-
-values.push(JSON.stringify(updatedResource.metadata));
+if (limit) {
+statement += ` LIMIT ${limit}`;
 }
-
-
-
-await this.client.execute({
-sql:
-args:
+if (offset) {
+statement += ` OFFSET ${offset}`;
+}
+const result = await this.client.execute({
+sql: statement,
+args: [...whereClause?.args ?? [], ...args ?? []]
 });
-return
+return result.rows;
 }
-
+/**
+ * Returns the total count of records matching the optional WHERE clause.
+ *
+ * @param args - The count arguments
+ * @param args.tableName - The name of the table to count from
+ * @param args.whereClause - Optional WHERE clause with SQL string and arguments
+ * @returns The total count of matching records
+ */
+async selectTotalCount({
+tableName,
+whereClause
+}) {
+const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
+const statement = `SELECT COUNT(*) as count FROM ${parsedTableName} ${whereClause ? `${whereClause.sql}` : ""}`;
+const result = await this.client.execute({
+sql: statement,
+args: whereClause?.args ?? []
+});
+if (!result.rows || result.rows.length === 0) {
+return 0;
+}
+return result.rows[0]?.count ?? 0;
+}
+/**
+ * Maps a storage column type to its SQLite equivalent.
+ */
+getSqlType(type) {
+switch (type) {
+case "bigint":
+return "INTEGER";
+// SQLite uses INTEGER for all integer sizes
+case "timestamp":
+return "TEXT";
+// Store timestamps as ISO strings in SQLite
+case "float":
+return "REAL";
+// SQLite's floating point type
+case "boolean":
+return "INTEGER";
+// SQLite uses 0/1 for booleans
+case "jsonb":
+return "TEXT";
+// Store JSON as TEXT in SQLite
+default:
+return storage.getSqlType(type);
+}
+}
+/**
+ * Creates a table if it doesn't exist based on the provided schema.
+ *
+ * @param args - The create table arguments
+ * @param args.tableName - The name of the table to create
+ * @param args.schema - The schema definition for the table columns
+ */
async createTable({
|
|
1656
|
+
tableName,
|
|
1657
|
+
schema
|
|
1658
|
+
}) {
|
|
1424
1659
|
try {
|
|
1425
|
-
const
|
|
1426
|
-
|
|
1427
|
-
|
|
1660
|
+
const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
|
|
1661
|
+
const columnDefinitions = Object.entries(schema).map(([colName, colDef]) => {
|
|
1662
|
+
const type = this.getSqlType(colDef.type);
|
|
1663
|
+
const nullable = colDef.nullable === false ? "NOT NULL" : "";
|
|
1664
|
+
const primaryKey = colDef.primaryKey ? "PRIMARY KEY" : "";
|
|
1665
|
+
return `"${colName}" ${type} ${nullable} ${primaryKey}`.trim();
|
|
1428
1666
|
});
|
|
1429
|
-
|
|
1430
|
-
|
|
1667
|
+
const tableConstraints = [];
|
|
1668
|
+
if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
|
|
1669
|
+
tableConstraints.push("UNIQUE (workflow_name, run_id)");
|
|
1670
|
+
}
|
|
1671
|
+
const allDefinitions = [...columnDefinitions, ...tableConstraints].join(",\n ");
|
|
1672
|
+
const sql = `CREATE TABLE IF NOT EXISTS ${parsedTableName} (
|
|
1673
|
+
${allDefinitions}
|
|
1674
|
+
)`;
|
|
1675
|
+
await this.client.execute(sql);
|
|
1676
|
+
this.logger.debug(`LibSQLDB: Created table ${tableName}`);
|
|
1677
|
+
if (tableName === storage.TABLE_SPANS) {
|
|
1678
|
+
await this.migrateSpansTable();
|
|
1431
1679
|
}
|
|
1432
|
-
return {
|
|
1433
|
-
...result,
|
|
1434
|
-
metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata,
|
|
1435
|
-
createdAt: new Date(result.createdAt),
|
|
1436
|
-
updatedAt: new Date(result.updatedAt)
|
|
1437
|
-
};
|
|
1438
1680
|
} catch (error$1) {
|
|
1439
1681
|
throw new error.MastraError(
|
|
1440
1682
|
{
|
|
1441
|
-
id: "
|
|
1683
|
+
id: storage.createStorageErrorId("LIBSQL", "CREATE_TABLE", "FAILED"),
|
|
1442
1684
|
domain: error.ErrorDomain.STORAGE,
|
|
1443
1685
|
category: error.ErrorCategory.THIRD_PARTY,
|
|
1444
|
-
details: {
|
|
1686
|
+
details: { tableName }
|
|
1445
1687
|
},
|
|
1446
1688
|
error$1
|
|
1447
1689
|
);
|
|
1448
1690
|
}
|
|
1449
1691
|
}
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
|
+  /**
+   * Migrates the spans table schema from OLD_SPAN_SCHEMA to current SPAN_SCHEMA.
+   * This adds new columns that don't exist in old schema.
+   */
+  async migrateSpansTable() {
+    const schema = storage.TABLE_SCHEMAS[storage.TABLE_SPANS];
+    try {
+      for (const [columnName, columnDef] of Object.entries(schema)) {
+        const columnExists = await this.hasColumn(storage.TABLE_SPANS, columnName);
+        if (!columnExists) {
+          const sqlType = this.getSqlType(columnDef.type);
+          const alterSql = `ALTER TABLE "${storage.TABLE_SPANS}" ADD COLUMN "${columnName}" ${sqlType}`;
+          await this.client.execute(alterSql);
+          this.logger.debug(`LibSQLDB: Added column '${columnName}' to ${storage.TABLE_SPANS}`);
+        }
+      }
+      this.logger.info(`LibSQLDB: Migration completed for ${storage.TABLE_SPANS}`);
+    } catch (error) {
+      this.logger.warn(`LibSQLDB: Failed to migrate spans table ${storage.TABLE_SPANS}:`, error);
     }
+  }
+  /**
+   * Gets a default value for a column type (used when adding NOT NULL columns).
+   */
+  getDefaultValue(type) {
+    switch (type) {
+      case "text":
+      case "uuid":
+        return "DEFAULT ''";
+      case "integer":
+      case "bigint":
+      case "float":
+        return "DEFAULT 0";
+      case "boolean":
+        return "DEFAULT 0";
+      case "jsonb":
+        return "DEFAULT '{}'";
+      case "timestamp":
+        return "DEFAULT CURRENT_TIMESTAMP";
+      default:
+        return "DEFAULT ''";
+    }
+  }
+  /**
+   * Alters an existing table to add missing columns.
+   * Used for schema migrations when new columns are added.
+   *
+   * @param args - The alter table arguments
+   * @param args.tableName - The name of the table to alter
+   * @param args.schema - The full schema definition for the table
+   * @param args.ifNotExists - Array of column names to add if they don't exist
+   */
+  async alterTable({
+    tableName,
+    schema,
+    ifNotExists
+  }) {
+    const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
     try {
-      const mapRowToStorageThreadType = (row) => ({
-        id: row.id,
-        resourceId: row.resourceId,
-        title: row.title,
-        createdAt: new Date(row.createdAt),
-        // Convert string to Date
-        updatedAt: new Date(row.updatedAt),
-        // Convert string to Date
-        metadata: typeof row.metadata === "string" ? JSON.parse(row.metadata) : row.metadata
-      });
-      const countResult = await this.client.execute({
-        sql: `SELECT COUNT(*) as count ${baseQuery}`,
-        args: queryParams
+      const tableInfo = await this.client.execute({
+        sql: `PRAGMA table_info("${parsedTableName}")`
       });
+      const existingColumns = new Set((tableInfo.rows || []).map((row) => row.name?.toLowerCase()));
+      for (const columnName of ifNotExists) {
+        if (!existingColumns.has(columnName.toLowerCase()) && schema[columnName]) {
+          const columnDef = schema[columnName];
+          const sqlType = this.getSqlType(columnDef.type);
+          const defaultValue = this.getDefaultValue(columnDef.type);
+          const alterSql = `ALTER TABLE ${parsedTableName} ADD COLUMN "${columnName}" ${sqlType} ${defaultValue}`;
+          await this.client.execute(alterSql);
+          this.logger.debug(`LibSQLDB: Added column ${columnName} to table ${tableName}`);
+        }
       }
-      const limitValue = perPageInput === false ? total : perPage;
-      const dataResult = await this.client.execute({
-        sql: `SELECT * ${baseQuery} ORDER BY "${field}" ${direction} LIMIT ? OFFSET ?`,
-        args: [...queryParams, limitValue, offset]
-      });
-      const threads = (dataResult.rows || []).map(mapRowToStorageThreadType);
-      return {
-        threads,
-        total,
-        page,
-        perPage: perPageForResponse,
-        hasMore: perPageInput === false ? false : offset + perPage < total
-      };
     } catch (error$1) {
+      throw new error.MastraError(
         {
+          id: storage.createStorageErrorId("LIBSQL", "ALTER_TABLE", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
           category: error.ErrorCategory.THIRD_PARTY,
+          details: { tableName }
         },
         error$1
       );
-      this.logger?.trackException?.(mastraError);
-      this.logger?.error?.(mastraError.toString());
-      return {
-        threads: [],
-        total: 0,
-        page,
-        perPage: perPageForResponse,
-        hasMore: false
-      };
     }
   }
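alterTable only adds the columns named in ifNotExists and skips any that PRAGMA table_info already reports, so it is safe to call repeatedly. A hedged sketch, reusing the hypothetical 'notes' table from the earlier example:

// Adds an "archived" column only if it is missing; the column name is hypothetical.
await db.alterTable({
  tableName: 'notes',
  schema: { archived: { type: 'boolean', nullable: true } }, // only the listed columns are read
  ifNotExists: ['archived'],
});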
1527
|
-
|
|
1777
|
+
/**
|
|
1778
|
+
* Deletes all records from the specified table.
|
|
1779
|
+
* Errors are logged but not thrown.
|
|
1780
|
+
*
|
|
1781
|
+
* @param args - The delete arguments
|
|
1782
|
+
* @param args.tableName - The name of the table to clear
|
|
1783
|
+
*/
|
|
1784
|
+
async deleteData({ tableName }) {
|
|
1785
|
+
const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
|
|
1528
1786
|
try {
|
|
1529
|
-
await this.
|
|
1530
|
-
|
|
1531
|
-
record: {
|
|
1532
|
-
...thread,
|
|
1533
|
-
metadata: JSON.stringify(thread.metadata)
|
|
1534
|
-
}
|
|
1535
|
-
});
|
|
1536
|
-
return thread;
|
|
1537
|
-
} catch (error$1) {
|
|
1787
|
+
await this.client.execute(`DELETE FROM ${parsedTableName}`);
|
|
1788
|
+
} catch (e) {
|
|
1538
1789
|
const mastraError = new error.MastraError(
|
|
1539
1790
|
{
|
|
1540
|
-
id: "
|
|
1791
|
+
id: storage.createStorageErrorId("LIBSQL", "CLEAR_TABLE", "FAILED"),
|
|
1541
1792
|
domain: error.ErrorDomain.STORAGE,
|
|
1542
1793
|
category: error.ErrorCategory.THIRD_PARTY,
|
|
1543
|
-
details: {
|
|
1794
|
+
details: {
|
|
1795
|
+
tableName
|
|
1796
|
+
}
|
|
1544
1797
|
},
|
|
1545
|
-
|
|
1798
|
+
e
|
|
1546
1799
|
);
|
|
1547
1800
|
this.logger?.trackException?.(mastraError);
|
|
1548
1801
|
this.logger?.error?.(mastraError.toString());
|
|
1549
|
-
throw mastraError;
|
|
1550
1802
|
}
|
|
1551
1803
|
}
|
|
1552
|
-
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1557
|
-
|
|
1558
|
-
|
|
1559
|
-
|
|
1560
|
-
|
|
1561
|
-
|
|
1562
|
-
|
|
1563
|
-
|
|
1564
|
-
|
|
1565
|
-
|
|
1566
|
-
|
|
1567
|
-
|
|
1568
|
-
|
|
1569
|
-
|
|
1570
|
-
|
|
1571
|
-
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
...
|
|
1804
|
+
};
|
|
1805
|
+
|
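deleteData clears every row of a table without dropping it, and failures are logged rather than re-thrown. Continuing the same hypothetical sketch:

// Removes all rows from the hypothetical 'notes' table; errors are logged, not thrown.
await db.deleteData({ tableName: 'notes' });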
+// src/storage/domains/agents/index.ts
+var AgentsLibSQL = class extends storage.AgentsStorage {
+  #db;
+  constructor(config) {
+    super();
+    const client = resolveClient(config);
+    this.#db = new LibSQLDB({ client, maxRetries: config.maxRetries, initialBackoffMs: config.initialBackoffMs });
+  }
+  async init() {
+    await this.#db.createTable({ tableName: storage.TABLE_AGENTS, schema: storage.AGENTS_SCHEMA });
+  }
+  async dangerouslyClearAll() {
+    await this.#db.deleteData({ tableName: storage.TABLE_AGENTS });
+  }
+  parseJson(value, fieldName) {
+    if (!value) return void 0;
+    if (typeof value !== "string") return value;
+    try {
+      return JSON.parse(value);
+    } catch (error$1) {
+      const details = {
+        value: value.length > 100 ? value.substring(0, 100) + "..." : value
+      };
+      if (fieldName) {
+        details.field = fieldName;
       }
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("LIBSQL", "PARSE_JSON", "INVALID_JSON"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.SYSTEM,
+          text: `Failed to parse JSON${fieldName ? ` for field "${fieldName}"` : ""}: ${error$1 instanceof Error ? error$1.message : "Unknown error"}`,
+          details
+        },
+        error$1
+      );
+    }
+  }
+  parseRow(row) {
+    return {
+      id: row.id,
+      name: row.name,
+      description: row.description,
+      instructions: row.instructions,
+      model: this.parseJson(row.model, "model"),
+      tools: this.parseJson(row.tools, "tools"),
+      defaultOptions: this.parseJson(row.defaultOptions, "defaultOptions"),
+      workflows: this.parseJson(row.workflows, "workflows"),
+      agents: this.parseJson(row.agents, "agents"),
+      inputProcessors: this.parseJson(row.inputProcessors, "inputProcessors"),
+      outputProcessors: this.parseJson(row.outputProcessors, "outputProcessors"),
+      memory: this.parseJson(row.memory, "memory"),
+      scorers: this.parseJson(row.scorers, "scorers"),
+      metadata: this.parseJson(row.metadata, "metadata"),
+      createdAt: new Date(row.createdAt),
+      updatedAt: new Date(row.updatedAt)
     };
+  }
+  async getAgentById({ id }) {
     try {
+      const result = await this.#db.select({
+        tableName: storage.TABLE_AGENTS,
+        keys: { id }
       });
+      return result ? this.parseRow(result) : null;
     } catch (error$1) {
       throw new error.MastraError(
         {
+          id: storage.createStorageErrorId("LIBSQL", "GET_AGENT_BY_ID", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
           category: error.ErrorCategory.THIRD_PARTY,
-          details: { threadId: id }
+          details: { agentId: id }
         },
         error$1
       );
     }
   }
+  async createAgent({ agent }) {
     try {
+      const now = /* @__PURE__ */ new Date();
+      await this.#db.insert({
+        tableName: storage.TABLE_AGENTS,
+        record: {
+          id: agent.id,
+          name: agent.name,
+          description: agent.description ?? null,
+          instructions: agent.instructions,
+          model: agent.model,
+          tools: agent.tools ?? null,
+          defaultOptions: agent.defaultOptions ?? null,
+          workflows: agent.workflows ?? null,
+          agents: agent.agents ?? null,
+          inputProcessors: agent.inputProcessors ?? null,
+          outputProcessors: agent.outputProcessors ?? null,
+          memory: agent.memory ?? null,
+          scorers: agent.scorers ?? null,
+          metadata: agent.metadata ?? null,
+          createdAt: now,
+          updatedAt: now
+        }
       });
+      return {
+        ...agent,
+        createdAt: now,
+        updatedAt: now
+      };
     } catch (error$1) {
       throw new error.MastraError(
         {
+          id: storage.createStorageErrorId("LIBSQL", "CREATE_AGENT", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
           category: error.ErrorCategory.THIRD_PARTY,
+          details: { agentId: agent.id }
         },
         error$1
       );
     }
   }
-    } catch (error) {
-      if (error.message && (error.message.includes("SQLITE_BUSY") || error.message.includes("database is locked")) && retries < maxRetries) {
-        retries++;
-        const backoffTime = initialBackoffMs * Math.pow(2, retries - 1);
-        logger.warn(
-          `LibSQLStore: Encountered SQLITE_BUSY during ${operationDescription}. Retrying (${retries}/${maxRetries}) in ${backoffTime}ms...`
-        );
-        await new Promise((resolve) => setTimeout(resolve, backoffTime));
-      } else {
-        logger.error(`LibSQLStore: Error during ${operationDescription} after ${retries} retries: ${error}`);
-        throw error;
-      }
+  async updateAgent({ id, ...updates }) {
+    try {
+      const existingAgent = await this.getAgentById({ id });
+      if (!existingAgent) {
+        throw new error.MastraError({
+          id: storage.createStorageErrorId("LIBSQL", "UPDATE_AGENT", "NOT_FOUND"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          text: `Agent ${id} not found`,
+          details: { agentId: id }
+        });
       }
+      const data = {
+        updatedAt: /* @__PURE__ */ new Date()
+      };
+      if (updates.name !== void 0) data.name = updates.name;
+      if (updates.description !== void 0) data.description = updates.description;
+      if (updates.instructions !== void 0) data.instructions = updates.instructions;
+      if (updates.model !== void 0) data.model = updates.model;
+      if (updates.tools !== void 0) data.tools = updates.tools;
+      if (updates.defaultOptions !== void 0) data.defaultOptions = updates.defaultOptions;
+      if (updates.workflows !== void 0) data.workflows = updates.workflows;
+      if (updates.agents !== void 0) data.agents = updates.agents;
+      if (updates.inputProcessors !== void 0) data.inputProcessors = updates.inputProcessors;
+      if (updates.outputProcessors !== void 0) data.outputProcessors = updates.outputProcessors;
+      if (updates.memory !== void 0) data.memory = updates.memory;
+      if (updates.scorers !== void 0) data.scorers = updates.scorers;
+      if (updates.metadata !== void 0) {
+        data.metadata = { ...existingAgent.metadata, ...updates.metadata };
+      }
+      if (Object.keys(data).length > 1) {
+        await this.#db.update({
+          tableName: storage.TABLE_AGENTS,
+          keys: { id },
+          data
+        });
+      }
+      const updatedAgent = await this.getAgentById({ id });
+      if (!updatedAgent) {
+        throw new error.MastraError({
+          id: storage.createStorageErrorId("LIBSQL", "UPDATE_AGENT", "NOT_FOUND_AFTER_UPDATE"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.SYSTEM,
+          text: `Agent ${id} not found after update`,
+          details: { agentId: id }
+        });
+      }
+      return updatedAgent;
+    } catch (error$1) {
+      if (error$1 instanceof error.MastraError) {
+        throw error$1;
+      }
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("LIBSQL", "UPDATE_AGENT", "FAILED"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY,
+          details: { agentId: id }
+        },
+        error$1
+      );
     }
-  };
-  }
-  function prepareStatement({ tableName, record }) {
-    const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
-    const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
-    const values = Object.values(record).map((v) => {
-      if (typeof v === `undefined` || v === null) {
-        return null;
-      }
-      if (v instanceof Date) {
-        return v.toISOString();
-      }
-      return typeof v === "object" ? JSON.stringify(v) : v;
-    });
-    const placeholders = values.map(() => "?").join(", ");
-    return {
-      sql: `INSERT OR REPLACE INTO ${parsedTableName} (${columns.join(", ")}) VALUES (${placeholders})`,
-      args: values
-    };
-  }
-  function prepareUpdateStatement({
-    tableName,
-    updates,
-    keys
-  }) {
-    const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
-    const schema = storage.TABLE_SCHEMAS[tableName];
-    const updateColumns = Object.keys(updates).map((col) => utils.parseSqlIdentifier(col, "column name"));
-    const updateValues = Object.values(updates).map(transformToSqlValue);
-    const setClause = updateColumns.map((col) => `${col} = ?`).join(", ");
-    const whereClause = prepareWhereClause(keys, schema);
-    return {
-      sql: `UPDATE ${parsedTableName} SET ${setClause}${whereClause.sql}`,
-      args: [...updateValues, ...whereClause.args]
-    };
-  }
-  function transformToSqlValue(value) {
-    if (typeof value === "undefined" || value === null) {
-      return null;
-    }
-    if (value instanceof Date) {
-      return value.toISOString();
-    }
-    return typeof value === "object" ? JSON.stringify(value) : value;
-  }
-  function prepareDeleteStatement({ tableName, keys }) {
-    const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
-    const whereClause = prepareWhereClause(keys, storage.TABLE_SCHEMAS[tableName]);
-    return {
-      sql: `DELETE FROM ${parsedTableName}${whereClause.sql}`,
-      args: whereClause.args
-    };
-  }
-  function prepareWhereClause(filters, schema) {
-    const conditions = [];
-    const args = [];
-    for (const [columnName, filterValue] of Object.entries(filters)) {
-      const column = schema[columnName];
-      if (!column) {
-        throw new Error(`Unknown column: ${columnName}`);
-      }
-      const parsedColumn = utils.parseSqlIdentifier(columnName, "column name");
-      const result = buildCondition2(parsedColumn, filterValue);
-      conditions.push(result.condition);
-      args.push(...result.args);
-    }
-    return {
-      sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
-      args
-    };
-  }
-  function buildCondition2(columnName, filterValue) {
-    if (filterValue === null) {
-      return { condition: `${columnName} IS NULL`, args: [] };
-    }
-    if (typeof filterValue === "object" && filterValue !== null && ("startAt" in filterValue || "endAt" in filterValue)) {
-      return buildDateRangeCondition(columnName, filterValue);
-    }
-    return {
-      condition: `${columnName} = ?`,
-      args: [transformToSqlValue(filterValue)]
-    };
-  }
-  function buildDateRangeCondition(columnName, range) {
-    const conditions = [];
-    const args = [];
-    if (range.startAt !== void 0) {
-      conditions.push(`${columnName} >= ?`);
-      args.push(transformToSqlValue(range.startAt));
     }
-    const filter = {};
-    if (dateRange.start) {
-      filter.startAt = new Date(dateRange.start).toISOString();
-    }
-    if (dateRange.end) {
-      filter.endAt = new Date(dateRange.end).toISOString();
-    }
-    return { [columnName]: filter };
-  }
-  function transformFromSqlRow({
-    tableName,
-    sqlRow
-  }) {
-    const result = {};
-    const jsonColumns = new Set(
-      Object.keys(storage.TABLE_SCHEMAS[tableName]).filter((key) => storage.TABLE_SCHEMAS[tableName][key].type === "jsonb").map((key) => key)
-    );
-    const dateColumns = new Set(
-      Object.keys(storage.TABLE_SCHEMAS[tableName]).filter((key) => storage.TABLE_SCHEMAS[tableName][key].type === "timestamp").map((key) => key)
-    );
-    for (const [key, value] of Object.entries(sqlRow)) {
-      if (value === null || value === void 0) {
-        result[key] = value;
-        continue;
+  async deleteAgent({ id }) {
+    try {
+      await this.#db.delete({
+        tableName: storage.TABLE_AGENTS,
+        keys: { id }
+      });
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("LIBSQL", "DELETE_AGENT", "FAILED"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY,
+          details: { agentId: id }
+        },
+        error$1
+      );
     }
+  }
+  async listAgents(args) {
+    const { page = 0, perPage: perPageInput, orderBy } = args || {};
+    const { field, direction } = this.parseOrderBy(orderBy);
+    if (page < 0) {
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("LIBSQL", "LIST_AGENTS", "INVALID_PAGE"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          details: { page }
+        },
+        new Error("page must be >= 0")
+      );
     }
+    const perPage = storage.normalizePerPage(perPageInput, 100);
+    const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
+    try {
+      const total = await this.#db.selectTotalCount({ tableName: storage.TABLE_AGENTS });
+      if (total === 0) {
+        return {
+          agents: [],
+          total: 0,
+          page,
+          perPage: perPageForResponse,
+          hasMore: false
+        };
+      }
+      const limitValue = perPageInput === false ? total : perPage;
+      const rows = await this.#db.selectMany({
+        tableName: storage.TABLE_AGENTS,
+        orderBy: `"${field}" ${direction}`,
+        limit: limitValue,
+        offset
+      });
+      const agents = rows.map((row) => this.parseRow(row));
+      return {
+        agents,
+        total,
+        page,
+        perPage: perPageForResponse,
+        hasMore: perPageInput === false ? false : offset + perPage < total
+      };
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("LIBSQL", "LIST_AGENTS", "FAILED"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY
+        },
+        error$1
+      );
     }
-    result[key] = value;
   }
-  operations;
-  constructor({ operations }) {
+};
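A minimal sketch of the new agents domain class, assuming it is reachable from the package exports and that its config accepts the same { url } options that resolveClient passes to @libsql/client; the agent fields shown are hypothetical.

// Illustrative sketch only; ids, names, and the model shape are hypothetical.
const agents = new AgentsLibSQL({ url: 'file:local.db' });
await agents.init(); // creates the agents table if it does not exist
await agents.createAgent({
  agent: {
    id: 'agent-1',
    name: 'support-bot',
    instructions: 'Answer politely.',
    model: { provider: 'openai', name: 'gpt-4o-mini' },
  },
});
const { agents: list, total, hasMore } = await agents.listAgents({ page: 0, perPage: 20 });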
+var MemoryLibSQL = class extends storage.MemoryStorage {
+  #client;
+  #db;
+  constructor(config) {
     super();
+    const client = resolveClient(config);
+    this.#client = client;
+    this.#db = new LibSQLDB({ client, maxRetries: config.maxRetries, initialBackoffMs: config.initialBackoffMs });
+  }
+  async init() {
+    await this.#db.createTable({ tableName: storage.TABLE_THREADS, schema: storage.TABLE_SCHEMAS[storage.TABLE_THREADS] });
+    await this.#db.createTable({ tableName: storage.TABLE_MESSAGES, schema: storage.TABLE_SCHEMAS[storage.TABLE_MESSAGES] });
+    await this.#db.createTable({ tableName: storage.TABLE_RESOURCES, schema: storage.TABLE_SCHEMAS[storage.TABLE_RESOURCES] });
+    await this.#db.alterTable({
+      tableName: storage.TABLE_MESSAGES,
+      schema: storage.TABLE_SCHEMAS[storage.TABLE_MESSAGES],
+      ifNotExists: ["resourceId"]
+    });
+  }
+  async dangerouslyClearAll() {
+    await this.#db.deleteData({ tableName: storage.TABLE_MESSAGES });
+    await this.#db.deleteData({ tableName: storage.TABLE_THREADS });
+    await this.#db.deleteData({ tableName: storage.TABLE_RESOURCES });
+  }
+  parseRow(row) {
+    let content = row.content;
+    try {
+      content = JSON.parse(row.content);
+    } catch {
+    }
+    const result = {
+      id: row.id,
+      content,
+      role: row.role,
+      createdAt: new Date(row.createdAt),
+      threadId: row.thread_id,
+      resourceId: row.resourceId
+    };
+    if (row.type && row.type !== `v2`) result.type = row.type;
+    return result;
+  }
+  async _getIncludedMessages({ include }) {
+    if (!include || include.length === 0) return null;
+    const unionQueries = [];
+    const params = [];
+    for (const inc of include) {
+      const { id, withPreviousMessages = 0, withNextMessages = 0 } = inc;
+      unionQueries.push(
+        `
+        SELECT * FROM (
+          WITH target_thread AS (
+            SELECT thread_id FROM "${storage.TABLE_MESSAGES}" WHERE id = ?
+          ),
+          numbered_messages AS (
+            SELECT
+              id, content, role, type, "createdAt", thread_id, "resourceId",
+              ROW_NUMBER() OVER (ORDER BY "createdAt" ASC) as row_num
+            FROM "${storage.TABLE_MESSAGES}"
+            WHERE thread_id = (SELECT thread_id FROM target_thread)
+          ),
+          target_positions AS (
+            SELECT row_num as target_pos
+            FROM numbered_messages
+            WHERE id = ?
+          )
+          SELECT DISTINCT m.*
+          FROM numbered_messages m
+          CROSS JOIN target_positions t
+          WHERE m.row_num BETWEEN (t.target_pos - ?) AND (t.target_pos + ?)
+        )
+        `
+        // Keep ASC for final sorting after fetching context
+      );
+      params.push(id, id, withPreviousMessages, withNextMessages);
+    }
+    const finalQuery = unionQueries.join(" UNION ALL ") + ' ORDER BY "createdAt" ASC';
+    const includedResult = await this.#client.execute({ sql: finalQuery, args: params });
+    const includedRows = includedResult.rows?.map((row) => this.parseRow(row));
+    const seen = /* @__PURE__ */ new Set();
+    const dedupedRows = includedRows.filter((row) => {
+      if (seen.has(row.id)) return false;
+      seen.add(row.id);
+      return true;
+    });
+    return dedupedRows;
   }
+  async listMessagesById({ messageIds }) {
+    if (messageIds.length === 0) return { messages: [] };
     try {
+      const sql = `
+        SELECT
+          id,
+          content,
+          role,
+          type,
+          "createdAt",
+          thread_id,
+          "resourceId"
+        FROM "${storage.TABLE_MESSAGES}"
+        WHERE id IN (${messageIds.map(() => "?").join(", ")})
+        ORDER BY "createdAt" DESC
+      `;
+      const result = await this.#client.execute({ sql, args: messageIds });
+      if (!result.rows) return { messages: [] };
+      const list = new agent.MessageList().add(result.rows.map(this.parseRow), "memory");
+      return { messages: list.get.all.db() };
     } catch (error$1) {
       throw new error.MastraError(
         {
+          id: storage.createStorageErrorId("LIBSQL", "LIST_MESSAGES_BY_ID", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
-          details: {
-            spanId: span.spanId,
-            traceId: span.traceId,
-            spanType: span.spanType,
-            spanName: span.name
-          }
+          category: error.ErrorCategory.THIRD_PARTY,
+          details: { messageIds: JSON.stringify(messageIds) }
         },
         error$1
       );
     }
   }
+  async listMessages(args) {
+    const { threadId, resourceId, include, filter, perPage: perPageInput, page = 0, orderBy } = args;
+    const threadIds = Array.isArray(threadId) ? threadId : [threadId];
+    if (threadIds.length === 0 || threadIds.some((id) => !id.trim())) {
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("LIBSQL", "LIST_MESSAGES", "INVALID_THREAD_ID"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY,
+          details: { threadId: Array.isArray(threadId) ? threadId.join(",") : threadId }
+        },
+        new Error("threadId must be a non-empty string or array of non-empty strings")
+      );
+    }
+    if (page < 0) {
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("LIBSQL", "LIST_MESSAGES", "INVALID_PAGE"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          details: { page }
+        },
+        new Error("page must be >= 0")
+      );
+    }
+    const perPage = storage.normalizePerPage(perPageInput, 40);
+    const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
     try {
+      const { field, direction } = this.parseOrderBy(orderBy, "ASC");
+      const orderByStatement = `ORDER BY "${field}" ${direction}`;
+      const threadPlaceholders = threadIds.map(() => "?").join(", ");
+      const conditions = [`thread_id IN (${threadPlaceholders})`];
+      const queryParams = [...threadIds];
+      if (resourceId) {
+        conditions.push(`"resourceId" = ?`);
+        queryParams.push(resourceId);
+      }
+      if (filter?.dateRange?.start) {
+        conditions.push(`"createdAt" >= ?`);
+        queryParams.push(
+          filter.dateRange.start instanceof Date ? filter.dateRange.start.toISOString() : filter.dateRange.start
+        );
+      }
+      if (filter?.dateRange?.end) {
+        conditions.push(`"createdAt" <= ?`);
+        queryParams.push(
+          filter.dateRange.end instanceof Date ? filter.dateRange.end.toISOString() : filter.dateRange.end
+        );
+      }
+      const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
+      const countResult = await this.#client.execute({
+        sql: `SELECT COUNT(*) as count FROM ${storage.TABLE_MESSAGES} ${whereClause}`,
+        args: queryParams
       });
+      const total = Number(countResult.rows?.[0]?.count ?? 0);
+      const limitValue = perPageInput === false ? total : perPage;
+      const dataResult = await this.#client.execute({
+        sql: `SELECT id, content, role, type, "createdAt", "resourceId", "thread_id" FROM ${storage.TABLE_MESSAGES} ${whereClause} ${orderByStatement} LIMIT ? OFFSET ?`,
+        args: [...queryParams, limitValue, offset]
+      });
+      const messages = (dataResult.rows || []).map((row) => this.parseRow(row));
+      if (total === 0 && messages.length === 0 && (!include || include.length === 0)) {
+        return {
+          messages: [],
+          total: 0,
+          page,
+          perPage: perPageForResponse,
+          hasMore: false
+        };
+      }
+      const messageIds = new Set(messages.map((m) => m.id));
+      if (include && include.length > 0) {
+        const includeMessages = await this._getIncludedMessages({ include });
+        if (includeMessages) {
+          for (const includeMsg of includeMessages) {
+            if (!messageIds.has(includeMsg.id)) {
+              messages.push(includeMsg);
+              messageIds.add(includeMsg.id);
+            }
+          }
+        }
       }
+      const list = new agent.MessageList().add(messages, "memory");
+      let finalMessages = list.get.all.db();
+      finalMessages = finalMessages.sort((a, b) => {
+        const isDateField = field === "createdAt" || field === "updatedAt";
+        const aValue = isDateField ? new Date(a[field]).getTime() : a[field];
+        const bValue = isDateField ? new Date(b[field]).getTime() : b[field];
+        if (typeof aValue === "number" && typeof bValue === "number") {
+          return direction === "ASC" ? aValue - bValue : bValue - aValue;
+        }
+        return direction === "ASC" ? String(aValue).localeCompare(String(bValue)) : String(bValue).localeCompare(String(aValue));
+      });
+      const threadIdSet = new Set(threadIds);
+      const returnedThreadMessageIds = new Set(
+        finalMessages.filter((m) => m.threadId && threadIdSet.has(m.threadId)).map((m) => m.id)
+      );
+      const allThreadMessagesReturned = returnedThreadMessageIds.size >= total;
+      const hasMore = perPageInput !== false && !allThreadMessagesReturned && offset + perPage < total;
       return {
+        messages: finalMessages,
+        total,
+        page,
+        perPage: perPageForResponse,
+        hasMore
       };
     } catch (error$1) {
+      const mastraError = new error.MastraError(
         {
+          id: storage.createStorageErrorId("LIBSQL", "LIST_MESSAGES", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY,
           details: {
+            threadId: Array.isArray(threadId) ? threadId.join(",") : threadId,
+            resourceId: resourceId ?? ""
           }
         },
         error$1
       );
+      this.logger?.error?.(mastraError.toString());
+      this.logger?.trackException?.(mastraError);
+      return {
+        messages: [],
+        total: 0,
+        page,
+        perPage: perPageForResponse,
+        hasMore: false
+      };
     }
   }
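listMessages paginates over one or more threads and can pull surrounding context for specific message ids via include. A hedged sketch with hypothetical ids, assuming MemoryLibSQL accepts the same { url } config shape as the other domain classes:

// Illustrative sketch only; thread and message ids are hypothetical.
const memory = new MemoryLibSQL({ url: 'file:local.db' });
await memory.init();
const { messages, total, hasMore } = await memory.listMessages({
  threadId: 'thread-1',
  page: 0,
  perPage: 40,
  include: [{ id: 'msg-42', withPreviousMessages: 2, withNextMessages: 2 }],
});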
+  async saveMessages({ messages }) {
+    if (messages.length === 0) return { messages };
     try {
+      const threadId = messages[0]?.threadId;
+      if (!threadId) {
+        throw new Error("Thread ID is required");
+      }
+      const batchStatements = messages.map((message) => {
+        const time = message.createdAt || /* @__PURE__ */ new Date();
+        if (!message.threadId) {
+          throw new Error(
+            `Expected to find a threadId for message, but couldn't find one. An unexpected error has occurred.`
+          );
+        }
+        if (!message.resourceId) {
+          throw new Error(
+            `Expected to find a resourceId for message, but couldn't find one. An unexpected error has occurred.`
+          );
+        }
+        return {
+          sql: `INSERT INTO "${storage.TABLE_MESSAGES}" (id, thread_id, content, role, type, "createdAt", "resourceId")
+                VALUES (?, ?, ?, ?, ?, ?, ?)
+                ON CONFLICT(id) DO UPDATE SET
+                  thread_id=excluded.thread_id,
+                  content=excluded.content,
+                  role=excluded.role,
+                  type=excluded.type,
+                  "resourceId"=excluded."resourceId"
+          `,
+          args: [
+            message.id,
+            message.threadId,
+            typeof message.content === "object" ? JSON.stringify(message.content) : message.content,
+            message.role,
+            message.type || "v2",
+            time instanceof Date ? time.toISOString() : time,
+            message.resourceId
+          ]
+        };
       });
+      const now = (/* @__PURE__ */ new Date()).toISOString();
+      batchStatements.push({
+        sql: `UPDATE "${storage.TABLE_THREADS}" SET "updatedAt" = ? WHERE id = ?`,
+        args: [now, threadId]
+      });
+      const BATCH_SIZE = 50;
+      const messageStatements = batchStatements.slice(0, -1);
+      const threadUpdateStatement = batchStatements[batchStatements.length - 1];
+      for (let i = 0; i < messageStatements.length; i += BATCH_SIZE) {
+        const batch = messageStatements.slice(i, i + BATCH_SIZE);
+        if (batch.length > 0) {
+          await this.#client.batch(batch, "write");
+        }
+      }
+      if (threadUpdateStatement) {
+        await this.#client.execute(threadUpdateStatement);
+      }
+      const list = new agent.MessageList().add(messages, "memory");
+      return { messages: list.get.all.db() };
     } catch (error$1) {
       throw new error.MastraError(
         {
+          id: storage.createStorageErrorId("LIBSQL", "SAVE_MESSAGES", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
-          details: {
-            spanId,
-            traceId
-          }
+          category: error.ErrorCategory.THIRD_PARTY
         },
         error$1
       );
     }
   }
+  async updateMessages({
+    messages
   }) {
+    if (messages.length === 0) {
+      return [];
+    }
+    const messageIds = messages.map((m) => m.id);
+    const placeholders = messageIds.map(() => "?").join(",");
+    const selectSql = `SELECT * FROM ${storage.TABLE_MESSAGES} WHERE id IN (${placeholders})`;
+    const existingResult = await this.#client.execute({ sql: selectSql, args: messageIds });
+    const existingMessages = existingResult.rows.map((row) => this.parseRow(row));
+    if (existingMessages.length === 0) {
+      return [];
+    }
+    const batchStatements = [];
+    const threadIdsToUpdate = /* @__PURE__ */ new Set();
+    const columnMapping = {
+      threadId: "thread_id"
     };
-        name = `agent run: '${entityId}'`;
-      } else {
-        const error$1 = new error.MastraError({
-          id: "LIBSQL_STORE_GET_TRACES_PAGINATED_FAILED",
-          domain: error.ErrorDomain.STORAGE,
-          category: error.ErrorCategory.USER,
-          details: {
-            entityType
-          },
-          text: `Cannot filter by entity type: ${entityType}`
-        });
-        this.logger?.trackException(error$1);
-        throw error$1;
+    for (const existingMessage of existingMessages) {
+      const updatePayload = messages.find((m) => m.id === existingMessage.id);
+      if (!updatePayload) continue;
+      const { id, ...fieldsToUpdate } = updatePayload;
+      if (Object.keys(fieldsToUpdate).length === 0) continue;
+      threadIdsToUpdate.add(existingMessage.threadId);
+      if (updatePayload.threadId && updatePayload.threadId !== existingMessage.threadId) {
+        threadIdsToUpdate.add(updatePayload.threadId);
       }
+      const setClauses = [];
+      const args = [];
+      const updatableFields = { ...fieldsToUpdate };
+      if (updatableFields.content) {
+        const newContent = {
+          ...existingMessage.content,
+          ...updatableFields.content,
+          // Deep merge metadata if it exists on both
+          ...existingMessage.content?.metadata && updatableFields.content.metadata ? {
+            metadata: {
+              ...existingMessage.content.metadata,
+              ...updatableFields.content.metadata
+            }
+          } : {}
+        };
+        setClauses.push(`${utils.parseSqlIdentifier("content", "column name")} = ?`);
+        args.push(JSON.stringify(newContent));
+        delete updatableFields.content;
+      }
+      for (const key in updatableFields) {
+        if (Object.prototype.hasOwnProperty.call(updatableFields, key)) {
+          const dbKey = columnMapping[key] || key;
+          setClauses.push(`${utils.parseSqlIdentifier(dbKey, "column name")} = ?`);
+          let value = updatableFields[key];
+          if (typeof value === "object" && value !== null) {
+            value = JSON.stringify(value);
+          }
+          args.push(value);
+        }
+      }
+      if (setClauses.length === 0) continue;
+      args.push(id);
+      const sql = `UPDATE ${storage.TABLE_MESSAGES} SET ${setClauses.join(", ")} WHERE id = ?`;
+      batchStatements.push({ sql, args });
+    }
+    if (batchStatements.length === 0) {
+      return existingMessages;
+    }
+    const now = (/* @__PURE__ */ new Date()).toISOString();
+    for (const threadId of threadIdsToUpdate) {
+      if (threadId) {
+        batchStatements.push({
+          sql: `UPDATE ${storage.TABLE_THREADS} SET updatedAt = ? WHERE id = ?`,
+          args: [now, threadId]
+        });
       }
     }
+    await this.#client.batch(batchStatements, "write");
+    const updatedResult = await this.#client.execute({ sql: selectSql, args: messageIds });
+    return updatedResult.rows.map((row) => this.parseRow(row));
+  }
+  async deleteMessages(messageIds) {
+    if (!messageIds || messageIds.length === 0) {
+      return;
+    }
     try {
+      const BATCH_SIZE = 100;
+      const threadIds = /* @__PURE__ */ new Set();
+      const tx = await this.#client.transaction("write");
+      try {
+        for (let i = 0; i < messageIds.length; i += BATCH_SIZE) {
+          const batch = messageIds.slice(i, i + BATCH_SIZE);
+          const placeholders = batch.map(() => "?").join(",");
+          const result = await tx.execute({
+            sql: `SELECT DISTINCT thread_id FROM "${storage.TABLE_MESSAGES}" WHERE id IN (${placeholders})`,
+            args: batch
+          });
+          result.rows?.forEach((row) => {
+            if (row.thread_id) threadIds.add(row.thread_id);
+          });
+          await tx.execute({
+            sql: `DELETE FROM "${storage.TABLE_MESSAGES}" WHERE id IN (${placeholders})`,
+            args: batch
+          });
+        }
+        if (threadIds.size > 0) {
+          const now = (/* @__PURE__ */ new Date()).toISOString();
+          for (const threadId of threadIds) {
+            await tx.execute({
+              sql: `UPDATE "${storage.TABLE_THREADS}" SET "updatedAt" = ? WHERE id = ?`,
+              args: [now, threadId]
+            });
+          }
+        }
+        await tx.commit();
+      } catch (error) {
+        await tx.rollback();
+        throw error;
+      }
     } catch (error$1) {
       throw new error.MastraError(
         {
+          id: storage.createStorageErrorId("LIBSQL", "DELETE_MESSAGES", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY,
+          details: { messageIds: messageIds.join(", ") }
         },
         error$1
       );
     }
+  }
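saveMessages upserts in batches of 50 and then bumps the parent thread's updatedAt, while deleteMessages runs inside a write transaction and touches the affected threads the same way. Continuing the sketch above, with hypothetical ids and an assumed content shape:

// Illustrative sketch only; the message content structure is an assumption.
await memory.saveMessages({
  messages: [{
    id: 'msg-1',
    threadId: 'thread-1',
    resourceId: 'user-1',
    role: 'user',
    content: { parts: [{ type: 'text', text: 'hello' }] },
    createdAt: new Date(),
  }],
});
await memory.deleteMessages(['msg-1']);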
+  async getResourceById({ resourceId }) {
+    const result = await this.#db.select({
+      tableName: storage.TABLE_RESOURCES,
+      keys: { id: resourceId }
+    });
+    if (!result) {
+      return null;
+    }
+    return {
+      ...result,
+      // Ensure workingMemory is always returned as a string, even if auto-parsed as JSON
+      workingMemory: result.workingMemory && typeof result.workingMemory === "object" ? JSON.stringify(result.workingMemory) : result.workingMemory,
+      metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata,
+      createdAt: new Date(result.createdAt),
+      updatedAt: new Date(result.updatedAt)
+    };
+  }
+  async saveResource({ resource }) {
+    await this.#db.insert({
+      tableName: storage.TABLE_RESOURCES,
+      record: {
+        ...resource,
+        metadata: JSON.stringify(resource.metadata)
+      }
+    });
+    return resource;
+  }
+  async updateResource({
+    resourceId,
+    workingMemory,
+    metadata
+  }) {
+    const existingResource = await this.getResourceById({ resourceId });
+    if (!existingResource) {
+      const newResource = {
+        id: resourceId,
+        workingMemory,
+        metadata: metadata || {},
+        createdAt: /* @__PURE__ */ new Date(),
+        updatedAt: /* @__PURE__ */ new Date()
       };
+      return this.saveResource({ resource: newResource });
+    }
+    const updatedResource = {
+      ...existingResource,
+      workingMemory: workingMemory !== void 0 ? workingMemory : existingResource.workingMemory,
+      metadata: {
+        ...existingResource.metadata,
+        ...metadata
+      },
+      updatedAt: /* @__PURE__ */ new Date()
+    };
+    const updates = [];
+    const values = [];
+    if (workingMemory !== void 0) {
+      updates.push("workingMemory = ?");
+      values.push(workingMemory);
+    }
+    if (metadata) {
+      updates.push("metadata = ?");
+      values.push(JSON.stringify(updatedResource.metadata));
     }
+    updates.push("updatedAt = ?");
+    values.push(updatedResource.updatedAt.toISOString());
+    values.push(resourceId);
+    await this.#client.execute({
+      sql: `UPDATE ${storage.TABLE_RESOURCES} SET ${updates.join(", ")} WHERE id = ?`,
+      args: values
+    });
+    return updatedResource;
+  }
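updateResource behaves as an upsert: a missing resource is created, otherwise workingMemory is replaced and metadata is shallow-merged with the stored metadata. Sketch with hypothetical values:

// Illustrative sketch only; resource id, working memory, and metadata are hypothetical.
await memory.updateResource({
  resourceId: 'user-1',
  workingMemory: '## Preferences\n- tone: formal',
  metadata: { plan: 'pro' },
});
const resource = await memory.getResourceById({ resourceId: 'user-1' });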
2577
|
+
async getThreadById({ threadId }) {
|
|
1944
2578
|
try {
|
|
1945
|
-
const
|
|
1946
|
-
tableName: storage.
|
|
1947
|
-
|
|
1948
|
-
sql: actualWhereClause,
|
|
1949
|
-
args: whereClause.args
|
|
1950
|
-
},
|
|
1951
|
-
orderBy,
|
|
1952
|
-
offset: page * perPage,
|
|
1953
|
-
limit: perPage
|
|
2579
|
+
const result = await this.#db.select({
|
|
2580
|
+
tableName: storage.TABLE_THREADS,
|
|
2581
|
+
keys: { id: threadId }
|
|
1954
2582
|
});
|
|
2583
|
+
if (!result) {
|
|
2584
|
+
return null;
|
|
2585
|
+
}
|
|
1955
2586
|
return {
|
|
1956
|
-
|
|
1957
|
-
|
|
1958
|
-
|
|
1959
|
-
|
|
1960
|
-
hasMore: spans.length === perPage
|
|
1961
|
-
},
|
|
1962
|
-
spans: spans.map((span) => transformFromSqlRow({ tableName: storage.TABLE_SPANS, sqlRow: span }))
|
|
2587
|
+
...result,
|
|
2588
|
+
metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata,
|
|
2589
|
+
createdAt: new Date(result.createdAt),
|
|
2590
|
+
updatedAt: new Date(result.updatedAt)
|
|
1963
2591
|
};
|
|
1964
2592
|
} catch (error$1) {
|
|
1965
2593
|
throw new error.MastraError(
|
|
1966
2594
|
{
|
|
1967
|
-
id: "
|
|
2595
|
+
id: storage.createStorageErrorId("LIBSQL", "GET_THREAD_BY_ID", "FAILED"),
|
|
1968
2596
|
domain: error.ErrorDomain.STORAGE,
|
|
1969
|
-
category: error.ErrorCategory.
|
|
2597
|
+
category: error.ErrorCategory.THIRD_PARTY,
|
|
2598
|
+
details: { threadId }
|
|
1970
2599
|
},
|
|
1971
2600
|
error$1
|
|
1972
2601
|
);
|
|
1973
2602
|
}
|
|
1974
2603
|
}
|
|
1975
|
-
async
|
|
1976
|
-
|
|
1977
|
-
|
|
1978
|
-
return this.operations.batchInsert({
|
|
1979
|
-
tableName: storage.TABLE_SPANS,
|
|
1980
|
-
records: args.records.map((record) => ({
|
|
1981
|
-
...record,
|
|
1982
|
-
createdAt: now,
|
|
1983
|
-
updatedAt: now
|
|
1984
|
-
}))
|
|
1985
|
-
});
|
|
1986
|
-
} catch (error$1) {
|
|
2604
|
+
async listThreadsByResourceId(args) {
|
|
2605
|
+
const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
|
|
2606
|
+
if (page < 0) {
|
|
1987
2607
|
throw new error.MastraError(
|
|
1988
2608
|
{
|
|
1989
|
-
id: "
|
|
2609
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_THREADS_BY_RESOURCE_ID", "INVALID_PAGE"),
|
|
1990
2610
|
domain: error.ErrorDomain.STORAGE,
|
|
1991
|
-
category: error.ErrorCategory.USER
|
|
2611
|
+
category: error.ErrorCategory.USER,
|
|
2612
|
+
details: { page }
|
|
1992
2613
|
},
|
|
1993
|
-
|
|
2614
|
+
new Error("page must be >= 0")
|
|
1994
2615
|
);
|
|
1995
2616
|
}
|
|
1996
|
-
|
|
1997
|
-
|
|
2617
|
+
const perPage = storage.normalizePerPage(perPageInput, 100);
|
|
2618
|
+
const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
|
|
2619
|
+
const { field, direction } = this.parseOrderBy(orderBy);
|
|
1998
2620
|
try {
|
|
1999
|
-
|
|
2000
|
-
|
|
2001
|
-
|
|
2002
|
-
|
|
2003
|
-
|
|
2004
|
-
|
|
2621
|
+
const baseQuery = `FROM ${storage.TABLE_THREADS} WHERE resourceId = ?`;
|
|
2622
|
+
const queryParams = [resourceId];
|
|
2623
|
+
const mapRowToStorageThreadType = (row) => ({
|
|
2624
|
+
id: row.id,
|
|
2625
|
+
resourceId: row.resourceId,
|
|
2626
|
+
title: row.title,
|
|
2627
|
+
createdAt: new Date(row.createdAt),
|
|
2628
|
+
// Convert string to Date
|
|
2629
|
+
updatedAt: new Date(row.updatedAt),
|
|
2630
|
+
// Convert string to Date
|
|
2631
|
+
metadata: typeof row.metadata === "string" ? JSON.parse(row.metadata) : row.metadata
|
|
2632
|
+
});
|
|
2633
|
+
const countResult = await this.#client.execute({
|
|
2634
|
+
sql: `SELECT COUNT(*) as count ${baseQuery}`,
|
|
2635
|
+
args: queryParams
|
|
2636
|
+
});
|
|
2637
|
+
const total = Number(countResult.rows?.[0]?.count ?? 0);
|
|
2638
|
+
if (total === 0) {
|
|
2639
|
+
return {
|
|
2640
|
+
threads: [],
|
|
2641
|
+
total: 0,
|
|
2642
|
+
page,
|
|
2643
|
+
perPage: perPageForResponse,
|
|
2644
|
+
hasMore: false
|
|
2645
|
+
};
|
|
2646
|
+
}
|
|
2647
|
+
const limitValue = perPageInput === false ? total : perPage;
|
|
2648
|
+
const dataResult = await this.#client.execute({
|
|
2649
|
+
sql: `SELECT * ${baseQuery} ORDER BY "${field}" ${direction} LIMIT ? OFFSET ?`,
|
|
2650
|
+
args: [...queryParams, limitValue, offset]
|
|
2005
2651
|
});
|
|
2652
|
+
const threads = (dataResult.rows || []).map(mapRowToStorageThreadType);
|
|
2653
|
+
return {
|
|
2654
|
+
threads,
|
|
2655
|
+
total,
|
|
2656
|
+
page,
|
|
2657
|
+
perPage: perPageForResponse,
|
|
2658
|
+
hasMore: perPageInput === false ? false : offset + perPage < total
|
|
2659
|
+
};
|
|
2006
2660
|
} catch (error$1) {
|
|
2007
|
-
|
|
2661
|
+
const mastraError = new error.MastraError(
|
|
2008
2662
|
{
|
|
2009
|
-
id: "
|
|
2663
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_THREADS_BY_RESOURCE_ID", "FAILED"),
|
|
2010
2664
|
domain: error.ErrorDomain.STORAGE,
|
|
2011
|
-
category: error.ErrorCategory.
|
|
2665
|
+
category: error.ErrorCategory.THIRD_PARTY,
|
|
2666
|
+
details: { resourceId }
|
|
2012
2667
|
},
|
|
2013
2668
|
error$1
|
|
2014
2669
|
);
|
|
2670
|
+
this.logger?.trackException?.(mastraError);
|
|
2671
|
+
this.logger?.error?.(mastraError.toString());
|
|
2672
|
+
return {
|
|
2673
|
+
threads: [],
|
|
2674
|
+
total: 0,
|
|
2675
|
+
page,
|
|
2676
|
+
perPage: perPageForResponse,
|
|
2677
|
+
hasMore: false
|
|
2678
|
+
};
|
|
2015
2679
|
}
|
|
2016
2680
|
}
|
|
2017
|
-
async
|
|
2681
|
+
async saveThread({ thread }) {
|
|
2018
2682
|
try {
|
|
2019
|
-
|
|
2020
|
-
|
|
2021
|
-
|
|
2022
|
-
|
|
2683
|
+
await this.#db.insert({
|
|
2684
|
+
tableName: storage.TABLE_THREADS,
|
|
2685
|
+
record: {
|
|
2686
|
+
...thread,
|
|
2687
|
+
metadata: JSON.stringify(thread.metadata)
|
|
2688
|
+
}
|
|
2023
2689
|
});
|
|
2690
|
+
return thread;
|
|
2024
2691
|
} catch (error$1) {
|
|
2025
|
-
|
|
2692
|
+
const mastraError = new error.MastraError(
|
|
2026
2693
|
{
|
|
2027
|
-
id: "
|
|
2694
|
+
id: storage.createStorageErrorId("LIBSQL", "SAVE_THREAD", "FAILED"),
|
|
2028
2695
|
domain: error.ErrorDomain.STORAGE,
|
|
2029
|
-
category: error.ErrorCategory.
|
|
2696
|
+
category: error.ErrorCategory.THIRD_PARTY,
|
|
2697
|
+
details: { threadId: thread.id }
|
|
2030
2698
|
},
|
|
2031
2699
|
error$1
|
|
2032
2700
|
);
|
|
2701
|
+
this.logger?.trackException?.(mastraError);
|
|
2702
|
+
this.logger?.error?.(mastraError.toString());
|
|
2703
|
+
throw mastraError;
|
|
2033
2704
|
}
|
|
2034
2705
|
}
|
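Thread helpers in one sketch: saveThread inserts with stringified metadata, listThreadsByResourceId pages results, and updateThread (shown just below) merges metadata into the stored thread. Ids and titles are hypothetical:

// Illustrative sketch only.
await memory.saveThread({
  thread: { id: 'thread-1', resourceId: 'user-1', title: 'Support chat', metadata: {}, createdAt: new Date(), updatedAt: new Date() },
});
await memory.updateThread({ id: 'thread-1', title: 'Support chat (resolved)', metadata: { status: 'closed' } });
const { threads } = await memory.listThreadsByResourceId({ resourceId: 'user-1', page: 0, perPage: 10 });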
-  /**
-   * Maximum number of retries for write operations if an SQLITE_BUSY error occurs.
-   * @default 5
-   */
-  maxRetries;
-  /**
-   * Initial backoff time in milliseconds for retrying write operations on SQLITE_BUSY.
-   * The backoff time will double with each retry (exponential backoff).
-   * @default 100
-   */
-  initialBackoffMs;
-  constructor({
-    client,
-    maxRetries,
-    initialBackoffMs
-  }) {
-    super();
-    this.client = client;
-    this.maxRetries = maxRetries ?? 5;
-    this.initialBackoffMs = initialBackoffMs ?? 100;
-  }
-  async hasColumn(table, column) {
-    const result = await this.client.execute({
-      sql: `PRAGMA table_info(${table})`
-    });
-    return (await result.rows)?.some((row) => row.name === column);
-  }
-  getCreateTableSQL(tableName, schema) {
-    const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
-    const columns = Object.entries(schema).map(([name, col]) => {
-      const parsedColumnName = utils.parseSqlIdentifier(name, "column name");
-      let type = col.type.toUpperCase();
-      if (type === "TEXT") type = "TEXT";
-      if (type === "TIMESTAMP") type = "TEXT";
-      const nullable = col.nullable ? "" : "NOT NULL";
-      const primaryKey = col.primaryKey ? "PRIMARY KEY" : "";
-      return `${parsedColumnName} ${type} ${nullable} ${primaryKey}`.trim();
-    });
-    if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
-      const stmnt = `CREATE TABLE IF NOT EXISTS ${parsedTableName} (
-        ${columns.join(",\n")},
-        PRIMARY KEY (workflow_name, run_id)
-      )`;
-      return stmnt;
-    }
-    if (tableName === storage.TABLE_SPANS) {
-      const stmnt = `CREATE TABLE IF NOT EXISTS ${parsedTableName} (
-        ${columns.join(",\n")},
-        PRIMARY KEY (traceId, spanId)
-      )`;
-      return stmnt;
-    }
-    return `CREATE TABLE IF NOT EXISTS ${parsedTableName} (${columns.join(", ")})`;
-  }
-  async createTable({
-    tableName,
-    schema
+  async updateThread({
+    id,
+    title,
+    metadata
   }) {
+    const thread = await this.getThreadById({ threadId: id });
+    if (!thread) {
+      throw new error.MastraError({
+        id: storage.createStorageErrorId("LIBSQL", "UPDATE_THREAD", "NOT_FOUND"),
+        domain: error.ErrorDomain.STORAGE,
+        category: error.ErrorCategory.USER,
+        text: `Thread ${id} not found`,
+        details: {
+          status: 404,
+          threadId: id
+        }
+      });
+    }
+    const updatedThread = {
+      ...thread,
+      title,
+      metadata: {
+        ...thread.metadata,
+        ...metadata
+      }
+    };
     try {
+      await this.#client.execute({
+        sql: `UPDATE ${storage.TABLE_THREADS} SET title = ?, metadata = ? WHERE id = ?`,
+        args: [title, JSON.stringify(updatedThread.metadata), id]
+      });
+      return updatedThread;
     } catch (error$1) {
       throw new error.MastraError(
         {
+          id: storage.createStorageErrorId("LIBSQL", "UPDATE_THREAD", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
           category: error.ErrorCategory.THIRD_PARTY,
+          text: `Failed to update thread ${id}`,
+          details: { threadId: id }
         },
         error$1
-      );
-    }
-  }
-  getSqlType(type) {
-    switch (type) {
-      case "bigint":
-        return "INTEGER";
-      // SQLite uses INTEGER for all integer sizes
-      case "jsonb":
-        return "TEXT";
-      // Store JSON as TEXT in SQLite
-      default:
-        return super.getSqlType(type);
-    }
-  }
-  async doInsert({
-    tableName,
-    record
-  }) {
-    await this.client.execute(
-      prepareStatement({
-        tableName,
-        record
-      })
-    );
-  }
-  insert(args) {
-    const executeWriteOperationWithRetry = createExecuteWriteOperationWithRetry({
-      logger: this.logger,
-      maxRetries: this.maxRetries,
-      initialBackoffMs: this.initialBackoffMs
-    });
-    return executeWriteOperationWithRetry(() => this.doInsert(args), `insert into table ${args.tableName}`);
-  }
-  async load({ tableName, keys }) {
-    const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
-    const parsedKeys = Object.keys(keys).map((key) => utils.parseSqlIdentifier(key, "column name"));
-    const conditions = parsedKeys.map((key) => `${key} = ?`).join(" AND ");
-    const values = Object.values(keys);
-    const result = await this.client.execute({
-      sql: `SELECT * FROM ${parsedTableName} WHERE ${conditions} ORDER BY createdAt DESC LIMIT 1`,
|
|
2152
|
-
args: values
|
|
2153
|
-
});
|
|
2154
|
-
if (!result.rows || result.rows.length === 0) {
|
|
2155
|
-
return null;
|
|
2156
|
-
}
|
|
2157
|
-
const row = result.rows[0];
|
|
2158
|
-
const parsed = Object.fromEntries(
|
|
2159
|
-
Object.entries(row || {}).map(([k, v]) => {
|
|
2160
|
-
try {
|
|
2161
|
-
return [k, typeof v === "string" ? v.startsWith("{") || v.startsWith("[") ? JSON.parse(v) : v : v];
|
|
2162
|
-
} catch {
|
|
2163
|
-
return [k, v];
|
|
2164
|
-
}
|
|
2165
|
-
})
|
|
2166
|
-
);
|
|
2167
|
-
return parsed;
|
|
2168
|
-
}
|
|
2169
|
-
async loadMany({
|
|
2170
|
-
tableName,
|
|
2171
|
-
whereClause,
|
|
2172
|
-
orderBy,
|
|
2173
|
-
offset,
|
|
2174
|
-
limit,
|
|
2175
|
-
args
|
|
2176
|
-
}) {
|
|
2177
|
-
const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
|
|
2178
|
-
let statement = `SELECT * FROM ${parsedTableName}`;
|
|
2179
|
-
if (whereClause?.sql) {
|
|
2180
|
-
statement += `${whereClause.sql}`;
|
|
2181
|
-
}
|
|
2182
|
-
if (orderBy) {
|
|
2183
|
-
statement += ` ORDER BY ${orderBy}`;
|
|
2184
|
-
}
|
|
2185
|
-
if (limit) {
|
|
2186
|
-
statement += ` LIMIT ${limit}`;
|
|
2187
|
-
}
|
|
2188
|
-
if (offset) {
|
|
2189
|
-
statement += ` OFFSET ${offset}`;
|
|
2748
|
+
);
|
|
2190
2749
|
}
|
|
2191
|
-
const result = await this.client.execute({
|
|
2192
|
-
sql: statement,
|
|
2193
|
-
args: [...whereClause?.args ?? [], ...args ?? []]
|
|
2194
|
-
});
|
|
2195
|
-
return result.rows;
|
|
2196
2750
|
}
|
|
2197
|
-
async
|
|
2198
|
-
|
|
2199
|
-
|
|
2200
|
-
|
|
2201
|
-
|
|
2202
|
-
|
|
2203
|
-
|
|
2204
|
-
|
|
2205
|
-
|
|
2206
|
-
|
|
2207
|
-
|
|
2208
|
-
|
|
2751
|
+
async deleteThread({ threadId }) {
|
|
2752
|
+
try {
|
|
2753
|
+
await this.#client.execute({
|
|
2754
|
+
sql: `DELETE FROM ${storage.TABLE_MESSAGES} WHERE thread_id = ?`,
|
|
2755
|
+
args: [threadId]
|
|
2756
|
+
});
|
|
2757
|
+
await this.#client.execute({
|
|
2758
|
+
sql: `DELETE FROM ${storage.TABLE_THREADS} WHERE id = ?`,
|
|
2759
|
+
args: [threadId]
|
|
2760
|
+
});
|
|
2761
|
+
} catch (error$1) {
|
|
2762
|
+
throw new error.MastraError(
|
|
2763
|
+
{
|
|
2764
|
+
id: storage.createStorageErrorId("LIBSQL", "DELETE_THREAD", "FAILED"),
|
|
2765
|
+
domain: error.ErrorDomain.STORAGE,
|
|
2766
|
+
category: error.ErrorCategory.THIRD_PARTY,
|
|
2767
|
+
details: { threadId }
|
|
2768
|
+
},
|
|
2769
|
+
error$1
|
|
2770
|
+
);
|
|
2209
2771
|
}
|
|
2210
|
-
return result.rows[0]?.count ?? 0;
|
|
2211
2772
|
}
|
|
2212
|
-
|
|
2213
|
-
|
|
2214
|
-
|
|
2215
|
-
|
|
2216
|
-
|
|
2217
|
-
|
|
2218
|
-
|
|
2773
|
+
};
|
|
2774
|
+
var ObservabilityLibSQL = class extends storage.ObservabilityStorage {
|
|
2775
|
+
#db;
|
|
2776
|
+
constructor(config) {
|
|
2777
|
+
super();
|
|
2778
|
+
const client = resolveClient(config);
|
|
2779
|
+
this.#db = new LibSQLDB({ client, maxRetries: config.maxRetries, initialBackoffMs: config.initialBackoffMs });
|
|
2219
2780
|
}
|
|
2220
|
-
async
|
|
2221
|
-
tableName,
|
|
2222
|
-
keys,
|
|
2223
|
-
data
|
|
2224
|
-
}) {
|
|
2225
|
-
await this.client.execute(prepareUpdateStatement({ tableName, updates: data, keys }));
|
|
2781
|
+
async init() {
|
|
2782
|
+
await this.#db.createTable({ tableName: storage.TABLE_SPANS, schema: storage.SPAN_SCHEMA });
|
|
2226
2783
|
}
|
|
2227
|
-
async
|
|
2228
|
-
tableName
|
|
2229
|
-
records
|
|
2230
|
-
}) {
|
|
2231
|
-
if (records.length === 0) return;
|
|
2232
|
-
const batchStatements = records.map((r) => prepareStatement({ tableName, record: r }));
|
|
2233
|
-
await this.client.batch(batchStatements, "write");
|
|
2784
|
+
async dangerouslyClearAll() {
|
|
2785
|
+
await this.#db.deleteData({ tableName: storage.TABLE_SPANS });
|
|
2234
2786
|
}
|
|
2235
|
-
|
|
2236
|
-
|
|
2237
|
-
|
|
2238
|
-
|
|
2239
|
-
|
|
2240
|
-
|
|
2241
|
-
|
|
2242
|
-
|
|
2243
|
-
|
|
2244
|
-
|
|
2787
|
+
get tracingStrategy() {
|
|
2788
|
+
return {
|
|
2789
|
+
preferred: "batch-with-updates",
|
|
2790
|
+
supported: ["batch-with-updates", "insert-only"]
|
|
2791
|
+
};
|
|
2792
|
+
}
|
|
2793
|
+
async createSpan(args) {
|
|
2794
|
+
const { span } = args;
|
|
2795
|
+
try {
|
|
2796
|
+
const startedAt = span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt;
|
|
2797
|
+
const endedAt = span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt;
|
|
2798
|
+
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
2799
|
+
const record = {
|
|
2800
|
+
...span,
|
|
2801
|
+
startedAt,
|
|
2802
|
+
endedAt,
|
|
2803
|
+
createdAt: now,
|
|
2804
|
+
updatedAt: now
|
|
2805
|
+
};
|
|
2806
|
+
return this.#db.insert({ tableName: storage.TABLE_SPANS, record });
|
|
2807
|
+
} catch (error$1) {
|
|
2245
2808
|
throw new error.MastraError(
|
|
2246
2809
|
{
|
|
2247
|
-
id: "
|
|
2810
|
+
id: storage.createStorageErrorId("LIBSQL", "CREATE_SPAN", "FAILED"),
|
|
2248
2811
|
domain: error.ErrorDomain.STORAGE,
|
|
2249
|
-
category: error.ErrorCategory.
|
|
2812
|
+
category: error.ErrorCategory.USER,
|
|
2250
2813
|
details: {
|
|
2251
|
-
|
|
2814
|
+
spanId: span.spanId,
|
|
2815
|
+
traceId: span.traceId,
|
|
2816
|
+
spanType: span.spanType,
|
|
2817
|
+
name: span.name
|
|
2252
2818
|
}
|
|
2253
2819
|
},
|
|
2254
2820
|
error$1
|
|
2255
2821
|
);
|
|
2256
|
-
}
|
|
2822
|
+
}
|
|
2257
2823
|
}
|
|
2258
|
-
|
|
2259
|
-
|
|
2260
|
-
|
|
2261
|
-
|
|
2262
|
-
|
|
2263
|
-
|
|
2264
|
-
|
|
2265
|
-
|
|
2266
|
-
|
|
2267
|
-
|
|
2268
|
-
|
|
2269
|
-
|
|
2270
|
-
|
|
2824
|
+
async getSpan(args) {
|
|
2825
|
+
const { traceId, spanId } = args;
|
|
2826
|
+
try {
|
|
2827
|
+
const rows = await this.#db.selectMany({
|
|
2828
|
+
tableName: storage.TABLE_SPANS,
|
|
2829
|
+
whereClause: { sql: " WHERE traceId = ? AND spanId = ?", args: [traceId, spanId] },
|
|
2830
|
+
limit: 1
|
|
2831
|
+
});
|
|
2832
|
+
if (!rows || rows.length === 0) {
|
|
2833
|
+
return null;
|
|
2834
|
+
}
|
|
2835
|
+
return {
|
|
2836
|
+
span: transformFromSqlRow({ tableName: storage.TABLE_SPANS, sqlRow: rows[0] })
|
|
2837
|
+
};
|
|
2838
|
+
} catch (error$1) {
|
|
2271
2839
|
throw new error.MastraError(
|
|
2272
2840
|
{
|
|
2273
|
-
id: "
|
|
2841
|
+
id: storage.createStorageErrorId("LIBSQL", "GET_SPAN", "FAILED"),
|
|
2274
2842
|
domain: error.ErrorDomain.STORAGE,
|
|
2275
|
-
category: error.ErrorCategory.
|
|
2276
|
-
details: {
|
|
2277
|
-
tableName: args.tableName
|
|
2278
|
-
}
|
|
2843
|
+
category: error.ErrorCategory.USER,
|
|
2844
|
+
details: { traceId, spanId }
|
|
2279
2845
|
},
|
|
2280
2846
|
error$1
|
|
2281
2847
|
);
|
|
2282
|
-
}
|
|
2848
|
+
}
|
|
2283
2849
|
}
|
|
2284
|
-
|
|
2285
|
-
|
|
2286
|
-
|
|
2287
|
-
|
|
2288
|
-
|
|
2289
|
-
|
|
2290
|
-
|
|
2291
|
-
|
|
2292
|
-
|
|
2293
|
-
|
|
2294
|
-
|
|
2295
|
-
|
|
2296
|
-
|
|
2297
|
-
}
|
|
2298
|
-
)
|
|
2299
|
-
|
|
2850
|
+
async getRootSpan(args) {
|
|
2851
|
+
const { traceId } = args;
|
|
2852
|
+
try {
|
|
2853
|
+
const rows = await this.#db.selectMany({
|
|
2854
|
+
tableName: storage.TABLE_SPANS,
|
|
2855
|
+
whereClause: { sql: " WHERE traceId = ? AND parentSpanId IS NULL", args: [traceId] },
|
|
2856
|
+
limit: 1
|
|
2857
|
+
});
|
|
2858
|
+
if (!rows || rows.length === 0) {
|
|
2859
|
+
return null;
|
|
2860
|
+
}
|
|
2861
|
+
return {
|
|
2862
|
+
span: transformFromSqlRow({ tableName: storage.TABLE_SPANS, sqlRow: rows[0] })
|
|
2863
|
+
};
|
|
2864
|
+
} catch (error$1) {
|
|
2865
|
+
throw new error.MastraError(
|
|
2866
|
+
{
|
|
2867
|
+
id: storage.createStorageErrorId("LIBSQL", "GET_ROOT_SPAN", "FAILED"),
|
|
2868
|
+
domain: error.ErrorDomain.STORAGE,
|
|
2869
|
+
category: error.ErrorCategory.USER,
|
|
2870
|
+
details: { traceId }
|
|
2871
|
+
},
|
|
2872
|
+
error$1
|
|
2873
|
+
);
|
|
2874
|
+
}
|
|
2300
2875
|
}
|
|
2301
|
-
|
|
2302
|
-
|
|
2303
|
-
|
|
2304
|
-
|
|
2305
|
-
|
|
2306
|
-
|
|
2307
|
-
|
|
2308
|
-
|
|
2309
|
-
|
|
2310
|
-
|
|
2311
|
-
|
|
2312
|
-
|
|
2313
|
-
|
|
2876
|
+
async getTrace(args) {
|
|
2877
|
+
const { traceId } = args;
|
|
2878
|
+
try {
|
|
2879
|
+
const spans = await this.#db.selectMany({
|
|
2880
|
+
tableName: storage.TABLE_SPANS,
|
|
2881
|
+
whereClause: { sql: " WHERE traceId = ?", args: [traceId] },
|
|
2882
|
+
orderBy: "startedAt ASC"
|
|
2883
|
+
});
|
|
2884
|
+
if (!spans || spans.length === 0) {
|
|
2885
|
+
return null;
|
|
2886
|
+
}
|
|
2887
|
+
return {
|
|
2888
|
+
traceId,
|
|
2889
|
+
spans: spans.map((span) => transformFromSqlRow({ tableName: storage.TABLE_SPANS, sqlRow: span }))
|
|
2890
|
+
};
|
|
2891
|
+
} catch (error$1) {
|
|
2314
2892
|
throw new error.MastraError(
|
|
2315
2893
|
{
|
|
2316
|
-
id: "
|
|
2894
|
+
id: storage.createStorageErrorId("LIBSQL", "GET_TRACE", "FAILED"),
|
|
2317
2895
|
domain: error.ErrorDomain.STORAGE,
|
|
2318
|
-
category: error.ErrorCategory.
|
|
2896
|
+
category: error.ErrorCategory.USER,
|
|
2319
2897
|
details: {
|
|
2320
|
-
|
|
2898
|
+
traceId
|
|
2321
2899
|
}
|
|
2322
2900
|
},
|
|
2323
2901
|
error$1
|
|
2324
2902
|
);
|
|
2325
|
-
}
|
|
2903
|
+
}
|
|
2326
2904
|
}
|
|
2327
|
-
|
|
2328
|
-
|
|
2329
|
-
|
|
2330
|
-
|
|
2331
|
-
|
|
2332
|
-
|
|
2333
|
-
|
|
2334
|
-
|
|
2335
|
-
|
|
2336
|
-
|
|
2337
|
-
|
|
2338
|
-
|
|
2339
|
-
|
|
2340
|
-
|
|
2341
|
-
|
|
2905
|
+
async updateSpan(args) {
|
|
2906
|
+
const { traceId, spanId, updates } = args;
|
|
2907
|
+
try {
|
|
2908
|
+
const data = { ...updates };
|
|
2909
|
+
if (data.endedAt instanceof Date) {
|
|
2910
|
+
data.endedAt = data.endedAt.toISOString();
|
|
2911
|
+
}
|
|
2912
|
+
if (data.startedAt instanceof Date) {
|
|
2913
|
+
data.startedAt = data.startedAt.toISOString();
|
|
2914
|
+
}
|
|
2915
|
+
data.updatedAt = (/* @__PURE__ */ new Date()).toISOString();
|
|
2916
|
+
await this.#db.update({
|
|
2917
|
+
tableName: storage.TABLE_SPANS,
|
|
2918
|
+
keys: { spanId, traceId },
|
|
2919
|
+
data
|
|
2920
|
+
});
|
|
2921
|
+
} catch (error$1) {
|
|
2922
|
+
throw new error.MastraError(
|
|
2923
|
+
{
|
|
2924
|
+
id: storage.createStorageErrorId("LIBSQL", "UPDATE_SPAN", "FAILED"),
|
|
2925
|
+
domain: error.ErrorDomain.STORAGE,
|
|
2926
|
+
category: error.ErrorCategory.USER,
|
|
2927
|
+
details: {
|
|
2928
|
+
spanId,
|
|
2929
|
+
traceId
|
|
2930
|
+
}
|
|
2931
|
+
},
|
|
2932
|
+
error$1
|
|
2933
|
+
);
|
|
2934
|
+
}
|
|
2342
2935
|
}
|
|
2343
|
-
|
|
2344
|
-
|
|
2345
|
-
|
|
2346
|
-
|
|
2347
|
-
* @param ifNotExists Array of column names to add if they don't exist
|
|
2348
|
-
*/
|
|
2349
|
-
async alterTable({
|
|
2350
|
-
tableName,
|
|
2351
|
-
schema,
|
|
2352
|
-
ifNotExists
|
|
2353
|
-
}) {
|
|
2354
|
-
const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
|
|
2936
|
+
async listTraces(args) {
|
|
2937
|
+
const { filters, pagination, orderBy } = storage.listTracesArgsSchema.parse(args);
|
|
2938
|
+
const { page, perPage } = pagination;
|
|
2939
|
+
const tableName = utils.parseSqlIdentifier(storage.TABLE_SPANS, "table name");
|
|
2355
2940
|
try {
|
|
2356
|
-
const
|
|
2357
|
-
const
|
|
2358
|
-
|
|
2359
|
-
|
|
2360
|
-
|
|
2361
|
-
|
|
2362
|
-
|
|
2363
|
-
|
|
2364
|
-
|
|
2365
|
-
|
|
2366
|
-
|
|
2367
|
-
|
|
2941
|
+
const conditions = ["parentSpanId IS NULL"];
|
|
2942
|
+
const queryArgs = [];
|
|
2943
|
+
if (filters) {
|
|
2944
|
+
if (filters.startedAt?.start) {
|
|
2945
|
+
conditions.push(`startedAt >= ?`);
|
|
2946
|
+
queryArgs.push(filters.startedAt.start.toISOString());
|
|
2947
|
+
}
|
|
2948
|
+
if (filters.startedAt?.end) {
|
|
2949
|
+
conditions.push(`startedAt <= ?`);
|
|
2950
|
+
queryArgs.push(filters.startedAt.end.toISOString());
|
|
2951
|
+
}
|
|
2952
|
+
if (filters.endedAt?.start) {
|
|
2953
|
+
conditions.push(`endedAt >= ?`);
|
|
2954
|
+
queryArgs.push(filters.endedAt.start.toISOString());
|
|
2955
|
+
}
|
|
2956
|
+
if (filters.endedAt?.end) {
|
|
2957
|
+
conditions.push(`endedAt <= ?`);
|
|
2958
|
+
queryArgs.push(filters.endedAt.end.toISOString());
|
|
2959
|
+
}
|
|
2960
|
+
if (filters.spanType !== void 0) {
|
|
2961
|
+
conditions.push(`spanType = ?`);
|
|
2962
|
+
queryArgs.push(filters.spanType);
|
|
2963
|
+
}
|
|
2964
|
+
if (filters.entityType !== void 0) {
|
|
2965
|
+
conditions.push(`entityType = ?`);
|
|
2966
|
+
queryArgs.push(filters.entityType);
|
|
2967
|
+
}
|
|
2968
|
+
if (filters.entityId !== void 0) {
|
|
2969
|
+
conditions.push(`entityId = ?`);
|
|
2970
|
+
queryArgs.push(filters.entityId);
|
|
2971
|
+
}
|
|
2972
|
+
if (filters.entityName !== void 0) {
|
|
2973
|
+
conditions.push(`entityName = ?`);
|
|
2974
|
+
queryArgs.push(filters.entityName);
|
|
2975
|
+
}
|
|
2976
|
+
if (filters.userId !== void 0) {
|
|
2977
|
+
conditions.push(`userId = ?`);
|
|
2978
|
+
queryArgs.push(filters.userId);
|
|
2979
|
+
}
|
|
2980
|
+
if (filters.organizationId !== void 0) {
|
|
2981
|
+
conditions.push(`organizationId = ?`);
|
|
2982
|
+
queryArgs.push(filters.organizationId);
|
|
2983
|
+
}
|
|
2984
|
+
if (filters.resourceId !== void 0) {
|
|
2985
|
+
conditions.push(`resourceId = ?`);
|
|
2986
|
+
queryArgs.push(filters.resourceId);
|
|
2987
|
+
}
|
|
2988
|
+
if (filters.runId !== void 0) {
|
|
2989
|
+
conditions.push(`runId = ?`);
|
|
2990
|
+
queryArgs.push(filters.runId);
|
|
2991
|
+
}
|
|
2992
|
+
if (filters.sessionId !== void 0) {
|
|
2993
|
+
conditions.push(`sessionId = ?`);
|
|
2994
|
+
queryArgs.push(filters.sessionId);
|
|
2995
|
+
}
|
|
2996
|
+
if (filters.threadId !== void 0) {
|
|
2997
|
+
conditions.push(`threadId = ?`);
|
|
2998
|
+
queryArgs.push(filters.threadId);
|
|
2999
|
+
}
|
|
3000
|
+
if (filters.requestId !== void 0) {
|
|
3001
|
+
conditions.push(`requestId = ?`);
|
|
3002
|
+
queryArgs.push(filters.requestId);
|
|
3003
|
+
}
|
|
3004
|
+
if (filters.environment !== void 0) {
|
|
3005
|
+
conditions.push(`environment = ?`);
|
|
3006
|
+
queryArgs.push(filters.environment);
|
|
3007
|
+
}
|
|
3008
|
+
if (filters.source !== void 0) {
|
|
3009
|
+
conditions.push(`source = ?`);
|
|
3010
|
+
queryArgs.push(filters.source);
|
|
3011
|
+
}
|
|
3012
|
+
if (filters.serviceName !== void 0) {
|
|
3013
|
+
conditions.push(`serviceName = ?`);
|
|
3014
|
+
queryArgs.push(filters.serviceName);
|
|
3015
|
+
}
|
|
3016
|
+
if (filters.scope != null) {
|
|
3017
|
+
for (const [key, value] of Object.entries(filters.scope)) {
|
|
3018
|
+
if (!/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(key)) {
|
|
3019
|
+
throw new error.MastraError({
|
|
3020
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_TRACES", "INVALID_FILTER_KEY"),
|
|
3021
|
+
domain: error.ErrorDomain.STORAGE,
|
|
3022
|
+
category: error.ErrorCategory.USER,
|
|
3023
|
+
details: { key }
|
|
3024
|
+
});
|
|
3025
|
+
}
|
|
3026
|
+
conditions.push(`json_extract(scope, '$.${key}') = ?`);
|
|
3027
|
+
queryArgs.push(typeof value === "string" ? value : JSON.stringify(value));
|
|
3028
|
+
}
|
|
3029
|
+
}
|
|
3030
|
+
if (filters.metadata != null) {
|
|
3031
|
+
for (const [key, value] of Object.entries(filters.metadata)) {
|
|
3032
|
+
if (!/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(key)) {
|
|
3033
|
+
throw new error.MastraError({
|
|
3034
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_TRACES", "INVALID_FILTER_KEY"),
|
|
3035
|
+
domain: error.ErrorDomain.STORAGE,
|
|
3036
|
+
category: error.ErrorCategory.USER,
|
|
3037
|
+
details: { key }
|
|
3038
|
+
});
|
|
3039
|
+
}
|
|
3040
|
+
conditions.push(`json_extract(metadata, '$.${key}') = ?`);
|
|
3041
|
+
queryArgs.push(typeof value === "string" ? value : JSON.stringify(value));
|
|
3042
|
+
}
|
|
3043
|
+
}
|
|
3044
|
+
if (filters.tags != null && filters.tags.length > 0) {
|
|
3045
|
+
for (const tag of filters.tags) {
|
|
3046
|
+
conditions.push(`EXISTS (SELECT 1 FROM json_each(${tableName}.tags) WHERE value = ?)`);
|
|
3047
|
+
queryArgs.push(tag);
|
|
3048
|
+
}
|
|
3049
|
+
}
|
|
3050
|
+
if (filters.status !== void 0) {
|
|
3051
|
+
switch (filters.status) {
|
|
3052
|
+
case storage.TraceStatus.ERROR:
|
|
3053
|
+
conditions.push(`error IS NOT NULL`);
|
|
3054
|
+
break;
|
|
3055
|
+
case storage.TraceStatus.RUNNING:
|
|
3056
|
+
conditions.push(`endedAt IS NULL AND error IS NULL`);
|
|
3057
|
+
break;
|
|
3058
|
+
case storage.TraceStatus.SUCCESS:
|
|
3059
|
+
conditions.push(`endedAt IS NOT NULL AND error IS NULL`);
|
|
3060
|
+
break;
|
|
3061
|
+
}
|
|
2368
3062
|
}
|
|
3063
|
+
if (filters.hasChildError !== void 0) {
|
|
3064
|
+
if (filters.hasChildError) {
|
|
3065
|
+
conditions.push(`EXISTS (
|
|
3066
|
+
SELECT 1 FROM ${tableName} c
|
|
3067
|
+
WHERE c.traceId = ${tableName}.traceId AND c.error IS NOT NULL
|
|
3068
|
+
)`);
|
|
3069
|
+
} else {
|
|
3070
|
+
conditions.push(`NOT EXISTS (
|
|
3071
|
+
SELECT 1 FROM ${tableName} c
|
|
3072
|
+
WHERE c.traceId = ${tableName}.traceId AND c.error IS NOT NULL
|
|
3073
|
+
)`);
|
|
3074
|
+
}
|
|
3075
|
+
}
|
|
3076
|
+
}
|
|
3077
|
+
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
|
|
3078
|
+
const sortField = orderBy.field;
|
|
3079
|
+
const sortDirection = orderBy.direction;
|
|
3080
|
+
let orderByClause;
|
|
3081
|
+
if (sortField === "endedAt") {
|
|
3082
|
+
orderByClause = sortDirection === "DESC" ? `CASE WHEN ${sortField} IS NULL THEN 0 ELSE 1 END, ${sortField} DESC` : `CASE WHEN ${sortField} IS NULL THEN 1 ELSE 0 END, ${sortField} ASC`;
|
|
3083
|
+
} else {
|
|
3084
|
+
orderByClause = `${sortField} ${sortDirection}`;
|
|
3085
|
+
}
|
|
3086
|
+
const count = await this.#db.selectTotalCount({
|
|
3087
|
+
tableName: storage.TABLE_SPANS,
|
|
3088
|
+
whereClause: { sql: whereClause, args: queryArgs }
|
|
3089
|
+
});
|
|
3090
|
+
if (count === 0) {
|
|
3091
|
+
return {
|
|
3092
|
+
pagination: {
|
|
3093
|
+
total: 0,
|
|
3094
|
+
page,
|
|
3095
|
+
perPage,
|
|
3096
|
+
hasMore: false
|
|
3097
|
+
},
|
|
3098
|
+
spans: []
|
|
3099
|
+
};
|
|
2369
3100
|
}
|
|
3101
|
+
const spans = await this.#db.selectMany({
|
|
3102
|
+
tableName: storage.TABLE_SPANS,
|
|
3103
|
+
whereClause: { sql: whereClause, args: queryArgs },
|
|
3104
|
+
orderBy: orderByClause,
|
|
3105
|
+
offset: page * perPage,
|
|
3106
|
+
limit: perPage
|
|
3107
|
+
});
|
|
3108
|
+
return {
|
|
3109
|
+
pagination: {
|
|
3110
|
+
total: count,
|
|
3111
|
+
page,
|
|
3112
|
+
perPage,
|
|
3113
|
+
hasMore: (page + 1) * perPage < count
|
|
3114
|
+
},
|
|
3115
|
+
spans: spans.map((span) => transformFromSqlRow({ tableName: storage.TABLE_SPANS, sqlRow: span }))
|
|
3116
|
+
};
|
|
2370
3117
|
} catch (error$1) {
|
|
2371
3118
|
throw new error.MastraError(
|
|
2372
3119
|
{
|
|
2373
|
-
id: "
|
|
3120
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_TRACES", "FAILED"),
|
|
2374
3121
|
domain: error.ErrorDomain.STORAGE,
|
|
2375
|
-
category: error.ErrorCategory.
|
|
2376
|
-
details: {
|
|
2377
|
-
tableName
|
|
2378
|
-
}
|
|
3122
|
+
category: error.ErrorCategory.USER
|
|
2379
3123
|
},
|
|
2380
3124
|
error$1
|
|
2381
3125
|
);
|
|
2382
3126
|
}
|
|
2383
3127
|
}
|
|
2384
|
-
async
|
|
2385
|
-
const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
|
|
3128
|
+
async batchCreateSpans(args) {
|
|
2386
3129
|
try {
|
|
2387
|
-
|
|
2388
|
-
|
|
2389
|
-
|
|
3130
|
+
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
3131
|
+
const records = args.records.map((record) => {
|
|
3132
|
+
const startedAt = record.startedAt instanceof Date ? record.startedAt.toISOString() : record.startedAt;
|
|
3133
|
+
const endedAt = record.endedAt instanceof Date ? record.endedAt.toISOString() : record.endedAt;
|
|
3134
|
+
return {
|
|
3135
|
+
...record,
|
|
3136
|
+
startedAt,
|
|
3137
|
+
endedAt,
|
|
3138
|
+
createdAt: now,
|
|
3139
|
+
updatedAt: now
|
|
3140
|
+
};
|
|
3141
|
+
});
|
|
3142
|
+
return this.#db.batchInsert({
|
|
3143
|
+
tableName: storage.TABLE_SPANS,
|
|
3144
|
+
records
|
|
3145
|
+
});
|
|
3146
|
+
} catch (error$1) {
|
|
3147
|
+
throw new error.MastraError(
|
|
2390
3148
|
{
|
|
2391
|
-
id: "
|
|
3149
|
+
id: storage.createStorageErrorId("LIBSQL", "BATCH_CREATE_SPANS", "FAILED"),
|
|
2392
3150
|
domain: error.ErrorDomain.STORAGE,
|
|
2393
|
-
category: error.ErrorCategory.
|
|
2394
|
-
|
|
2395
|
-
|
|
3151
|
+
category: error.ErrorCategory.USER
|
|
3152
|
+
},
|
|
3153
|
+
error$1
|
|
3154
|
+
);
|
|
3155
|
+
}
|
|
3156
|
+
}
|
|
3157
|
+
async batchUpdateSpans(args) {
|
|
3158
|
+
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
3159
|
+
try {
|
|
3160
|
+
return this.#db.batchUpdate({
|
|
3161
|
+
tableName: storage.TABLE_SPANS,
|
|
3162
|
+
updates: args.records.map((record) => {
|
|
3163
|
+
const data = { ...record.updates };
|
|
3164
|
+
if (data.endedAt instanceof Date) {
|
|
3165
|
+
data.endedAt = data.endedAt.toISOString();
|
|
3166
|
+
}
|
|
3167
|
+
if (data.startedAt instanceof Date) {
|
|
3168
|
+
data.startedAt = data.startedAt.toISOString();
|
|
2396
3169
|
}
|
|
3170
|
+
data.updatedAt = now;
|
|
3171
|
+
return {
|
|
3172
|
+
keys: { spanId: record.spanId, traceId: record.traceId },
|
|
3173
|
+
data
|
|
3174
|
+
};
|
|
3175
|
+
})
|
|
3176
|
+
});
|
|
3177
|
+
} catch (error$1) {
|
|
3178
|
+
throw new error.MastraError(
|
|
3179
|
+
{
|
|
3180
|
+
id: storage.createStorageErrorId("LIBSQL", "BATCH_UPDATE_SPANS", "FAILED"),
|
|
3181
|
+
domain: error.ErrorDomain.STORAGE,
|
|
3182
|
+
category: error.ErrorCategory.USER
|
|
2397
3183
|
},
|
|
2398
|
-
|
|
3184
|
+
error$1
|
|
2399
3185
|
);
|
|
2400
|
-
this.logger?.trackException?.(mastraError);
|
|
2401
|
-
this.logger?.error?.(mastraError.toString());
|
|
2402
3186
|
}
|
|
2403
3187
|
}
|
|
2404
|
-
async
|
|
2405
|
-
const parsedTableName = utils.parseSqlIdentifier(tableName, "table name");
|
|
3188
|
+
async batchDeleteTraces(args) {
|
|
2406
3189
|
try {
|
|
2407
|
-
|
|
2408
|
-
|
|
3190
|
+
const keys = args.traceIds.map((traceId) => ({ traceId }));
|
|
3191
|
+
return this.#db.batchDelete({
|
|
3192
|
+
tableName: storage.TABLE_SPANS,
|
|
3193
|
+
keys
|
|
3194
|
+
});
|
|
3195
|
+
} catch (error$1) {
|
|
2409
3196
|
throw new error.MastraError(
|
|
2410
3197
|
{
|
|
2411
|
-
id: "
|
|
3198
|
+
id: storage.createStorageErrorId("LIBSQL", "BATCH_DELETE_TRACES", "FAILED"),
|
|
2412
3199
|
domain: error.ErrorDomain.STORAGE,
|
|
2413
|
-
category: error.ErrorCategory.
|
|
2414
|
-
details: {
|
|
2415
|
-
tableName
|
|
2416
|
-
}
|
|
3200
|
+
category: error.ErrorCategory.USER
|
|
2417
3201
|
},
|
|
2418
|
-
|
|
3202
|
+
error$1
|
|
2419
3203
|
);
|
|
2420
3204
|
}
|
|
2421
3205
|
}
|
|
2422
3206
|
};
|
|
2423
3207
|
var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
2424
|
-
|
|
2425
|
-
client;
|
|
2426
|
-
constructor(
|
|
3208
|
+
#db;
|
|
3209
|
+
#client;
|
|
3210
|
+
constructor(config) {
|
|
2427
3211
|
super();
|
|
2428
|
-
|
|
2429
|
-
this
|
|
3212
|
+
const client = resolveClient(config);
|
|
3213
|
+
this.#client = client;
|
|
3214
|
+
this.#db = new LibSQLDB({ client, maxRetries: config.maxRetries, initialBackoffMs: config.initialBackoffMs });
|
|
3215
|
+
}
|
|
3216
|
+
async init() {
|
|
3217
|
+
await this.#db.createTable({ tableName: storage.TABLE_SCORERS, schema: storage.SCORERS_SCHEMA });
|
|
3218
|
+
await this.#db.alterTable({
|
|
3219
|
+
tableName: storage.TABLE_SCORERS,
|
|
3220
|
+
schema: storage.SCORERS_SCHEMA,
|
|
3221
|
+
ifNotExists: ["spanId", "requestContext"]
|
|
3222
|
+
});
|
|
3223
|
+
}
|
|
3224
|
+
async dangerouslyClearAll() {
|
|
3225
|
+
await this.#db.deleteData({ tableName: storage.TABLE_SCORERS });
|
|
2430
3226
|
}
|
|
2431
3227
|
async listScoresByRunId({
|
|
2432
3228
|
runId,
|
|
@@ -2434,7 +3230,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2434
3230
|
}) {
|
|
2435
3231
|
try {
|
|
2436
3232
|
const { page, perPage: perPageInput } = pagination;
|
|
2437
|
-
const countResult = await this
|
|
3233
|
+
const countResult = await this.#client.execute({
|
|
2438
3234
|
sql: `SELECT COUNT(*) as count FROM ${storage.TABLE_SCORERS} WHERE runId = ?`,
|
|
2439
3235
|
args: [runId]
|
|
2440
3236
|
});
|
|
@@ -2454,7 +3250,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2454
3250
|
const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
|
|
2455
3251
|
const limitValue = perPageInput === false ? total : perPage;
|
|
2456
3252
|
const end = perPageInput === false ? total : start + perPage;
|
|
2457
|
-
const result = await this
|
|
3253
|
+
const result = await this.#client.execute({
|
|
2458
3254
|
sql: `SELECT * FROM ${storage.TABLE_SCORERS} WHERE runId = ? ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
|
|
2459
3255
|
args: [runId, limitValue, start]
|
|
2460
3256
|
});
|
|
@@ -2471,7 +3267,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2471
3267
|
} catch (error$1) {
|
|
2472
3268
|
throw new error.MastraError(
|
|
2473
3269
|
{
|
|
2474
|
-
id: "
|
|
3270
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_SCORES_BY_RUN_ID", "FAILED"),
|
|
2475
3271
|
domain: error.ErrorDomain.STORAGE,
|
|
2476
3272
|
category: error.ErrorCategory.THIRD_PARTY
|
|
2477
3273
|
},
|
|
@@ -2507,7 +3303,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2507
3303
|
queryParams.push(source);
|
|
2508
3304
|
}
|
|
2509
3305
|
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
|
|
2510
|
-
const countResult = await this
|
|
3306
|
+
const countResult = await this.#client.execute({
|
|
2511
3307
|
sql: `SELECT COUNT(*) as count FROM ${storage.TABLE_SCORERS} ${whereClause}`,
|
|
2512
3308
|
args: queryParams
|
|
2513
3309
|
});
|
|
@@ -2527,7 +3323,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2527
3323
|
const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
|
|
2528
3324
|
const limitValue = perPageInput === false ? total : perPage;
|
|
2529
3325
|
const end = perPageInput === false ? total : start + perPage;
|
|
2530
|
-
const result = await this
|
|
3326
|
+
const result = await this.#client.execute({
|
|
2531
3327
|
sql: `SELECT * FROM ${storage.TABLE_SCORERS} ${whereClause} ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
|
|
2532
3328
|
args: [...queryParams, limitValue, start]
|
|
2533
3329
|
});
|
|
@@ -2544,7 +3340,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2544
3340
|
} catch (error$1) {
|
|
2545
3341
|
throw new error.MastraError(
|
|
2546
3342
|
{
|
|
2547
|
-
id: "
|
|
3343
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_SCORES_BY_SCORER_ID", "FAILED"),
|
|
2548
3344
|
domain: error.ErrorDomain.STORAGE,
|
|
2549
3345
|
category: error.ErrorCategory.THIRD_PARTY
|
|
2550
3346
|
},
|
|
@@ -2552,48 +3348,17 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2552
3348
|
);
|
|
2553
3349
|
}
|
|
2554
3350
|
}
|
|
3351
|
+
/**
|
|
3352
|
+
* LibSQL-specific score row transformation.
|
|
3353
|
+
* Maps additionalLLMContext column to additionalContext field.
|
|
3354
|
+
*/
|
|
2555
3355
|
transformScoreRow(row) {
|
|
2556
|
-
|
|
2557
|
-
|
|
2558
|
-
|
|
2559
|
-
const additionalLLMContextValue = row.additionalLLMContext ? storage.safelyParseJSON(row.additionalLLMContext) : null;
|
|
2560
|
-
const requestContextValue = row.requestContext ? storage.safelyParseJSON(row.requestContext) : null;
|
|
2561
|
-
const metadataValue = row.metadata ? storage.safelyParseJSON(row.metadata) : null;
|
|
2562
|
-
const entityValue = row.entity ? storage.safelyParseJSON(row.entity) : null;
|
|
2563
|
-
const preprocessStepResultValue = row.preprocessStepResult ? storage.safelyParseJSON(row.preprocessStepResult) : null;
|
|
2564
|
-
const analyzeStepResultValue = row.analyzeStepResult ? storage.safelyParseJSON(row.analyzeStepResult) : null;
|
|
2565
|
-
return {
|
|
2566
|
-
id: row.id,
|
|
2567
|
-
traceId: row.traceId,
|
|
2568
|
-
spanId: row.spanId,
|
|
2569
|
-
runId: row.runId,
|
|
2570
|
-
scorer: scorerValue,
|
|
2571
|
-
score: row.score,
|
|
2572
|
-
reason: row.reason,
|
|
2573
|
-
preprocessStepResult: preprocessStepResultValue,
|
|
2574
|
-
analyzeStepResult: analyzeStepResultValue,
|
|
2575
|
-
analyzePrompt: row.analyzePrompt,
|
|
2576
|
-
preprocessPrompt: row.preprocessPrompt,
|
|
2577
|
-
generateScorePrompt: row.generateScorePrompt,
|
|
2578
|
-
generateReasonPrompt: row.generateReasonPrompt,
|
|
2579
|
-
metadata: metadataValue,
|
|
2580
|
-
input: inputValue,
|
|
2581
|
-
output: outputValue,
|
|
2582
|
-
additionalContext: additionalLLMContextValue,
|
|
2583
|
-
requestContext: requestContextValue,
|
|
2584
|
-
entityType: row.entityType,
|
|
2585
|
-
entity: entityValue,
|
|
2586
|
-
entityId: row.entityId,
|
|
2587
|
-
scorerId: row.scorerId,
|
|
2588
|
-
source: row.source,
|
|
2589
|
-
resourceId: row.resourceId,
|
|
2590
|
-
threadId: row.threadId,
|
|
2591
|
-
createdAt: row.createdAt,
|
|
2592
|
-
updatedAt: row.updatedAt
|
|
2593
|
-
};
|
|
3356
|
+
return storage.transformScoreRow(row, {
|
|
3357
|
+
fieldMappings: { additionalContext: "additionalLLMContext" }
|
|
3358
|
+
});
|
|
2594
3359
|
}
|
|
2595
3360
|
async getScoreById({ id }) {
|
|
2596
|
-
const result = await this
|
|
3361
|
+
const result = await this.#client.execute({
|
|
2597
3362
|
sql: `SELECT * FROM ${storage.TABLE_SCORERS} WHERE id = ?`,
|
|
2598
3363
|
args: [id]
|
|
2599
3364
|
});
|
|
@@ -2606,15 +3371,15 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2606
3371
|
} catch (error$1) {
|
|
2607
3372
|
throw new error.MastraError(
|
|
2608
3373
|
{
|
|
2609
|
-
id: "
|
|
3374
|
+
id: storage.createStorageErrorId("LIBSQL", "SAVE_SCORE", "VALIDATION_FAILED"),
|
|
2610
3375
|
domain: error.ErrorDomain.STORAGE,
|
|
2611
3376
|
category: error.ErrorCategory.USER,
|
|
2612
3377
|
details: {
|
|
2613
|
-
scorer: score.scorer.id,
|
|
2614
|
-
entityId: score.entityId,
|
|
2615
|
-
entityType: score.entityType,
|
|
2616
|
-
traceId: score.traceId
|
|
2617
|
-
spanId: score.spanId
|
|
3378
|
+
scorer: typeof score.scorer?.id === "string" ? score.scorer.id : String(score.scorer?.id ?? "unknown"),
|
|
3379
|
+
entityId: score.entityId ?? "unknown",
|
|
3380
|
+
entityType: score.entityType ?? "unknown",
|
|
3381
|
+
traceId: score.traceId ?? "",
|
|
3382
|
+
spanId: score.spanId ?? ""
|
|
2618
3383
|
}
|
|
2619
3384
|
},
|
|
2620
3385
|
error$1
|
|
@@ -2622,21 +3387,21 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2622
3387
|
}
|
|
2623
3388
|
try {
|
|
2624
3389
|
const id = crypto.randomUUID();
|
|
2625
|
-
|
|
3390
|
+
const now = /* @__PURE__ */ new Date();
|
|
3391
|
+
await this.#db.insert({
|
|
2626
3392
|
tableName: storage.TABLE_SCORERS,
|
|
2627
3393
|
record: {
|
|
3394
|
+
...parsedScore,
|
|
2628
3395
|
id,
|
|
2629
|
-
createdAt:
|
|
2630
|
-
updatedAt:
|
|
2631
|
-
...parsedScore
|
|
3396
|
+
createdAt: now.toISOString(),
|
|
3397
|
+
updatedAt: now.toISOString()
|
|
2632
3398
|
}
|
|
2633
3399
|
});
|
|
2634
|
-
|
|
2635
|
-
return { score: scoreFromDb };
|
|
3400
|
+
return { score: { ...parsedScore, id, createdAt: now, updatedAt: now } };
|
|
2636
3401
|
} catch (error$1) {
|
|
2637
3402
|
throw new error.MastraError(
|
|
2638
3403
|
{
|
|
2639
|
-
id: "
|
|
3404
|
+
id: storage.createStorageErrorId("LIBSQL", "SAVE_SCORE", "FAILED"),
|
|
2640
3405
|
domain: error.ErrorDomain.STORAGE,
|
|
2641
3406
|
category: error.ErrorCategory.THIRD_PARTY
|
|
2642
3407
|
},
|
|
@@ -2651,7 +3416,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2651
3416
|
}) {
|
|
2652
3417
|
try {
|
|
2653
3418
|
const { page, perPage: perPageInput } = pagination;
|
|
2654
|
-
const countResult = await this
|
|
3419
|
+
const countResult = await this.#client.execute({
|
|
2655
3420
|
sql: `SELECT COUNT(*) as count FROM ${storage.TABLE_SCORERS} WHERE entityId = ? AND entityType = ?`,
|
|
2656
3421
|
args: [entityId, entityType]
|
|
2657
3422
|
});
|
|
@@ -2671,7 +3436,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2671
3436
|
const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
|
|
2672
3437
|
const limitValue = perPageInput === false ? total : perPage;
|
|
2673
3438
|
const end = perPageInput === false ? total : start + perPage;
|
|
2674
|
-
const result = await this
|
|
3439
|
+
const result = await this.#client.execute({
|
|
2675
3440
|
sql: `SELECT * FROM ${storage.TABLE_SCORERS} WHERE entityId = ? AND entityType = ? ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
|
|
2676
3441
|
args: [entityId, entityType, limitValue, start]
|
|
2677
3442
|
});
|
|
@@ -2688,7 +3453,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2688
3453
|
} catch (error$1) {
|
|
2689
3454
|
throw new error.MastraError(
|
|
2690
3455
|
{
|
|
2691
|
-
id: "
|
|
3456
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_SCORES_BY_ENTITY_ID", "FAILED"),
|
|
2692
3457
|
domain: error.ErrorDomain.STORAGE,
|
|
2693
3458
|
category: error.ErrorCategory.THIRD_PARTY
|
|
2694
3459
|
},
|
|
@@ -2705,14 +3470,14 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2705
3470
|
const { page, perPage: perPageInput } = pagination;
|
|
2706
3471
|
const perPage = storage.normalizePerPage(perPageInput, 100);
|
|
2707
3472
|
const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
|
|
2708
|
-
const countSQLResult = await this
|
|
3473
|
+
const countSQLResult = await this.#client.execute({
|
|
2709
3474
|
sql: `SELECT COUNT(*) as count FROM ${storage.TABLE_SCORERS} WHERE traceId = ? AND spanId = ?`,
|
|
2710
3475
|
args: [traceId, spanId]
|
|
2711
3476
|
});
|
|
2712
3477
|
const total = Number(countSQLResult.rows?.[0]?.count ?? 0);
|
|
2713
3478
|
const limitValue = perPageInput === false ? total : perPage;
|
|
2714
3479
|
const end = perPageInput === false ? total : start + perPage;
|
|
2715
|
-
const result = await this
|
|
3480
|
+
const result = await this.#client.execute({
|
|
2716
3481
|
sql: `SELECT * FROM ${storage.TABLE_SCORERS} WHERE traceId = ? AND spanId = ? ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
|
|
2717
3482
|
args: [traceId, spanId, limitValue, start]
|
|
2718
3483
|
});
|
|
@@ -2729,7 +3494,7 @@ var ScoresLibSQL = class extends storage.ScoresStorage {
|
|
|
2729
3494
|
} catch (error$1) {
|
|
2730
3495
|
throw new error.MastraError(
|
|
2731
3496
|
{
|
|
2732
|
-
id: "
|
|
3497
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_SCORES_BY_SPAN", "FAILED"),
|
|
2733
3498
|
domain: error.ErrorDomain.STORAGE,
|
|
2734
3499
|
category: error.ErrorCategory.THIRD_PARTY
|
|
2735
3500
|
},
|
|
@@ -2757,37 +3522,49 @@ function parseWorkflowRun(row) {
|
|
|
2757
3522
|
};
|
|
2758
3523
|
}
|
|
2759
3524
|
var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
2760
|
-
|
|
2761
|
-
client;
|
|
2762
|
-
|
|
2763
|
-
|
|
2764
|
-
constructor({
|
|
2765
|
-
operations,
|
|
2766
|
-
client,
|
|
2767
|
-
maxRetries = 5,
|
|
2768
|
-
initialBackoffMs = 500
|
|
2769
|
-
}) {
|
|
3525
|
+
#db;
|
|
3526
|
+
#client;
|
|
3527
|
+
executeWithRetry;
|
|
3528
|
+
constructor(config) {
|
|
2770
3529
|
super();
|
|
2771
|
-
|
|
2772
|
-
|
|
2773
|
-
|
|
2774
|
-
this
|
|
3530
|
+
const client = resolveClient(config);
|
|
3531
|
+
const maxRetries = config.maxRetries ?? 5;
|
|
3532
|
+
const initialBackoffMs = config.initialBackoffMs ?? 500;
|
|
3533
|
+
this.#client = client;
|
|
3534
|
+
this.#db = new LibSQLDB({ client, maxRetries, initialBackoffMs });
|
|
3535
|
+
this.executeWithRetry = createExecuteWriteOperationWithRetry({
|
|
3536
|
+
logger: this.logger,
|
|
3537
|
+
maxRetries,
|
|
3538
|
+
initialBackoffMs
|
|
3539
|
+
});
|
|
2775
3540
|
this.setupPragmaSettings().catch(
|
|
2776
3541
|
(err) => this.logger.warn("LibSQL Workflows: Failed to setup PRAGMA settings.", err)
|
|
2777
3542
|
);
|
|
2778
3543
|
}
|
|
3544
|
+
async init() {
|
|
3545
|
+
const schema = storage.TABLE_SCHEMAS[storage.TABLE_WORKFLOW_SNAPSHOT];
|
|
3546
|
+
await this.#db.createTable({ tableName: storage.TABLE_WORKFLOW_SNAPSHOT, schema });
|
|
3547
|
+
await this.#db.alterTable({
|
|
3548
|
+
tableName: storage.TABLE_WORKFLOW_SNAPSHOT,
|
|
3549
|
+
schema,
|
|
3550
|
+
ifNotExists: ["resourceId"]
|
|
3551
|
+
});
|
|
3552
|
+
}
|
|
3553
|
+
async dangerouslyClearAll() {
|
|
3554
|
+
await this.#db.deleteData({ tableName: storage.TABLE_WORKFLOW_SNAPSHOT });
|
|
3555
|
+
}
|
|
2779
3556
|
async setupPragmaSettings() {
|
|
2780
3557
|
try {
|
|
2781
|
-
await this
|
|
3558
|
+
await this.#client.execute("PRAGMA busy_timeout = 10000;");
|
|
2782
3559
|
this.logger.debug("LibSQL Workflows: PRAGMA busy_timeout=10000 set.");
|
|
2783
3560
|
try {
|
|
2784
|
-
await this
|
|
3561
|
+
await this.#client.execute("PRAGMA journal_mode = WAL;");
|
|
2785
3562
|
this.logger.debug("LibSQL Workflows: PRAGMA journal_mode=WAL set.");
|
|
2786
3563
|
} catch {
|
|
2787
3564
|
this.logger.debug("LibSQL Workflows: WAL mode not supported, using default journal mode.");
|
|
2788
3565
|
}
|
|
2789
3566
|
try {
|
|
2790
|
-
await this
|
|
3567
|
+
await this.#client.execute("PRAGMA synchronous = NORMAL;");
|
|
2791
3568
|
this.logger.debug("LibSQL Workflows: PRAGMA synchronous=NORMAL set.");
|
|
2792
3569
|
} catch {
|
|
2793
3570
|
this.logger.debug("LibSQL Workflows: Failed to set synchronous mode.");
|
|
@@ -2796,44 +3573,6 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
2796
3573
|
this.logger.warn("LibSQL Workflows: Failed to set PRAGMA settings.", err);
|
|
2797
3574
|
}
|
|
2798
3575
|
}
|
|
2799
|
-
async executeWithRetry(operation) {
|
|
2800
|
-
let attempts = 0;
|
|
2801
|
-
let backoff = this.initialBackoffMs;
|
|
2802
|
-
while (attempts < this.maxRetries) {
|
|
2803
|
-
try {
|
|
2804
|
-
return await operation();
|
|
2805
|
-
} catch (error) {
|
|
2806
|
-
this.logger.debug("LibSQL Workflows: Error caught in retry loop", {
|
|
2807
|
-
errorType: error.constructor.name,
|
|
2808
|
-
errorCode: error.code,
|
|
2809
|
-
errorMessage: error.message,
|
|
2810
|
-
attempts,
|
|
2811
|
-
maxRetries: this.maxRetries
|
|
2812
|
-
});
|
|
2813
|
-
const isLockError = error.code === "SQLITE_BUSY" || error.code === "SQLITE_LOCKED" || error.message?.toLowerCase().includes("database is locked") || error.message?.toLowerCase().includes("database table is locked") || error.message?.toLowerCase().includes("table is locked") || error.constructor.name === "SqliteError" && error.message?.toLowerCase().includes("locked");
|
|
2814
|
-
if (isLockError) {
|
|
2815
|
-
attempts++;
|
|
2816
|
-
if (attempts >= this.maxRetries) {
|
|
2817
|
-
this.logger.error(
|
|
2818
|
-
`LibSQL Workflows: Operation failed after ${this.maxRetries} attempts due to database lock: ${error.message}`,
|
|
2819
|
-
{ error, attempts, maxRetries: this.maxRetries }
|
|
2820
|
-
);
|
|
2821
|
-
throw error;
|
|
2822
|
-
}
|
|
2823
|
-
this.logger.warn(
|
|
2824
|
-
`LibSQL Workflows: Attempt ${attempts} failed due to database lock. Retrying in ${backoff}ms...`,
|
|
2825
|
-
{ errorMessage: error.message, attempts, backoff, maxRetries: this.maxRetries }
|
|
2826
|
-
);
|
|
2827
|
-
await new Promise((resolve) => setTimeout(resolve, backoff));
|
|
2828
|
-
backoff *= 2;
|
|
2829
|
-
} else {
|
|
2830
|
-
this.logger.error("LibSQL Workflows: Non-lock error occurred, not retrying", { error });
|
|
2831
|
-
throw error;
|
|
2832
|
-
}
|
|
2833
|
-
}
|
|
2834
|
-
}
|
|
2835
|
-
throw new Error("LibSQL Workflows: Max retries reached, but no error was re-thrown from the loop.");
|
|
2836
|
-
}
|
|
2837
3576
|
async updateWorkflowResults({
|
|
2838
3577
|
workflowName,
|
|
2839
3578
|
runId,
|
|
@@ -2842,7 +3581,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
2842
3581
|
requestContext
|
|
2843
3582
|
}) {
|
|
2844
3583
|
return this.executeWithRetry(async () => {
|
|
2845
|
-
const tx = await this
|
|
3584
|
+
const tx = await this.#client.transaction("write");
|
|
2846
3585
|
try {
|
|
2847
3586
|
const existingSnapshotResult = await tx.execute({
|
|
2848
3587
|
sql: `SELECT snapshot FROM ${storage.TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
|
|
@@ -2882,7 +3621,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
2882
3621
|
}
|
|
2883
3622
|
throw error;
|
|
2884
3623
|
}
|
|
2885
|
-
});
|
|
3624
|
+
}, "updateWorkflowResults");
|
|
2886
3625
|
}
|
|
2887
3626
|
async updateWorkflowState({
|
|
2888
3627
|
workflowName,
|
|
@@ -2890,7 +3629,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
2890
3629
|
opts
|
|
2891
3630
|
}) {
|
|
2892
3631
|
return this.executeWithRetry(async () => {
|
|
2893
|
-
const tx = await this
|
|
3632
|
+
const tx = await this.#client.transaction("write");
|
|
2894
3633
|
try {
|
|
2895
3634
|
const existingSnapshotResult = await tx.execute({
|
|
2896
3635
|
sql: `SELECT snapshot FROM ${storage.TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
|
|
@@ -2919,24 +3658,27 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
2919
3658
|
}
|
|
2920
3659
|
throw error;
|
|
2921
3660
|
}
|
|
2922
|
-
});
|
|
3661
|
+
}, "updateWorkflowState");
|
|
2923
3662
|
}
|
|
2924
3663
|
async persistWorkflowSnapshot({
|
|
2925
3664
|
workflowName,
|
|
2926
3665
|
runId,
|
|
2927
3666
|
resourceId,
|
|
2928
|
-
snapshot
|
|
3667
|
+
snapshot,
|
|
3668
|
+
createdAt,
|
|
3669
|
+
updatedAt
|
|
2929
3670
|
}) {
|
|
3671
|
+
const now = /* @__PURE__ */ new Date();
|
|
2930
3672
|
const data = {
|
|
2931
3673
|
workflow_name: workflowName,
|
|
2932
3674
|
run_id: runId,
|
|
2933
3675
|
resourceId,
|
|
2934
3676
|
snapshot,
|
|
2935
|
-
createdAt:
|
|
2936
|
-
updatedAt:
|
|
3677
|
+
createdAt: createdAt ?? now,
|
|
3678
|
+
updatedAt: updatedAt ?? now
|
|
2937
3679
|
};
|
|
2938
3680
|
this.logger.debug("Persisting workflow snapshot", { workflowName, runId, data });
|
|
2939
|
-
await this.
|
|
3681
|
+
await this.#db.insert({
|
|
2940
3682
|
tableName: storage.TABLE_WORKFLOW_SNAPSHOT,
|
|
2941
3683
|
record: data
|
|
2942
3684
|
});
|
|
@@ -2946,7 +3688,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
2946
3688
|
runId
|
|
2947
3689
|
}) {
|
|
2948
3690
|
this.logger.debug("Loading workflow snapshot", { workflowName, runId });
|
|
2949
|
-
const d = await this.
|
|
3691
|
+
const d = await this.#db.select({
|
|
2950
3692
|
tableName: storage.TABLE_WORKFLOW_SNAPSHOT,
|
|
2951
3693
|
keys: { workflow_name: workflowName, run_id: runId }
|
|
2952
3694
|
});
|
|
@@ -2968,7 +3710,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
2968
3710
|
}
|
|
2969
3711
|
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
|
|
2970
3712
|
try {
|
|
2971
|
-
const result = await this
|
|
3713
|
+
const result = await this.#client.execute({
|
|
2972
3714
|
sql: `SELECT * FROM ${storage.TABLE_WORKFLOW_SNAPSHOT} ${whereClause} ORDER BY createdAt DESC LIMIT 1`,
|
|
2973
3715
|
args
|
|
2974
3716
|
});
|
|
@@ -2979,7 +3721,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
2979
3721
|
} catch (error$1) {
|
|
2980
3722
|
throw new error.MastraError(
|
|
2981
3723
|
{
|
|
2982
|
-
id: "
|
|
3724
|
+
id: storage.createStorageErrorId("LIBSQL", "GET_WORKFLOW_RUN_BY_ID", "FAILED"),
|
|
2983
3725
|
domain: error.ErrorDomain.STORAGE,
|
|
2984
3726
|
category: error.ErrorCategory.THIRD_PARTY
|
|
2985
3727
|
},
|
|
@@ -2987,6 +3729,26 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
2987
3729
|
);
|
|
2988
3730
|
}
|
|
2989
3731
|
}
|
|
3732
|
+
async deleteWorkflowRunById({ runId, workflowName }) {
|
|
3733
|
+
return this.executeWithRetry(async () => {
|
|
3734
|
+
try {
|
|
3735
|
+
await this.#client.execute({
|
|
3736
|
+
sql: `DELETE FROM ${storage.TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
|
|
3737
|
+
args: [workflowName, runId]
|
|
3738
|
+
});
|
|
3739
|
+
} catch (error$1) {
|
|
3740
|
+
throw new error.MastraError(
|
|
3741
|
+
{
|
|
3742
|
+
id: storage.createStorageErrorId("LIBSQL", "DELETE_WORKFLOW_RUN_BY_ID", "FAILED"),
|
|
3743
|
+
domain: error.ErrorDomain.STORAGE,
|
|
3744
|
+
category: error.ErrorCategory.THIRD_PARTY,
|
|
3745
|
+
details: { runId, workflowName }
|
|
3746
|
+
},
|
|
3747
|
+
error$1
|
|
3748
|
+
);
|
|
3749
|
+
}
|
|
3750
|
+
}, "deleteWorkflowRunById");
|
|
3751
|
+
}
|
|
2990
3752
|
async listWorkflowRuns({
|
|
2991
3753
|
workflowName,
|
|
2992
3754
|
fromDate,
|
|
@@ -3016,7 +3778,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
3016
3778
|
args.push(toDate.toISOString());
|
|
3017
3779
|
}
|
|
3018
3780
|
if (resourceId) {
|
|
3019
|
-
const hasResourceId = await this.
|
|
3781
|
+
const hasResourceId = await this.#db.hasColumn(storage.TABLE_WORKFLOW_SNAPSHOT, "resourceId");
|
|
3020
3782
|
if (hasResourceId) {
|
|
3021
3783
|
conditions.push("resourceId = ?");
|
|
3022
3784
|
args.push(resourceId);
|
|
@@ -3028,7 +3790,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
3028
3790
|
let total = 0;
|
|
3029
3791
|
const usePagination = typeof perPage === "number" && typeof page === "number";
|
|
3030
3792
|
if (usePagination) {
|
|
3031
|
-
const countResult = await this
|
|
3793
|
+
const countResult = await this.#client.execute({
|
|
3032
3794
|
sql: `SELECT COUNT(*) as count FROM ${storage.TABLE_WORKFLOW_SNAPSHOT} ${whereClause}`,
|
|
3033
3795
|
args
|
|
3034
3796
|
});
|
|
@@ -3036,7 +3798,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
3036
3798
|
}
|
|
3037
3799
|
const normalizedPerPage = usePagination ? storage.normalizePerPage(perPage, Number.MAX_SAFE_INTEGER) : 0;
|
|
3038
3800
|
const offset = usePagination ? page * normalizedPerPage : 0;
|
|
3039
|
-
const result = await this
|
|
3801
|
+
const result = await this.#client.execute({
|
|
3040
3802
|
sql: `SELECT * FROM ${storage.TABLE_WORKFLOW_SNAPSHOT} ${whereClause} ORDER BY createdAt DESC${usePagination ? ` LIMIT ? OFFSET ?` : ""}`,
|
|
3041
3803
|
args: usePagination ? [...args, normalizedPerPage, offset] : args
|
|
3042
3804
|
});
|
|
@@ -3045,7 +3807,7 @@ var WorkflowsLibSQL = class extends storage.WorkflowsStorage {
|
|
|
3045
3807
|
} catch (error$1) {
|
|
3046
3808
|
throw new error.MastraError(
|
|
3047
3809
|
{
|
|
3048
|
-
id: "
|
|
3810
|
+
id: storage.createStorageErrorId("LIBSQL", "LIST_WORKFLOW_RUNS", "FAILED"),
|
|
3049
3811
|
domain: error.ErrorDomain.STORAGE,
|
|
3050
3812
|
category: error.ErrorCategory.THIRD_PARTY
|
|
3051
3813
|
},
|
|
@@ -3065,7 +3827,7 @@ var LibSQLStore = class extends storage.MastraStorage {
|
|
|
3065
3827
|
if (!config.id || typeof config.id !== "string" || config.id.trim() === "") {
|
|
3066
3828
|
throw new Error("LibSQLStore: id must be provided and cannot be empty.");
|
|
3067
3829
|
}
|
|
3068
|
-
super({ id: config.id, name: `LibSQLStore
|
|
3830
|
+
super({ id: config.id, name: `LibSQLStore`, disableInit: config.disableInit });
|
|
3069
3831
|
this.maxRetries = config.maxRetries ?? 5;
|
|
3070
3832
|
this.initialBackoffMs = config.initialBackoffMs ?? 100;
|
|
3071
3833
|
if ("url" in config) {
|
|
@@ -3083,207 +3845,24 @@ var LibSQLStore = class extends storage.MastraStorage {
|
|
|
3083
3845
|
} else {
|
|
3084
3846
|
this.client = config.client;
|
|
3085
3847
|
}
|
|
3086
|
-
const
|
|
3848
|
+
const domainConfig = {
|
|
3087
3849
|
client: this.client,
|
|
3088
3850
|
maxRetries: this.maxRetries,
|
|
3089
3851
|
initialBackoffMs: this.initialBackoffMs
|
|
3090
|
-
}
|
|
3091
|
-
const scores = new ScoresLibSQL(
|
|
3092
|
-
const workflows = new WorkflowsLibSQL(
|
|
3093
|
-
const memory = new MemoryLibSQL(
|
|
3094
|
-
const observability = new ObservabilityLibSQL(
|
|
3852
|
+
};
|
|
3853
|
+
const scores = new ScoresLibSQL(domainConfig);
|
|
3854
|
+
const workflows = new WorkflowsLibSQL(domainConfig);
|
|
3855
|
+
const memory = new MemoryLibSQL(domainConfig);
|
|
3856
|
+
const observability = new ObservabilityLibSQL(domainConfig);
|
|
3857
|
+
const agents = new AgentsLibSQL(domainConfig);
|
|
3095
3858
|
this.stores = {
|
|
3096
|
-
operations,
|
|
3097
3859
|
scores,
|
|
3098
3860
|
workflows,
|
|
3099
3861
|
memory,
|
|
3100
|
-
observability
|
|
3101
|
-
|
|
3102
|
-
}
|
|
3103
|
-
get supports() {
|
|
3104
|
-
return {
|
|
3105
|
-
selectByIncludeResourceScope: true,
|
|
3106
|
-
resourceWorkingMemory: true,
|
|
3107
|
-
hasColumn: true,
|
|
3108
|
-
createTable: true,
|
|
3109
|
-
deleteMessages: true,
|
|
3110
|
-
observabilityInstance: true,
|
|
3111
|
-
listScoresBySpan: true
|
|
3862
|
+
observability,
|
|
3863
|
+
agents
|
|
3112
3864
|
};
|
|
3113
3865
|
}
|
|
3114
|
-
async createTable({
|
|
3115
|
-
tableName,
|
|
3116
|
-
schema
|
|
3117
|
-
}) {
|
|
3118
|
-
await this.stores.operations.createTable({ tableName, schema });
|
|
3119
|
-
}
|
|
3120
|
-
/**
|
|
3121
|
-
* Alters table schema to add columns if they don't exist
|
|
3122
|
-
* @param tableName Name of the table
|
|
3123
|
-
* @param schema Schema of the table
|
|
3124
|
-
* @param ifNotExists Array of column names to add if they don't exist
|
|
3125
|
-
*/
|
|
3126
|
-
async alterTable({
|
|
3127
|
-
tableName,
|
|
3128
|
-
schema,
|
|
3129
|
-
ifNotExists
|
|
3130
|
-
}) {
|
|
3131
|
-
await this.stores.operations.alterTable({ tableName, schema, ifNotExists });
|
|
3132
|
-
}
|
|
3133
|
-
async clearTable({ tableName }) {
|
|
3134
|
-
await this.stores.operations.clearTable({ tableName });
|
|
3135
|
-
}
|
|
3136
|
-
async dropTable({ tableName }) {
|
|
3137
|
-
await this.stores.operations.dropTable({ tableName });
|
|
3138
|
-
}
|
|
3139
|
-
insert(args) {
|
|
3140
|
-
return this.stores.operations.insert(args);
|
|
3141
|
-
}
|
|
3142
|
-
batchInsert(args) {
|
|
3143
|
-
return this.stores.operations.batchInsert(args);
|
|
3144
|
-
}
|
|
3145
|
-
async load({ tableName, keys }) {
|
|
3146
|
-
return this.stores.operations.load({ tableName, keys });
|
|
3147
|
-
}
|
|
3148
|
-
async getThreadById({ threadId }) {
|
|
3149
|
-
return this.stores.memory.getThreadById({ threadId });
|
|
3150
|
-
}
|
|
3151
|
-
async saveThread({ thread }) {
|
|
3152
|
-
return this.stores.memory.saveThread({ thread });
|
|
3153
|
-
}
|
|
3154
|
-
async updateThread({
|
|
3155
|
-
id,
|
|
3156
|
-
title,
|
|
3157
|
-
metadata
|
|
3158
|
-
}) {
|
|
3159
|
-
return this.stores.memory.updateThread({ id, title, metadata });
|
|
3160
|
-
}
|
|
3161
|
-
async deleteThread({ threadId }) {
|
|
3162
|
-
return this.stores.memory.deleteThread({ threadId });
|
|
3163
|
-
}
|
|
3164
|
-
async listMessagesById({ messageIds }) {
|
|
3165
|
-
return this.stores.memory.listMessagesById({ messageIds });
|
|
3166
|
-
}
|
|
3167
|
-
async saveMessages(args) {
|
|
3168
|
-
const result = await this.stores.memory.saveMessages({ messages: args.messages });
|
|
3169
|
-
return { messages: result.messages };
|
|
3170
|
-
}
|
|
3171
|
-
async updateMessages({
|
|
3172
|
-
messages
|
|
3173
|
-
}) {
|
|
3174
|
-
return this.stores.memory.updateMessages({ messages });
|
|
3175
|
-
}
|
|
3176
|
-
async deleteMessages(messageIds) {
|
|
3177
|
-
return this.stores.memory.deleteMessages(messageIds);
|
|
3178
|
-
}
|
|
3179
|
-
async getScoreById({ id }) {
|
|
3180
|
-
return this.stores.scores.getScoreById({ id });
|
|
3181
|
-
}
|
|
3182
|
-
async saveScore(score) {
|
|
3183
|
-
return this.stores.scores.saveScore(score);
|
|
3184
|
-
}
|
|
3185
|
-
async listScoresByScorerId({
|
|
3186
|
-
scorerId,
|
|
3187
|
-
entityId,
|
|
3188
|
-
entityType,
|
|
3189
|
-
source,
|
|
3190
|
-
pagination
|
|
3191
|
-
}) {
|
|
3192
|
-
return this.stores.scores.listScoresByScorerId({ scorerId, entityId, entityType, source, pagination });
|
|
3193
|
-
}
|
|
3194
|
-
async listScoresByRunId({
|
|
3195
|
-
runId,
|
|
3196
|
-
pagination
|
|
3197
|
-
}) {
|
|
3198
|
-
return this.stores.scores.listScoresByRunId({ runId, pagination });
|
|
3199
|
-
}
|
|
3200
|
-
async listScoresByEntityId({
|
|
3201
|
-
entityId,
|
|
3202
|
-
entityType,
|
|
3203
|
-
pagination
|
|
3204
|
-
}) {
|
|
3205
|
-
return this.stores.scores.listScoresByEntityId({ entityId, entityType, pagination });
|
|
3206
|
-
}
|
|
3207
|
-
/**
|
|
3208
|
-
* WORKFLOWS
|
|
3209
|
-
*/
|
|
3210
|
-
async updateWorkflowResults({
|
|
3211
|
-
workflowName,
|
|
3212
|
-
runId,
|
|
3213
|
-
stepId,
|
|
3214
|
-
result,
|
|
3215
|
-
requestContext
|
|
3216
|
-
}) {
|
|
3217
|
-
return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, requestContext });
|
|
3218
|
-
}
|
|
3219
|
-
async updateWorkflowState({
|
|
3220
|
-
workflowName,
|
|
3221
|
-
runId,
|
|
3222
|
-
opts
|
|
3223
|
-
}) {
|
|
3224
|
-
return this.stores.workflows.updateWorkflowState({ workflowName, runId, opts });
|
|
3225
|
-
}
|
|
3226
|
-
async persistWorkflowSnapshot({
|
|
3227
|
-
workflowName,
|
|
3228
|
-
runId,
|
|
3229
|
-
resourceId,
|
|
3230
|
-
snapshot
|
|
3231
|
-
}) {
|
|
3232
|
-
return this.stores.workflows.persistWorkflowSnapshot({ workflowName, runId, resourceId, snapshot });
|
|
3233
|
-
}
|
|
3234
|
-
async loadWorkflowSnapshot({
|
|
3235
|
-
workflowName,
|
|
3236
|
-
runId
|
|
3237
|
-
}) {
|
|
3238
|
-
return this.stores.workflows.loadWorkflowSnapshot({ workflowName, runId });
|
|
3239
|
-
}
|
|
3240
|
-
async listWorkflowRuns(args = {}) {
|
|
3241
|
-
return this.stores.workflows.listWorkflowRuns(args);
|
|
3242
|
-
}
|
|
3243
|
-
async getWorkflowRunById({
|
|
3244
|
-
runId,
|
|
3245
|
-
workflowName
|
|
3246
|
-
}) {
|
|
3247
|
-
return this.stores.workflows.getWorkflowRunById({ runId, workflowName });
|
|
3248
|
-
}
|
|
3249
|
-
async getResourceById({ resourceId }) {
|
|
3250
|
-
return this.stores.memory.getResourceById({ resourceId });
|
|
3251
|
-
}
|
|
3252
|
-
async saveResource({ resource }) {
|
|
3253
|
-
return this.stores.memory.saveResource({ resource });
|
|
3254
|
-
}
|
|
3255
|
-
async updateResource({
|
|
3256
|
-
resourceId,
|
|
3257
|
-
workingMemory,
|
|
3258
|
-
metadata
|
|
3259
|
-
}) {
|
|
3260
|
-
return this.stores.memory.updateResource({ resourceId, workingMemory, metadata });
|
|
3261
|
-
}
|
|
3262
|
-
async createSpan(span) {
|
|
3263
|
-
return this.stores.observability.createSpan(span);
|
|
3264
|
-
}
|
|
3265
|
-
async updateSpan(params) {
|
|
3266
|
-
return this.stores.observability.updateSpan(params);
|
|
3267
|
-
}
|
|
3268
|
-
async getTrace(traceId) {
|
|
3269
|
-
return this.stores.observability.getTrace(traceId);
|
|
3270
|
-
}
|
|
3271
|
-
async getTracesPaginated(args) {
|
|
3272
|
-
return this.stores.observability.getTracesPaginated(args);
|
|
3273
|
-
}
|
|
3274
|
-
async listScoresBySpan({
|
|
3275
|
-
traceId,
|
|
3276
|
-
spanId,
|
|
3277
|
-
pagination
|
|
3278
|
-
}) {
|
|
3279
|
-
return this.stores.scores.listScoresBySpan({ traceId, spanId, pagination });
|
|
3280
|
-
}
|
|
3281
|
-
async batchCreateSpans(args) {
|
|
3282
|
-
return this.stores.observability.batchCreateSpans(args);
|
|
3283
|
-
}
|
|
3284
|
-
async batchUpdateSpans(args) {
|
|
3285
|
-
return this.stores.observability.batchUpdateSpans(args);
|
|
3286
|
-
}
|
|
3287
3866
|
};
|
|
3288
3867
|
|
|
3289
3868
|
// src/vector/prompt.ts
|
|
@@ -3385,9 +3964,14 @@ Example Complex Query:
|
|
|
3385
3964
|
]
|
|
3386
3965
|
}`;
|
|
3387
3966
|
|
|
3967
|
+
exports.AgentsLibSQL = AgentsLibSQL;
|
|
3388
3968
|
exports.DefaultStorage = LibSQLStore;
|
|
3389
3969
|
exports.LIBSQL_PROMPT = LIBSQL_PROMPT;
|
|
3390
3970
|
exports.LibSQLStore = LibSQLStore;
|
|
3391
3971
|
exports.LibSQLVector = LibSQLVector;
|
|
3972
|
+
exports.MemoryLibSQL = MemoryLibSQL;
|
|
3973
|
+
exports.ObservabilityLibSQL = ObservabilityLibSQL;
|
|
3974
|
+
exports.ScoresLibSQL = ScoresLibSQL;
|
|
3975
|
+
exports.WorkflowsLibSQL = WorkflowsLibSQL;
|
|
3392
3976
|
//# sourceMappingURL=index.cjs.map
|
|
3393
3977
|
//# sourceMappingURL=index.cjs.map
|