@memberjunction/sqlserver-dataprovider 2.111.0 → 2.112.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +20 -20
- package/dist/SQLServerDataProvider.d.ts +1 -1
- package/dist/SQLServerDataProvider.d.ts.map +1 -1
- package/dist/SQLServerDataProvider.js +140 -132
- package/dist/SQLServerDataProvider.js.map +1 -1
- package/dist/SQLServerTransactionGroup.d.ts +1 -1
- package/dist/SQLServerTransactionGroup.d.ts.map +1 -1
- package/dist/SQLServerTransactionGroup.js +15 -15
- package/dist/SQLServerTransactionGroup.js.map +1 -1
- package/dist/UserCache.d.ts +1 -1
- package/dist/UserCache.d.ts.map +1 -1
- package/dist/UserCache.js +6 -6
- package/dist/UserCache.js.map +1 -1
- package/dist/config.d.ts +2 -2
- package/dist/config.d.ts.map +1 -1
- package/dist/config.js +6 -6
- package/dist/config.js.map +1 -1
- package/dist/queryParameterProcessor.d.ts +1 -1
- package/dist/queryParameterProcessor.d.ts.map +1 -1
- package/dist/queryParameterProcessor.js +10 -10
- package/dist/queryParameterProcessor.js.map +1 -1
- package/dist/types.d.ts +1 -1
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +2 -2
- package/dist/types.js.map +1 -1
- package/package.json +9 -10
@@ -41,7 +41,7 @@ exports.SQLServerDataProvider = void 0;
   * In practice - this FILE will NOT exist in the entities library, we need to move to its own separate project
   * so it is only included by the consumer of the entities library if they want to use it.
  **************************************************************************************************************/
- const
+ const global_1 = require("@memberjunction/global");
  const queryParameterProcessor_1 = require("./queryParameterProcessor");
  const core_entities_1 = require("@memberjunction/core-entities");
  const aiengine_1 = require("@memberjunction/aiengine");
@@ -118,10 +118,7 @@ async function executeSQLCore(query, parameters, context, options) {
  }
  }
  // Execute query and logging in parallel
- const [result] = await Promise.all([
- request.query(processedQuery),
- logPromise
- ]);
+ const [result] = await Promise.all([request.query(processedQuery), logPromise]);
  return result;
  }
  catch (error) {
@@ -153,7 +150,7 @@ async function executeSQLCore(query, parameters, context, options) {
  * await provider.Config();
  * ```
  */
- class SQLServerDataProvider extends
+ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
  _pool;
  // Instance transaction properties
  _transaction;
@@ -161,7 +158,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  _savepointCounter = 0;
  _savepointStack = [];
  // Query cache instance
- queryCache = new
+ queryCache = new global_1.QueryCache();
  // Removed _transactionRequest - creating new Request objects for each query to avoid concurrency issues
  _localStorageProvider;
  _bAllowRefresh = true;
@@ -244,7 +241,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  return super.Config(configData, providerToUse); // now parent class can do it's config
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  throw e;
  }
  }
@@ -256,14 +253,16 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  // Each instance gets its own queue processor, but only do this ONCE if we get this method called more than once we don't need to reinit
  // the sub, taht would cause duplicate rprocessing.
  if (!this._queueSubscription) {
- this._queueSubscription = this._sqlQueue
+ this._queueSubscription = this._sqlQueue$
+ .pipe((0, rxjs_1.concatMap)((item) => (0, rxjs_1.from)(executeSQLCore(item.query, item.parameters, item.context, item.options)).pipe(
  // Handle success
- (0, rxjs_1.tap)(result => item.resolve(result)),
+ (0, rxjs_1.tap)((result) => item.resolve(result)),
  // Handle errors
- (0, rxjs_1.catchError)(error => {
+ (0, rxjs_1.catchError)((error) => {
  item.reject(error);
  return (0, rxjs_1.of)(null); // Continue processing queue even on errors
- }))))
+ }))))
+ .subscribe();
  }
  }
  /**
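Note: the hunk above reflows the provider's internal SQL queue, which serializes statements through an RxJS pipeline. A minimal standalone sketch of that pattern follows; the `QueueItem` shape and `runQuery` stub are illustrative assumptions, not the package's actual types.

```typescript
import { Subject, from, of } from 'rxjs';
import { concatMap, tap, catchError } from 'rxjs/operators';

// Hypothetical queue item shape, loosely mirroring what the diff shows
interface QueueItem {
  query: string;
  resolve: (result: unknown) => void;
  reject: (err: unknown) => void;
}

// Stand-in for executeSQLCore in the real provider
async function runQuery(query: string): Promise<string> {
  return `executed: ${query}`;
}

const sqlQueue$ = new Subject<QueueItem>();

// concatMap guarantees one statement finishes before the next starts,
// which is what makes the shared queue safe for concurrent callers.
sqlQueue$
  .pipe(
    concatMap((item) =>
      from(runQuery(item.query)).pipe(
        tap((result) => item.resolve(result)),
        catchError((error) => {
          item.reject(error);
          return of(null); // keep the queue alive after a failure
        }),
      ),
    ),
  )
  .subscribe();

// Callers enqueue work and await a promise tied to their item
function enqueue(query: string): Promise<unknown> {
  return new Promise((resolve, reject) => sqlQueue$.next({ query, resolve, reject }));
}
```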
@@ -338,7 +337,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const mjCoreSchema = this.ConfigData.MJCoreSchemaName;
  const session = new SqlLogger_js_1.SqlLoggingSessionImpl(sessionId, filePath, {
  defaultSchemaName: mjCoreSchema,
- ...options // if defaultSchemaName is not provided, it will use the MJCoreSchemaName, otherwise
+ ...options, // if defaultSchemaName is not provided, it will use the MJCoreSchemaName, otherwise
  // the caller's defaultSchemaName will be used
  });
  // Initialize the session (create file, write header)
@@ -423,16 +422,16 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  // Check if any session has verbose output enabled for debug logging
  const allSessions = Array.from(this._sqlLoggingSessions.values());
- const hasVerboseSession = allSessions.some(s => s.options.verboseOutput === true);
+ const hasVerboseSession = allSessions.some((s) => s.options.verboseOutput === true);
  if (hasVerboseSession) {
  console.log('=== SQL LOGGING DEBUG ===');
  console.log(`Query to log: ${query.substring(0, 100)}...`);
  console.log(`Context user email: ${contextUser?.Email || 'NOT_PROVIDED'}`);
  console.log(`Active sessions count: ${this._sqlLoggingSessions.size}`);
- console.log(`All sessions:`, allSessions.map(s => ({
+ console.log(`All sessions:`, allSessions.map((s) => ({
  id: s.id,
  filterByUserId: s.options.filterByUserId,
- sessionName: s.options.sessionName
+ sessionName: s.options.sessionName,
  })));
  }
  const filteredSessions = allSessions.filter((session) => {
@@ -449,7 +448,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  console.log(`Session ${session.id} filter check:`, {
  filterByUserId: session.options.filterByUserId,
  contextUserEmail: contextUser.Email,
- matches: matches
+ matches: matches,
  });
  }
  return matches;
@@ -480,7 +479,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  */
  static async LogSQLStatement(query, parameters, description, isMutation = false, simpleSQLFallback, contextUser) {
  // Get the current provider instance
- const provider =
+ const provider = global_1.Metadata.Provider;
  if (provider && provider._sqlLoggingSessions.size > 0) {
  await provider._logSqlStatement(query, parameters, description, false, isMutation, simpleSQLFallback, contextUser);
  }
@@ -538,17 +537,17 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  if (!categoryPath)
  return null;
  // Split path and clean segments - remove empty strings from leading/trailing slashes
- const segments = categoryPath
- .
- .
+ const segments = categoryPath
+ .split('/')
+ .map((s) => s.trim())
+ .filter((s) => s.length > 0);
  if (segments.length === 0)
  return null;
  // Walk down the hierarchy to find the target category
  let currentCategory = null;
  for (const segment of segments) {
  const parentId = currentCategory?.ID || null;
- currentCategory = this.QueryCategories.find(cat => cat.Name.trim().toLowerCase() === segment.toLowerCase() &&
- cat.ParentID === parentId);
+ currentCategory = this.QueryCategories.find((cat) => cat.Name.trim().toLowerCase() === segment.toLowerCase() && cat.ParentID === parentId);
  if (!currentCategory) {
  return null; // Path not found
  }
@@ -568,7 +567,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  */
  async findQuery(QueryID, QueryName, CategoryID, CategoryPath, refreshMetadataIfNotFound = false) {
  // First, get the query metadata
- const queries = this.Queries.filter(q => {
+ const queries = this.Queries.filter((q) => {
  if (QueryID) {
  return q.ID.trim().toLowerCase() === QueryID.trim().toLowerCase();
  }
@@ -639,11 +638,11 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  ExecutionTime: executionTime,
  ErrorMessage: '',
  AppliedParameters: appliedParameters,
- CacheHit: false
+ CacheHit: false,
  };
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  const errorMessage = e instanceof Error ? e.message : String(e);
  return {
  Success: false,
@@ -705,7 +704,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  else if (parameters && Object.keys(parameters).length > 0) {
  // Warn if parameters were provided but query doesn't use templates
- (0,
+ (0, global_1.LogStatus)('Warning: Parameters provided but query does not use templates. Parameters will be ignored.');
  }
  return { finalSQL, appliedParameters };
  }
@@ -721,10 +720,10 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  if (!cachedEntry) {
  return null;
  }
- (0,
+ (0, global_1.LogStatus)(`Cache hit for query ${query.Name} (${query.ID})`);
  // Apply pagination to cached results
  const { paginatedResult, totalRowCount } = this.applyQueryPagination(cachedEntry.results, params);
- const remainingTTL =
+ const remainingTTL = cachedEntry.timestamp + cachedEntry.ttlMinutes * 60 * 1000 - Date.now();
  return {
  Success: true,
  QueryID: query.ID,
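Note: the hunk above reflows the cached-query path, where the remaining TTL is computed from the cache entry's insertion timestamp and its `ttlMinutes`. A small sketch of that arithmetic, with an assumed entry shape:

```typescript
// Hypothetical cache entry shape mirroring the fields used in the diff
interface CachedQueryEntry<T> {
  results: T[];
  timestamp: number;  // Date.now() at insertion
  ttlMinutes: number; // how long the entry stays valid
}

// Milliseconds of validity left; <= 0 means the entry has expired
function remainingTTL(entry: CachedQueryEntry<unknown>, now: number = Date.now()): number {
  return entry.timestamp + entry.ttlMinutes * 60 * 1000 - now;
}

// Example: an entry cached 10 minutes ago with a 30 minute TTL has roughly 20 minutes left
const entry: CachedQueryEntry<object> = {
  results: [],
  timestamp: Date.now() - 10 * 60 * 1000,
  ttlMinutes: 30,
};
console.log(Math.round(remainingTTL(entry) / 60000)); // ~20
```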
@@ -736,7 +735,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  ErrorMessage: '',
  AppliedParameters: appliedParameters,
  CacheHit: true,
- CacheTTLRemaining: remainingTTL
+ CacheTTLRemaining: remainingTTL,
  };
  }
  /**
@@ -751,7 +750,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  return {
  result,
- executionTime: end - start
+ executionTime: end - start,
  };
  }
  /**
@@ -786,11 +785,11 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  RowCount: rowCount,
  TotalRowCount: totalRowCount,
  ExecutionTime: executionTime,
- SQL: finalSQL // After parameter substitution
+ SQL: finalSQL, // After parameter substitution
  }), null, // entityId - No specific entity for queries
  query.ID, // recordId
  params.AuditLogDescription, { IgnoreDirtyState: true } // saveOptions
- ).catch(error => {
+ ).catch((error) => {
  console.error('Error creating audit log:', error);
  });
  }
@@ -804,7 +803,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  // Cache the full result set (before pagination)
  this.queryCache.set(query.ID, parameters, results, cacheConfig);
- (0,
+ (0, global_1.LogStatus)(`Cached results for query ${query.Name} (${query.ID})`);
  }
  /**************************************************************************/
  // END ---- IRunQueryProvider
@@ -865,7 +864,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  return sWhere;
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  throw e;
  }
  }
@@ -912,7 +911,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const saveViewResults = params.SaveViewResults;
  let topSQL = '';
  // Only use TOP if we're NOT using OFFSET/FETCH pagination
- const usingPagination = params.MaxRows && params.MaxRows > 0 &&
+ const usingPagination = params.MaxRows && params.MaxRows > 0 && params.StartRow !== undefined && params.StartRow >= 0;
  if (params.IgnoreMaxRows === true) {
  // do nothing, leave it blank, this structure is here to make the code easier to read
  }
@@ -930,7 +929,9 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const fields = this.getRunTimeViewFieldString(params, viewEntity);
  let viewSQL = `SELECT ${topSQL} ${fields} FROM [${entityInfo.SchemaName}].${entityInfo.BaseView}`;
  // We need countSQL for pagination (to get total count) or when using TOP (to show limited vs total)
- let countSQL =
+ let countSQL = usingPagination || (topSQL && topSQL.length > 0)
+ ? `SELECT COUNT(*) AS TotalRowCount FROM [${entityInfo.SchemaName}].${entityInfo.BaseView}`
+ : null;
  let whereSQL = '';
  let bHasWhere = false;
  let userViewRunID = '';
@@ -992,9 +993,9 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  }
  // NEXT, apply Row Level Security (RLS)
- if (!entityInfo.UserExemptFromRowLevelSecurity(user,
+ if (!entityInfo.UserExemptFromRowLevelSecurity(user, global_1.EntityPermissionType.Read)) {
  // user is NOT exempt from RLS, so we need to apply it
- const rlsWhereClause = entityInfo.GetUserRowLevelSecurityWhereClause(user,
+ const rlsWhereClause = entityInfo.GetUserRowLevelSecurityWhereClause(user, global_1.EntityPermissionType.Read, '');
  if (rlsWhereClause && rlsWhereClause.length > 0) {
  if (bHasWhere) {
  whereSQL += ` AND (${rlsWhereClause})`;
@@ -1031,7 +1032,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  viewSQL += ` ORDER BY ${orderBy}`;
  }
  // Apply pagination using OFFSET/FETCH if both MaxRows and StartRow are specified
- if (params.MaxRows && params.MaxRows > 0 &&
+ if (params.MaxRows && params.MaxRows > 0 && params.StartRow !== undefined && params.StartRow >= 0 && entityInfo.FirstPrimaryKey) {
  // If no ORDER BY was already added, add one based on primary key (required for OFFSET/FETCH)
  if (!orderBy) {
  viewSQL += ` ORDER BY ${entityInfo.FirstPrimaryKey.Name} `;
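Note: the hunks above reflow the view-running SQL builder, which uses TOP when only a row limit is given, switches to OFFSET/FETCH when both MaxRows and StartRow are supplied, and issues a separate COUNT(*) for the total. A hedged sketch of that decision, with illustrative names rather than the package's API:

```typescript
// Minimal sketch of the TOP vs OFFSET/FETCH decision shown above.
interface PageParams {
  maxRows?: number;
  startRow?: number; // 0-based offset; OFFSET/FETCH requires an ORDER BY
}

function buildViewSQL(baseView: string, orderBy: string, p: PageParams): { dataSQL: string; countSQL: string | null } {
  const usingPagination = !!p.maxRows && p.maxRows > 0 && p.startRow !== undefined && p.startRow >= 0;
  // TOP only applies when we are not paginating
  const top = !usingPagination && p.maxRows && p.maxRows > 0 ? `TOP ${p.maxRows} ` : '';
  let sql = `SELECT ${top}* FROM ${baseView} ORDER BY ${orderBy}`;
  if (usingPagination) {
    sql += ` OFFSET ${p.startRow} ROWS FETCH NEXT ${p.maxRows} ROWS ONLY`;
  }
  // A separate COUNT(*) gives the total row count whenever results are limited either way
  const countSQL = usingPagination || top ? `SELECT COUNT(*) AS TotalRowCount FROM ${baseView}` : null;
  return { dataSQL: sql, countSQL };
}

// Example: third page of 25 rows
console.log(buildViewSQL('[dbo].[vwOrders]', 'ID', { maxRows: 25, startRow: 50 }));
```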
@@ -1055,7 +1056,9 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  const stopTime = new Date();
  if (params.ForceAuditLog ||
- (viewEntity?.ID &&
+ (viewEntity?.ID &&
+ (extraFilter === undefined || extraFilter === null || extraFilter?.trim().length === 0) &&
+ entityInfo.AuditViewRuns)) {
  // ONLY LOG TOP LEVEL VIEW EXECUTION - this would be for views with an ID, and don't have ExtraFilter as ExtraFilter
  // is only used in the system on a tab or just for ad hoc view execution
  // we do NOT want to wait for this, so no await,
@@ -1089,7 +1092,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  catch (e) {
  const exceptionStopTime = new Date();
- (0,
+ (0, global_1.LogError)(e);
  return {
  RowCount: 0,
  TotalRowCount: 0,
@@ -1180,7 +1183,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  if (field)
  fieldList.push(field);
  else
- (0,
+ (0, global_1.LogError)(`Field ${f} not found in entity ${entityInfo.Name}`);
  });
  }
  else {
@@ -1196,7 +1199,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  fieldList.push(c.EntityField);
  }
  else {
- (0,
+ (0, global_1.LogError)(`View Field ${c.Name} doesn't match an Entity Field in entity ${entityInfo.Name}. This can happen if the view was saved with a field that no longer exists in the entity. It is best to update the view to remove this field.`);
  }
  }
  });
@@ -1209,7 +1212,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  }
  finally {
  return fieldList;
@@ -1260,7 +1263,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  else {
  // we have multiple words, so we need to convert the spaces to AND
  // but first, let's strip the stopwords out of the string
- u = (0,
+ u = (0, global_1.StripStopWords)(userSearchString);
  // next, include "AND" between all the words so that we have a full text search on all the words
  u = u.replace(/ /g, ' AND ');
  }
@@ -1292,7 +1295,9 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const authorization = authorizationName
  ? this.Authorizations.find((a) => a?.Name?.trim().toLowerCase() === authorizationName.trim().toLowerCase())
  : null;
- const auditLogType = auditLogTypeName
+ const auditLogType = auditLogTypeName
+ ? this.AuditLogTypes.find((a) => a?.Name?.trim().toLowerCase() === auditLogTypeName.trim().toLowerCase())
+ : null;
  if (!user)
  throw new Error(`User is a required parameter`);
  if (!auditLogType) {
@@ -1321,7 +1326,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  throw new Error(`Error saving audit log record`);
  }
  catch (err) {
- (0,
+ (0, global_1.LogError)(err);
  return null;
  }
  }
@@ -1345,7 +1350,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  // START ---- IEntityDataProvider
  /**************************************************************************/
  get ProviderType() {
- return
+ return global_1.ProviderType.Database;
  }
  async GetRecordFavoriteStatus(userId, entityName, CompositeKey, contextUser) {
  const id = await this.GetRecordFavoriteID(userId, entityName, CompositeKey, contextUser);
@@ -1361,7 +1366,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  return null;
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  throw e;
  }
  }
@@ -1394,7 +1399,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  throw e;
  }
  }
@@ -1404,7 +1409,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  return this.ExecuteSQL(sSQL, undefined, undefined, contextUser);
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  throw e;
  }
  }
@@ -1424,7 +1429,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  // we do this in SQL by combining the pirmary key name and value for each row using the default separator defined by the CompositeKey class
  // the output of this should be like the following 'Field1|Value1||Field2|Value2||Field3|Value3' where the || is the CompositeKey.DefaultFieldDelimiter and the | is the CompositeKey.DefaultValueDelimiter
  const quotes = entity.FirstPrimaryKey.NeedsQuotes ? "'" : '';
- const primaryKeySelectString = `CONCAT(${entity.PrimaryKeys.map((pk) => `'${pk.Name}|', CAST(${pk.Name} AS NVARCHAR(MAX))`).join(`,'${
+ const primaryKeySelectString = `CONCAT(${entity.PrimaryKeys.map((pk) => `'${pk.Name}|', CAST(${pk.Name} AS NVARCHAR(MAX))`).join(`,'${global_1.CompositeKey.DefaultFieldDelimiter}',`)})`;
  // for this entity, check to see if it has any fields that are soft links, and for each of those, generate the SQL
  entity.Fields.filter((f) => f.EntityIDFieldName && f.EntityIDFieldName.length > 0).forEach((f) => {
  // each field in f must be processed
@@ -1453,7 +1458,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const entityInfo = this.Entities.find((e) => e.Name.trim().toLowerCase() === entityDependency.EntityName?.trim().toLowerCase());
  const quotes = entityInfo.FirstPrimaryKey.NeedsQuotes ? "'" : '';
  const relatedEntityInfo = this.Entities.find((e) => e.Name.trim().toLowerCase() === entityDependency.RelatedEntityName?.trim().toLowerCase());
- const primaryKeySelectString = `CONCAT(${entityInfo.PrimaryKeys.map((pk) => `'${pk.Name}|', CAST(${pk.Name} AS NVARCHAR(MAX))`).join(`,'${
+ const primaryKeySelectString = `CONCAT(${entityInfo.PrimaryKeys.map((pk) => `'${pk.Name}|', CAST(${pk.Name} AS NVARCHAR(MAX))`).join(`,'${global_1.CompositeKey.DefaultFieldDelimiter}',`)})`;
  if (sSQL.length > 0)
  sSQL += ' UNION ALL ';
  sSQL += `SELECT
@@ -1503,13 +1508,13 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  // entityInfo.PrimaryKeys.forEach((pk) => {
  // pkeyValues.push({FieldName: pk.Name, Value: r[pk.Name]}) // add all of the primary keys, which often is as simple as just "ID", but this is generic way to do it
  // })
- const compositeKey = new
+ const compositeKey = new global_1.CompositeKey();
  // the row r will have a PrimaryKeyValue field that is a string that is a concatenation of the primary key field names and values
  // we need to parse that out so that we can then pass it to the CompositeKey object
  const pkeys = {};
- const keyValues = r.PrimaryKeyValue.split(
+ const keyValues = r.PrimaryKeyValue.split(global_1.CompositeKey.DefaultFieldDelimiter);
  keyValues.forEach((kv) => {
- const parts = kv.split(
+ const parts = kv.split(global_1.CompositeKey.DefaultValueDelimiter);
  pkeys[parts[0]] = parts[1];
  });
  compositeKey.LoadFromEntityInfoAndRecord(entityInfo, pkeys);
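Note: the hunks above build each row's primary key as a single delimited string inside SQL ('Field1|Value1||Field2|Value2', per the in-code comment) and later parse that string back into field/value pairs. A sketch of that round trip, with assumed delimiter values ('||' between fields, '|' between a name and its value); the real values come from `CompositeKey`:

```typescript
// Assumed delimiters matching the format described in the diff's comments
const FIELD_DELIMITER = '||';
const VALUE_DELIMITER = '|';

// Build the delimited string that the SQL CONCAT(...) expression produces
function encodePrimaryKey(pkeys: Record<string, string>): string {
  return Object.entries(pkeys)
    .map(([name, value]) => `${name}${VALUE_DELIMITER}${value}`)
    .join(FIELD_DELIMITER);
}

// Parse it back into field/value pairs, as the hunk does with split()
function decodePrimaryKey(encoded: string): Record<string, string> {
  const pkeys: Record<string, string> = {};
  for (const kv of encoded.split(FIELD_DELIMITER)) {
    const [name, value] = kv.split(VALUE_DELIMITER);
    pkeys[name] = value;
  }
  return pkeys;
}

const roundTrip = decodePrimaryKey(encodePrimaryKey({ OrderID: '42', LineNumber: '7' }));
console.log(roundTrip); // { OrderID: '42', LineNumber: '7' }
```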
@@ -1525,7 +1530,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  catch (e) {
  // log and throw
- (0,
+ (0, global_1.LogError)(e);
  throw e;
  }
  }
@@ -1657,7 +1662,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  return result;
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  await this.RollbackTransaction();
  // attempt to persist the status to the DB, although that might fail
  await this.CompleteMergeLogging(mergeRecordLog, result, contextUser);
@@ -1689,7 +1694,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  throw new Error(`Error saving record merge log`);
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  throw e;
  }
  }
@@ -1706,7 +1711,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  if (await recordMergeLog.Save()) {
  // top level saved, now let's create the deletion detail records for each of the records that were merged
  for (const d of result.RecordStatus) {
- const recordMergeDeletionLog = await this.GetEntityObject('Record Merge Deletion Logs', contextUser);
+ const recordMergeDeletionLog = (await this.GetEntityObject('Record Merge Deletion Logs', contextUser));
  recordMergeDeletionLog.NewRecord();
  recordMergeDeletionLog.RecordMergeLogID = recordMergeLog.ID;
  recordMergeDeletionLog.DeletedRecordID = d.CompositeKey.Values(); // this would join together all of the primary key values, which is fine as the primary key is a string
@@ -1721,7 +1726,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  catch (e) {
  // do nothing here because we often will get here since some conditions lead to no DB updates possible...
- (0,
+ (0, global_1.LogError)(e);
  // don't bubble up the error here as we're sometimes already in an exception block in caller
  }
  }
@@ -1756,7 +1761,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  let oldData = null;
  // use SQL Server CONCAT function to combine all of the primary key values and then combine them together
  // using the default field delimiter and default value delimiter as defined in the CompositeKey class
- const concatPKIDString = `CONCAT(${entity.EntityInfo.PrimaryKeys.map((pk) => `'${pk.CodeName}','${
+ const concatPKIDString = `CONCAT(${entity.EntityInfo.PrimaryKeys.map((pk) => `'${pk.CodeName}','${global_1.CompositeKey.DefaultValueDelimiter}',${pk.Name}`).join(`,'${global_1.CompositeKey.DefaultFieldDelimiter}',`)})`;
  if (!bNewRecord)
  oldData = entity.GetAll(true); // get all the OLD values, only do for existing records, for new records, not relevant
  const logRecordChangeSQL = this.GetLogRecordChangeSQL(entity.GetAll(false), oldData, entity.EntityInfo.Name, '@ID', entity.EntityInfo, bNewRecord ? 'Create' : 'Update', user, false);
@@ -1836,7 +1841,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const invocationType = baseType === 'validate' ? 'Validate' : before ? 'Before' + baseTypeType : 'After' + baseTypeType;
  const invocationTypeEntity = engine.InvocationTypes.find((i) => i.Name === invocationType);
  if (!invocationTypeEntity) {
- (0,
+ (0, global_1.LogError)(`Invocation Type ${invocationType} not found in metadata`);
  return [];
  // throw new Error(`Invocation Type ${invocationType} not found in metadata`);
  }
@@ -1854,7 +1859,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  return results;
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  return [];
  }
  }
@@ -1903,7 +1908,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e.message);
  }
  }
  }
@@ -1911,16 +1916,16 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  }
  }
  async Save(entity, user, options) {
- const entityResult = new
+ const entityResult = new global_1.BaseEntityResult();
  try {
  entity.RegisterTransactionPreprocessing();
  const bNewRecord = !entity.IsSaved;
  if (!options)
- options = new
+ options = new global_1.EntitySaveOptions();
  const bReplay = !!options.ReplayOnly;
  if (!bReplay && !bNewRecord && !entity.EntityInfo.AllowUpdateAPI) {
  // existing record and not allowed to update
@@ -1941,7 +1946,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  f.ActiveStatusAssertions = false; // turn off warnings for this operation
  const ret = {
  FieldName: f.Name,
- Value: f.Value
+ Value: f.Value,
  };
  f.ActiveStatusAssertions = tempStatus; // restore the status assertions
  return ret;
@@ -1986,10 +1991,10 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  // we are part of a transaction group, so just add our query to the list
  // and when the transaction is committed, we will send all the queries at once
  this._bAllowRefresh = false; // stop refreshes of metadata while we're doing work
- entity.TransactionGroup.AddTransaction(new
+ entity.TransactionGroup.AddTransaction(new global_1.TransactionItem(entity, entityResult.Type === 'create' ? 'Create' : 'Update', sSQL, null, {
  dataSource: this._pool,
  simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined,
- entityName: entity.EntityInfo.Name
+ entityName: entity.EntityInfo.Name,
  }, (transactionResult, success) => {
  // we get here whenever the transaction group does gets around to committing
  // our query.
@@ -2031,7 +2036,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const rawResult = await this.ExecuteSQL(sSQL, null, {
  isMutation: true,
  description: `Save ${entity.EntityInfo.Name}`,
- simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined
+ simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined,
  }, user);
  result = await this.ProcessEntityRows(rawResult, entity.EntityInfo);
  }
@@ -2071,7 +2076,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  this._bAllowRefresh = true; // allow refreshes again if we get a failure here
  entityResult.EndedAt = new Date();
  entityResult.Message = e.message;
- (0,
+ (0, global_1.LogError)(e);
  throw e; // rethrow the error
  }
  }
@@ -2188,7 +2193,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  variablesSQL: declarations.length > 0 ? `DECLARE ${declarations.join(',\n ')}` : '',
  setSQL: setStatements.join('\n'),
  execParams: execParams.join(',\n '),
- simpleParams: simpleParams
+ simpleParams: simpleParams,
  };
  }
  /**
@@ -2200,7 +2205,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  generateSetStatementValue(f, value) {
  let val = value;
  switch (f.TSType) {
- case
+ case global_1.EntityFieldTSType.Boolean:
  // check to see if the value is a string and if it is equal to true, if so, set the value to 1
  if (typeof value === 'string' && value.trim().toLowerCase() === 'true')
  val = 1;
@@ -2209,7 +2214,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  else
  val = value ? 1 : 0;
  return val.toString();
- case
+ case global_1.EntityFieldTSType.String:
  // Handle string escaping for SET statements
  if (typeof val === 'string') {
  val = val.replace(/'/g, "''");
@@ -2221,7 +2226,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  val = val.replace(/'/g, "''");
  }
  return `${f.UnicodePrefix}'${val}'`;
- case
+ case global_1.EntityFieldTSType.Date:
  if (val !== null && val !== undefined) {
  if (typeof val === 'number') {
  // we have a timestamp - milliseconds since Unix Epoch
@@ -2234,7 +2239,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  val = val.toISOString(); // convert the date to ISO format for storage in the DB
  }
  return `'${val}'`;
- case
+ case global_1.EntityFieldTSType.Number:
  return val.toString();
  default:
  // For other types, convert to string and quote if needed
@@ -2252,7 +2257,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  let quotes = '';
  let val = value;
  switch (f.TSType) {
- case
+ case global_1.EntityFieldTSType.Boolean:
  // check to see if the value is a string and if it is equal to true, if so, set the value to 1
  if (typeof value === 'string' && value.trim().toLowerCase() === 'true')
  val = 1;
@@ -2261,10 +2266,10 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  else
  val = value ? 1 : 0;
  break;
- case
+ case global_1.EntityFieldTSType.String:
  quotes = "'";
  break;
- case
+ case global_1.EntityFieldTSType.Date:
  quotes = "'";
  if (val !== null && val !== undefined) {
  if (typeof val === 'number') {
@@ -2419,7 +2424,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  // Handle arrays recursively
  if (Array.isArray(obj)) {
- return obj.map(item => this.escapeQuotesInProperties(item, quoteToEscape));
+ return obj.map((item) => this.escapeQuotesInProperties(item, quoteToEscape));
  }
  // Handle objects recursively
  if (typeof obj === 'object') {
@@ -2493,14 +2498,14 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  bDiff = false; // this branch of logic ensures that undefined and null are treated the same
  else {
  switch (f.TSType) {
- case
+ case global_1.EntityFieldTSType.String:
  bDiff = oldData[key] !== newData[key];
  break;
- case
+ case global_1.EntityFieldTSType.Date:
  bDiff = new Date(oldData[key]).getTime() !== new Date(newData[key]).getTime();
  break;
- case
- case
+ case global_1.EntityFieldTSType.Number:
+ case global_1.EntityFieldTSType.Boolean:
  bDiff = oldData[key] !== newData[key];
  break;
  }
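Note: the hunk above reflows the change-detection switch that compares old and new field values by their TypeScript type, comparing dates by timestamp and treating null and undefined as the same empty value. A hedged sketch of that comparison, using an illustrative enum rather than the package's `EntityFieldTSType`:

```typescript
// Illustrative field-type enum; the real values come from EntityFieldTSType
enum FieldTSType { String, Number, Boolean, Date }

// Returns true when the value actually changed, mirroring the switch in the hunk:
// dates compare by timestamp, null and undefined count as "no change" together
function fieldChanged(tsType: FieldTSType, oldValue: unknown, newValue: unknown): boolean {
  const bothEmpty =
    (oldValue === null || oldValue === undefined) && (newValue === null || newValue === undefined);
  if (bothEmpty) {
    return false;
  }
  switch (tsType) {
    case FieldTSType.Date:
      return (
        new Date(oldValue as string | number | Date).getTime() !==
        new Date(newValue as string | number | Date).getTime()
      );
    case FieldTSType.String:
    case FieldTSType.Number:
    case FieldTSType.Boolean:
    default:
      return oldValue !== newValue;
  }
}

console.log(fieldChanged(FieldTSType.Date, '2024-01-01T00:00:00Z', new Date('2024-01-01T00:00:00Z'))); // false
console.log(fieldChanged(FieldTSType.String, null, undefined)); // false
console.log(fieldChanged(FieldTSType.Number, 1, 2)); // true
```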
@@ -2553,7 +2558,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const ret = d[0];
  // we need to post process the retrieval to see if we have any char or nchar fields and we need to remove their trailing spaces
  for (const field of entity.EntityInfo.Fields) {
- if (field.TSType ===
+ if (field.TSType === global_1.EntityFieldTSType.String &&
  field.Type.toLowerCase().includes('char') &&
  !field.Type.toLowerCase().includes('varchar')) {
  // trim trailing spaces for char and nchar fields
@@ -2685,11 +2690,11 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  return { fullSQL: sSQL, simpleSQL: sSimpleSQL };
  }
  async Delete(entity, options, user) {
- const result = new
+ const result = new global_1.BaseEntityResult();
  try {
  entity.RegisterTransactionPreprocessing();
  if (!options)
- options = new
+ options = new global_1.EntityDeleteOptions();
  const bReplay = options.ReplayOnly;
  if (!entity.IsSaved && !bReplay)
  // existing record and not allowed to update
@@ -2717,10 +2722,10 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  entity.RaiseReadyForTransaction();
  // we are part of a transaction group, so just add our query to the list
  // and when the transaction is committed, we will send all the queries at once
- entity.TransactionGroup.AddTransaction(new
+ entity.TransactionGroup.AddTransaction(new global_1.TransactionItem(entity, 'Delete', sSQL, null, {
  dataSource: this._pool,
  simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined,
- entityName: entity.EntityInfo.Name
+ entityName: entity.EntityInfo.Name,
  }, (transactionResult, success) => {
  // we get here whenever the transaction group does gets around to committing
  // our query.
@@ -2761,7 +2766,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  d = await this.ExecuteSQL(sSQL, null, {
  isMutation: true,
  description: `Delete ${entity.EntityInfo.Name}`,
- simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined
+ simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined,
  }, user);
  }
  if (d && d[0]) {
@@ -2791,7 +2796,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  result.Message = e.message;
  result.Success = false;
  result.EndedAt = new Date();
@@ -2902,7 +2907,8 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  }
  return acc;
- }, new Date(0)
+ }, new Date(0) // Unix epoch - lowest possible date to start with
+ );
  return {
  DatasetID: items[0].DatasetID,
  DatasetName: datasetName,
@@ -3000,7 +3006,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  // the reason we continue below if we have NOT loaded Entities is that when the system first bootstraps, DATASET gets loaded
  // FIRST before Entities are loaded to load the entity metadata so this would ALWAYS fail :)
  // entity not found, return a failed result, shouldn't ever get here due to the foreign key constraint on the table
- (0,
+ (0, global_1.LogError)(`Entity not found for dataset item ${item.Code} in dataset ${datasetName}`);
  return null;
  }
  else {
@@ -3014,7 +3020,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  });
  if (invalidColumns.length > 0) {
- (0,
+ (0, global_1.LogError)(`Invalid columns specified for dataset item ${item.Code} in dataset ${datasetName}: ${invalidColumns.join(', ')}`);
  return null;
  }
  }
@@ -3133,7 +3139,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const appEntities = await this.ExecuteSQL(`SELECT * FROM [${this.MJCoreSchemaName}].vwApplicationEntities ORDER BY ApplicationName`, undefined, undefined, contextUser);
  const ret = [];
  for (let i = 0; i < apps.length; i++) {
- ret.push(new
+ ret.push(new global_1.ApplicationInfo(this, {
  ...apps[i],
  ApplicationEntities: appEntities.filter((ae) => ae.ApplicationName.trim().toLowerCase() === apps[i].Name.trim().toLowerCase()),
  }));
@@ -3144,7 +3150,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const alts = await this.ExecuteSQL(`SELECT * FROM [${this.MJCoreSchemaName}].vwAuditLogTypes`, null, undefined, contextUser);
  const ret = [];
  for (let i = 0; i < alts.length; i++) {
- const alt = new
+ const alt = new global_1.AuditLogTypeInfo(alts[i]);
  ret.push(alt);
  }
  return ret;
@@ -3154,7 +3160,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const userRoles = await this.ExecuteSQL(`SELECT * FROM [${this.MJCoreSchemaName}].vwUserRoles ORDER BY UserID`, undefined, undefined, contextUser);
  const ret = [];
  for (let i = 0; i < users.length; i++) {
- ret.push(new
+ ret.push(new global_1.UserInfo(this, {
  ...users[i],
  UserRoles: userRoles.filter((ur) => ur.UserID === users[i].ID),
  }));
@@ -3166,7 +3172,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const authRoles = await this.ExecuteSQL(`SELECT * FROM [${this.MJCoreSchemaName}].vwAuthorizationRoles ORDER BY AuthorizationName`, undefined, undefined, contextUser);
  const ret = [];
  for (let i = 0; i < auths.length; i++) {
- ret.push(new
+ ret.push(new global_1.AuthorizationInfo(this, {
  ...auths[i],
  AuthorizationRoles: authRoles.filter((ar) => ar.AuthorizationName.trim().toLowerCase() === auths[i].Name.trim().toLowerCase()),
  }));
@@ -3187,7 +3193,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  return rows;
  }
  // Find all datetime fields in the entity
- const datetimeFields = entityInfo.Fields.filter((field) => field.TSType ===
+ const datetimeFields = entityInfo.Fields.filter((field) => field.TSType === global_1.EntityFieldTSType.Date);
  // If there are no datetime fields, return the rows as-is
  if (datetimeFields.length === 0) {
  return rows;
@@ -3289,7 +3295,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  context,
  options,
  resolve,
- reject
+ reject,
  });
  });
  }
@@ -3345,16 +3351,18 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  logSqlStatement: this._logSqlStatement.bind(this),
  clearTransaction: () => {
  this._transaction = null;
- }
+ },
  };
  // Convert logging options to internal format
- const options = loggingOptions
-
-
-
-
-
-
+ const options = loggingOptions
+ ? {
+ description: loggingOptions.description,
+ ignoreLogging: loggingOptions.ignoreLogging,
+ isMutation: loggingOptions.isMutation,
+ simpleSQLFallback: loggingOptions.simpleSQLFallback,
+ contextUser: loggingOptions.contextUser,
+ }
+ : undefined;
  // Delegate to instance method
  return this._internalExecuteSQLInstance(query, parameters, context, options);
  }
@@ -3373,7 +3381,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  ignoreLogging: options?.ignoreLogging,
  isMutation: options?.isMutation,
  simpleSQLFallback: options?.simpleSQLFallback,
- contextUser: contextUser
+ contextUser: contextUser,
  });
  // Return recordset for consistency with TypeORM behavior
  // If multiple recordsets, return recordsets array
@@ -3404,14 +3412,14 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  logSqlStatement: async (q, p, d, i, m, s, u) => {
  // Use static logging method
  await SQLServerDataProvider.LogSQLStatement(q, p, d || 'ExecuteSQLWithPool', m || false, s, u);
- }
+ },
  };
  // Create options
  const options = {
  description: 'ExecuteSQLWithPool',
  ignoreLogging: false,
  isMutation: false,
- contextUser: contextUser
+ contextUser: contextUser,
  };
  // Use the static execution method
  const result = await SQLServerDataProvider._internalExecuteSQLStatic(query, parameters, context, options);
@@ -3497,7 +3505,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  transaction: transaction,
  logSqlStatement: async (q, p, d, i, m, s, u) => {
  await SQLServerDataProvider.LogSQLStatement(q, p, d || 'Batch execution', m || false, s, u);
- }
+ },
  };
  // Use named parameters for batch SQL
  const namedParams = batchParameters;
@@ -3506,7 +3514,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  description: 'Batch execution',
  ignoreLogging: false,
  isMutation: false,
- contextUser: contextUser
+ contextUser: contextUser,
  });
  // Return array of recordsets - one for each query
  // Handle both single and multiple recordsets
@@ -3580,14 +3588,14 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  logSqlStatement: this._logSqlStatement.bind(this),
  clearTransaction: () => {
  this._transaction = null;
- }
+ },
  };
  // Execute using instance method (which handles queue for transactions)
  const result = await this._internalExecuteSQLInstance(batchSQL, batchParameters, context, {
  description: options?.description || 'Batch execution',
  ignoreLogging: options?.ignoreLogging || false,
  isMutation: options?.isMutation || false,
- contextUser: contextUser
+ contextUser: contextUser,
  });
  // Return array of recordsets - one for each query
  // Handle both single and multiple recordsets
@@ -3602,7 +3610,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  throw e;
  }
  }
@@ -3704,13 +3712,13 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  // Create savepoint for nested transaction
  await this.ExecuteSQL(`SAVE TRANSACTION ${savepointName}`, null, {
  description: `Creating savepoint ${savepointName} at depth ${this._transactionDepth}`,
- ignoreLogging: true
+ ignoreLogging: true,
  });
  }
  }
  catch (e) {
  this._transactionDepth--; // Restore depth on error
- (0,
+ (0, global_1.LogError)(e);
  throw e; // force caller to handle
  }
  }
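Note: the hunk above reflows the nested-transaction path, which issues `SAVE TRANSACTION <name>` when a real transaction is already open and tracks depth with a counter and a savepoint stack. A minimal sketch of that bookkeeping; the class and `execute` callback are illustrative assumptions, not the provider's API:

```typescript
// Minimal sketch of savepoint bookkeeping for nested begin/rollback calls.
class SavepointTracker {
  private depth = 0;
  private counter = 0;
  private stack: string[] = [];

  constructor(private execute: (sql: string) => Promise<void>) {}

  async begin(): Promise<void> {
    if (this.depth === 0) {
      await this.execute('BEGIN TRANSACTION');
    } else {
      // Nested call: create a savepoint instead of a second transaction
      const name = `SP_${++this.counter}`;
      this.stack.push(name);
      await this.execute(`SAVE TRANSACTION ${name}`);
    }
    this.depth++;
  }

  async rollback(): Promise<void> {
    this.depth--;
    if (this.depth === 0) {
      await this.execute('ROLLBACK TRANSACTION');
      this.counter = 0;
    } else {
      // Only unwind back to the most recent savepoint
      const name = this.stack.pop();
      await this.execute(`ROLLBACK TRANSACTION ${name}`);
    }
  }
}

// Usage (assumed mssql-style pool): nested begin/rollback only undoes the inner unit of work
// const t = new SavepointTracker((sql) => pool.request().query(sql).then(() => undefined));
```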
@@ -3741,7 +3749,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  throw e; // force caller to handle
  }
  }
@@ -3767,7 +3775,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  const deferredCount = this._deferredTasks.length;
  this._deferredTasks = [];
  if (deferredCount > 0) {
- (0,
+ (0, global_1.LogStatus)(`Cleared ${deferredCount} deferred tasks after transaction rollback`);
  }
  }
  else {
@@ -3778,7 +3786,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  }
  await this.ExecuteSQL(`ROLLBACK TRANSACTION ${savepointName}`, null, {
  description: `Rolling back to savepoint ${savepointName}`,
- ignoreLogging: true
+ ignoreLogging: true,
  });
  this._savepointStack.pop();
  this._transactionDepth--;
@@ -3793,7 +3801,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  this._savepointCounter = 0;
  this._transactionState$.next(false);
  }
- (0,
+ (0, global_1.LogError)(e);
  throw e; // force caller to handle
  }
  }
@@ -3805,7 +3813,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  async RefreshIfNeeded() {
  // Skip refresh if a transaction is active
  if (this.isTransactionActive) {
- (0,
+ (0, global_1.LogStatus)('Skipping metadata refresh - transaction is active');
  return false;
  }
  // Call parent implementation if no transaction
@@ -3819,7 +3827,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  async processDeferredTasks() {
  if (this._deferredTasks.length === 0)
  return;
- (0,
+ (0, global_1.LogStatus)(`Processing ${this._deferredTasks.length} deferred tasks after transaction commit`);
  // Copy and clear the deferred tasks array
  const tasksToProcess = [...this._deferredTasks];
  this._deferredTasks = [];
@@ -3833,11 +3841,11 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  // Add other task types here as needed
  }
  catch (error) {
- (0,
+ (0, global_1.LogError)(`Failed to process deferred ${task.type} task: ${error}`);
  // Continue processing other tasks even if one fails
  }
  }
- (0,
+ (0, global_1.LogStatus)(`Completed processing deferred tasks`);
  }
  get LocalStorageProvider() {
  if (!this._localStorageProvider)
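Note: the hunks above reflow the deferred-task handling: side-effect work queued during a transaction runs only after commit and is discarded on rollback, and metadata refreshes are skipped while a transaction is active. A hedged sketch of that buffering; the task shape and logging are illustrative, not the provider's actual types:

```typescript
// Sketch of deferring side-effect work until a transaction resolves.
type DeferredTask = { type: string; run: () => Promise<void> };

class DeferredTaskBuffer {
  private tasks: DeferredTask[] = [];

  defer(task: DeferredTask): void {
    this.tasks.push(task);
  }

  // On commit: drain the buffer and run everything, continuing past individual failures
  async onCommit(): Promise<void> {
    const toProcess = [...this.tasks];
    this.tasks = [];
    for (const task of toProcess) {
      try {
        await task.run();
      } catch (error) {
        console.error(`Failed to process deferred ${task.type} task: ${error}`);
      }
    }
  }

  // On rollback: the work belonged to a transaction that never happened, so drop it
  onRollback(): void {
    const dropped = this.tasks.length;
    this.tasks = [];
    if (dropped > 0) {
      console.log(`Cleared ${dropped} deferred tasks after transaction rollback`);
    }
  }
}
```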
@@ -3867,13 +3875,13 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  return data[0][fields[0]]; // return first field
  }
  else {
- (0,
+ (0, global_1.LogError)(`Entity ${entityName} record ${CompositeKey.ToString()} not found, returning null`);
  return null;
  }
  }
  }
  catch (e) {
- (0,
+ (0, global_1.LogError)(e);
  return null;
  }
  }
@@ -3886,7 +3894,7 @@ class SQLServerDataProvider extends core_1.DatabaseProviderBase {
  if (!f)
  f = e.Fields.find((f) => f.Name === 'Name');
  if (!f) {
- (0,
+ (0, global_1.LogError)(`Entity ${entityName} does not have an IsNameField or a field with the column name of Name, returning null, use recordId`);
  return null;
  }
  else {