@memberjunction/sqlserver-dataprovider 2.112.0 → 2.113.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +20 -20
- package/dist/SQLServerDataProvider.d.ts +1 -1
- package/dist/SQLServerDataProvider.d.ts.map +1 -1
- package/dist/SQLServerDataProvider.js +141 -142
- package/dist/SQLServerDataProvider.js.map +1 -1
- package/dist/SQLServerTransactionGroup.d.ts +1 -1
- package/dist/SQLServerTransactionGroup.d.ts.map +1 -1
- package/dist/SQLServerTransactionGroup.js +15 -15
- package/dist/SQLServerTransactionGroup.js.map +1 -1
- package/dist/UserCache.d.ts +1 -1
- package/dist/UserCache.d.ts.map +1 -1
- package/dist/UserCache.js +6 -6
- package/dist/UserCache.js.map +1 -1
- package/dist/config.d.ts +2 -2
- package/dist/config.d.ts.map +1 -1
- package/dist/config.js +6 -6
- package/dist/config.js.map +1 -1
- package/dist/queryParameterProcessor.d.ts +1 -1
- package/dist/queryParameterProcessor.d.ts.map +1 -1
- package/dist/queryParameterProcessor.js +10 -10
- package/dist/queryParameterProcessor.js.map +1 -1
- package/dist/types.d.ts +1 -1
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +2 -2
- package/dist/types.js.map +1 -1
- package/package.json +10 -9
@@ -41,7 +41,7 @@ exports.SQLServerDataProvider = void 0;
  * In practice - this FILE will NOT exist in the entities library, we need to move to its own separate project
  * so it is only included by the consumer of the entities library if they want to use it.
 **************************************************************************************************************/
-const
+const core_1 = require("@memberjunction/core");
 const queryParameterProcessor_1 = require("./queryParameterProcessor");
 const core_entities_1 = require("@memberjunction/core-entities");
 const aiengine_1 = require("@memberjunction/aiengine");
@@ -118,7 +118,10 @@ async function executeSQLCore(query, parameters, context, options) {
 }
 }
 // Execute query and logging in parallel
-const [result] = await Promise.all([
+const [result] = await Promise.all([
+request.query(processedQuery),
+logPromise
+]);
 return result;
 }
 catch (error) {
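The `executeSQLCore` hunk above now runs the SQL request and its log write concurrently and resolves with only the query result. A minimal sketch of that pattern is below; `runQuery` and `writeLog` are hypothetical stand-ins for `request.query(processedQuery)` and the provider's `logPromise`, not part of the package's API.

```typescript
// Minimal sketch: run the query and its log write in parallel and return only the query result.
// `runQuery` and `writeLog` are hypothetical placeholders, not MemberJunction APIs.
async function executeWithParallelLogging<T>(
    runQuery: () => Promise<T>,
    writeLog: () => Promise<void>
): Promise<T> {
    // Promise.all rejects as soon as either promise rejects; destructuring keeps only the query result.
    const [result] = await Promise.all([runQuery(), writeLog()]);
    return result;
}
```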
@@ -150,7 +153,7 @@ async function executeSQLCore(query, parameters, context, options) {
 * await provider.Config();
 * ```
 */
-class SQLServerDataProvider extends
+class SQLServerDataProvider extends core_1.DatabaseProviderBase {
 _pool;
 // Instance transaction properties
 _transaction;
@@ -158,7 +161,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 _savepointCounter = 0;
 _savepointStack = [];
 // Query cache instance
-queryCache = new
+queryCache = new core_1.QueryCache();
 // Removed _transactionRequest - creating new Request objects for each query to avoid concurrency issues
 _localStorageProvider;
 _bAllowRefresh = true;
@@ -241,7 +244,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 return super.Config(configData, providerToUse); // now parent class can do it's config
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 throw e;
 }
 }
@@ -253,16 +256,14 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 // Each instance gets its own queue processor, but only do this ONCE if we get this method called more than once we don't need to reinit
 // the sub, taht would cause duplicate rprocessing.
 if (!this._queueSubscription) {
-this._queueSubscription = this._sqlQueue
-.pipe((0, rxjs_1.concatMap)((item) => (0, rxjs_1.from)(executeSQLCore(item.query, item.parameters, item.context, item.options)).pipe(
+this._queueSubscription = this._sqlQueue$.pipe((0, rxjs_1.concatMap)(item => (0, rxjs_1.from)(executeSQLCore(item.query, item.parameters, item.context, item.options)).pipe(
 // Handle success
-(0, rxjs_1.tap)(
+(0, rxjs_1.tap)(result => item.resolve(result)),
 // Handle errors
-(0, rxjs_1.catchError)(
+(0, rxjs_1.catchError)(error => {
 item.reject(error);
 return (0, rxjs_1.of)(null); // Continue processing queue even on errors
-}))))
-.subscribe();
+})))).subscribe();
 }
 }
 /**
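The queue hunk above drains `_sqlQueue$` with `concatMap`, so queued statements execute strictly one at a time and each item's promise is resolved or rejected from inside the stream. A self-contained sketch of the same pattern follows; `QueueItem` and `runStatement` are simplified stand-ins for the provider's internal queue item shape and `executeSQLCore`, not the package's real types.

```typescript
import { Subject, from, of } from 'rxjs';
import { concatMap, tap, catchError } from 'rxjs/operators';

interface QueueItem {
    query: string;
    resolve: (result: unknown) => void;
    reject: (error: unknown) => void;
}

// Stand-in executor for executeSQLCore; any promise-returning function works here.
async function runStatement(query: string): Promise<unknown> {
    return { rows: [], query };
}

const sqlQueue$ = new Subject<QueueItem>();

// concatMap starts the next item only after the previous inner observable completes,
// which serializes SQL execution; catchError keeps the queue alive after a failed
// statement instead of tearing down the subscription.
sqlQueue$
    .pipe(
        concatMap((item) =>
            from(runStatement(item.query)).pipe(
                tap((result) => item.resolve(result)),
                catchError((error) => {
                    item.reject(error);
                    return of(null); // continue processing the queue even on errors
                })
            )
        )
    )
    .subscribe();

// Callers enqueue work and await a per-item promise:
function enqueueSQL(query: string): Promise<unknown> {
    return new Promise((resolve, reject) => sqlQueue$.next({ query, resolve, reject }));
}
```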
@@ -337,7 +338,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const mjCoreSchema = this.ConfigData.MJCoreSchemaName;
 const session = new SqlLogger_js_1.SqlLoggingSessionImpl(sessionId, filePath, {
 defaultSchemaName: mjCoreSchema,
-...options
+...options // if defaultSchemaName is not provided, it will use the MJCoreSchemaName, otherwise
 // the caller's defaultSchemaName will be used
 });
 // Initialize the session (create file, write header)
@@ -422,16 +423,16 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 // Check if any session has verbose output enabled for debug logging
 const allSessions = Array.from(this._sqlLoggingSessions.values());
-const hasVerboseSession = allSessions.some(
+const hasVerboseSession = allSessions.some(s => s.options.verboseOutput === true);
 if (hasVerboseSession) {
 console.log('=== SQL LOGGING DEBUG ===');
 console.log(`Query to log: ${query.substring(0, 100)}...`);
 console.log(`Context user email: ${contextUser?.Email || 'NOT_PROVIDED'}`);
 console.log(`Active sessions count: ${this._sqlLoggingSessions.size}`);
-console.log(`All sessions:`, allSessions.map(
+console.log(`All sessions:`, allSessions.map(s => ({
 id: s.id,
 filterByUserId: s.options.filterByUserId,
-sessionName: s.options.sessionName
+sessionName: s.options.sessionName
 })));
 }
 const filteredSessions = allSessions.filter((session) => {
@@ -448,7 +449,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 console.log(`Session ${session.id} filter check:`, {
 filterByUserId: session.options.filterByUserId,
 contextUserEmail: contextUser.Email,
-matches: matches
+matches: matches
 });
 }
 return matches;
@@ -479,7 +480,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 */
 static async LogSQLStatement(query, parameters, description, isMutation = false, simpleSQLFallback, contextUser) {
 // Get the current provider instance
-const provider =
+const provider = core_1.Metadata.Provider;
 if (provider && provider._sqlLoggingSessions.size > 0) {
 await provider._logSqlStatement(query, parameters, description, false, isMutation, simpleSQLFallback, contextUser);
 }
@@ -537,17 +538,17 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 if (!categoryPath)
 return null;
 // Split path and clean segments - remove empty strings from leading/trailing slashes
-const segments = categoryPath
-.
-.
-.filter((s) => s.length > 0);
+const segments = categoryPath.split('/')
+.map(s => s.trim())
+.filter(s => s.length > 0);
 if (segments.length === 0)
 return null;
 // Walk down the hierarchy to find the target category
 let currentCategory = null;
 for (const segment of segments) {
 const parentId = currentCategory?.ID || null;
-currentCategory = this.QueryCategories.find(
+currentCategory = this.QueryCategories.find(cat => cat.Name.trim().toLowerCase() === segment.toLowerCase() &&
+cat.ParentID === parentId);
 if (!currentCategory) {
 return null; // Path not found
 }
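The category-path hunk above normalizes a path such as `/Reports/Finance/` with `split('/')`, `trim`, and `filter`, then walks it segment by segment against `ParentID`. A small illustrative sketch of that walk, using a simplified `Category` shape rather than the package's `QueryCategories` metadata type:

```typescript
interface Category { ID: string; Name: string; ParentID: string | null; }

// Walk a '/'-separated path down a flat category list, matching each segment
// case-insensitively under the previous segment's ID. Returns null if any hop is missing.
function findCategoryByPath(categories: Category[], categoryPath: string): Category | null {
    const segments = categoryPath
        .split('/')
        .map(s => s.trim())
        .filter(s => s.length > 0); // drops empties from leading/trailing slashes
    if (segments.length === 0) return null;

    let current: Category | null = null;
    for (const segment of segments) {
        const parentId = current?.ID ?? null;
        current = categories.find(
            c => c.Name.trim().toLowerCase() === segment.toLowerCase() && c.ParentID === parentId
        ) ?? null;
        if (!current) return null; // path not found
    }
    return current;
}
```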
@@ -567,7 +568,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 */
 async findQuery(QueryID, QueryName, CategoryID, CategoryPath, refreshMetadataIfNotFound = false) {
 // First, get the query metadata
-const queries = this.Queries.filter(
+const queries = this.Queries.filter(q => {
 if (QueryID) {
 return q.ID.trim().toLowerCase() === QueryID.trim().toLowerCase();
 }
@@ -638,11 +639,11 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 ExecutionTime: executionTime,
 ErrorMessage: '',
 AppliedParameters: appliedParameters,
-CacheHit: false
+CacheHit: false
 };
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 const errorMessage = e instanceof Error ? e.message : String(e);
 return {
 Success: false,
@@ -704,7 +705,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 else if (parameters && Object.keys(parameters).length > 0) {
 // Warn if parameters were provided but query doesn't use templates
-(0,
+(0, core_1.LogStatus)('Warning: Parameters provided but query does not use templates. Parameters will be ignored.');
 }
 return { finalSQL, appliedParameters };
 }
@@ -720,10 +721,10 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 if (!cachedEntry) {
 return null;
 }
-(0,
+(0, core_1.LogStatus)(`Cache hit for query ${query.Name} (${query.ID})`);
 // Apply pagination to cached results
 const { paginatedResult, totalRowCount } = this.applyQueryPagination(cachedEntry.results, params);
-const remainingTTL = cachedEntry.timestamp + cachedEntry.ttlMinutes * 60 * 1000 - Date.now();
+const remainingTTL = (cachedEntry.timestamp + (cachedEntry.ttlMinutes * 60 * 1000)) - Date.now();
 return {
 Success: true,
 QueryID: query.ID,
@@ -735,7 +736,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 ErrorMessage: '',
 AppliedParameters: appliedParameters,
 CacheHit: true,
-CacheTTLRemaining: remainingTTL
+CacheTTLRemaining: remainingTTL
 };
 }
 /**
@@ -750,7 +751,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 return {
 result,
-executionTime: end - start
+executionTime: end - start
 };
 }
 /**
@@ -785,11 +786,11 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 RowCount: rowCount,
 TotalRowCount: totalRowCount,
 ExecutionTime: executionTime,
-SQL: finalSQL
+SQL: finalSQL // After parameter substitution
 }), null, // entityId - No specific entity for queries
 query.ID, // recordId
 params.AuditLogDescription, { IgnoreDirtyState: true } // saveOptions
-).catch(
+).catch(error => {
 console.error('Error creating audit log:', error);
 });
 }
@@ -803,7 +804,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 // Cache the full result set (before pagination)
 this.queryCache.set(query.ID, parameters, results, cacheConfig);
-(0,
+(0, core_1.LogStatus)(`Cached results for query ${query.Name} (${query.ID})`);
 }
 /**************************************************************************/
 // END ---- IRunQueryProvider
@@ -864,7 +865,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 return sWhere;
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 throw e;
 }
 }
@@ -911,7 +912,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const saveViewResults = params.SaveViewResults;
 let topSQL = '';
 // Only use TOP if we're NOT using OFFSET/FETCH pagination
-const usingPagination = params.MaxRows && params.MaxRows > 0 && params.StartRow !== undefined && params.StartRow >= 0;
+const usingPagination = params.MaxRows && params.MaxRows > 0 && (params.StartRow !== undefined && params.StartRow >= 0);
 if (params.IgnoreMaxRows === true) {
 // do nothing, leave it blank, this structure is here to make the code easier to read
 }
@@ -929,9 +930,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const fields = this.getRunTimeViewFieldString(params, viewEntity);
 let viewSQL = `SELECT ${topSQL} ${fields} FROM [${entityInfo.SchemaName}].${entityInfo.BaseView}`;
 // We need countSQL for pagination (to get total count) or when using TOP (to show limited vs total)
-let countSQL = usingPagination || (topSQL && topSQL.length > 0)
-? `SELECT COUNT(*) AS TotalRowCount FROM [${entityInfo.SchemaName}].${entityInfo.BaseView}`
-: null;
+let countSQL = (usingPagination || (topSQL && topSQL.length > 0)) ? `SELECT COUNT(*) AS TotalRowCount FROM [${entityInfo.SchemaName}].${entityInfo.BaseView}` : null;
 let whereSQL = '';
 let bHasWhere = false;
 let userViewRunID = '';
@@ -993,9 +992,9 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 }
 // NEXT, apply Row Level Security (RLS)
-if (!entityInfo.UserExemptFromRowLevelSecurity(user,
+if (!entityInfo.UserExemptFromRowLevelSecurity(user, core_1.EntityPermissionType.Read)) {
 // user is NOT exempt from RLS, so we need to apply it
-const rlsWhereClause = entityInfo.GetUserRowLevelSecurityWhereClause(user,
+const rlsWhereClause = entityInfo.GetUserRowLevelSecurityWhereClause(user, core_1.EntityPermissionType.Read, '');
 if (rlsWhereClause && rlsWhereClause.length > 0) {
 if (bHasWhere) {
 whereSQL += ` AND (${rlsWhereClause})`;
@@ -1032,7 +1031,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 viewSQL += ` ORDER BY ${orderBy}`;
 }
 // Apply pagination using OFFSET/FETCH if both MaxRows and StartRow are specified
-if (params.MaxRows && params.MaxRows > 0 && params.StartRow !== undefined && params.StartRow >= 0 && entityInfo.FirstPrimaryKey) {
+if (params.MaxRows && params.MaxRows > 0 && (params.StartRow !== undefined && params.StartRow >= 0) && entityInfo.FirstPrimaryKey) {
 // If no ORDER BY was already added, add one based on primary key (required for OFFSET/FETCH)
 if (!orderBy) {
 viewSQL += ` ORDER BY ${entityInfo.FirstPrimaryKey.Name} `;
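The two pagination hunks above tighten the `usingPagination` check and apply OFFSET/FETCH only when `MaxRows`, `StartRow`, and a primary key are all present, since SQL Server requires an ORDER BY clause for OFFSET/FETCH. A simplified sketch of composing such a clause (not the provider's actual SQL builder) is shown below:

```typescript
interface PageParams { MaxRows?: number; StartRow?: number; }

// Append ORDER BY + OFFSET/FETCH to a SELECT when both MaxRows and StartRow are usable.
// `orderBy` falls back to the primary key because OFFSET/FETCH is invalid without ORDER BY.
function applyPagination(sql: string, params: PageParams, orderBy: string, primaryKey: string): string {
    const usingPagination =
        !!params.MaxRows && params.MaxRows > 0 &&
        params.StartRow !== undefined && params.StartRow >= 0;
    if (!usingPagination) return sql;

    const orderClause = orderBy.length > 0 ? orderBy : primaryKey;
    return `${sql} ORDER BY ${orderClause} OFFSET ${params.StartRow} ROWS FETCH NEXT ${params.MaxRows} ROWS ONLY`;
}

// Example:
// applyPagination('SELECT * FROM [dbo].vwOrders', { MaxRows: 25, StartRow: 50 }, '', 'ID')
// => 'SELECT * FROM [dbo].vwOrders ORDER BY ID OFFSET 50 ROWS FETCH NEXT 25 ROWS ONLY'
```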
@@ -1056,9 +1055,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 const stopTime = new Date();
 if (params.ForceAuditLog ||
-(viewEntity?.ID &&
-(extraFilter === undefined || extraFilter === null || extraFilter?.trim().length === 0) &&
-entityInfo.AuditViewRuns)) {
+(viewEntity?.ID && (extraFilter === undefined || extraFilter === null || extraFilter?.trim().length === 0) && entityInfo.AuditViewRuns)) {
 // ONLY LOG TOP LEVEL VIEW EXECUTION - this would be for views with an ID, and don't have ExtraFilter as ExtraFilter
 // is only used in the system on a tab or just for ad hoc view execution
 // we do NOT want to wait for this, so no await,
@@ -1092,7 +1089,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 catch (e) {
 const exceptionStopTime = new Date();
-(0,
+(0, core_1.LogError)(e);
 return {
 RowCount: 0,
 TotalRowCount: 0,
@@ -1183,7 +1180,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 if (field)
 fieldList.push(field);
 else
-(0,
+(0, core_1.LogError)(`Field ${f} not found in entity ${entityInfo.Name}`);
 });
 }
 else {
@@ -1199,7 +1196,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 fieldList.push(c.EntityField);
 }
 else {
-(0,
+(0, core_1.LogError)(`View Field ${c.Name} doesn't match an Entity Field in entity ${entityInfo.Name}. This can happen if the view was saved with a field that no longer exists in the entity. It is best to update the view to remove this field.`);
 }
 }
 });
@@ -1212,7 +1209,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 }
 finally {
 return fieldList;
@@ -1263,7 +1260,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 else {
 // we have multiple words, so we need to convert the spaces to AND
 // but first, let's strip the stopwords out of the string
-u = (0,
+u = (0, core_1.StripStopWords)(userSearchString);
 // next, include "AND" between all the words so that we have a full text search on all the words
 u = u.replace(/ /g, ' AND ');
 }
@@ -1295,9 +1292,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const authorization = authorizationName
 ? this.Authorizations.find((a) => a?.Name?.trim().toLowerCase() === authorizationName.trim().toLowerCase())
 : null;
-const auditLogType = auditLogTypeName
-? this.AuditLogTypes.find((a) => a?.Name?.trim().toLowerCase() === auditLogTypeName.trim().toLowerCase())
-: null;
+const auditLogType = auditLogTypeName ? this.AuditLogTypes.find((a) => a?.Name?.trim().toLowerCase() === auditLogTypeName.trim().toLowerCase()) : null;
 if (!user)
 throw new Error(`User is a required parameter`);
 if (!auditLogType) {
@@ -1326,7 +1321,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 throw new Error(`Error saving audit log record`);
 }
 catch (err) {
-(0,
+(0, core_1.LogError)(err);
 return null;
 }
 }
@@ -1350,7 +1345,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 // START ---- IEntityDataProvider
 /**************************************************************************/
 get ProviderType() {
-return
+return core_1.ProviderType.Database;
 }
 async GetRecordFavoriteStatus(userId, entityName, CompositeKey, contextUser) {
 const id = await this.GetRecordFavoriteID(userId, entityName, CompositeKey, contextUser);
@@ -1366,7 +1361,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 return null;
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 throw e;
 }
 }
@@ -1399,7 +1394,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 throw e;
 }
 }
@@ -1409,7 +1404,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 return this.ExecuteSQL(sSQL, undefined, undefined, contextUser);
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 throw e;
 }
 }
@@ -1429,7 +1424,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 // we do this in SQL by combining the pirmary key name and value for each row using the default separator defined by the CompositeKey class
 // the output of this should be like the following 'Field1|Value1||Field2|Value2||Field3|Value3' where the || is the CompositeKey.DefaultFieldDelimiter and the | is the CompositeKey.DefaultValueDelimiter
 const quotes = entity.FirstPrimaryKey.NeedsQuotes ? "'" : '';
-const primaryKeySelectString = `CONCAT(${entity.PrimaryKeys.map((pk) => `'${pk.Name}|', CAST(${pk.Name} AS NVARCHAR(MAX))`).join(`,'${
+const primaryKeySelectString = `CONCAT(${entity.PrimaryKeys.map((pk) => `'${pk.Name}|', CAST(${pk.Name} AS NVARCHAR(MAX))`).join(`,'${core_1.CompositeKey.DefaultFieldDelimiter}',`)})`;
 // for this entity, check to see if it has any fields that are soft links, and for each of those, generate the SQL
 entity.Fields.filter((f) => f.EntityIDFieldName && f.EntityIDFieldName.length > 0).forEach((f) => {
 // each field in f must be processed
@@ -1458,7 +1453,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const entityInfo = this.Entities.find((e) => e.Name.trim().toLowerCase() === entityDependency.EntityName?.trim().toLowerCase());
 const quotes = entityInfo.FirstPrimaryKey.NeedsQuotes ? "'" : '';
 const relatedEntityInfo = this.Entities.find((e) => e.Name.trim().toLowerCase() === entityDependency.RelatedEntityName?.trim().toLowerCase());
-const primaryKeySelectString = `CONCAT(${entityInfo.PrimaryKeys.map((pk) => `'${pk.Name}|', CAST(${pk.Name} AS NVARCHAR(MAX))`).join(`,'${
+const primaryKeySelectString = `CONCAT(${entityInfo.PrimaryKeys.map((pk) => `'${pk.Name}|', CAST(${pk.Name} AS NVARCHAR(MAX))`).join(`,'${core_1.CompositeKey.DefaultFieldDelimiter}',`)})`;
 if (sSQL.length > 0)
 sSQL += ' UNION ALL ';
 sSQL += `SELECT
@@ -1508,13 +1503,13 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 // entityInfo.PrimaryKeys.forEach((pk) => {
 // pkeyValues.push({FieldName: pk.Name, Value: r[pk.Name]}) // add all of the primary keys, which often is as simple as just "ID", but this is generic way to do it
 // })
-const compositeKey = new
+const compositeKey = new core_1.CompositeKey();
 // the row r will have a PrimaryKeyValue field that is a string that is a concatenation of the primary key field names and values
 // we need to parse that out so that we can then pass it to the CompositeKey object
 const pkeys = {};
-const keyValues = r.PrimaryKeyValue.split(
+const keyValues = r.PrimaryKeyValue.split(core_1.CompositeKey.DefaultFieldDelimiter);
 keyValues.forEach((kv) => {
-const parts = kv.split(
+const parts = kv.split(core_1.CompositeKey.DefaultValueDelimiter);
 pkeys[parts[0]] = parts[1];
 });
 compositeKey.LoadFromEntityInfoAndRecord(entityInfo, pkeys);
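The hunks above build a concatenated primary-key string in SQL and parse it back apart with `CompositeKey.DefaultFieldDelimiter` / `DefaultValueDelimiter`. A round-trip sketch of that format is below, assuming `||` as the field delimiter and `|` as the value delimiter per the diff's own example string `'Field1|Value1||Field2|Value2||Field3|Value3'`; the real values come from the `CompositeKey` class in `@memberjunction/core`.

```typescript
// Assumed delimiters, matching the example string quoted in the diff's comments.
const FIELD_DELIMITER = '||';
const VALUE_DELIMITER = '|';

// Build 'Field1|Value1||Field2|Value2' from a key-name -> key-value map.
function buildPrimaryKeyValue(keys: Record<string, string>): string {
    return Object.entries(keys)
        .map(([name, value]) => `${name}${VALUE_DELIMITER}${value}`)
        .join(FIELD_DELIMITER);
}

// Parse the concatenated string back into a key-name -> key-value map.
function parsePrimaryKeyValue(primaryKeyValue: string): Record<string, string> {
    const pkeys: Record<string, string> = {};
    for (const kv of primaryKeyValue.split(FIELD_DELIMITER)) {
        const [name, value] = kv.split(VALUE_DELIMITER);
        pkeys[name] = value;
    }
    return pkeys;
}

// buildPrimaryKeyValue({ Field1: 'Value1', Field2: 'Value2' }) => 'Field1|Value1||Field2|Value2'
// parsePrimaryKeyValue('Field1|Value1||Field2|Value2')        => { Field1: 'Value1', Field2: 'Value2' }
```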
@@ -1530,7 +1525,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 catch (e) {
 // log and throw
-(0,
+(0, core_1.LogError)(e);
 throw e;
 }
 }
@@ -1662,7 +1657,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 return result;
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 await this.RollbackTransaction();
 // attempt to persist the status to the DB, although that might fail
 await this.CompleteMergeLogging(mergeRecordLog, result, contextUser);
@@ -1694,7 +1689,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 throw new Error(`Error saving record merge log`);
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 throw e;
 }
 }
@@ -1711,7 +1706,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 if (await recordMergeLog.Save()) {
 // top level saved, now let's create the deletion detail records for each of the records that were merged
 for (const d of result.RecordStatus) {
-const recordMergeDeletionLog =
+const recordMergeDeletionLog = await this.GetEntityObject('Record Merge Deletion Logs', contextUser);
 recordMergeDeletionLog.NewRecord();
 recordMergeDeletionLog.RecordMergeLogID = recordMergeLog.ID;
 recordMergeDeletionLog.DeletedRecordID = d.CompositeKey.Values(); // this would join together all of the primary key values, which is fine as the primary key is a string
@@ -1726,7 +1721,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 catch (e) {
 // do nothing here because we often will get here since some conditions lead to no DB updates possible...
-(0,
+(0, core_1.LogError)(e);
 // don't bubble up the error here as we're sometimes already in an exception block in caller
 }
 }
@@ -1761,7 +1756,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 let oldData = null;
 // use SQL Server CONCAT function to combine all of the primary key values and then combine them together
 // using the default field delimiter and default value delimiter as defined in the CompositeKey class
-const concatPKIDString = `CONCAT(${entity.EntityInfo.PrimaryKeys.map((pk) => `'${pk.CodeName}','${
+const concatPKIDString = `CONCAT(${entity.EntityInfo.PrimaryKeys.map((pk) => `'${pk.CodeName}','${core_1.CompositeKey.DefaultValueDelimiter}',${pk.Name}`).join(`,'${core_1.CompositeKey.DefaultFieldDelimiter}',`)})`;
 if (!bNewRecord)
 oldData = entity.GetAll(true); // get all the OLD values, only do for existing records, for new records, not relevant
 const logRecordChangeSQL = this.GetLogRecordChangeSQL(entity.GetAll(false), oldData, entity.EntityInfo.Name, '@ID', entity.EntityInfo, bNewRecord ? 'Create' : 'Update', user, false);
@@ -1841,7 +1836,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const invocationType = baseType === 'validate' ? 'Validate' : before ? 'Before' + baseTypeType : 'After' + baseTypeType;
 const invocationTypeEntity = engine.InvocationTypes.find((i) => i.Name === invocationType);
 if (!invocationTypeEntity) {
-(0,
+(0, core_1.LogError)(`Invocation Type ${invocationType} not found in metadata`);
 return [];
 // throw new Error(`Invocation Type ${invocationType} not found in metadata`);
 }
@@ -1859,7 +1854,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 return results;
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 return [];
 }
 }
@@ -1908,7 +1903,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e.message);
 }
 }
 }
@@ -1916,16 +1911,16 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 }
 }
 async Save(entity, user, options) {
-const entityResult = new
+const entityResult = new core_1.BaseEntityResult();
 try {
 entity.RegisterTransactionPreprocessing();
 const bNewRecord = !entity.IsSaved;
 if (!options)
-options = new
+options = new core_1.EntitySaveOptions();
 const bReplay = !!options.ReplayOnly;
 if (!bReplay && !bNewRecord && !entity.EntityInfo.AllowUpdateAPI) {
 // existing record and not allowed to update
@@ -1946,7 +1941,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 f.ActiveStatusAssertions = false; // turn off warnings for this operation
 const ret = {
 FieldName: f.Name,
-Value: f.Value
+Value: f.Value
 };
 f.ActiveStatusAssertions = tempStatus; // restore the status assertions
 return ret;
@@ -1991,10 +1986,10 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 // we are part of a transaction group, so just add our query to the list
 // and when the transaction is committed, we will send all the queries at once
 this._bAllowRefresh = false; // stop refreshes of metadata while we're doing work
-entity.TransactionGroup.AddTransaction(new
+entity.TransactionGroup.AddTransaction(new core_1.TransactionItem(entity, entityResult.Type === 'create' ? 'Create' : 'Update', sSQL, null, {
 dataSource: this._pool,
 simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined,
-entityName: entity.EntityInfo.Name
+entityName: entity.EntityInfo.Name
 }, (transactionResult, success) => {
 // we get here whenever the transaction group does gets around to committing
 // our query.
@@ -2036,7 +2031,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const rawResult = await this.ExecuteSQL(sSQL, null, {
 isMutation: true,
 description: `Save ${entity.EntityInfo.Name}`,
-simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined
+simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined
 }, user);
 result = await this.ProcessEntityRows(rawResult, entity.EntityInfo);
 }
@@ -2076,7 +2071,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 this._bAllowRefresh = true; // allow refreshes again if we get a failure here
 entityResult.EndedAt = new Date();
 entityResult.Message = e.message;
-(0,
+(0, core_1.LogError)(e);
 throw e; // rethrow the error
 }
 }
@@ -2193,7 +2188,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 variablesSQL: declarations.length > 0 ? `DECLARE ${declarations.join(',\n ')}` : '',
 setSQL: setStatements.join('\n'),
 execParams: execParams.join(',\n '),
-simpleParams: simpleParams
+simpleParams: simpleParams
 };
 }
 /**
@@ -2205,7 +2200,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 generateSetStatementValue(f, value) {
 let val = value;
 switch (f.TSType) {
-case
+case core_1.EntityFieldTSType.Boolean:
 // check to see if the value is a string and if it is equal to true, if so, set the value to 1
 if (typeof value === 'string' && value.trim().toLowerCase() === 'true')
 val = 1;
@@ -2214,7 +2209,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 else
 val = value ? 1 : 0;
 return val.toString();
-case
+case core_1.EntityFieldTSType.String:
 // Handle string escaping for SET statements
 if (typeof val === 'string') {
 val = val.replace(/'/g, "''");
@@ -2226,7 +2221,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 val = val.replace(/'/g, "''");
 }
 return `${f.UnicodePrefix}'${val}'`;
-case
+case core_1.EntityFieldTSType.Date:
 if (val !== null && val !== undefined) {
 if (typeof val === 'number') {
 // we have a timestamp - milliseconds since Unix Epoch
@@ -2239,7 +2234,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 val = val.toISOString(); // convert the date to ISO format for storage in the DB
 }
 return `'${val}'`;
-case
+case core_1.EntityFieldTSType.Number:
 return val.toString();
 default:
 // For other types, convert to string and quote if needed
@@ -2257,7 +2252,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 let quotes = '';
 let val = value;
 switch (f.TSType) {
-case
+case core_1.EntityFieldTSType.Boolean:
 // check to see if the value is a string and if it is equal to true, if so, set the value to 1
 if (typeof value === 'string' && value.trim().toLowerCase() === 'true')
 val = 1;
@@ -2266,10 +2261,10 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 else
 val = value ? 1 : 0;
 break;
-case
+case core_1.EntityFieldTSType.String:
 quotes = "'";
 break;
-case
+case core_1.EntityFieldTSType.Date:
 quotes = "'";
 if (val !== null && val !== undefined) {
 if (typeof val === 'number') {
@@ -2424,7 +2419,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 // Handle arrays recursively
 if (Array.isArray(obj)) {
-return obj.map(
+return obj.map(item => this.escapeQuotesInProperties(item, quoteToEscape));
 }
 // Handle objects recursively
 if (typeof obj === 'object') {
@@ -2498,14 +2493,14 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 bDiff = false; // this branch of logic ensures that undefined and null are treated the same
 else {
 switch (f.TSType) {
-case
+case core_1.EntityFieldTSType.String:
 bDiff = oldData[key] !== newData[key];
 break;
-case
+case core_1.EntityFieldTSType.Date:
 bDiff = new Date(oldData[key]).getTime() !== new Date(newData[key]).getTime();
 break;
-case
-case
+case core_1.EntityFieldTSType.Number:
+case core_1.EntityFieldTSType.Boolean:
 bDiff = oldData[key] !== newData[key];
 break;
 }
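The switch in the hunk above compares old and new field values per `EntityFieldTSType`: strings, numbers, and booleans by strict equality, dates via `getTime()`, with null and undefined treated as equal. A compact sketch of that comparison, using a local enum as a stand-in for `core_1.EntityFieldTSType`:

```typescript
// Local stand-in for core_1.EntityFieldTSType; the real enum lives in @memberjunction/core.
enum FieldTSType { String, Date, Number, Boolean }

// Returns true when the value changed, treating null and undefined as equal (as the
// surrounding branch in the diff does) and comparing dates by timestamp, not by reference.
function valueChanged(tsType: FieldTSType, oldValue: unknown, newValue: unknown): boolean {
    if (oldValue == null && newValue == null) return false;
    switch (tsType) {
        case FieldTSType.Date:
            return new Date(oldValue as string).getTime() !== new Date(newValue as string).getTime();
        case FieldTSType.String:
        case FieldTSType.Number:
        case FieldTSType.Boolean:
        default:
            return oldValue !== newValue;
    }
}
```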
@@ -2558,7 +2553,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const ret = d[0];
 // we need to post process the retrieval to see if we have any char or nchar fields and we need to remove their trailing spaces
 for (const field of entity.EntityInfo.Fields) {
-if (field.TSType ===
+if (field.TSType === core_1.EntityFieldTSType.String &&
 field.Type.toLowerCase().includes('char') &&
 !field.Type.toLowerCase().includes('varchar')) {
 // trim trailing spaces for char and nchar fields
@@ -2690,11 +2685,11 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 return { fullSQL: sSQL, simpleSQL: sSimpleSQL };
 }
 async Delete(entity, options, user) {
-const result = new
+const result = new core_1.BaseEntityResult();
 try {
 entity.RegisterTransactionPreprocessing();
 if (!options)
-options = new
+options = new core_1.EntityDeleteOptions();
 const bReplay = options.ReplayOnly;
 if (!entity.IsSaved && !bReplay)
 // existing record and not allowed to update
@@ -2722,10 +2717,10 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 entity.RaiseReadyForTransaction();
 // we are part of a transaction group, so just add our query to the list
 // and when the transaction is committed, we will send all the queries at once
-entity.TransactionGroup.AddTransaction(new
+entity.TransactionGroup.AddTransaction(new core_1.TransactionItem(entity, 'Delete', sSQL, null, {
 dataSource: this._pool,
 simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined,
-entityName: entity.EntityInfo.Name
+entityName: entity.EntityInfo.Name
 }, (transactionResult, success) => {
 // we get here whenever the transaction group does gets around to committing
 // our query.
@@ -2766,13 +2761,20 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 d = await this.ExecuteSQL(sSQL, null, {
 isMutation: true,
 description: `Delete ${entity.EntityInfo.Name}`,
-simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined
+simpleSQLFallback: entity.EntityInfo.TrackRecordChanges ? sqlDetails.simpleSQL : undefined
 }, user);
 }
-if (d && d
+if (d && d.length > 0) {
 // SP executed, now make sure the return value matches up as that is how we know the SP was succesfully internally
+// Note: When CASCADE operations exist, multiple result sets are returned (d is array of arrays).
+// When no CASCADE operations exist, a single result set is returned (d is array of objects).
+// We need to handle both cases by checking if the first element is an array.
+const isMultipleResultSets = Array.isArray(d[0]);
+const deletedRecord = isMultipleResultSets
+? d[d.length - 1][0] // Multiple result sets: get last result set, first row
+: d[0]; // Single result set: get first row directly
 for (const key of entity.PrimaryKeys) {
-if (key.Value !==
+if (key.Value !== deletedRecord[key.Name]) {
 // we can get here if the sp returns NULL for a given key. The reason that would be the case is if the record
 // was not found in the DB. This was the existing logic prior to the SP modifications in 2.68.0, just documenting
 // it here for clarity.
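The Delete hunk above now accounts for stored procedures that return multiple result sets when CASCADE operations run: `d` may be an array of arrays (several result sets) or a flat array of rows (one result set). A small hedged sketch of picking out the deleted record in both shapes:

```typescript
type Row = Record<string, unknown>;

// Pick the row describing the deleted record from a stored-procedure result that is either
// a single result set (Row[]) or multiple result sets (Row[][], e.g. when CASCADE deletes also
// return rows); in the multi-set case the final result set holds the deleted record.
function extractDeletedRecord(d: Row[] | Row[][]): Row | undefined {
    if (!d || d.length === 0) return undefined;
    const isMultipleResultSets = Array.isArray(d[0]);
    return isMultipleResultSets
        ? (d as Row[][])[d.length - 1][0] // last result set, first row
        : (d as Row[])[0];                // single result set, first row
}

// A caller can then verify the primary key round-tripped, mirroring the diff's check:
// if (key.Value !== deletedRecord?.[key.Name]) { /* record was not found / not deleted */ }
```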
@@ -2796,7 +2798,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 result.Message = e.message;
 result.Success = false;
 result.EndedAt = new Date();
@@ -2907,8 +2909,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 }
 return acc;
-}, new Date(0)
-);
+}, new Date(0));
 return {
 DatasetID: items[0].DatasetID,
 DatasetName: datasetName,
@@ -3006,7 +3007,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 // the reason we continue below if we have NOT loaded Entities is that when the system first bootstraps, DATASET gets loaded
 // FIRST before Entities are loaded to load the entity metadata so this would ALWAYS fail :)
 // entity not found, return a failed result, shouldn't ever get here due to the foreign key constraint on the table
-(0,
+(0, core_1.LogError)(`Entity not found for dataset item ${item.Code} in dataset ${datasetName}`);
 return null;
 }
 else {
@@ -3020,7 +3021,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 });
 if (invalidColumns.length > 0) {
-(0,
+(0, core_1.LogError)(`Invalid columns specified for dataset item ${item.Code} in dataset ${datasetName}: ${invalidColumns.join(', ')}`);
 return null;
 }
 }
@@ -3139,7 +3140,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const appEntities = await this.ExecuteSQL(`SELECT * FROM [${this.MJCoreSchemaName}].vwApplicationEntities ORDER BY ApplicationName`, undefined, undefined, contextUser);
 const ret = [];
 for (let i = 0; i < apps.length; i++) {
-ret.push(new
+ret.push(new core_1.ApplicationInfo(this, {
 ...apps[i],
 ApplicationEntities: appEntities.filter((ae) => ae.ApplicationName.trim().toLowerCase() === apps[i].Name.trim().toLowerCase()),
 }));
@@ -3150,7 +3151,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const alts = await this.ExecuteSQL(`SELECT * FROM [${this.MJCoreSchemaName}].vwAuditLogTypes`, null, undefined, contextUser);
 const ret = [];
 for (let i = 0; i < alts.length; i++) {
-const alt = new
+const alt = new core_1.AuditLogTypeInfo(alts[i]);
 ret.push(alt);
 }
 return ret;
@@ -3160,7 +3161,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const userRoles = await this.ExecuteSQL(`SELECT * FROM [${this.MJCoreSchemaName}].vwUserRoles ORDER BY UserID`, undefined, undefined, contextUser);
 const ret = [];
 for (let i = 0; i < users.length; i++) {
-ret.push(new
+ret.push(new core_1.UserInfo(this, {
 ...users[i],
 UserRoles: userRoles.filter((ur) => ur.UserID === users[i].ID),
 }));
@@ -3172,7 +3173,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const authRoles = await this.ExecuteSQL(`SELECT * FROM [${this.MJCoreSchemaName}].vwAuthorizationRoles ORDER BY AuthorizationName`, undefined, undefined, contextUser);
 const ret = [];
 for (let i = 0; i < auths.length; i++) {
-ret.push(new
+ret.push(new core_1.AuthorizationInfo(this, {
 ...auths[i],
 AuthorizationRoles: authRoles.filter((ar) => ar.AuthorizationName.trim().toLowerCase() === auths[i].Name.trim().toLowerCase()),
 }));
@@ -3193,7 +3194,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 return rows;
 }
 // Find all datetime fields in the entity
-const datetimeFields = entityInfo.Fields.filter((field) => field.TSType ===
+const datetimeFields = entityInfo.Fields.filter((field) => field.TSType === core_1.EntityFieldTSType.Date);
 // If there are no datetime fields, return the rows as-is
 if (datetimeFields.length === 0) {
 return rows;
@@ -3295,7 +3296,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 context,
 options,
 resolve,
-reject
+reject
 });
 });
 }
@@ -3351,18 +3352,16 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 logSqlStatement: this._logSqlStatement.bind(this),
 clearTransaction: () => {
 this._transaction = null;
-}
+}
 };
 // Convert logging options to internal format
-const options = loggingOptions
-
-
-
-
-
-
-}
-: undefined;
+const options = loggingOptions ? {
+description: loggingOptions.description,
+ignoreLogging: loggingOptions.ignoreLogging,
+isMutation: loggingOptions.isMutation,
+simpleSQLFallback: loggingOptions.simpleSQLFallback,
+contextUser: loggingOptions.contextUser
+} : undefined;
 // Delegate to instance method
 return this._internalExecuteSQLInstance(query, parameters, context, options);
 }
@@ -3381,7 +3380,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 ignoreLogging: options?.ignoreLogging,
 isMutation: options?.isMutation,
 simpleSQLFallback: options?.simpleSQLFallback,
-contextUser: contextUser
+contextUser: contextUser
 });
 // Return recordset for consistency with TypeORM behavior
 // If multiple recordsets, return recordsets array
@@ -3412,14 +3411,14 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 logSqlStatement: async (q, p, d, i, m, s, u) => {
 // Use static logging method
 await SQLServerDataProvider.LogSQLStatement(q, p, d || 'ExecuteSQLWithPool', m || false, s, u);
-}
+}
 };
 // Create options
 const options = {
 description: 'ExecuteSQLWithPool',
 ignoreLogging: false,
 isMutation: false,
-contextUser: contextUser
+contextUser: contextUser
 };
 // Use the static execution method
 const result = await SQLServerDataProvider._internalExecuteSQLStatic(query, parameters, context, options);
@@ -3505,7 +3504,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 transaction: transaction,
 logSqlStatement: async (q, p, d, i, m, s, u) => {
 await SQLServerDataProvider.LogSQLStatement(q, p, d || 'Batch execution', m || false, s, u);
-}
+}
 };
 // Use named parameters for batch SQL
 const namedParams = batchParameters;
@@ -3514,7 +3513,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 description: 'Batch execution',
 ignoreLogging: false,
 isMutation: false,
-contextUser: contextUser
+contextUser: contextUser
 });
 // Return array of recordsets - one for each query
 // Handle both single and multiple recordsets
@@ -3588,14 +3587,14 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 logSqlStatement: this._logSqlStatement.bind(this),
 clearTransaction: () => {
 this._transaction = null;
-}
+}
 };
 // Execute using instance method (which handles queue for transactions)
 const result = await this._internalExecuteSQLInstance(batchSQL, batchParameters, context, {
 description: options?.description || 'Batch execution',
 ignoreLogging: options?.ignoreLogging || false,
 isMutation: options?.isMutation || false,
-contextUser: contextUser
+contextUser: contextUser
 });
 // Return array of recordsets - one for each query
 // Handle both single and multiple recordsets
@@ -3610,7 +3609,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 throw e;
 }
 }
@@ -3712,13 +3711,13 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 // Create savepoint for nested transaction
 await this.ExecuteSQL(`SAVE TRANSACTION ${savepointName}`, null, {
 description: `Creating savepoint ${savepointName} at depth ${this._transactionDepth}`,
-ignoreLogging: true
+ignoreLogging: true
 });
 }
 }
 catch (e) {
 this._transactionDepth--; // Restore depth on error
-(0,
+(0, core_1.LogError)(e);
 throw e; // force caller to handle
 }
 }
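The savepoint hunk above (together with the rollback hunks later in the file) implements nested transactions with `SAVE TRANSACTION` / `ROLLBACK TRANSACTION <name>`, a depth counter, and a savepoint stack. A simplified sketch of that bookkeeping, assuming a generic `exec(sql)` helper rather than the provider's `ExecuteSQL`:

```typescript
// Simplified nested-transaction bookkeeping over SQL Server savepoints.
// `exec` is an assumed helper that runs one SQL statement; it is not the provider's API.
class SavepointManager {
    private depth = 0;
    private counter = 0;
    private savepoints: string[] = [];

    constructor(private exec: (sql: string) => Promise<void>) {}

    async begin(): Promise<void> {
        this.depth++;
        if (this.depth === 1) {
            await this.exec('BEGIN TRANSACTION');
        } else {
            // Nested call: mark a savepoint we can roll back to without losing the outer transaction.
            const name = `SP_${++this.counter}`;
            this.savepoints.push(name);
            await this.exec(`SAVE TRANSACTION ${name}`);
        }
    }

    async rollback(): Promise<void> {
        if (this.depth > 1) {
            const name = this.savepoints.pop();
            await this.exec(`ROLLBACK TRANSACTION ${name}`); // rolls back only to the savepoint
            this.depth--;
        } else if (this.depth === 1) {
            await this.exec('ROLLBACK TRANSACTION'); // rolls back the whole outer transaction
            this.depth = 0;
            this.counter = 0;
            this.savepoints = [];
        }
    }
}
```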
@@ -3749,7 +3748,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 throw e; // force caller to handle
 }
 }
@@ -3775,7 +3774,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 const deferredCount = this._deferredTasks.length;
 this._deferredTasks = [];
 if (deferredCount > 0) {
-(0,
+(0, core_1.LogStatus)(`Cleared ${deferredCount} deferred tasks after transaction rollback`);
 }
 }
 else {
@@ -3786,7 +3785,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 }
 await this.ExecuteSQL(`ROLLBACK TRANSACTION ${savepointName}`, null, {
 description: `Rolling back to savepoint ${savepointName}`,
-ignoreLogging: true
+ignoreLogging: true
 });
 this._savepointStack.pop();
 this._transactionDepth--;
@@ -3801,7 +3800,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 this._savepointCounter = 0;
 this._transactionState$.next(false);
 }
-(0,
+(0, core_1.LogError)(e);
 throw e; // force caller to handle
 }
 }
@@ -3813,7 +3812,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 async RefreshIfNeeded() {
 // Skip refresh if a transaction is active
 if (this.isTransactionActive) {
-(0,
+(0, core_1.LogStatus)('Skipping metadata refresh - transaction is active');
 return false;
 }
 // Call parent implementation if no transaction
@@ -3827,7 +3826,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 async processDeferredTasks() {
 if (this._deferredTasks.length === 0)
 return;
-(0,
+(0, core_1.LogStatus)(`Processing ${this._deferredTasks.length} deferred tasks after transaction commit`);
 // Copy and clear the deferred tasks array
 const tasksToProcess = [...this._deferredTasks];
 this._deferredTasks = [];
@@ -3841,11 +3840,11 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 // Add other task types here as needed
 }
 catch (error) {
-(0,
+(0, core_1.LogError)(`Failed to process deferred ${task.type} task: ${error}`);
 // Continue processing other tasks even if one fails
 }
 }
-(0,
+(0, core_1.LogStatus)(`Completed processing deferred tasks`);
 }
 get LocalStorageProvider() {
 if (!this._localStorageProvider)
@@ -3875,13 +3874,13 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 return data[0][fields[0]]; // return first field
 }
 else {
-(0,
+(0, core_1.LogError)(`Entity ${entityName} record ${CompositeKey.ToString()} not found, returning null`);
 return null;
 }
 }
 }
 catch (e) {
-(0,
+(0, core_1.LogError)(e);
 return null;
 }
 }
@@ -3894,7 +3893,7 @@ class SQLServerDataProvider extends global_1.DatabaseProviderBase {
 if (!f)
 f = e.Fields.find((f) => f.Name === 'Name');
 if (!f) {
-(0,
+(0, core_1.LogError)(`Entity ${entityName} does not have an IsNameField or a field with the column name of Name, returning null, use recordId`);
 return null;
 }
 else {
|