@checkstack/healthcheck-backend 0.6.0 → 0.7.0
- package/CHANGELOG.md +60 -0
- package/package.json +1 -1
- package/src/aggregation-utils.test.ts +65 -0
- package/src/aggregation-utils.ts +111 -29
- package/src/aggregation.test.ts +382 -0
- package/src/router.ts +4 -0
- package/src/service-ordering.test.ts +316 -0
- package/src/service.ts +52 -13
package/CHANGELOG.md
CHANGED

@@ -1,5 +1,65 @@
 # @checkstack/healthcheck-backend
 
+## 0.7.0
+
+### Minor Changes
+
+- 1f81b60: ### Clickable Run History with Deep Linking
+
+  **Backend (`healthcheck-backend`):**
+
+  - Added `getRunById` service method to fetch a single health check run by ID
+
+  **Schema (`healthcheck-common`):**
+
+  - Added `getRunById` RPC procedure for fetching individual runs
+  - Added `historyRun` route for deep linking to specific runs (`/history/:systemId/:configurationId/:runId`)
+
+  **Frontend (`healthcheck-frontend`):**
+
+  - Table rows in Recent Runs and Run History now navigate to the detailed view instead of expanding inline
+  - Added "Selected Run" card that displays when navigating to a specific run
+  - Extracted `ExpandedResultView` into a reusable component
+  - Fixed layout shift during table pagination by preserving previous data while loading
+  - Removed accordion expansion in favor of consistent navigation UX
+
+### Patch Changes
+
+- 090143b: ### Health Check Aggregation & UI Fixes
+
+  **Backend (`healthcheck-backend`):**
+
+  - Fixed tail-end bucket truncation where the last aggregated bucket was cut off at the interval boundary instead of extending to the query end date
+  - Added `rangeEnd` parameter to `reaggregateBuckets()` to properly extend the last bucket
+  - Fixed cross-tier merge logic (`mergeTieredBuckets`) to prevent hourly aggregates from blocking fresh raw data
+
+  **Schema (`healthcheck-common`):**
+
+  - Added `bucketEnd` field to `AggregatedBucketBaseSchema` so frontends know the actual end time of each bucket
+
+  **Frontend (`healthcheck-frontend`):**
+
+  - Updated all components to use `bucket.bucketEnd` instead of calculating it from `bucketIntervalSeconds`
+  - Fixed aggregation mode detection: changed `>` to `>=` so 7-day queries use aggregated data when `rawRetentionDays` is 7
+  - Added ref-based memoization in `useHealthCheckData` to prevent layout shift during signal-triggered refetches
+  - Exposed `isFetching` state to show a loading spinner during background refetches
+  - Added debounced custom date range with an Apply button to prevent fetching on every field change
+  - Added validation preventing start date >= end date in custom ranges
+  - Added sparkline downsampling: when there are 60+ data points, they are aggregated into buckets with informative tooltips
+
+  **UI (`ui`):**
+
+  - Fixed `DateRangeFilter` presets to use true sliding windows (removed `startOfDay` from the 7-day and 30-day ranges)
+  - Added `disabled` prop to the `DateRangeFilter` and `DateTimePicker` components
+  - Added `onCustomChange` prop to `DateRangeFilter` for debounced custom date handling
+  - Improved layout: custom date pickers are now inline with the preset buttons on desktop
+  - Added responsive mobile layout: date pickers stack vertically with a down arrow
+  - Added validation error display for invalid date ranges
+
+- Updated dependencies [1f81b60]
+- Updated dependencies [090143b]
+  - @checkstack/healthcheck-common@0.7.0
+
 ## 0.6.0
 
 ### Minor Changes
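The `bucketEnd` addition is the schema-level half of the tail-end fix: once the server extends the last bucket past its nominal interval, clients can no longer derive the end time themselves. A minimal before/after sketch, assuming only the three fields named in the changelog:

interface AggregatedBucket {
  bucketStart: Date;
  bucketEnd: Date; // new in 0.7.0
  bucketIntervalSeconds: number;
}

// Before: derived end time, wrong for the extended last bucket.
const derivedEnd = (b: AggregatedBucket) =>
  new Date(b.bucketStart.getTime() + b.bucketIntervalSeconds * 1000);

// After: use the end time the backend actually used.
const actualEnd = (b: AggregatedBucket) => b.bucketEnd;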
package/package.json
CHANGED

package/src/aggregation-utils.test.ts
CHANGED

@@ -342,6 +342,66 @@ describe("aggregation-utils", () => {
     expect(result[1].sourceTier).toBe("hourly");
     expect(result[2].sourceTier).toBe("raw");
   });
+
+  it("raw buckets take precedence even when hourly starts earlier (regression test)", () => {
+    /**
+     * Regression test for the "Tail-End Stale" bug:
+     * When an hourly aggregate (e.g., 21:00-22:00) exists and raw data
+     * arrives mid-hour (e.g., 21:48), the raw data should take precedence,
+     * not be blocked by the hourly aggregate.
+     *
+     * Bug scenario:
+     * - Hourly aggregate: 21:00 to 22:00
+     * - Raw buckets: 21:48 to 22:11 (fresh data)
+     * - Old buggy behavior: hourly was processed first (earlier start time),
+     *   set coveredUntil=22:00, and raw was skipped
+     * - Correct behavior: raw always takes precedence, hourly is excluded
+     */
+    const baseTime = 21 * HOUR; // 21:00
+
+    // Hourly bucket covering 21:00-22:00 (stale aggregate)
+    const hourlyBuckets = [
+      createBucket({
+        startMs: baseTime,
+        durationMs: HOUR,
+        runCount: 60, // Old stale data
+        sourceTier: "hourly",
+      }),
+    ];
+
+    // Raw buckets at 21:48 and 22:00 (fresh data that should NOT be blocked)
+    const rawBuckets = [
+      createBucket({
+        startMs: baseTime + 48 * MINUTE, // 21:48
+        durationMs: 12 * MINUTE,
+        runCount: 12, // Fresh data
+        sourceTier: "raw",
+      }),
+      createBucket({
+        startMs: baseTime + HOUR, // 22:00
+        durationMs: 11 * MINUTE,
+        runCount: 11, // Fresh data
+        sourceTier: "raw",
+      }),
+    ];
+
+    const result = mergeTieredBuckets({
+      rawBuckets,
+      hourlyBuckets,
+      dailyBuckets: [],
+    });
+
+    // CRITICAL: Both raw buckets should be included
+    expect(result).toHaveLength(2);
+    expect(result[0].sourceTier).toBe("raw");
+    expect(result[1].sourceTier).toBe("raw");
+    expect(result[0].runCount).toBe(12); // 21:48 bucket
+    expect(result[1].runCount).toBe(11); // 22:00 bucket
+
+    // Hourly bucket should be excluded because raw data covers its range
+    const hourlyInResult = result.find((b) => b.sourceTier === "hourly");
+    expect(hourlyInResult).toBeUndefined();
+  });
 });
 
 describe("combineBuckets", () => {

@@ -520,6 +580,7 @@ describe("aggregation-utils", () => {
       sourceBuckets: [],
       targetIntervalMs: HOUR,
       rangeStart: new Date(0),
+      rangeEnd: new Date(HOUR),
     });
 
     expect(result).toEqual([]);

@@ -552,6 +613,7 @@ describe("aggregation-utils", () => {
       sourceBuckets,
       targetIntervalMs: HOUR,
       rangeStart: new Date(0),
+      rangeEnd: new Date(HOUR),
     });
 
     expect(result).toHaveLength(1);

@@ -585,6 +647,7 @@ describe("aggregation-utils", () => {
       sourceBuckets,
       targetIntervalMs: HOUR,
       rangeStart: new Date(0),
+      rangeEnd: new Date(2 * HOUR),
     });
 
     expect(result).toHaveLength(2);

@@ -611,6 +674,7 @@ describe("aggregation-utils", () => {
       sourceBuckets,
       targetIntervalMs: HOUR,
      rangeStart,
+      rangeEnd: new Date(rangeStart.getTime() + HOUR),
     });
 
     expect(result).toHaveLength(1);

@@ -633,6 +697,7 @@ describe("aggregation-utils", () => {
       sourceBuckets,
       targetIntervalMs: HOUR,
       rangeStart: new Date(0),
+      rangeEnd: new Date(3 * HOUR),
     });
 
     expect(result).toHaveLength(3);
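The new test calls a `createBucket` helper and `HOUR`/`MINUTE` constants defined earlier in the file, outside this diff. A plausible reconstruction is sketched below; only the fields the diffed code reads (`bucketStart`, `bucketEndMs`, `runCount`, `sourceTier`) are grounded in this diff, the rest is assumption:

const MINUTE = 60 * 1000;
const HOUR = 60 * MINUTE;

// Hypothetical test helper; the real NormalizedBucket likely carries more
// counters (healthyCount, etc.) than shown here.
function createBucket(opts: {
  startMs: number;
  durationMs: number;
  runCount: number;
  sourceTier: "raw" | "hourly" | "daily";
}) {
  return {
    bucketStart: new Date(opts.startMs),
    bucketEndMs: opts.startMs + opts.durationMs,
    runCount: opts.runCount,
    sourceTier: opts.sourceTier,
  };
}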
package/src/aggregation-utils.ts
CHANGED

@@ -193,6 +193,10 @@ const TIER_PRIORITY: Record<NormalizedBucket["sourceTier"], number> = {
 /**
  * Merge buckets from different tiers, preferring most granular data.
  * For overlapping time periods, uses priority: raw > hourly > daily.
+ *
+ * IMPORTANT: Raw buckets always take precedence over hourly/daily aggregates,
+ * even when the aggregate bucket starts earlier. This ensures fresh raw data
+ * is never blocked by stale pre-computed aggregates.
  */
 export function mergeTieredBuckets(params: {
   rawBuckets: NormalizedBucket[];

@@ -201,42 +205,108 @@ export function mergeTieredBuckets(params: {
 }): NormalizedBucket[] {
   const { rawBuckets, hourlyBuckets, dailyBuckets } = params;
 
+  if (
+    rawBuckets.length === 0 &&
+    hourlyBuckets.length === 0 &&
+    dailyBuckets.length === 0
+  ) {
     return [];
   }
 
+  // Two-pass approach:
+  // 1. First, collect all time ranges covered by raw data (highest priority)
+  // 2. Then, add hourly/daily buckets only for gaps not covered by raw data
+
+  // Build a sorted list of raw bucket time ranges for efficient lookup
+  const rawTimeRanges = rawBuckets
+    .map((b) => ({
+      start: b.bucketStart.getTime(),
+      end: b.bucketEndMs,
+    }))
+    .toSorted((a, b) => a.start - b.start);
+
+  // Merge overlapping raw time ranges into continuous coverage
+  const rawCoverage: Array<{ start: number; end: number }> = [];
+  for (const range of rawTimeRanges) {
+    if (rawCoverage.length === 0) {
+      rawCoverage.push({ ...range });
+    } else {
+      const last = rawCoverage.at(-1)!;
+      // If this range overlaps or is adjacent to the last, extend it
+      if (range.start <= last.end) {
+        last.end = Math.max(last.end, range.end);
+      } else {
+        rawCoverage.push({ ...range });
+      }
+    }
+  }
 
+  // Helper: check if a bucket has ANY overlap with raw data
+  // Two ranges overlap if: start1 < end2 AND start2 < end1
+  const doesBucketOverlapWithRaw = (bucket: NormalizedBucket): boolean => {
+    const bucketStart = bucket.bucketStart.getTime();
+    const bucketEnd = bucket.bucketEndMs;
+
+    for (const range of rawCoverage) {
+      // Check for overlap: ranges overlap if they intersect
+      if (bucketStart < range.end && range.start < bucketEnd) {
+        return true;
+      }
+      // Optimization: if raw range starts after bucket ends, no more overlaps possible
+      if (range.start >= bucketEnd) {
+        break;
       }
-      continue;
     }
+    return false;
+  };
 
+  // Start with all raw buckets (they always take precedence)
+  const result: NormalizedBucket[] = [...rawBuckets];
+
+  // Add hourly buckets that don't overlap with raw data
+  for (const bucket of hourlyBuckets) {
+    if (!doesBucketOverlapWithRaw(bucket)) {
+      result.push(bucket);
+    }
   }
 
+  // Add daily buckets that don't overlap with raw or hourly data
+  // Build hourly coverage to check against
+  const hourlyTimeRanges = hourlyBuckets
+    .map((b) => ({
+      start: b.bucketStart.getTime(),
+      end: b.bucketEndMs,
+    }))
+    .toSorted((a, b) => a.start - b.start);
+
+  // Helper: check if a bucket has ANY overlap with hourly data
+  const doesBucketOverlapWithHourly = (bucket: NormalizedBucket): boolean => {
+    const bucketStart = bucket.bucketStart.getTime();
+    const bucketEnd = bucket.bucketEndMs;
+
+    for (const range of hourlyTimeRanges) {
+      if (bucketStart < range.end && range.start < bucketEnd) {
+        return true;
+      }
+      if (range.start >= bucketEnd) {
+        break;
+      }
+    }
+    return false;
+  };
+
+  for (const bucket of dailyBuckets) {
+    if (
+      !doesBucketOverlapWithRaw(bucket) &&
+      !doesBucketOverlapWithHourly(bucket)
+    ) {
+      result.push(bucket);
+    }
+  }
+
+  // Sort final result by bucket start time
+  result.sort((a, b) => a.bucketStart.getTime() - b.bucketStart.getTime());
+
   return result;
 }

@@ -349,19 +419,24 @@ export function combineBuckets(params: {
 /**
  * Re-aggregate a list of normalized buckets into target-sized buckets.
  * Groups source buckets by target bucket boundaries and combines them.
+ *
+ * @param rangeEnd - The end of the query range. The last bucket will extend
+ * to this time to ensure data is visually represented up to the query end.
  */
 export function reaggregateBuckets(params: {
   sourceBuckets: NormalizedBucket[];
   targetIntervalMs: number;
   rangeStart: Date;
+  rangeEnd: Date;
 }): NormalizedBucket[] {
-  const { sourceBuckets, targetIntervalMs, rangeStart } = params;
+  const { sourceBuckets, targetIntervalMs, rangeStart, rangeEnd } = params;
 
   if (sourceBuckets.length === 0) {
     return [];
   }
 
   const rangeStartMs = rangeStart.getTime();
+  const rangeEndMs = rangeEnd.getTime();
 
   // Group source buckets by target bucket index
   const bucketGroups = new Map<number, NormalizedBucket[]>();

@@ -379,9 +454,16 @@ export function reaggregateBuckets(params: {
   // Combine each group into a single target bucket
   const result: NormalizedBucket[] = [];
 
+  // Find the maximum bucket index to identify the last bucket
+  const maxIndex = Math.max(...bucketGroups.keys());
+
   for (const [index, buckets] of bucketGroups) {
     const targetBucketStart = new Date(rangeStartMs + index * targetIntervalMs);
-    const
+    const intervalEndMs = targetBucketStart.getTime() + targetIntervalMs;
+
+    // For the last bucket, extend to rangeEnd to capture all trailing data
+    const targetBucketEndMs =
+      index === maxIndex ? Math.max(intervalEndMs, rangeEndMs) : intervalEndMs;
 
     result.push(
       combineBuckets({
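A worked example of the last-bucket extension above, with hypothetical numbers: an hourly target interval over a 90-minute query whose only populated bucket starts 30 minutes in.

const HOUR_MS = 3_600_000;
const rangeStartMs = 0;
const rangeEndMs = 90 * 60_000; // query ends 90 minutes in

// The populated source bucket at minute 30 falls into target index 0,
// which is also the max (last) index:
const index = Math.floor((30 * 60_000) / HOUR_MS); // 0
const intervalEndMs = rangeStartMs + (index + 1) * HOUR_MS; // minute 60

// The truncated behavior ended the bucket at minute 60; the fix extends
// the last bucket to the query end instead:
const targetBucketEndMs = Math.max(intervalEndMs, rangeEndMs); // minute 90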
package/src/aggregation.test.ts
CHANGED

@@ -476,4 +476,386 @@ describe("HealthCheckService.getAggregatedHistory", () => {
     expect(collectorData.successRate).toBe(100);
   });
 });
+
+describe("recent runs near endDate - edge case for live data", () => {
+  /**
+   * This test suite verifies that runs occurring close to the query endDate
+   * are properly included in the last bucket. This is critical for real-time
+   * dashboards where users expect to see data up to "now".
+   *
+   * Scenario: User queries "Last 7 days" and runs have been occurring every minute.
+   * The last bucket should include runs up to the endDate, not stop at a previous
+   * bucket boundary.
+   */
+
+  it("includes runs right up to endDate in the last bucket", async () => {
+    // Simulate a 7-day query range that creates ~20-minute buckets
+    // 7 days = 604,800 seconds / 500 target points = 1,209.6 seconds (~20 min) per bucket
+    const endDate = new Date("2026-01-20T22:05:00Z"); // Current time
+    const startDate = new Date("2026-01-13T22:05:00Z"); // 7 days ago
+
+    // Create runs: some old ones and some very recent ones near endDate
+    const runs = [
+      // Old run at the start of the range
+      {
+        id: "run-old-1",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "healthy" as const,
+        latencyMs: 100,
+        result: { statusCode: 200 },
+        timestamp: new Date("2026-01-13T22:10:00Z"),
+      },
+      // Run ~25 minutes before endDate
+      {
+        id: "run-recent-1",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "unhealthy" as const,
+        latencyMs: 2500,
+        result: { statusCode: 500 },
+        timestamp: new Date("2026-01-20T21:40:00Z"),
+      },
+      // Run ~15 minutes before endDate
+      {
+        id: "run-recent-2",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "unhealthy" as const,
+        latencyMs: 3000,
+        result: { statusCode: 503 },
+        timestamp: new Date("2026-01-20T21:50:00Z"),
+      },
+      // Run ~5 minutes before endDate
+      {
+        id: "run-recent-3",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "unhealthy" as const,
+        latencyMs: 2800,
+        result: { statusCode: 502 },
+        timestamp: new Date("2026-01-20T22:00:00Z"),
+      },
+      // Run 2 minutes before endDate - SHOULD BE IN LAST BUCKET
+      {
+        id: "run-recent-4",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "unhealthy" as const,
+        latencyMs: 2600,
+        result: { statusCode: 500 },
+        timestamp: new Date("2026-01-20T22:03:00Z"),
+      },
+      // Run 1 minute before endDate - SHOULD BE IN LAST BUCKET
+      {
+        id: "run-recent-5",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "unhealthy" as const,
+        latencyMs: 2700,
+        result: { statusCode: 500 },
+        timestamp: new Date("2026-01-20T22:04:00Z"),
+      },
+    ];
+
+    mockRunsResult = runs;
+    mockConfigResult = { id: "config-1", strategyId: "http" };
+
+    const result = await service.getAggregatedHistory(
+      {
+        systemId: "sys-1",
+        configurationId: "config-1",
+        startDate,
+        endDate,
+        targetPoints: 500,
+      },
+      { includeAggregatedResult: true },
+    );
+
+    // We should have buckets that cover the entire range
+    expect(result.buckets.length).toBeGreaterThan(0);
+
+    // Find the last bucket
+    const lastBucket = result.buckets[result.buckets.length - 1];
+
+    // The last bucket should contain runs from the most recent times
+    // Specifically, the runs at 22:03 and 22:04 should be in some bucket
+    const allRunCounts = result.buckets.reduce(
+      (sum, b) => sum + b.runCount,
+      0,
+    );
+    expect(allRunCounts).toBe(6); // All 6 runs should be accounted for
+
+    // The last bucket's bucketStart + interval should cover endDate
+    const bucketIntervalMs = result.bucketIntervalSeconds * 1000;
+    const lastBucketEnd = new Date(
+      lastBucket.bucketStart.getTime() + bucketIntervalMs,
+    );
+
+    // Last bucket should extend close to endDate
+    // (within one bucket interval of endDate)
+    expect(lastBucketEnd.getTime()).toBeGreaterThanOrEqual(
+      endDate.getTime() - bucketIntervalMs,
+    );
+  });
+
+  it("includes partial last bucket when endDate is mid-bucket", async () => {
+    // Scenario: Query ends at a time that's not aligned to bucket boundaries
+    // Runs exist right before endDate and should still appear
+    const startDate = new Date("2026-01-20T21:00:00Z");
+    const endDate = new Date("2026-01-20T22:00:00Z"); // 1 hour range
+
+    // Create runs every 5 minutes for the hour
+    const runs = [];
+    for (let i = 0; i < 12; i++) {
+      runs.push({
+        id: `run-${i}`,
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "healthy" as const,
+        latencyMs: 100 + i * 10,
+        result: { statusCode: 200 },
+        timestamp: new Date(
+          startDate.getTime() + i * 5 * 60 * 1000, // Every 5 minutes
+        ),
+      });
+    }
+
+    mockRunsResult = runs;
+    mockConfigResult = { id: "config-1", strategyId: "http" };
+
+    // Use 10 target points = 6-minute buckets
+    const result = await service.getAggregatedHistory(
+      {
+        systemId: "sys-1",
+        configurationId: "config-1",
+        startDate,
+        endDate,
+        targetPoints: 10,
+      },
+      { includeAggregatedResult: true },
+    );
+
+    // All 12 runs should be accounted for in the buckets
+    const allRunCounts = result.buckets.reduce(
+      (sum, b) => sum + b.runCount,
+      0,
+    );
+    expect(allRunCounts).toBe(12);
+
+    // Last bucket should exist and have runs in it
+    const lastBucket = result.buckets[result.buckets.length - 1];
+    expect(lastBucket.runCount).toBeGreaterThan(0);
+
+    // The run at 21:55 (55 minutes after start) should be in a bucket
+    // That's bucket index floor(55/6) = 9 (the last bucket)
+    expect(result.buckets.length).toBe(10);
+  });
+
+  it("handles runs at exact endDate boundary", async () => {
+    const startDate = new Date("2026-01-20T21:00:00Z");
+    const endDate = new Date("2026-01-20T22:00:00Z");
+
+    const runs = [
+      {
+        id: "run-start",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "healthy" as const,
+        latencyMs: 100,
+        result: { statusCode: 200 },
+        timestamp: new Date("2026-01-20T21:00:00Z"), // Exact start
+      },
+      {
+        id: "run-end",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "unhealthy" as const,
+        latencyMs: 5000,
+        result: { statusCode: 500 },
+        timestamp: new Date("2026-01-20T22:00:00Z"), // Exact end
+      },
+    ];
+
+    mockRunsResult = runs;
+    mockConfigResult = { id: "config-1", strategyId: "http" };
+
+    const result = await service.getAggregatedHistory(
+      {
+        systemId: "sys-1",
+        configurationId: "config-1",
+        startDate,
+        endDate,
+        targetPoints: 10,
+      },
+      { includeAggregatedResult: true },
+    );
+
+    // Both runs should be included
+    const allRunCounts = result.buckets.reduce(
+      (sum, b) => sum + b.runCount,
+      0,
+    );
+    expect(allRunCounts).toBe(2);
+
+    // First bucket should have the start run
+    expect(result.buckets[0].runCount).toBeGreaterThan(0);
+
+    // Last bucket should have the end run
+    const lastBucket = result.buckets[result.buckets.length - 1];
+    expect(lastBucket.runCount).toBeGreaterThan(0);
+  });
+
+  it("simulates real-world 7-day dashboard with minute-by-minute runs", async () => {
+    /**
+     * Real-world simulation:
+     * - 7 day query range
+     * - Health check runs every minute
+     * - Query with 500 target points (~20 min buckets)
+     * - Recent runs should appear in the latest bucket
+     */
+    const endDate = new Date("2026-01-20T22:05:00Z");
+    const startDate = new Date("2026-01-13T22:05:00Z");
+
+    // Create runs for the last 30 minutes (simulates recent activity)
+    const runs = [];
+    for (let i = 0; i < 30; i++) {
+      const timestamp = new Date(endDate.getTime() - (30 - i) * 60 * 1000);
+      runs.push({
+        id: `run-${i}`,
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: i % 3 === 0 ? ("unhealthy" as const) : ("healthy" as const),
+        latencyMs: 100 + i * 10,
+        result: { statusCode: i % 3 === 0 ? 500 : 200 },
+        timestamp,
+      });
+    }
+
+    mockRunsResult = runs;
+    mockConfigResult = { id: "config-1", strategyId: "http" };
+
+    const result = await service.getAggregatedHistory(
+      {
+        systemId: "sys-1",
+        configurationId: "config-1",
+        startDate,
+        endDate,
+        targetPoints: 500,
+      },
+      { includeAggregatedResult: true },
+    );
+
+    // Verify the bucket interval is roughly 20 minutes for 7 days / 500 points
+    // 7 days = 604,800 seconds / 500 = 1,209.6 seconds (~20 min)
+    expect(result.bucketIntervalSeconds).toBe(1210);
+
+    // All 30 runs should be in buckets
+    const allRunCounts = result.buckets.reduce(
+      (sum, b) => sum + b.runCount,
+      0,
+    );
+    expect(allRunCounts).toBe(30);
+
+    // The most recent run (at endDate - 1 minute) should be in a bucket
+    // This is the critical assertion for the bug we're fixing
+    const lastBucket = result.buckets[result.buckets.length - 1];
+
+    // Last bucket should have some runs
+    expect(lastBucket.runCount).toBeGreaterThan(0);
+
+    // The last bucket's end time should be near endDate
+    const bucketIntervalMs = result.bucketIntervalSeconds * 1000;
+    const lastBucketEnd = new Date(
+      lastBucket.bucketStart.getTime() + bucketIntervalMs,
+    );
+
+    // Last bucket should extend to cover the endDate or be within one interval
+    const timeDiffMs = endDate.getTime() - lastBucketEnd.getTime();
+    expect(timeDiffMs).toBeLessThan(bucketIntervalMs);
+  });
+
+  it("verifies last bucket contains the most recent timestamp", async () => {
+    const startDate = new Date("2026-01-20T21:00:00Z");
+    const endDate = new Date("2026-01-20T22:05:00Z"); // 65 minutes
+
+    // Create runs with known timestamps
+    const runs = [
+      {
+        id: "run-1",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "healthy" as const,
+        latencyMs: 100,
+        result: {},
+        timestamp: new Date("2026-01-20T21:05:00Z"),
+      },
+      {
+        id: "run-2",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "healthy" as const,
+        latencyMs: 100,
+        result: {},
+        timestamp: new Date("2026-01-20T21:35:00Z"),
+      },
+      // Most recent run - 2 minutes before endDate
+      {
+        id: "run-latest",
+        systemId: "sys-1",
+        configurationId: "config-1",
+        strategyId: "http",
+        status: "unhealthy" as const,
+        latencyMs: 5000,
+        result: { statusCode: 500 },
+        timestamp: new Date("2026-01-20T22:03:00Z"),
+      },
+    ];
+
+    mockRunsResult = runs;
+    mockConfigResult = { id: "config-1", strategyId: "http" };
+
+    // Use 5 target points = 13-minute buckets
+    const result = await service.getAggregatedHistory(
+      {
+        systemId: "sys-1",
+        configurationId: "config-1",
+        startDate,
+        endDate,
+        targetPoints: 5,
+      },
+      { includeAggregatedResult: true },
+    );
+
+    expect(result.buckets.length).toBeGreaterThan(0);
+
+    // All 3 runs should be included
+    const allRunCounts = result.buckets.reduce(
+      (sum, b) => sum + b.runCount,
+      0,
+    );
+    expect(allRunCounts).toBe(3);
+
+    // The last bucket should have the unhealthy run
+    const lastBucket = result.buckets[result.buckets.length - 1];
+    expect(lastBucket.unhealthyCount).toBe(1);
+
+    // The bucket containing the 22:03 run should have bucketStart before 22:03
+    expect(lastBucket.bucketStart.getTime()).toBeLessThanOrEqual(
+      new Date("2026-01-20T22:03:00Z").getTime(),
+    );
+  });
+});
 });
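The `toBe(1210)` assertion pins down how the service appears to size buckets: the query range in seconds divided by `targetPoints`, rounded up to a whole second. A sketch of that inferred formula (the actual implementation is not part of this diff):

// 7 days / 500 points: 604,800 s / 500 = 1,209.6 s, asserted above as 1210.
function inferredBucketIntervalSeconds(
  startDate: Date,
  endDate: Date,
  targetPoints: number,
): number {
  const rangeSeconds = (endDate.getTime() - startDate.getTime()) / 1000;
  return Math.ceil(rangeSeconds / targetPoints);
}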
package/src/router.ts
CHANGED

@@ -178,6 +178,10 @@ export const createHealthCheckRouter = (
     return service.getDetailedHistory(input);
   }),
 
+  getRunById: os.getRunById.handler(async ({ input }) => {
+    return service.getRunById(input);
+  }),
+
   getAggregatedHistory: os.getAggregatedHistory.handler(async ({ input }) => {
     return service.getAggregatedHistory(input, {
       includeAggregatedResult: false,
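The handler wires the `getRunById` procedure from `healthcheck-common` (its contract is not part of this diff) to the new service method shown in `service.ts` below. A hypothetical caller, with a made-up run ID, illustrating the undefined-on-miss contract:

const run = await service.getRunById({ runId: "run-123" }); // example ID
if (run === undefined) {
  // No run with that ID; the frontend's deep link
  // (/history/:systemId/:configurationId/:runId) would show a not-found state.
} else {
  console.log(run.status, run.latencyMs ?? "n/a");
}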
package/src/service-ordering.test.ts
ADDED

@@ -0,0 +1,316 @@
+import { describe, it, expect, mock, beforeEach } from "bun:test";
+import type { InferSelectModel } from "drizzle-orm";
+import { HealthCheckService } from "./service";
+import {
+  healthCheckRuns,
+  systemHealthChecks,
+  healthCheckConfigurations,
+} from "./schema";
+
+/**
+ * Tests for correct data ordering in service methods.
+ * Verifies the Parametric Sort-at-Source standard:
+ * - getSystemHealthOverview returns runs in chronological order (oldest first) for sparklines
+ * - getHistory and getDetailedHistory respect the sortOrder parameter
+ */
+describe("HealthCheckService data ordering", () => {
+  let mockRegistry: ReturnType<typeof createMockRegistry>;
+  let service: HealthCheckService;
+
+  // Use real types from the schema to catch schema drift
+  type HealthCheckRun = InferSelectModel<typeof healthCheckRuns>;
+  type SystemHealthCheck = InferSelectModel<typeof systemHealthChecks>;
+  type HealthCheckConfiguration = InferSelectModel<
+    typeof healthCheckConfigurations
+  >;
+
+  // Mock data storage - using Partial to allow minimal test data
+  type MockAssociation = Pick<
+    SystemHealthCheck,
+    "configurationId" | "enabled" | "stateThresholds"
+  > &
+    Pick<HealthCheckConfiguration, "strategyId" | "intervalSeconds"> & {
+      configName: string;
+    };
+  let mockAssociations: MockAssociation[] = [];
+  let mockRuns: Partial<HealthCheckRun>[] = [];
+
+  function createMockRegistry() {
+    return {
+      register: mock(),
+      getStrategies: mock(() => []),
+      getStrategy: mock(() => null),
+    };
+  }
+
+  function createMockDb() {
+    // Create a comprehensive mock that handles all query patterns.
+    // The service uses different patterns:
+    // - getHistory/getDetailedHistory: .orderBy().limit().offset()
+    // - getSystemHealthOverview: .orderBy().limit() (no offset)
+    const offsetMock = mock(() => Promise.resolve([...mockRuns]));
+
+    // limitMock needs to be both a thenable (for await) and have .offset()
+    const createLimitResult = () => {
+      const result = Promise.resolve([...mockRuns]);
+      // @ts-expect-error - Adding offset to Promise
+      result.offset = offsetMock;
+      return result;
+    };
+
+    const orderByMock = mock(() => ({
+      limit: mock(createLimitResult),
+    }));
+    const whereMock = mock(() => ({
+      orderBy: orderByMock,
+      limit: mock(createLimitResult),
+    }));
+    const innerJoinMock = mock(() => ({
+      where: mock(() => Promise.resolve([...mockAssociations])),
+    }));
+    const fromMock = mock(() => ({
+      where: whereMock,
+      innerJoin: innerJoinMock,
+      orderBy: orderByMock,
+    }));
+
+    return {
+      select: mock(() => ({ from: fromMock })),
+      $count: mock(() => Promise.resolve(mockRuns.length)),
+    };
+  }
+
+  beforeEach(() => {
+    mockAssociations = [];
+    mockRuns = [];
+    mockRegistry = createMockRegistry();
+  });
+
+  describe("getSystemHealthOverview", () => {
+    it("returns recentRuns in chronological order (oldest first) for sparkline display", async () => {
+      // Setup: runs that would be returned from the DB in DESC order (newest first)
+      const oldRun = {
+        id: "run-old",
+        status: "healthy" as const,
+        timestamp: new Date("2024-01-01T10:00:00Z"),
+      };
+      const midRun = {
+        id: "run-mid",
+        status: "degraded" as const,
+        timestamp: new Date("2024-01-01T11:00:00Z"),
+      };
+      const newRun = {
+        id: "run-new",
+        status: "unhealthy" as const,
+        timestamp: new Date("2024-01-01T12:00:00Z"),
+      };
+
+      mockAssociations = [
+        {
+          configurationId: "config-1",
+          configName: "Test Config",
+          strategyId: "http",
+          intervalSeconds: 60,
+          enabled: true,
+          stateThresholds: null,
+        },
+      ];
+
+      // The DB returns DESC order (newest first) - the service should reverse for the sparkline
+      mockRuns = [newRun, midRun, oldRun];
+
+      const mockDb = createMockDb();
+      service = new HealthCheckService(mockDb as never, mockRegistry as never);
+
+      const result = await service.getSystemHealthOverview("system-1");
+
+      // Verify recentRuns are in chronological order (oldest first).
+      // This is what sparklines expect: oldest on the left, newest on the right.
+      expect(result.checks).toHaveLength(1);
+      expect(result.checks[0].recentRuns).toHaveLength(3);
+      expect(result.checks[0].recentRuns[0].id).toBe("run-old"); // Oldest first
+      expect(result.checks[0].recentRuns[1].id).toBe("run-mid");
+      expect(result.checks[0].recentRuns[2].id).toBe("run-new"); // Newest last
+    });
+
+    it("returns the most recent 25 runs in chronological order", async () => {
+      mockAssociations = [
+        {
+          configurationId: "config-1",
+          configName: "Test Config",
+          strategyId: "http",
+          intervalSeconds: 60,
+          enabled: true,
+          stateThresholds: null,
+        },
+      ];
+
+      // Create 25 runs in DESC order (how the DB returns them)
+      mockRuns = Array.from({ length: 25 }, (_, i) => ({
+        id: `run-${24 - i}`, // 24 down to 0 (newest to oldest)
+        status: "healthy" as const,
+        timestamp: new Date(
+          `2024-01-01T${String(24 - i).padStart(2, "0")}:00:00Z`,
+        ),
+      }));
+
+      const mockDb = createMockDb();
+      service = new HealthCheckService(mockDb as never, mockRegistry as never);
+
+      const result = await service.getSystemHealthOverview("system-1");
+
+      // After the service reverses: oldest (run-0) should be first, newest (run-24) last
+      expect(result.checks[0].recentRuns).toHaveLength(25);
+      expect(result.checks[0].recentRuns[0].id).toBe("run-0"); // Oldest first
+      expect(result.checks[0].recentRuns[24].id).toBe("run-24"); // Newest last
+    });
+  });
+
+  describe("getHistory sortOrder parameter", () => {
+    it("respects sortOrder asc - returns runs in chronological order", async () => {
+      // Simulate the DB returning runs in ASC order
+      mockRuns = [
+        {
+          id: "1",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T10:00:00Z"),
+        },
+        {
+          id: "2",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T11:00:00Z"),
+        },
+        {
+          id: "3",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T12:00:00Z"),
+        },
+      ];
+
+      const mockDb = createMockDb();
+      service = new HealthCheckService(mockDb as never, mockRegistry as never);
+
+      const result = await service.getHistory({
+        systemId: "sys-1",
+        configurationId: "config-1",
+        sortOrder: "asc",
+        limit: 10,
+      });
+
+      // Verify runs are returned as-is from the mock (ASC order)
+      expect(result.runs[0].id).toBe("1"); // Oldest first
+      expect(result.runs[2].id).toBe("3"); // Newest last
+    });
+
+    it("respects sortOrder desc - returns runs in reverse chronological order", async () => {
+      // Simulate the DB returning runs in DESC order
+      mockRuns = [
+        {
+          id: "3",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T12:00:00Z"),
+        },
+        {
+          id: "2",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T11:00:00Z"),
+        },
+        {
+          id: "1",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T10:00:00Z"),
+        },
+      ];
+
+      const mockDb = createMockDb();
+      service = new HealthCheckService(mockDb as never, mockRegistry as never);
+
+      const result = await service.getHistory({
+        systemId: "sys-1",
+        configurationId: "config-1",
+        sortOrder: "desc",
+        limit: 10,
+      });
+
+      // Verify runs are returned as-is from the mock (DESC order)
+      expect(result.runs[0].id).toBe("3"); // Newest first
+      expect(result.runs[2].id).toBe("1"); // Oldest last
+    });
+  });
+
+  describe("getDetailedHistory sortOrder parameter", () => {
+    it("respects sortOrder asc - returns runs in chronological order", async () => {
+      // Simulate the DB returning runs in ASC order
+      mockRuns = [
+        {
+          id: "1",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T10:00:00Z"),
+          latencyMs: 100,
+          result: {},
+        },
+        {
+          id: "2",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T11:00:00Z"),
+          latencyMs: 150,
+          result: {},
+        },
+      ];
+
+      const mockDb = createMockDb();
+      service = new HealthCheckService(mockDb as never, mockRegistry as never);
+
+      const result = await service.getDetailedHistory({
+        systemId: "sys-1",
+        configurationId: "config-1",
+        startDate: new Date("2024-01-01T00:00:00Z"),
+        endDate: new Date("2024-01-02T00:00:00Z"),
+        sortOrder: "asc",
+        limit: 10,
+        offset: 0,
+      });
+
+      // Verify runs are returned in ASC order
+      expect(result.runs[0].id).toBe("1"); // Oldest first
+      expect(result.runs[1].id).toBe("2"); // Newest last
+    });
+
+    it("respects sortOrder desc - returns runs in reverse chronological order", async () => {
+      // Simulate the DB returning runs in DESC order
+      mockRuns = [
+        {
+          id: "2",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T11:00:00Z"),
+          latencyMs: 150,
+          result: {},
+        },
+        {
+          id: "1",
+          status: "healthy" as const,
+          timestamp: new Date("2024-01-01T10:00:00Z"),
+          latencyMs: 100,
+          result: {},
+        },
+      ];
+
+      const mockDb = createMockDb();
+      service = new HealthCheckService(mockDb as never, mockRegistry as never);
+
+      const result = await service.getDetailedHistory({
+        systemId: "sys-1",
+        configurationId: "config-1",
+        startDate: new Date("2024-01-01T00:00:00Z"),
+        endDate: new Date("2024-01-02T00:00:00Z"),
+        sortOrder: "desc",
+        limit: 10,
+        offset: 0,
+      });
+
+      // Verify runs are returned in DESC order
+      expect(result.runs[0].id).toBe("2"); // Newest first
+      expect(result.runs[1].id).toBe("1"); // Oldest last
+    });
+  });
+});
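One detail of `createMockDb` worth isolating: the service awaits the query chain at two different depths, so `createLimitResult` returns a real Promise with an `.offset()` method bolted on. Both `await q.limit(n)` and `await q.limit(n).offset(m)` then resolve to rows. The trick on its own, as a standalone sketch:

type Row = { id: string };

function thenableWithOffset(rows: Row[]): Promise<Row[]> & {
  offset: (n: number) => Promise<Row[]>;
} {
  const p = Promise.resolve([...rows]) as Promise<Row[]> & {
    offset: (n: number) => Promise<Row[]>;
  };
  p.offset = () => Promise.resolve([...rows]);
  return p;
}

// await thenableWithOffset(rows)            -> rows
// await thenableWithOffset(rows).offset(5)  -> rows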
package/src/service.ts
CHANGED

@@ -414,7 +414,7 @@ export class HealthCheckService {
     const sparklineLimit = 25;
 
     for (const assoc of associations) {
-      // Get last 25 runs for sparkline
+      // Get last 25 runs for sparkline (newest first, then reverse for chronological display)
       const runs = await this.db
         .select({
           id: healthCheckRuns.id,

@@ -431,15 +431,18 @@ export class HealthCheckService {
         .orderBy(desc(healthCheckRuns.timestamp))
         .limit(sparklineLimit);
 
+      // Reverse to chronological order (oldest first) for sparkline display
+      const chronologicalRuns = runs.toReversed();
+
       // Migrate and extract thresholds
       let thresholds: StateThresholds | undefined;
       if (assoc.stateThresholds) {
         thresholds = await stateThresholds.parse(assoc.stateThresholds);
       }
 
-      // Evaluate current status
+      // Evaluate current status (runs are in DESC order - newest first - as evaluateHealthStatus expects)
       const status = evaluateHealthStatus({
-        runs
+        runs,
         thresholds,
       });

@@ -451,7 +454,7 @@ export class HealthCheckService {
         enabled: assoc.enabled,
         status,
         stateThresholds: thresholds,
-        recentRuns:
+        recentRuns: chronologicalRuns.map((r) => ({
           id: r.id,
           status: r.status,
           timestamp: r.timestamp,

@@ -464,6 +467,7 @@ export class HealthCheckService {
 
   /**
    * Get paginated health check run history (public - no result data).
+   * @param sortOrder - 'asc' for chronological (oldest first), 'desc' for reverse (newest first)
    */
   async getHistory(props: {
     systemId?: string;

@@ -472,6 +476,7 @@ export class HealthCheckService {
     endDate?: Date;
     limit?: number;
     offset?: number;
+    sortOrder: "asc" | "desc";
   }) {
     const {
       systemId,

@@ -480,6 +485,7 @@ export class HealthCheckService {
       endDate,
       limit = 10,
       offset = 0,
+      sortOrder,
     } = props;
 
     const conditions = [];

@@ -495,16 +501,17 @@ export class HealthCheckService {
     // Get total count using drizzle $count
     const total = await this.db.$count(healthCheckRuns, whereClause);
 
-    // Get paginated runs
+    // Get paginated runs with requested sort order
     let query = this.db.select().from(healthCheckRuns);
     if (whereClause) {
       // @ts-expect-error drizzle-orm type mismatch
       query = query.where(whereClause);
     }
-    const
+    const orderColumn =
+      sortOrder === "desc"
+        ? desc(healthCheckRuns.timestamp)
+        : healthCheckRuns.timestamp;
+    const runs = await query.orderBy(orderColumn).limit(limit).offset(offset);
 
     // Return without result field for public access (latencyMs is public data)
     return {

@@ -523,6 +530,7 @@ export class HealthCheckService {
   /**
    * Get detailed health check run history with full result data.
    * Restricted to users with manage access.
+   * @param sortOrder - 'asc' for chronological (oldest first), 'desc' for reverse (newest first)
    */
   async getDetailedHistory(props: {
     systemId?: string;

@@ -531,6 +539,7 @@ export class HealthCheckService {
     endDate?: Date;
     limit?: number;
     offset?: number;
+    sortOrder: "asc" | "desc";
   }) {
     const {
       systemId,

@@ -539,6 +548,7 @@ export class HealthCheckService {
       endDate,
       limit = 10,
       offset = 0,
+      sortOrder,
     } = props;
 
     const conditions = [];

@@ -556,10 +566,11 @@ export class HealthCheckService {
       // @ts-expect-error drizzle-orm type mismatch
       query = query.where(whereClause);
     }
-    const
+    const orderColumn =
+      sortOrder === "desc"
+        ? desc(healthCheckRuns.timestamp)
+        : healthCheckRuns.timestamp;
+    const runs = await query.orderBy(orderColumn).limit(limit).offset(offset);
 
     // Return with full result data for manage access
     return {

@@ -576,6 +587,32 @@ export class HealthCheckService {
     };
   }
 
+  /**
+   * Get a single health check run by its ID.
+   */
+  async getRunById(props: { runId: string }) {
+    const run = await this.db
+      .select()
+      .from(healthCheckRuns)
+      .where(eq(healthCheckRuns.id, props.runId))
+      .limit(1);
+
+    if (run.length === 0) {
+      return;
+    }
+
+    const r = run[0];
+    return {
+      id: r.id,
+      configurationId: r.configurationId,
+      systemId: r.systemId,
+      status: r.status,
+      result: r.result ?? {},
+      timestamp: r.timestamp,
+      latencyMs: r.latencyMs ?? undefined,
+    };
+  }
+
   /**
    * Get aggregated health check history with dynamically-sized buckets.
    * Queries all three tiers (raw, hourly, daily) and merges with priority.

@@ -718,6 +755,7 @@ export class HealthCheckService {
       sourceBuckets: mergedBuckets,
       targetIntervalMs: bucketIntervalMs,
       rangeStart: startDate,
+      rangeEnd: endDate,
     });
 
     // Convert to output format

@@ -731,6 +769,7 @@ export class HealthCheckService {
 
     const baseBucket = {
       bucketStart: bucket.bucketStart,
+      bucketEnd: new Date(bucket.bucketEndMs),
       bucketIntervalSeconds,
       runCount: bucket.runCount,
       healthyCount: bucket.healthyCount,