node-event-stream 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,469 @@
1
+ import { Readable } from 'stream';
2
+ import {
3
+ BusOptions,
4
+ CleanupReport,
5
+ ConsumerOptions,
6
+ LogRecord,
7
+ MessageHandler,
8
+ ProduceOptions,
9
+ QueryOptions,
10
+ QueryStreamOptions,
11
+ RetentionPolicy,
12
+ TopicStats,
13
+ } from '../interface/interface';
14
+ import RetentionEngine from '../retention/retentionEngine';
15
+ import DiskStore from '../storage/diskStorage';
16
+ import BufferMemory from '../storage/bufferMemory';
17
+ import SecondaryIndex from '../storage/secondaryIndex';
18
+ import path from 'path';
19
+ import * as fs from 'fs';
20
+
21
/** Internal per-group consumer state tracked by a Topic. */
interface Consumer<T> {
  /** Consumer group identifier — one offset cursor is kept per group. */
  groupId: string;
  /** Callback invoked for each delivered record. */
  handler: MessageHandler<T>;
  /** Next offset this consumer expects to receive. */
  offset: number;
}
26
+
27
+ export class Topic<T = Record<string, unknown>> {
28
+ memory = new BufferMemory<T>(50_000);
29
+ index = new SecondaryIndex();
30
+ disk: DiskStore;
31
+ consumers = new Map<string, Consumer<T>>();
32
+ nextOffset = 0;
33
+ private writeCount = 0;
34
+ private lastStatTs = Date.now();
35
+ writesPerSec = 0;
36
+
37
+ constructor(
38
+ public readonly name: string,
39
+ dataDir: string,
40
+ ) {
41
+ this.disk = new DiskStore(dataDir, name);
42
+ this.replayFromDisk();
43
+ }
44
+
45
+ private replayFromDisk(): void {
46
+ let max = -1;
47
+ for (const rec of this.disk.replayAll()) {
48
+ this.index.insert({
49
+ offset: rec.offset,
50
+ timestamp: rec.timestamp,
51
+ key: rec.key,
52
+ });
53
+ if (rec.offset > max) max = rec.offset;
54
+ }
55
+ if (max >= 0) this.nextOffset = max + 1;
56
+ }
57
+
58
+ trackWrite(): void {
59
+ this.writeCount++;
60
+ const now = Date.now();
61
+ const elapsed = now - this.lastStatTs;
62
+ if (elapsed >= 1000) {
63
+ this.writesPerSec = Math.round((this.writeCount * 1000) / elapsed);
64
+ this.writeCount = 0;
65
+ this.lastStatTs = now;
66
+ }
67
+ }
68
+ }
69
+
70
/**
 * Append-only message bus. Producers append LogRecords to named topics;
 * consumer groups receive them with offsets persisted to a JSON file;
 * queries read history from the memory ring or disk segments; an optional
 * retention engine prunes old disk segments on a timer.
 */
export class MessageBus {
  /** Lazily created topics, keyed by topic name. */
  private topics = new Map<string, Topic>();
  /** Path of the JSON file persisting consumer-group offsets across restarts. */
  private offsetFile: string;
  private retention: RetentionEngine | null = null;
  private cleanupTimer: ReturnType<typeof setInterval> | null = null;
  /** topic name -> (record key or '*') -> live queryStream listeners. */
  private streamListeners = new Map<
    string,
    Map<string, Set<(rec: LogRecord) => void>>
  >();

  /**
   * @param dataDir Directory for segment files and the offsets file;
   *                created recursively if missing.
   * @param options Retention policy and auto-cleanup cadence.
   */
  constructor(
    private readonly dataDir: string = './msgbus-data',
    private readonly options: BusOptions = {},
  ) {
    fs.mkdirSync(dataDir, { recursive: true });
    this.offsetFile = path.join(dataDir, '_consumer-offsets.json');

    if (options.retention) {
      this.retention = new RetentionEngine(options.retention);
    }

    // Auto-cleanup only runs when a retention policy exists and the
    // interval is positive (default: hourly).
    const intervalMin = options.cleanupIntervalMinutes ?? 60;
    if (intervalMin > 0 && this.retention) {
      const ms = intervalMin * 60_000;
      this.cleanupTimer = setInterval(() => {
        console.log('[bus] auto-cleanup running...');
        this.cleanup();
      }, ms);
      // unref() so the cleanup timer does not keep the process alive.
      if ((this.cleanupTimer as any).unref) (this.cleanupTimer as any).unref();
      console.log(`[bus] auto-cleanup scheduled every ${intervalMin} min`);
    }
  }

  /** Get or lazily create a topic (creation replays its disk log). */
  private getTopic(name: string): Topic {
    if (!this.topics.has(name))
      this.topics.set(name, new Topic(name, this.dataDir));
    return this.topics.get(name)!;
  }

  /**
   * Read persisted consumer offsets from disk.
   * Any read/parse failure is deliberately swallowed and yields {}.
   */
  private loadOffsets(): Record<string, Record<string, number>> {
    try {
      if (fs.existsSync(this.offsetFile))
        return JSON.parse(fs.readFileSync(this.offsetFile, 'utf-8'));
    } catch {}
    return {};
  }

  /** Synchronously snapshot every consumer offset to the offsets file. */
  private saveOffsets(): void {
    const snap: Record<string, Record<string, number>> = {};
    for (const [name, topic] of this.topics) {
      snap[name] = {};
      for (const [gid, c] of topic.consumers) snap[name][gid] = c.offset;
    }
    fs.writeFileSync(this.offsetFile, JSON.stringify(snap), 'utf-8');
  }

  /**
   * Append one record to a topic: assign the next offset, store it in the
   * memory ring, the secondary index and the disk write queue, then deliver
   * it to attached consumers and stream listeners.
   * @returns the offset assigned to the record.
   */
  produce(opts: ProduceOptions): number {
    const topic = this.getTopic(opts.topic);

    const record: LogRecord = {
      offset: topic.nextOffset++,
      topic: opts.topic,
      key: opts.key,
      timestamp: Date.now(),
      data: opts.data,
    };

    topic.memory.push(record);

    topic.index.insert({
      offset: record.offset,
      timestamp: record.timestamp,
      key: record.key,
    });

    topic.disk.enqueue(record);

    this.deliver(topic, record);

    topic.trackWrite();

    return record.offset;
  }

  /** Produce several records sequentially; returns their offsets in order. */
  produceBatch(records: ProduceOptions[]): number[] {
    return records.map((r) => this.produce(r));
  }

  /**
   * Attach a consumer group to a topic. The starting offset is the
   * persisted one if present, otherwise 0 (`fromBeginning`) or the tail.
   * If the consumer is behind, a catch-up replay is scheduled.
   * @returns an unsubscribe function that also persists offsets.
   */
  subscribe<T = Record<string, unknown>>(
    opts: ConsumerOptions,
    handler: MessageHandler<T>,
  ): () => void {
    const topic = this.getTopic(opts.topic) as Topic<T>;
    const saved = this.loadOffsets();
    const offset =
      saved[opts.topic]?.[opts.groupId] ??
      (opts.fromBeginning ? 0 : topic.nextOffset);

    const consumer: Consumer<T> = { groupId: opts.groupId, handler, offset };
    topic.consumers.set(opts.groupId, consumer);

    if (consumer.offset < topic.nextOffset) {
      setImmediate(() =>
        this.catchUp(
          topic as unknown as Topic,
          consumer as unknown as Consumer<unknown>,
        ),
      );
    }

    return () => {
      topic.consumers.delete(opts.groupId);
      this.saveOffsets();
    };
  }

  /**
   * Deliver a freshly produced record to each consumer whose cursor is
   * exactly at this offset (lagging consumers are served by catchUp).
   * The offset is advanced BEFORE the handler runs, so delivery is
   * at-most-once if a handler throws/rejects.
   */
  private deliver(topic: Topic, record: LogRecord): void {
    for (const consumer of topic.consumers.values()) {
      if (consumer.offset === record.offset) {
        consumer.offset++;
        try {
          const p = consumer.handler(record);
          if (p instanceof Promise)
            p.then(() => this.saveOffsets()).catch((e) =>
              console.error(`[bus][${consumer.groupId}]`, e),
            );
          else this.saveOffsets();
        } catch (e) {
          console.error(`[bus][${consumer.groupId}]`, e);
        }
      }
    }

    this.notifyStreamListeners(topic.name, record);
  }

  /** Fan a record out to key-specific listeners, then wildcard listeners. */
  private notifyStreamListeners(topicName: string, record: LogRecord): void {
    const topicMap = this.streamListeners.get(topicName);

    if (!topicMap) return;

    const keyListeners = topicMap.get(record.key);

    if (keyListeners) for (const fn of keyListeners) fn(record);

    const wildcardListeners = topicMap.get('*');

    if (wildcardListeners) for (const fn of wildcardListeners) fn(record);
  }

  /**
   * Register a live listener for (topic, key) — key '*' matches everything.
   * @returns a function that removes the listener again.
   */
  private addStreamListener(
    topicName: string,
    key: string,
    fn: (rec: LogRecord) => void,
  ): () => void {
    if (!this.streamListeners.has(topicName))
      this.streamListeners.set(topicName, new Map());

    const topicMap = this.streamListeners.get(topicName)!;

    if (!topicMap.has(key)) topicMap.set(key, new Set());

    topicMap.get(key)!.add(fn);

    return () => topicMap.get(key)?.delete(fn);
  }

  /**
   * Replay backlog for a lagging consumer: offsets older than the memory
   * ring come from disk, the rest from the ring. Offsets are advanced as
   * records are handled, then persisted once at the end.
   * NOTE(review): async handlers are not awaited here — ordering relies on
   * handlers being effectively synchronous; confirm that assumption.
   */
  private catchUp(topic: Topic, consumer: Consumer<unknown>): void {
    const ringMin = topic.memory.minOffset;
    const fromDisk = new Set<number>();

    if (ringMin !== -1 && consumer.offset < ringMin) {
      for (let o = consumer.offset; o < ringMin; o++) fromDisk.add(o);
    }

    for (const rec of topic.disk.readByOffsets(fromDisk)) {
      consumer.offset = rec.offset + 1;
      try {
        consumer.handler(rec);
      } catch (e) {
        console.error(`[bus][${consumer.groupId}] catchup`, e);
      }
    }

    for (const rec of topic.memory.from(consumer.offset)) {
      consumer.offset = rec.offset + 1;
      try {
        consumer.handler(rec);
      } catch (e) {
        console.error(`[bus][${consumer.groupId}] catchup`, e);
      }
    }

    this.saveOffsets();
  }

  // ── Query ─────────────────────────────────────────────────────────────────

  /**
   * Query historical records by key and/or time range with pagination.
   * Offsets come from the secondary index; record bodies are fetched from
   * the memory ring when possible, otherwise from disk.
   */
  query(opts: QueryOptions): LogRecord[] {
    const topic = this.getTopic(opts.topic);
    const limit = opts.limit ?? 100;
    const skip = opts.skip ?? 0;
    const order = opts.order ?? 'desc';

    // Ask the index for limit+skip hits and paginate here, since the
    // index is called with skip: 0.
    const offsets = topic.index.query({
      key: opts.key,
      fromTime: opts.fromTime,
      toTime: opts.toTime,
      limit: limit + skip,
      skip: 0,
      order,
    });

    const paginated = offsets.slice(skip, skip + limit);
    if (paginated.length === 0) return [];

    // Split the wanted offsets by where they live: ring vs disk.
    const set = new Set(paginated);
    const ringMin = topic.memory.minOffset;
    const fromRing = new Set<number>();
    const fromDisk = new Set<number>();

    for (const off of set) {
      if (ringMin !== -1 && off >= ringMin) fromRing.add(off);
      else fromDisk.add(off);
    }

    const results: LogRecord[] = [];
    for (const rec of topic.memory) {
      if (fromRing.has(rec.offset)) results.push(rec);
    }

    for (const rec of topic.disk.readByOffsets(fromDisk)) {
      results.push(rec);
    }

    // Re-sort after merging the two sources.
    results.sort((a, b) =>
      order === 'asc' ? a.offset - b.offset : b.offset - a.offset,
    );
    return results;
  }

  /**
   * Stream records as newline-delimited JSON: first a bounded slice of
   * history, then live records matching the key filter.
   * NOTE(review): records produced between the history query and the
   * listener attach below are not pushed — confirm this gap is acceptable.
   */
  queryStream(opts: QueryStreamOptions): Readable {
    const historySize = opts.historySize ?? 1000;
    const order = opts.order ?? 'asc';
    const listenerKey = opts.key ?? '*';

    let removeLive: (() => void) | null = null;

    const stream = new Readable({ objectMode: false, read() {} });

    // Push one record as an NDJSON line, honoring the time-range filter.
    const push = (record: LogRecord): void => {
      if (stream.destroyed) return;
      if (opts.fromTime && record.timestamp < opts.fromTime) return;
      if (opts.toTime && record.timestamp > opts.toTime) return;
      stream.push(JSON.stringify(record) + '\n');
    };

    // Defer so the caller can attach handlers before data flows.
    setImmediate(() => {
      if (stream.destroyed) return;

      const history = this.query({
        topic: opts.topic,
        key: opts.key,
        limit: historySize,
        skip: 0,
        order,
        fromTime: opts.fromTime,
        toTime: opts.toTime,
      });

      for (const record of history) {
        if (stream.destroyed) return;
        push(record);
      }

      if (!stream.destroyed) {
        removeLive = this.addStreamListener(opts.topic, listenerKey, push);
      }
    });

    // Detach the live listener when the consumer closes the stream.
    stream.on('close', () => removeLive?.());

    return stream;
  }

  // ── Retention / Cleanup ───────────────────────────────────────────────────

  /**
   * Run retention on one topic (or all topics) and delete doomed disk
   * segments. Consumers still pointing into a deleted segment are skipped
   * forward to the earliest surviving offset ("delete anyway" semantics).
   * @returns one report per topic processed.
   */
  cleanup(
    topicName?: string,
    policyOverride?: RetentionPolicy,
  ): CleanupReport[] {
    const engine = policyOverride
      ? new RetentionEngine(policyOverride)
      : this.retention;

    if (!engine) {
      console.warn('[bus] cleanup() called but no retention policy is set.');
      return [];
    }

    const targets = topicName
      ? [this.getTopic(topicName)]
      : [...this.topics.values()];

    const reports: CleanupReport[] = [];

    for (const topic of targets) {
      // Flush pending writes so segment metadata is current.
      topic.disk.flush();

      const metas = topic.disk.buildSegmentMetas();
      const { toDelete, reasons } = engine.evaluate(metas);

      if (toDelete.length === 0) {
        reports.push({
          topic: topic.name,
          segmentsDeleted: 0,
          recordsDropped: 0,
          bytesFreed: 0,
          reasons: ['nothing to delete'],
          skippedConsumers: [],
        });
        continue;
      }

      const deletedBases = new Set(toDelete.map((s) => s.baseOffset));
      const surviving = metas.filter((m) => !deletedBases.has(m.baseOffset));
      const earliestSurviving =
        surviving.length > 0
          ? Math.min(...surviving.map((m) => m.baseOffset))
          : topic.nextOffset;

      // Advance any slow consumers past the gap — "delete anyway" behaviour
      const skippedConsumers: CleanupReport['skippedConsumers'] = [];
      for (const consumer of topic.consumers.values()) {
        if (consumer.offset < earliestSurviving) {
          console.warn(
            `[bus][${topic.name}] consumer "${consumer.groupId}" at offset ${consumer.offset} ` +
              `skipped to ${earliestSurviving} (segment deleted)`,
          );
          skippedConsumers.push({
            groupId: consumer.groupId,
            skippedToOffset: earliestSurviving,
          });
          consumer.offset = earliestSurviving;
        }
      }

      // Delete segments and purge memory index
      let bytesFreed = 0;
      let recordsDropped = 0;
      for (const seg of toDelete) {
        bytesFreed += topic.disk.deleteSegment(seg.baseOffset);
        recordsDropped += seg.recordCount;
      }

      topic.index.purgeBelow(earliestSurviving);
      this.saveOffsets();

      const report: CleanupReport = {
        topic: topic.name,
        segmentsDeleted: toDelete.length,
        recordsDropped,
        bytesFreed,
        reasons,
        skippedConsumers,
      };

      console.log(
        `[bus] cleanup [${topic.name}]: ${toDelete.length} segments deleted, ` +
          `${(bytesFreed / 1e6).toFixed(1)} MB freed, ${recordsDropped} records dropped`,
      );

      reports.push(report);
    }

    return reports;
  }

  // ── Stats ─────────────────────────────────────────────────────────────────
  /** Snapshot one topic's counters (creates the topic if it did not exist). */
  stats(topicName: string): TopicStats {
    const t = this.getTopic(topicName);
    return {
      topic: topicName,
      totalRecords: t.index.totalEntries,
      latestOffset: t.nextOffset - 1,
      consumers: [...t.consumers.keys()],
      indexedKeys: t.index.totalKeys,
      memoryRecords: t.memory.size,
      writesPerSec: t.writesPerSec,
      diskSegments: t.disk.segmentCount,
      diskSizeBytes: t.disk.totalDiskBytes,
    };
  }

  /** Stop the cleanup timer, persist offsets, and close all disk stores. */
  close(): void {
    if (this.cleanupTimer) clearInterval(this.cleanupTimer);
    this.saveOffsets();
    for (const topic of this.topics.values()) topic.disk.close();
  }
}
@@ -0,0 +1,87 @@
1
/** One immutable entry in a topic's log. */
export interface LogRecord<T = Record<string, unknown>> {
  /** Monotonically increasing position within the topic. */
  offset: number;
  topic: string;
  /** Lookup key used by the secondary index and stream filters. */
  key: string;
  /** Producer-side wall-clock time, epoch milliseconds. */
  timestamp: number;
  data: T;
}

/** Arguments for MessageBus.produce(). */
export interface ProduceOptions {
  topic: string;
  key: string;
  data: Record<string, unknown>;
}

/** Consumer callback; may return a Promise for async handling. */
export type MessageHandler<T = Record<string, unknown>> = (
  record: LogRecord<T>,
) => void | Promise<void>;

/** Arguments for MessageBus.subscribe(). */
export interface ConsumerOptions {
  topic: string;
  /** Consumer group id — one persisted offset cursor per group. */
  groupId: string;
  /** Start at offset 0 when no persisted offset exists (default: tail). */
  fromBeginning?: boolean;
}

/** Arguments for MessageBus.query(). */
export interface QueryOptions {
  topic: string;
  key?: string;
  /** Inclusive lower bound, epoch milliseconds. */
  fromTime?: number;
  /** Inclusive upper bound, epoch milliseconds. */
  toTime?: number;
  /** Max records returned (default 100). */
  limit?: number;
  skip?: number;
  /** Sort by offset; default 'desc'. */
  order?: 'asc' | 'desc';
}

/** Segment-retention limits; any combination of rules may be set. */
export interface RetentionPolicy {
  /** Takes precedence over maxAgeHours when both are set. */
  maxAgeDays?: number;
  maxAgeHours?: number;
  /** Takes precedence over maxSizeMB when both are set. */
  maxSizeGB?: number;
  maxSizeMB?: number;
  maxRecords?: number;
}

/** MessageBus constructor options. */
export interface BusOptions {
  retention?: RetentionPolicy;
  /** Auto-cleanup cadence; <= 0 disables the timer (default 60). */
  cleanupIntervalMinutes?: number;
}

/** Snapshot returned by MessageBus.stats(). */
export interface TopicStats {
  topic: string;
  totalRecords: number;
  latestOffset: number;
  consumers: string[];
  indexedKeys: number;
  memoryRecords: number;
  writesPerSec: number;
  diskSegments: number;
  diskSizeBytes: number;
}

/** Per-topic result of a retention cleanup pass. */
export interface CleanupReport {
  topic: string;
  segmentsDeleted: number;
  recordsDropped: number;
  bytesFreed: number;
  /** Human-readable reason per deleted segment. */
  reasons: string[];
  /** Consumers force-advanced past deleted segments. */
  skippedConsumers: { groupId: string; skippedToOffset: number }[];
}

/** Arguments for MessageBus.queryStream(). */
export interface QueryStreamOptions {
  topic: string;
  /** Record key to follow; omitted means all keys ('*'). */
  key?: string;
  /** Max history records replayed before going live (default 1000). */
  historySize?: number;
  order?: 'asc' | 'desc';
  fromTime?: number;
  toTime?: number;
}

/** Metadata describing one on-disk log segment. */
export interface SegmentMeta {
  filePath: string;
  /** Offset of the first record in the segment. */
  baseOffset: number;
  lastOffset: number;
  sizeBytes: number;
  oldestTimestamp: number;
  newestTimestamp: number;
  recordCount: number;
  /** The active (currently written) segment is never deleted. */
  isActive: boolean;
}
File without changes
@@ -0,0 +1,100 @@
1
+ // ─────────────────────────────────────────────────────────────────────────────
2
+ // RETENTION ENGINE
3
+ // ─────────────────────────────────────────────────────────────────────────────
4
+
5
+ import { RetentionPolicy, SegmentMeta } from '../interface/interface';
6
+
7
+ export default class RetentionEngine {
8
+ constructor(private readonly policy: RetentionPolicy) {}
9
+
10
+ evaluate(metas: SegmentMeta[]): {
11
+ toDelete: SegmentMeta[];
12
+ reasons: string[];
13
+ } {
14
+ const candidates = metas.filter((m) => !m.isActive);
15
+ const deleteSet = new Set<number>();
16
+ const reasons: string[] = [];
17
+
18
+ // ── Rule 1: Time-based ────────────────────────────────────────────────
19
+ const maxAgeMs =
20
+ this.policy.maxAgeDays != null
21
+ ? this.policy.maxAgeDays * 86_400_000
22
+ : this.policy.maxAgeHours != null
23
+ ? this.policy.maxAgeHours * 3_600_000
24
+ : null;
25
+
26
+ if (maxAgeMs !== null) {
27
+ const cutoff = Date.now() - maxAgeMs;
28
+ for (const seg of candidates) {
29
+ if (seg.newestTimestamp > 0 && seg.newestTimestamp < cutoff) {
30
+ deleteSet.add(seg.baseOffset);
31
+ const ageDays = (
32
+ (Date.now() - seg.newestTimestamp) /
33
+ 86_400_000
34
+ ).toFixed(1);
35
+ reasons.push(
36
+ `[time] segment ${seg.baseOffset}: newest record is ${ageDays} days old (limit: ${
37
+ this.policy.maxAgeDays ??
38
+ (this.policy.maxAgeHours! / 24).toFixed(1)
39
+ } days)`,
40
+ );
41
+ }
42
+ }
43
+ }
44
+
45
+ // ── Rule 2: Size-based ────────────────────────────────────────────────
46
+ const maxBytes =
47
+ this.policy.maxSizeGB != null
48
+ ? this.policy.maxSizeGB * 1_073_741_824
49
+ : this.policy.maxSizeMB != null
50
+ ? this.policy.maxSizeMB * 1_048_576
51
+ : null;
52
+
53
+ if (maxBytes !== null) {
54
+ let totalBytes = metas.reduce((s, m) => s + m.sizeBytes, 0);
55
+ const sorted = [...candidates].sort(
56
+ (a, b) => a.baseOffset - b.baseOffset,
57
+ );
58
+ for (const seg of sorted) {
59
+ if (totalBytes <= maxBytes) break;
60
+ if (!deleteSet.has(seg.baseOffset)) {
61
+ deleteSet.add(seg.baseOffset);
62
+ reasons.push(
63
+ `[size] segment ${seg.baseOffset}: total ${(totalBytes / 1e9).toFixed(2)} GB ` +
64
+ `> limit ${(maxBytes / 1e9).toFixed(2)} GB — freeing ${(seg.sizeBytes / 1e6).toFixed(1)} MB`,
65
+ );
66
+ }
67
+ totalBytes -= seg.sizeBytes;
68
+ }
69
+ }
70
+
71
+ // ── Rule 3: Record count ──────────────────────────────────────────────
72
+ if (this.policy.maxRecords != null) {
73
+ const total = metas.reduce((s, m) => s + m.recordCount, 0);
74
+ if (total > this.policy.maxRecords) {
75
+ let excess = total - this.policy.maxRecords;
76
+ const sorted = [...candidates].sort(
77
+ (a, b) => a.baseOffset - b.baseOffset,
78
+ );
79
+ for (const seg of sorted) {
80
+ if (excess <= 0) break;
81
+ if (!deleteSet.has(seg.baseOffset)) {
82
+ deleteSet.add(seg.baseOffset);
83
+ reasons.push(
84
+ `[records] segment ${seg.baseOffset}: ${total.toLocaleString()} records ` +
85
+ `> limit ${this.policy.maxRecords.toLocaleString()} — dropping ${seg.recordCount} records`,
86
+ );
87
+ }
88
+ excess -= seg.recordCount;
89
+ }
90
+ }
91
+ }
92
+
93
+ return {
94
+ toDelete: candidates
95
+ .filter((m) => deleteSet.has(m.baseOffset))
96
+ .sort((a, b) => a.baseOffset - b.baseOffset),
97
+ reasons,
98
+ };
99
+ }
100
+ }
package/src/server.ts ADDED
@@ -0,0 +1 @@
1
// Placeholder entrypoint — only confirms the process started; no bus is wired up here.
console.log('Server Running');
@@ -0,0 +1,39 @@
1
+ import { LogRecord } from '../interface/interface';
2
+
3
+ export default class BufferMemory<T = Record<string, unknown>> {
4
+ private buffer: Array<LogRecord<T> | undefined>;
5
+ private head = 0;
6
+ private _size = 0;
7
+
8
+ constructor(private readonly capacity: number) {
9
+ this.buffer = new Array(capacity);
10
+ }
11
+
12
+ push(record: LogRecord<T>): void {
13
+ this.buffer[this.head] = record;
14
+ this.head = (this.head + 1) % this.capacity;
15
+ if (this._size < this.capacity) this._size++;
16
+ }
17
+
18
+ *[Symbol.iterator](): Iterator<LogRecord<T>> {
19
+ if (this._size === 0) return;
20
+ const start = this._size < this.capacity ? 0 : this.head;
21
+ for (let i = 0; i < this._size; i++) {
22
+ const rec = this.buffer[(start + i) % this.capacity];
23
+ if (rec !== undefined) yield rec;
24
+ }
25
+ }
26
+
27
+ *from(fromOffset: number): Generator<LogRecord<T>> {
28
+ for (const rec of this) if (rec.offset >= fromOffset) yield rec;
29
+ }
30
+
31
+ get size(): number {
32
+ return this._size;
33
+ }
34
+
35
+ get minOffset(): number {
36
+ for (const r of this) return r.offset;
37
+ return -1;
38
+ }
39
+ }