@transitive-sdk/clickhouse 0.1.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -1,19 +1,44 @@
+ const _ = require('lodash');
  const { createClient } = require('@clickhouse/client');
+ const { topicToPath } = require('@transitive-sdk/datacache');
+
+ // Default TTL in days for mqtt_history table
+ const DEFAULT_TTL_DAYS = 30;
+
+ // Shared multi-tenant schema components used by createTable and ensureMqttHistoryTable
+ const MULTI_TENANT_SCHEMA = {
+ // Column definitions for OrgId and DeviceId
+ columns: [
+ 'OrgId LowCardinality(String) CODEC(ZSTD(1))',
+ 'DeviceId LowCardinality(String) CODEC(ZSTD(1))'
+ ],
+ // Bloom filter indexes for efficient filtering
+ indexes: [
+ 'INDEX idx_orgid (OrgId) TYPE bloom_filter(0.01) GRANULARITY 1',
+ 'INDEX idx_deviceid (DeviceId) TYPE bloom_filter(0.01) GRANULARITY 1'
+ ]
+ };

  /** Singleton ClickHouse client wrapper with multi-tenant table support */
  class ClickHouse {
- init({url, dbName, user, password} = {}) {
+
+ _client = null;
+ mqttHistoryTable = null; /// name of the table used for MQTT history, if used
+
+ /** Create the client, connecting to Clickhouse */
+ init({ url, dbName, user, password } = {}) {
  const _url = url || process.env.CLICKHOUSE_URL || 'http://clickhouse:8123';
  const _dbName = dbName || process.env.CLICKHOUSE_DB || 'default';
  const _user = user || process.env.CLICKHOUSE_USER || 'default';
- const _password = password || process.env.CLICKHOUSE_PASSWORD || '';
- console.debug(`Creating ClickHouse client for URL: ${_url}, DB: ${_dbName}, User: ${_user}`);
+
+ // console.debug(`Creating ClickHouse client for URL: ${_url}, DB: ${_dbName}, User: ${_user}`);
+
  this._client = createClient({
  url: _url,
  max_open_connections: 10,
  database: _dbName,
  username: _user,
- password: _password,
+ password: password || process.env.CLICKHOUSE_PASSWORD || '',
  clickhouse_settings: {
  // https://clickhouse.com/docs/en/operations/settings/settings#async-insert
  async_insert: 1,
@@ -25,10 +50,14 @@ class ClickHouse {
  async_insert_busy_timeout_ms: 1000,
  // Allows to insert serialized JS Dates (such as '2023-12-06T10:54:48.000Z')
  date_time_input_format: 'best_effort',
+ // Include MATERIALIZED columns in query results
+ // https://clickhouse.com/docs/operations/settings/settings#asterisk_include_materialized_columns
+ // asterisk_include_materialized_columns: 1
  },
  });
  }

+ /** Get the Clickhouse client (from @clickhouse/client) */
  get client() {
  if (this._client == undefined) {
  console.warn('Cannot access ClickHouse client before init() is called');
@@ -37,9 +66,8 @@ class ClickHouse {
  return this._client;
  }

- /** Create a table if it does not already exist
- * adding OrgId and DeviceId columns to the schema
- * for multi-tenancy support.
+ /** Create a table if it does not already exist adding OrgId and DeviceId
+ * columns to the schema for multi-tenancy support.
  * @param {string} tableName - name of the table to create
  * @param {Array<string>} columns - array of column definitions and indexes, e.g. ['Timestamp DateTime CODEC(ZSTD(1))', 'Value Float32 CODEC(ZSTD(1))']
  * @param {Array<string>} settings - array of table settings, e.g. ['ENGINE = MergeTree()', 'ORDER BY (Timestamp)']
@@ -47,10 +75,8 @@ class ClickHouse {
  async createTable(tableName, columns, settings = []) {
  const fullSchema = [
  ...columns,
- 'OrgId String CODEC(ZSTD(1))',
- 'DeviceId String CODEC(ZSTD(1))',
- 'INDEX idx_orgid (OrgId) TYPE bloom_filter(0.01) GRANULARITY 1',
- 'INDEX idx_deviceid (DeviceId) TYPE bloom_filter(0.01) GRANULARITY 1'
+ ...MULTI_TENANT_SCHEMA.columns,
+ ...MULTI_TENANT_SCHEMA.indexes
  ];
  const query = `CREATE TABLE IF NOT EXISTS ${tableName} (${fullSchema.join(', ')}) ${settings.join(' ')}`;

@@ -86,13 +112,199 @@ class ClickHouse {
  OrgId: orgId,
  DeviceId: deviceId
  }));
+
  return await this.client.insert({
  table: tableName,
  values: rowsWithIds,
  format: 'JSONEachRow'
  });
  }
+
+ /** Update the TTL for the mqtt_history table
+ * @param {number} ttlDays - TTL in days
+ */
+ async updateMqttHistoryTTL(ttlDays) {
+ // console.log(`updating ttl to ${ttlDays}`);
+ await this.client.command({
+ query: `ALTER TABLE ${this.mqttHistoryTable} MODIFY TTL toDateTime(Timestamp) + toIntervalDay(${ttlDays})`,
+ clickhouse_settings: {
+ wait_end_of_query: 1,
+ }
+ });
+ }
+
+ /** Ensure the mqtt_history table exists with the correct schema
+ * @param {number} ttlDays - TTL in days (default: 30)
+ */
+ async ensureMqttHistoryTable(tableName = 'mqtt_history', ttlDays = DEFAULT_TTL_DAYS) {
+ if (this.mqttHistoryTable != tableName) {
+ console.warn(`creating or altering mqtt history table ${tableName}`);
+ }
+
+ const ttlExpression = `TTL toDateTime(Timestamp) + toIntervalDay(${ttlDays})`;
+
+ // Check if table already exists before creating
+ const tableExists = await this.client.query({
+ query: `SELECT name, create_table_query FROM system.tables WHERE name = '${this.mqttHistoryTable}' AND database = currentDatabase()`,
+ format: 'JSONEachRow'
+ });
+ const tables = await tableExists.json();
+
+ if (tables.length > 0) {
+ // table already exists, verify TTL
+ const originalCreateQuery = tables[0].create_table_query;
+
+ // Update table if it differs
+ if (!originalCreateQuery.includes(ttlExpression)) {
+ await this.updateMqttHistoryTTL(ttlDays);
+ }
+
+ } else {
+ // Create the table
+
+ const columns = [
+ // High-precision event time; Delta + ZSTD is a common combo for time-series
+ 'Timestamp DateTime64(6) CODEC(Delta, ZSTD(1))',
+ // Raw MQTT topic split into parts; kept as Array(String) for flexibility
+ 'TopicParts Array(String) CODEC(ZSTD(1))',
+ // Org/device fields materialized from TopicParts (always computed, not overridable)
+ 'OrgId LowCardinality(String) MATERIALIZED TopicParts[1] CODEC(ZSTD(1))',
+ 'DeviceId LowCardinality(String) MATERIALIZED TopicParts[2] CODEC(ZSTD(1))',
+ 'Scope LowCardinality(String) MATERIALIZED TopicParts[3] CODEC(ZSTD(1))',
+ 'CapabilityName LowCardinality(String) MATERIALIZED TopicParts[4] CODEC(ZSTD(1))',
+ 'CapabilityVersion LowCardinality(String) MATERIALIZED TopicParts[5] CODEC(ZSTD(1))',
+ // Remaining topic segments stored as an array for less-structured suffixes
+ 'SubTopic Array(String) MATERIALIZED arraySlice(TopicParts, 6) CODEC(ZSTD(1))',
+ // Payload stored as a String, compressed with ZSTD(1). This allows us to
+ // store atomic values (still stringified) as opposed to only JSON objects,
+ // as the JSON type would require.
+ 'Payload String CODEC(ZSTD(1))',
+ // Bloom filter indexes (shared multi-tenant indexes)
+ ...MULTI_TENANT_SCHEMA.indexes,
+ 'INDEX idx_scope (Scope) TYPE bloom_filter(0.01) GRANULARITY 1',
+ 'INDEX idx_capability (CapabilityName) TYPE bloom_filter(0.01) GRANULARITY 1'
+ ];
+
+ const query = `CREATE TABLE IF NOT EXISTS ${tableName} (${columns.join(', ')})
+ ENGINE = MergeTree()
+ PARTITION BY toYYYYMMDD(Timestamp)
+ ORDER BY (OrgId, toUnixTimestamp64Micro(Timestamp), TopicParts)
+ ${ttlExpression}
+ SETTINGS
+ index_granularity = 8192,
+ ttl_only_drop_parts = 1`;
+ // Note: PRIMARY KEY is not needed because we want it to be the same as
+ // ORDER BY, which is what ClickHouse does automatically.
+
+ await this.client.command({
+ query,
+ clickhouse_settings: {
+ wait_end_of_query: 1,
+ }
+ });
+ }
+
+ this.mqttHistoryTable = tableName;
+ }
+
+ /** Register an MQTT topic for storage in ClickHouse: subscribes to the topic
+ * and stores incoming messages JSON.stringify'd in a ClickHouse table.
+ * Retrieve using `queryMQTTHistory`, or, when querying directly, e.g., from
+ * Grafana, use the ClickHouse built-in functionality for parsing JSON, e.g.,
+ * after inserting `{ x: 1 }` use
+ * `select JSON_VALUE(Payload, '$.x') AS x FROM default.mqtt_history`.
+ * NOTE: `ensureMqttHistoryTable` must be called before registering topics
+ * @param {Object} dataCache - DataCache instance to use for subscribing
+ * @param {string} topic - MQTT topic to register
+ */
+ registerMqttTopicForStorage(dataCache, topic) {
+ if (!this.mqttHistoryTable) {
+ throw new Error('ensureMqttHistoryTable must be called before registerMqttTopicForStorage');
+ }
+
+ // Subscribe to the topic
+ dataCache.subscribePath(topic, async (value, topicString) => {
+ const row = {
+ Timestamp: new Date(),
+ TopicParts: topicToPath(topicString), // topic as array
+ };
+
+ if (value !== null && value !== undefined) {
+ row.Payload = JSON.stringify(value);
+ } // else: omit
+
+ try {
+ await this.client.insert({
+ table: this.mqttHistoryTable,
+ values: [row],
+ format: 'JSONEachRow'
+ });
+ } catch (error) {
+ console.error('Error inserting MQTT message into ClickHouse:', error.message);
+ }
+ });
+ }
+
+ /** Query historic MQTT payloads based on topic selector (with the usual
+ * wildcards), as well as a time range. Does the inverse transform of the
+ * payload of registerMqttTopicForStorage. */
+ async queryMQTTHistory(options = {}) {
+
+ const {
+ topicSelector,
+ since = undefined,
+ until = undefined,
+ orderBy = 'Timestamp ASC',
+ limit = 1000
+ } = options;
+
+ const [OrgId, DeviceId, Scope, CapabilityName, CapabilityVersion, ...subPath]
+ = topicToPath(topicSelector);
+ // store as objects so we can refer to them by column name
+ const fields = { OrgId, DeviceId, Scope, CapabilityName, CapabilityVersion };
+
+ const selectors = ['Payload', 'TopicParts', 'Timestamp', 'SubTopic'];
+ const where = [];
+
+ // interpret wildcards
+ _.forEach(fields, (value, field) => {
+ if (value.startsWith('+')) {
+ // it's a wild card, add to selectors
+ selectors.push(field);
+ } else {
+ // it's a constant, filter by it
+ where.push(`${field} = '${value}'`);
+ }
+ });
+
+ // special WHERE conditions for SubPath (if given)
+ subPath?.forEach((value, i) =>
+ !value.startsWith('+') && where.push(`SubTopic[${i}] = '${value}'`));
+
+ since && where.push(`Timestamp >= fromUnixTimestamp64Milli(${since.getTime()})`);
+ until && where.push(`Timestamp <= fromUnixTimestamp64Milli(${until.getTime()})`);
+
+ const whereStatement = where.length > 0
+ ? `WHERE ${where.join(' AND ')}`
+ : '';
+
+ const result = await this.client.query({
+ query: `SELECT ${selectors.join(',')} FROM ${this.mqttHistoryTable} ${
+ whereStatement} ORDER BY ${orderBy} ${limit ? ` LIMIT ${limit}` : ''}`,
+ format: 'JSONEachRow'
+ });
+
+ const rows = await result.json();
+
+ // map payloads back from JSON; this is the inverse of what we do in
+ // registerMqttTopicForStorage
+ return rows.map(row => {
+ row.Payload = row.Payload ? JSON.parse(row.Payload) : null;
+ row.Timestamp = new Date(row.Timestamp);
+ return row;
+ });
+ }
  }

  const instance = new ClickHouse();
- module.exports = instance;
+ module.exports = instance;
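
For orientation, here is a minimal usage sketch of the MQTT-history API added in 0.2.0, based only on the methods shown in the diff above (init, ensureMqttHistoryTable, registerMqttTopicForStorage, queryMQTTHistory). The connection URL, topic pattern, and 'myorg' selector are illustrative assumptions, not part of the package:

const { DataCache } = require('@transitive-sdk/datacache');
const clickhouse = require('@transitive-sdk/clickhouse');

const main = async () => {
  // Create the client; without arguments it falls back to the CLICKHOUSE_* env vars
  clickhouse.init({ url: 'http://clickhouse:8123' });
  // Create (or update) the history table with a 30-day TTL
  await clickhouse.ensureMqttHistoryTable('mqtt_history', 30);

  // Store every message matching the wildcard topic in ClickHouse
  const dataCache = new DataCache({});
  clickhouse.registerMqttTopicForStorage(dataCache, '/+org/+device/+scope/+cap/+version/#');

  // Later: read back payloads for one org, limited to the last hour
  const rows = await clickhouse.queryMQTTHistory({
    topicSelector: '/myorg/+/+/+/+/+',
    since: new Date(Date.now() - 3600 * 1000),
    limit: 100,
  });
  console.log(rows.map(row => row.Payload));
};

main();
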
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@transitive-sdk/clickhouse",
- "version": "0.1.1",
+ "version": "0.2.0",
  "description": "A tiny ClickHouse utility class for use in the Transitive framework.",
  "homepage": "https://transitiverobotics.com",
  "repository": {
@@ -20,9 +20,15 @@
  "access": "public"
  },
  "main": "index.js",
- "scripts": {},
+ "scripts": {
+ "test": "mocha -w test/*.test.js -b"
+ },
  "dependencies": {
- "@clickhouse/client": "^1.12.1"
+ "@clickhouse/client": "^1.12.1",
+ "@transitive-sdk/datacache": "^0.14.1"
  },
- "devDependencies": {}
+ "devDependencies": {
+ "dotenv": "^17.2.3",
+ "mocha": "^11.7.5"
+ }
  }
@@ -0,0 +1,396 @@
+ const assert = require('assert');
+ const { EventEmitter, once } = require('node:events');
+ const dotenv = require('dotenv');
+ const { DataCache } = require('@transitive-sdk/datacache');
+ const { wait } = require('../../index');
+
+ const clickhouse = require('../index');
+
+ dotenv.config({path: '~transitive/.env'});
+ const CLICKHOUSE_URL = 'http://clickhouse.localhost';
+ const STANDARD_TOPIC_PATTERN = '/+org/+device/+scope/+cap/+version/#';
+
+ const TABLE_NAME = 'mqtt_history_tests';
+
+ /** Wrap client.insert in an event emitter so we can get notified of insert
+ * events. */
+ const interceptInserts = () => {
+ const emitter = new EventEmitter();
+
+ const originalInsert = clickhouse.client.insert.bind(clickhouse.client);
+ clickhouse.client.insert = async (...args) => {
+ const result = await originalInsert(...args);
+ emitter.emit('insert');
+ return result;
+ };
+
+ return emitter;
+ }
+
+
+ /** Query mqtt_history rows for a given org */
+ const queryRowsByOrg = async (org, options = {}) =>
+ await clickhouse.queryMQTTHistory({ topicSelector: `/${org}/+/+/+/+/+` });
+
+ /** Generate unique org ID for test isolation */
+ const testOrg = (suffix) => `clickhouse_test_${suffix}_${Date.now()}`;
+
+
+ describe('ClickHouse', function() {
+ this.timeout(10000);
+
+ let emitter;
+
+ before(async () => {
+ clickhouse.init({ url: CLICKHOUSE_URL });
+ /* Register for `insert` events on ClickHouse client */
+ emitter = interceptInserts();
+
+ await clickhouse.client.command({
+ query: `DROP TABLE IF EXISTS ${TABLE_NAME}`,
+ clickhouse_settings: { wait_end_of_query: 1 }
+ });
+ });
+
+ describe('ensureMqttHistoryTable', () => {
+ it('should create the mqtt_history table', async () => {
+ await clickhouse.ensureMqttHistoryTable(TABLE_NAME, 31);
+
+ const result = await clickhouse.client.query({
+ query: `SELECT name FROM system.tables WHERE name = '${TABLE_NAME}'`,
+ format: 'JSONEachRow'
+ });
+ const tables = await result.json();
+
+ assert(tables.length > 0, 'mqtt_history table should exist');
+ });
+ });
+
+ describe('registerMqttTopicForStorage', () => {
+ before(async () => {
+ await clickhouse.ensureMqttHistoryTable(TABLE_NAME, 32);
+ });
+
+ it('should insert MQTT messages into ClickHouse', async () => {
+ const dataCache = new DataCache({});
+ const org = testOrg('insert');
+
+ clickhouse.registerMqttTopicForStorage(dataCache, STANDARD_TOPIC_PATTERN);
+ dataCache.update([org, 'device1', '@myscope', 'test-cap', '1.0.0', 'data'], 42.5);
+ await once(emitter, 'insert');
+
+ const [row] = await queryRowsByOrg(org, { limit: 1 });
+ assert(!!row);
+ assert.strictEqual(row.DeviceId, 'device1');
+ assert.strictEqual(row.Scope, '@myscope');
+ assert.strictEqual(row.CapabilityName, 'test-cap');
+ assert.strictEqual(row.CapabilityVersion, '1.0.0');
+ assert.deepStrictEqual(row.SubTopic, ['data']);
+ assert.strictEqual(row.Payload, 42.5);
+ });
+
+ it('should store string payloads as-is', async () => {
+ const dataCache = new DataCache({});
+ const org = testOrg('string');
+
+ clickhouse.registerMqttTopicForStorage(dataCache, STANDARD_TOPIC_PATTERN);
+ dataCache.update([org, 'device1', '@myscope', 'cap', '1.0.0', 'msg'], 'hello world');
+ await once(emitter, 'insert');
+
+ const [row] = await queryRowsByOrg(org, { limit: 1 });
+
+ assert.strictEqual(row.Payload, 'hello world');
+ });
+
+ it('should store null values as NULL (omitted)', async () => {
+ const dataCache = new DataCache({});
+ const org = testOrg('null');
+ // const done = interceptInserts(2);
+
+ clickhouse.registerMqttTopicForStorage(dataCache, '/+org/+device/#');
+ dataCache.update([org, 'device1', 'data'], 'initial');
+ // Small delay to ensure timestamp ordering
+ await new Promise(resolve => setTimeout(resolve, 10));
+ dataCache.update([org, 'device1', 'data'], null);
+ await once(emitter, 'insert');
+ await once(emitter, 'insert');
+
+ const rows = await queryRowsByOrg(org);
+
+ assert.strictEqual(rows.length, 2);
+ assert.strictEqual(rows[0].Payload, 'initial');
+ assert.strictEqual(rows[1].Payload, null);
+ });
+
+ it('should store object payloads as JSON', async () => {
+ const dataCache = new DataCache({});
+ const org = testOrg('object');
+ const payload = { sensor: 'temp', value: 25.5, nested: { a: 1 } };
+
+ clickhouse.registerMqttTopicForStorage(dataCache, STANDARD_TOPIC_PATTERN);
+ dataCache.update([org, 'device1', '@myscope', 'cap', '1.0.0', 'readings'], payload);
+ await once(emitter, 'insert');
+
+ const [row] = await queryRowsByOrg(org, { limit: 1 });
+
+ assert.deepStrictEqual(row.Payload, payload);
+ assert.deepStrictEqual(row.SubTopic, ['readings']);
+ });
+
+ it('should parse nested subtopics correctly', async () => {
+ const dataCache = new DataCache({});
+ const org = testOrg('subtopic');
+
+ clickhouse.registerMqttTopicForStorage(dataCache, STANDARD_TOPIC_PATTERN);
+ dataCache.update([org, 'device1', '@myscope', 'cap', '2.0.0', 'level1', 'level2'], 'value');
+ await once(emitter, 'insert');
+
+ const [row] = await queryRowsByOrg(org, { limit: 1 });
+
+ assert.strictEqual(row.Scope, '@myscope');
+ assert.strictEqual(row.CapabilityVersion, '2.0.0');
+ assert.deepStrictEqual(row.SubTopic, ['level1', 'level2']);
+ });
+
+ it('should handle multiple updates to different subtopics', async () => {
+ const dataCache = new DataCache({});
+ const org = testOrg('multi');
+
+ clickhouse.registerMqttTopicForStorage(dataCache, STANDARD_TOPIC_PATTERN);
+ dataCache.update([org, 'device1', '@myscope', 'cap', '1.0.0', 'battery'], 85);
+ dataCache.update([org, 'device1', '@myscope', 'cap', '1.0.0', 'temperature'], 42);
+ await once(emitter, 'insert');
+
+ const rows = await queryRowsByOrg(org);
+
+ assert.strictEqual(rows.length, 2);
+ const subtopics = rows.map(r => r.SubTopic[0]).sort();
+ const payloads = Object.fromEntries(rows.map(r => [r.SubTopic[0], r.Payload]));
+ assert.deepStrictEqual(subtopics, ['battery', 'temperature']);
+ assert.strictEqual(payloads['battery'], 85);
+ assert.strictEqual(payloads['temperature'], 42);
+ });
+
+ it('should work with unnamed wildcards', async () => {
+ const dataCache = new DataCache({});
+ const org = testOrg('unnamed');
+
+ clickhouse.registerMqttTopicForStorage(dataCache, '/+/+/+/+/+/#');
+ dataCache.update([org, 'device1', '@myscope', 'cap', '1.0.0', 'data'], { x: 1 });
+ await once(emitter, 'insert');
+
+ const [row] = await queryRowsByOrg(org, { limit: 1 });
+
+ assert.strictEqual(row.DeviceId, 'device1');
+ assert.deepStrictEqual(row.Payload, { x: 1 });
+ });
+ });
+
+
+ describe('queryMQTTHistory', () => {
+
+ const dataCache = new DataCache({});
+ const org = testOrg('query');
+
+ before(async () => {
+ await clickhouse.ensureMqttHistoryTable(TABLE_NAME, 33);
+
+ // clear
+ await clickhouse.client.command({
+ query: `TRUNCATE TABLE ${TABLE_NAME}`,
+ clickhouse_settings: { wait_end_of_query: 1 }
+ });
+
+ clickhouse.registerMqttTopicForStorage(dataCache, '#');
+ dataCache.update([org, 'device1', '@myscope', 'nullcap', '1.0.0', 'willBeNull'], 1234);
+ dataCache.update([org, 'device1', '@myscope', 'capdata', '1.0.0', 'data'], { x: 1 });
+ dataCache.update([org, 'device1', '@myscope', 'cap', '1.0.0', 'data2'], { y: 2 });
+ dataCache.update([org, 'device1', '@myscope', 'cap', '1.0.0',
+ 'sub1', 'sub2', 'sub3.1'],
+ { isSub: 3.1, data: {string: 'some string'} });
+ dataCache.update([org, 'device1', '@myscope', 'cap', '1.0.0',
+ 'sub1', 'sub2', 'sub3.2'],
+ { isSub: 3.3, data: {aNumber: 1234} });
+ await once(emitter, 'insert');
+ await wait(100);
+ // another value, after a delay
+ dataCache.update([org, 'device1', '@myscope', 'cap', '1.0.0', 'data'], { x: 2 });
+ dataCache.update([org, 'device1', '@myscope', 'nullcap', '1.0.0', 'willBeNull'], null);
+
+ await once(emitter, 'insert');
+ });
+
+ it('queries with wild cards', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/${org}/+/+/+/+/+` });
+ assert(rows.length > 0);
+ });
+
+ it('queries with multiple selectors', async () => {
+ const [row] = await clickhouse.queryMQTTHistory({
+ topicSelector: `/${org}/+/+/capdata/+/+` });
+ assert.strictEqual(row.DeviceId, 'device1');
+ assert.deepEqual(row.SubTopic, ['data']);
+ assert.deepStrictEqual(row.Payload, { x: 1 });
+ });
+
+
+ it('queries based on sub-topic selectors', async () => {
+ const [row] = await clickhouse.queryMQTTHistory({
+ topicSelector: `/${org}/+/+/+/+/+/data2` });
+ assert.strictEqual(row.DeviceId, 'device1');
+ assert.deepStrictEqual(row.Payload, { y: 2 });
+ });
+
+ it('queries based on sub-topic selectors with wildcards', async () => {
+ const [row] = await clickhouse.queryMQTTHistory({
+ topicSelector: `/${org}/+/+/+/+/+/+/sub2/+` });
+ assert.deepStrictEqual(row.SubTopic[2], 'sub3.1');
+ });
+
+ it('queries based on multiple sub-topic selectors with wildcards', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/${org}/+/+/+/+/+/sub1/+/+` });
+ assert.strictEqual(rows[0].SubTopic.length, 3);
+ assert.strictEqual(rows[0].SubTopic[2], 'sub3.1');
+ assert.strictEqual(rows[1].SubTopic[2], 'sub3.2');
+ });
+
+ it('returns the history', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/${org}/+/+/+/+/+/data/+/+` });
+ assert.deepStrictEqual(rows.length, 2);
+ assert.deepStrictEqual(rows[0].Payload, {x: 1});
+ assert.deepStrictEqual(rows[1].Payload, {x: 2});
+ assert(rows[0].Timestamp < rows[1].Timestamp);
+ });
+
+ it('handles null values', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/${org}/+/+/+/+/+/willBeNull` });
+ assert.strictEqual(rows.at(-1).Payload, null);
+ });
+ });
+
+ /** Test performance of the table (index). */
+ describe('performance', () => {
+
+ const ROWS = 1_000_000; // number of rows to insert (mock)
+ // time gap between inserted values (to stretch over several partitions):
+ const GAP = 1_000;
+ const dataCache = new DataCache({});
+ const now = Date.now();
+
+ before(async () => {
+ await clickhouse.ensureMqttHistoryTable(TABLE_NAME, 33);
+
+ // clear
+ await clickhouse.client.exec({
+ query: `TRUNCATE TABLE ${TABLE_NAME}`,
+ clickhouse_settings: { wait_end_of_query: 1 }
+ });
+
+ const rows = [];
+ for (let i = 0; i < ROWS; i++) {
+ rows.push({
+ Timestamp: new Date(now + i * GAP), // use current date to avoid immediate TTL cleanup
+ TopicParts: [`org${i % 50}`, `device${i % 1000}`, '@myscope', `cap${i % 100}`, `1.${i % 100}.0`, 'data', i],
+ Payload: { i },
+ })
+ }
+
+ await clickhouse.client.insert({
+ table: TABLE_NAME,
+ values: [rows],
+ format: 'JSONEachRow',
+ clickhouse_settings: { wait_end_of_query: 1 }
+ });
+
+ console.log(`inserted ${rows.length} rows into ${TABLE_NAME}`);
+ });
+
+ let start;
+ beforeEach(() => {
+ console.time('elapsed');
+ start = performance.now();
+ });
+
+ afterEach(() => {
+ console.timeEnd('elapsed');
+ });
+
+ /** Assert that no more than limit ms have passed since start of test case. */
+ const assertTimelimit = (limit) => {
+ assert(performance.now() - start < limit, `Less than ${limit} ms`);
+ }
+
+ it('returns the entire history in reasonable time', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/+/+/+/+/+/+`,
+ limit: 2 * ROWS,
+ });
+ assert.equal(rows.length, ROWS);
+ assert(rows[0].Timestamp < rows[1].Timestamp);
+ assertTimelimit(ROWS / 100);
+ });
+
+ it('quickly filters by OrgId', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/org42/+/+/+/+/+`,
+ limit: 2 * ROWS,
+ });
+ assert.equal(rows.length, ROWS / 50);
+ assertTimelimit(ROWS / 1000);
+ });
+
+ it('quickly filters by DeviceId', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/+/device123/+/+/+/+/data`,
+ limit: 2 * ROWS,
+ });
+ assert.equal(rows.length, ROWS / 1000);
+ assertTimelimit(ROWS / 10000);
+ });
+
+ it('quickly filters by CapabilityName', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/+/+/+/cap34/+/+/data`,
+ limit: 2 * ROWS,
+ });
+ assert.equal(rows.length, ROWS / 100);
+ assertTimelimit(ROWS / 10000);
+ });
+
+ it('quickly filters by time: since', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/+/+/+/+/+/+/data`,
+ since: new Date(now + (ROWS - 400) * GAP),
+ limit: 2 * ROWS,
+ });
+ assert.equal(rows.length, 400);
+ assertTimelimit(ROWS / 10000);
+ });
+
+ it('quickly filters by time: until', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/+/+/+/+/+/+/data`,
+ until: new Date(now + 400 * GAP),
+ limit: 2 * ROWS,
+ });
+ assert.equal(rows.length, 401);
+ assertTimelimit(ROWS / 10000);
+ });
+
+ it('quickly filters by org and time: since', async () => {
+ const rows = await clickhouse.queryMQTTHistory({
+ topicSelector: `/org23/+/+/+/+/+/data`,
+ since: new Date(now + (ROWS - 400) * GAP),
+ limit: 2 * ROWS,
+ });
+ assert.equal(rows.length, 8);
+ assertTimelimit(ROWS / 10000);
+ });
+
+ });
+
+ });
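
The registerMqttTopicForStorage docstring above also points at querying the table directly (e.g. from Grafana) via ClickHouse's JSON functions. A hedged sketch of such a read using only the client exposed by this package; the table name matches the default ('mqtt_history') and the filter values ('myorg', 'my-cap') are placeholders:

const clickhouse = require('@transitive-sdk/clickhouse');

const readLatest = async () => {
  // falls back to the CLICKHOUSE_* environment variables when called without arguments
  clickhouse.init();
  // OrgId and CapabilityName are MATERIALIZED columns from the schema above;
  // JSON_VALUE extracts a field from the stringified Payload
  const result = await clickhouse.client.query({
    query: `SELECT Timestamp, JSON_VALUE(Payload, '$.x') AS x
            FROM mqtt_history
            WHERE OrgId = 'myorg' AND CapabilityName = 'my-cap'
            ORDER BY Timestamp DESC LIMIT 100`,
    format: 'JSONEachRow'
  });
  return await result.json();
};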