dt-common-device 3.0.0 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/README.md +47 -15
  2. package/dist/device/cloud/interface.d.ts +101 -0
  3. package/dist/device/cloud/interface.js +3 -0
  4. package/dist/device/cloud/interfaces/IDeviceConnectionService.d.ts +7 -0
  5. package/dist/device/cloud/interfaces/IDeviceConnectionService.js +3 -0
  6. package/dist/device/cloud/interfaces/IDevicesService.d.ts +9 -0
  7. package/dist/device/cloud/interfaces/IDevicesService.js +2 -0
  8. package/dist/device/cloud/services/Device.service.d.ts +39 -0
  9. package/dist/device/cloud/services/Device.service.js +9 -0
  10. package/dist/device/cloud/services/DeviceCloudService.d.ts +42 -0
  11. package/dist/device/cloud/services/DeviceCloudService.js +59 -0
  12. package/dist/device/cloud/services/DeviceHub.service.d.ts +3 -0
  13. package/dist/device/cloud/services/DeviceHub.service.js +6 -0
  14. package/dist/device/cloud/services/Hub.service.d.ts +25 -0
  15. package/dist/device/cloud/services/Hub.service.js +9 -0
  16. package/dist/device/cloud/services/SmartThingsDeviceService.d.ts +38 -0
  17. package/dist/device/cloud/services/SmartThingsDeviceService.js +52 -0
  18. package/dist/device/index.d.ts +4 -0
  19. package/dist/device/index.js +20 -0
  20. package/dist/device/local/events/EventHandler.js +6 -6
  21. package/dist/device/local/events/Events.d.ts +12 -33
  22. package/dist/device/local/events/Events.js +12 -33
  23. package/dist/device/local/interface.d.ts +0 -0
  24. package/dist/device/local/interface.js +1 -0
  25. package/dist/device/local/services/DeviceHub.service.d.ts +11 -0
  26. package/dist/device/local/services/DeviceHub.service.js +40 -0
  27. package/dist/queue/entities/HybridHttpQueue.d.ts +4 -3
  28. package/dist/queue/entities/HybridHttpQueue.js +95 -43
  29. package/dist/queue/interfaces/IHybridHttpQueue.d.ts +3 -2
  30. package/dist/queue/interfaces/IJobResult.d.ts +8 -0
  31. package/dist/queue/services/QueueService.d.ts +3 -3
  32. package/dist/queue/types/queue.types.d.ts +0 -4
  33. package/dist/queue/utils/queueUtils.js +3 -2
  34. package/dist/queue/utils/rateLimit.utils.d.ts +4 -0
  35. package/dist/queue/utils/rateLimit.utils.js +54 -1
  36. package/package.json +1 -1
  37. package/src/queue/entities/HybridHttpQueue.ts +140 -64
  38. package/src/queue/interfaces/IHybridHttpQueue.ts +3 -2
  39. package/src/queue/interfaces/IJobResult.ts +9 -0
  40. package/src/queue/services/QueueService.ts +3 -3
  41. package/src/queue/types/queue.types.ts +0 -1
  42. package/src/queue/utils/queueUtils.ts +3 -2
  43. package/src/queue/utils/rateLimit.utils.ts +74 -1
package/src/queue/entities/HybridHttpQueue.ts

@@ -6,7 +6,13 @@ import { Service } from "typedi";
 import { RateLimitUtils } from "../utils/rateLimit.utils";
 import { JobUtils } from "../utils/jobUtils";
 import { QueueUtils } from "../utils/queueUtils";
-import { IRateLimitConfig, IJobResult, IHttpRequestJob } from "../interfaces";
+import {
+  IRateLimitConfig,
+  IJobResult,
+  IHttpRequestJob,
+  IQueueResponse,
+} from "../interfaces";
+import { Queue } from "bullmq";
 
 @Service()
 export class HybridHttpQueue {
@@ -19,56 +25,102 @@ export class HybridHttpQueue {
     this.rateLimitConfigs = RateLimitUtils.initializeRateLimitConfigs();
   }
 
-  private async handleRateLimit(job: any): Promise<void> {
-    const { connectionId, provider, url, method, options } = job.data;
-    const microservice = options.queueOptions?.microservice || "default";
-    const isMaxRetries = job.attemptsMade >= 2;
+  private async handleRateLimitAndQueue(
+    url: string,
+    method: string,
+    options: HttpCallOption
+  ): Promise<IQueueResponse> {
+    const { connectionId, provider, microservice } =
+      JobUtils.extractConnectionDetails(options);
+    const key = `rate_limit:${provider}:${connectionId}`;
+    const config = this.rateLimitConfigs.get(provider);
+    const windowMs = config?.windowMs ?? 60000;
+
+    const timestamps = await RateLimitUtils.getRawRequestTimestamps(key);
+    const now = Date.now();
+    const windowStart = now - windowMs;
+    const recentRequests = timestamps.filter((t) => t > windowStart);
+    const nextAvailableTime =
+      recentRequests.length > 0 ? recentRequests[0] + windowMs : now + 1000;
+    const delay = Math.max(nextAvailableTime - now, 1000); // at least 1s delay
+
+    // Create job data
+    const jobData = {
+      microservice,
+      connectionId,
+      provider,
+      url,
+      method,
+      options,
+      timestamp: Date.now(),
+    };
+
+    // Add job to queue with delay (background processing)
+    const queueKey = QueueUtils.getQueueKey(
+      microservice,
+      connectionId,
+      provider
+    );
+    const queue = QueueUtils.getOrCreateQueue(queueKey, this.queues);
+
+    QueueUtils.getOrCreateWorker(
+      queueKey,
+      this.workers,
+      this.processHttpRequest.bind(this),
+      this.jobResults
+    );
+
+    const job = await queue.add("http-request", jobData, {
+      delay,
+      attempts: 1,
+      removeOnComplete: { age: 300, count: 1 },
+      removeOnFail: { age: 300, count: 1 },
+    });
 
     await publishAudit({
-      eventType: isMaxRetries
-        ? "http.request.failed"
-        : "http.request.rateLimitExceeded",
+      eventType: "http.request.rateLimitQueued",
       properties: {
+        resource: microservice,
         connectionId,
         provider,
         endpoint: url,
         method,
         timestamp: Date.now(),
         queueId: job.id,
-        reason: isMaxRetries ? "max_retries_exceeded" : "rate_limit_exceeded",
-        ...(!isMaxRetries && {
-          retryCount: job.attemptsMade + 1,
-          maxRetries: 3,
-        }),
+        reason: "rate_limit_exceeded_queued",
+        delay,
+        estimatedProcessingTime: now + delay,
       },
     });
 
-    if (isMaxRetries) {
-      await this.queues
-        .get(QueueUtils.getQueueKey(microservice, connectionId, provider))
-        ?.obliterate({ force: true });
-      // Don't throw error for max retries - just return
-      return;
-    }
-
-    throw new Error("Rate limit exceeded");
+    // Return immediate response to controller
+    return {
+      success: true,
+      queued: true,
+      estimatedProcessingTime: now + delay,
+      jobId: job.id,
+    };
   }
 
   private async processHttpRequest(job: any): Promise<any> {
     const { connectionId, provider, url, method, options } = job.data;
 
-    // Check rate limit
-    if (
-      !(await RateLimitUtils.checkRateLimit(
-        connectionId,
-        provider,
-        this.rateLimitConfigs
-      ))
-    ) {
-      await this.handleRateLimit(job);
+    const allowed = await RateLimitUtils.isRateLimitAllowed(
+      connectionId,
+      provider,
+      this.rateLimitConfigs
+    );
+
+    if (!allowed) {
+      // This shouldn't happen since we check before queuing, but handle it gracefully
+      getConfig().LOGGER.warn(
+        `Job ${job.id} still rate limited after delay, skipping`
+      );
       return;
     }
 
+    await RateLimitUtils.recordRequest(connectionId, provider);
+
     try {
       getConfig().LOGGER.info(
        `Executing HTTP request: ${method} ${url} for ${provider}`
@@ -120,7 +172,7 @@ export class HybridHttpQueue {
       connectionProvider: string;
       microservice: string;
     };
-  }): Promise<any> {
+  }): Promise<IQueueResponse> {
     const { method, url, body, headers, queueOptions } = options;
     // Create HttpCallOption object
     const httpCallOption: HttpCallOption = {
@@ -136,48 +188,72 @@ export class HybridHttpQueue {
     url: string,
     method: string,
     options: HttpCallOption
-  ): Promise<any> {
+  ): Promise<IQueueResponse> {
     const { connectionId, provider, microservice } =
       JobUtils.extractConnectionDetails(options);
-    const queueKey = QueueUtils.getQueueKey(
-      microservice,
+
+    // Check rate limit first
+    const allowed = await RateLimitUtils.isRateLimitAllowed(
       connectionId,
-      provider
+      provider,
+      this.rateLimitConfigs
     );
 
+    if (!allowed) {
+      // Rate limited - queue the request and return immediate response
+      return this.handleRateLimitAndQueue(url, method, options);
+    }
+
+    // Not rate limited - process immediately
     getConfig().LOGGER.info(
-      `Queueing: ${method} ${url} -> ${provider} [${connectionId}]`
+      `Processing immediately: ${method} ${url} -> ${provider} [${connectionId}]`
     );
 
-    QueueUtils.getOrCreateWorker(
-      queueKey,
-      this.workers,
-      this.processHttpRequest.bind(this),
-      this.jobResults
-    );
-    const queue = QueueUtils.getOrCreateQueue(queueKey, this.queues);
+    try {
+      // Record the request first
+      await RateLimitUtils.recordRequest(connectionId, provider);
 
-    const job = await queue.add(
-      "http-request",
-      {
-        microservice,
-        connectionId,
-        provider,
-        url,
-        method,
-        options,
-        timestamp: Date.now(),
-      },
-      {
-        attempts: 3,
-        backoff: { type: "exponential", delay: 5000 },
-        removeOnComplete: { age: 300, count: 1 },
-        removeOnFail: { age: 300, count: 1 },
-      }
-    );
+      // Execute the HTTP request
+      const response = await axios({
+        method: method.toLowerCase(),
+        url: url,
+        headers: options.headers || {},
+        timeout: 30000,
+        ...(options.body && { data: options.body }),
+        ...(options.params && { params: options.params }),
+      });
+
+      getConfig().LOGGER.info(
+        `HTTP request successful: ${method} ${url} for ${provider}`
+      );
 
-    getConfig().LOGGER.info(`Job ${job.id} queued, waiting for completion...`);
-    return JobUtils.waitForJobCompletion(job, queueKey, this.jobResults);
+      return {
+        success: true,
+        data: response.data,
+        queued: false,
+      };
+    } catch (error: any) {
+      getConfig().LOGGER.error(`HTTP request failed: ${error.message}`);
+
+      await publishAudit({
+        eventType: "http.request.error",
+        properties: {
+          connectionId,
+          provider,
+          endpoint: url,
+          method,
+          timestamp: Date.now(),
+          reason: "execution_error",
+          errorMessage: error.message,
+        },
+      });
+
+      return {
+        success: false,
+        error: `HTTP request failed: ${error.message}`,
+        queued: false,
+      };
+    }
   }
 
   async shutdown(): Promise<void> {
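The refactor above changes `handleRequest` from always enqueueing and awaiting a BullMQ job to checking the provider's limit first: allowed requests run inline via axios, while rate-limited ones are queued with a delay derived from the oldest timestamp still inside the window. A minimal sketch of that delay calculation, assuming timestamps are epoch milliseconds stored oldest-first (the helper name below is illustrative, not part of the package):

```ts
// Sketch: how long to delay a queued job so it lands after the rate-limit
// window has room again. Assumes `timestamps` are epoch millis, oldest first.
function computeQueueDelay(
  timestamps: number[],
  windowMs: number,
  now: number = Date.now()
): number {
  const windowStart = now - windowMs;
  // Only requests inside the current window count against the limit.
  const recent = timestamps.filter((t) => t > windowStart);
  // A slot frees up when the oldest in-window request expires.
  const nextAvailableTime =
    recent.length > 0 ? recent[0] + windowMs : now + 1000;
  // Never schedule sooner than 1s out, mirroring the queue's minimum delay.
  return Math.max(nextAvailableTime - now, 1000);
}

// Example: 60s window, oldest in-window request made 10s ago -> ~50s delay.
console.log(computeQueueDelay([Date.now() - 10_000], 60_000));
```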
package/src/queue/interfaces/IHybridHttpQueue.ts

@@ -1,4 +1,5 @@
 import { HttpCallOption } from "../types/http.types";
+import { IQueueResponse } from "./IJobResult";
 
 export interface IHybridHttpQueue {
   request(options: {
@@ -11,13 +12,13 @@ export interface IHybridHttpQueue {
       connectionProvider: string;
       microservice: string;
     };
-  }): Promise<any>;
+  }): Promise<IQueueResponse>;
 
   handleRequest(
     url: string,
     method: string,
     options: HttpCallOption
-  ): Promise<any>;
+  ): Promise<IQueueResponse>;
 
   shutdown(): Promise<void>;
 }
package/src/queue/interfaces/IJobResult.ts

@@ -4,3 +4,12 @@ export interface IJobResult {
   resolved: boolean;
   timestamp: number;
 }
+
+export interface IQueueResponse {
+  success: boolean;
+  data?: any;
+  error?: string;
+  queued?: boolean;
+  estimatedProcessingTime?: number;
+  jobId?: string;
+}
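Because `request` and `handleRequest` now resolve to an `IQueueResponse` instead of raw response data, callers must branch on `queued`: inline execution returns `data` immediately, while a rate-limited call returns only a `jobId` and `estimatedProcessingTime`. A hypothetical consumer, sketched under the assumption that a `QueueService`-like object is injected (the URL and options are placeholders, not part of this diff):

```ts
// Minimal structural copy of IQueueResponse for this sketch.
interface QueueResponseLike {
  success: boolean;
  data?: any;
  error?: string;
  queued?: boolean;
  estimatedProcessingTime?: number;
  jobId?: string;
}

// Hypothetical caller; `queue` stands in for an injected QueueService.
async function callDeviceApi(queue: {
  handleRequest(
    url: string,
    method: string,
    options: unknown
  ): Promise<QueueResponseLike>;
}): Promise<unknown> {
  const res = await queue.handleRequest(
    "https://api.example.com/devices", // placeholder endpoint
    "GET",
    { connectionId: "conn-1", connectionProvider: "Sensibo" } // placeholder options
  );

  if (!res.success) {
    throw new Error(res.error ?? "request failed");
  }

  if (res.queued) {
    // Deferred: the request will run when the delayed BullMQ job fires.
    return { jobId: res.jobId, retryAt: res.estimatedProcessingTime };
  }

  // Executed inline: the provider's response body is already available.
  return res.data;
}
```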
package/src/queue/services/QueueService.ts

@@ -1,6 +1,6 @@
 import { Service } from "typedi";
 import { HybridHttpQueue } from "../entities/HybridHttpQueue";
-import { IHybridHttpQueue } from "../interfaces";
+import { IHybridHttpQueue, IQueueResponse } from "../interfaces";
 import { HttpCallOption } from "../types/http.types";
 
 @Service()
@@ -21,7 +21,7 @@ export class QueueService implements IHybridHttpQueue {
       connectionProvider: string;
       microservice: string;
     };
-  }): Promise<any> {
+  }): Promise<IQueueResponse> {
     return this.hybridQueue.request(options);
   }
 
@@ -29,7 +29,7 @@ export class QueueService implements IHybridHttpQueue {
     url: string,
     method: string,
     options: HttpCallOption
-  ): Promise<any> {
+  ): Promise<IQueueResponse> {
     return this.hybridQueue.handleRequest(url, method, options);
   }
 
package/src/queue/types/queue.types.ts

@@ -8,7 +8,6 @@ export interface QueueConfig {
 
 export interface JobOptions {
   attempts: number;
-  backoff: { type: string; delay: number };
   removeOnComplete: { age: number; count: number };
   removeOnFail: { age: number; count: number };
 }
package/src/queue/utils/queueUtils.ts

@@ -1,4 +1,5 @@
 import { getConfig } from "../../config/config";
+import { getRedisClient } from "../../db/redis";
 
 export class QueueUtils {
   static getQueueKey(
@@ -16,7 +17,7 @@ export class QueueUtils {
       .set(
         queueKey,
         new (require("bullmq").Queue)(queueKey, {
-          connection: require("../../db/redis").getRedisClient(),
+          connection: getRedisClient(),
         })
       )
       .get(queueKey)!
@@ -33,7 +34,7 @@ export class QueueUtils {
 
     const { Worker } = require("bullmq");
     const worker = new Worker(queueKey, processFunction, {
-      connection: require("../../db/redis").getRedisClient(),
+      connection: getRedisClient(),
       concurrency: 1,
       removeOnComplete: { age: 300, count: 1 },
       removeOnFail: { age: 300, count: 1 },
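The `queueUtils.ts` change only swaps the inline `require("../../db/redis")` calls for the imported `getRedisClient`; the per-key get-or-create caching of queues and workers is unchanged. For reference, a minimal sketch of that pattern with BullMQ, where the `redis` parameter stands in for whatever `getRedisClient()` returns:

```ts
import { Queue } from "bullmq";
import type { Redis } from "ioredis";

// Sketch: one BullMQ queue per key, created lazily on first use and reused
// afterwards. `queues` mirrors the Map the utils maintain internally.
function getOrCreateQueue(
  key: string,
  queues: Map<string, Queue>,
  redis: Redis
): Queue {
  let queue = queues.get(key);
  if (!queue) {
    queue = new Queue(key, { connection: redis });
    queues.set(key, queue);
  }
  return queue;
}
```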
package/src/queue/utils/rateLimit.utils.ts

@@ -48,11 +48,84 @@ export class RateLimitUtils {
 
     // Configure rate limits for different providers
     configs.set("Sensibo", {
-      maxRequests: 40,
+      maxRequests: 5,
       windowMs: 60000,
       provider: "Sensibo",
     });
 
     return configs;
   }
+
+  static async isRateLimitAllowed(
+    connectionId: string,
+    provider: string,
+    rateLimitConfigs: Map<string, IRateLimitConfig>
+  ): Promise<boolean> {
+    const config = rateLimitConfigs.get(provider);
+    if (!config) {
+      getConfig().LOGGER.warn(
+        `No rate limit config found for provider: ${provider}`
+      );
+      return true;
+    }
+
+    const key = `rate_limit:${provider}:${connectionId}`;
+    const now = Date.now();
+    const windowStart = now - config.windowMs;
+
+    try {
+      const data = await this.redisClient.get(key);
+      const requests = data
+        ? JSON.parse(data).filter((t: number) => t > windowStart)
+        : [];
+
+      return requests.length < config.maxRequests;
+    } catch (error) {
+      getConfig().LOGGER.error(`Rate limit check error: ${error}`);
+      return true;
+    }
+  }
+  static async recordRequest(
+    connectionId: string,
+    provider: string
+  ): Promise<void> {
+    const config = this.getRateLimitConfig(provider);
+    if (!config) return;
+
+    const key = `rate_limit:${provider}:${connectionId}`;
+    const now = Date.now();
+    const windowStart = now - config.windowMs;
+
+    try {
+      const data = await this.redisClient.get(key);
+      const requests = data
+        ? JSON.parse(data).filter((t: number) => t > windowStart)
+        : [];
+
+      requests.push(now);
+      await this.redisClient.setex(
+        key,
+        Math.ceil(config.windowMs / 1000),
+        JSON.stringify(requests)
+      );
+    } catch (error) {
+      getConfig().LOGGER.error(`Rate limit record error: ${error}`);
+    }
+  }
+
+  static async getRawRequestTimestamps(key: string): Promise<number[]> {
+    try {
+      const data = await this.redisClient.get(key);
+      return data ? JSON.parse(data) : [];
+    } catch (error) {
+      getConfig().LOGGER.error(
+        `Error fetching raw request timestamps: ${error}`
+      );
+      return [];
+    }
+  }
+
+  static getRateLimitConfig(provider: string): IRateLimitConfig | undefined {
+    return this.initializeRateLimitConfigs().get(provider);
+  }
 }
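The rate-limit utilities now expose an explicit check/record pair over a JSON array of timestamps kept in Redis (read with `get`, rewritten with `setex` so the key expires with the window), and the Sensibo budget drops from 40 to 5 requests per 60 s. A self-contained sketch of the same sliding-window bookkeeping against an in-memory store, with illustrative names:

```ts
// Sketch: sliding-window check/record, simulated in memory. maxRequests = 5
// and windowMs = 60000 mirror the new Sensibo config above.
const store = new Map<string, number[]>();
const maxRequests = 5;
const windowMs = 60_000;

function isAllowed(key: string, now = Date.now()): boolean {
  const recent = (store.get(key) ?? []).filter((t) => t > now - windowMs);
  return recent.length < maxRequests;
}

function record(key: string, now = Date.now()): void {
  const recent = (store.get(key) ?? []).filter((t) => t > now - windowMs);
  recent.push(now);
  store.set(key, recent); // the Redis version also sets a TTL of windowMs
}

// Example: the 6th call inside one minute is rejected and would be queued.
const key = "rate_limit:Sensibo:conn-1";
for (let i = 0; i < 6; i++) {
  if (isAllowed(key)) {
    record(key);
    console.log(`request ${i + 1}: sent`);
  } else {
    console.log(`request ${i + 1}: rate limited -> queue with delay`);
  }
}
```

Note that the Redis read-modify-write (`get` followed by `setex`) is not atomic, so concurrent processes could briefly overshoot the limit; the single-concurrency worker configured in `queueUtils.ts` keeps that window small.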