dt-common-device 3.0.0 → 3.0.2

This diff shows the published contents of these package versions as they appear in their public registry; it is provided for informational purposes only.
Files changed (45)
  1. package/README.md +47 -15
  2. package/dist/device/cloud/interface.d.ts +101 -0
  3. package/dist/device/cloud/interface.js +3 -0
  4. package/dist/device/cloud/interfaces/IDeviceConnectionService.d.ts +7 -0
  5. package/dist/device/cloud/interfaces/IDeviceConnectionService.js +3 -0
  6. package/dist/device/cloud/interfaces/IDevicesService.d.ts +9 -0
  7. package/dist/device/cloud/interfaces/IDevicesService.js +2 -0
  8. package/dist/device/cloud/services/Device.service.d.ts +39 -0
  9. package/dist/device/cloud/services/Device.service.js +9 -0
  10. package/dist/device/cloud/services/DeviceCloudService.d.ts +42 -0
  11. package/dist/device/cloud/services/DeviceCloudService.js +59 -0
  12. package/dist/device/cloud/services/DeviceHub.service.d.ts +3 -0
  13. package/dist/device/cloud/services/DeviceHub.service.js +6 -0
  14. package/dist/device/cloud/services/Hub.service.d.ts +25 -0
  15. package/dist/device/cloud/services/Hub.service.js +9 -0
  16. package/dist/device/cloud/services/SmartThingsDeviceService.d.ts +38 -0
  17. package/dist/device/cloud/services/SmartThingsDeviceService.js +52 -0
  18. package/dist/device/index.d.ts +4 -0
  19. package/dist/device/index.js +20 -0
  20. package/dist/device/local/events/EventHandler.js +6 -6
  21. package/dist/device/local/events/Events.d.ts +12 -33
  22. package/dist/device/local/events/Events.js +12 -33
  23. package/dist/device/local/interface.d.ts +0 -0
  24. package/dist/device/local/interface.js +1 -0
  25. package/dist/device/local/services/DeviceHub.service.d.ts +11 -0
  26. package/dist/device/local/services/DeviceHub.service.js +40 -0
  27. package/dist/queue/entities/HybridHttpQueue.d.ts +5 -3
  28. package/dist/queue/entities/HybridHttpQueue.js +97 -44
  29. package/dist/queue/interfaces/IHybridHttpQueue.d.ts +4 -2
  30. package/dist/queue/interfaces/IJobResult.d.ts +8 -0
  31. package/dist/queue/services/QueueService.d.ts +4 -3
  32. package/dist/queue/types/http.types.d.ts +1 -0
  33. package/dist/queue/types/queue.types.d.ts +0 -4
  34. package/dist/queue/utils/queueUtils.js +3 -2
  35. package/dist/queue/utils/rateLimit.utils.d.ts +4 -0
  36. package/dist/queue/utils/rateLimit.utils.js +54 -1
  37. package/package.json +1 -1
  38. package/src/queue/entities/HybridHttpQueue.ts +143 -65
  39. package/src/queue/interfaces/IHybridHttpQueue.ts +4 -2
  40. package/src/queue/interfaces/IJobResult.ts +9 -0
  41. package/src/queue/services/QueueService.ts +4 -3
  42. package/src/queue/types/http.types.ts +1 -0
  43. package/src/queue/types/queue.types.ts +0 -1
  44. package/src/queue/utils/queueUtils.ts +3 -2
  45. package/src/queue/utils/rateLimit.utils.ts +74 -1

package/src/queue/entities/HybridHttpQueue.ts

@@ -6,7 +6,13 @@ import { Service } from "typedi";
  import { RateLimitUtils } from "../utils/rateLimit.utils";
  import { JobUtils } from "../utils/jobUtils";
  import { QueueUtils } from "../utils/queueUtils";
- import { IRateLimitConfig, IJobResult, IHttpRequestJob } from "../interfaces";
+ import {
+   IRateLimitConfig,
+   IJobResult,
+   IHttpRequestJob,
+   IQueueResponse,
+ } from "../interfaces";
+ import { Queue } from "bullmq";

  @Service()
  export class HybridHttpQueue {
@@ -19,56 +25,102 @@ export class HybridHttpQueue {
      this.rateLimitConfigs = RateLimitUtils.initializeRateLimitConfigs();
    }

-   private async handleRateLimit(job: any): Promise<void> {
-     const { connectionId, provider, url, method, options } = job.data;
-     const microservice = options.queueOptions?.microservice || "default";
-     const isMaxRetries = job.attemptsMade >= 2;
+   private async handleRateLimitAndQueue(
+     url: string,
+     method: string,
+     options: HttpCallOption
+   ): Promise<IQueueResponse> {
+     const { connectionId, provider, microservice } =
+       JobUtils.extractConnectionDetails(options);
+     const key = `rate_limit:${provider}:${connectionId}`;
+     const config = this.rateLimitConfigs.get(provider);
+     const windowMs = config?.windowMs ?? 60000;
+
+     const timestamps = await RateLimitUtils.getRawRequestTimestamps(key);
+     const now = Date.now();
+     const windowStart = now - windowMs;
+     const recentRequests = timestamps.filter((t) => t > windowStart);
+     const nextAvailableTime =
+       recentRequests.length > 0 ? recentRequests[0] + windowMs : now + 1000;
+     const delay = Math.max(nextAvailableTime - now, 1000); // at least 1s delay
+
+     // Create job data
+     const jobData = {
+       microservice,
+       connectionId,
+       provider,
+       url,
+       method,
+       options,
+       timestamp: Date.now(),
+     };
+
+     // Add job to queue with delay (background processing)
+     const queueKey = QueueUtils.getQueueKey(
+       microservice,
+       connectionId,
+       provider
+     );
+     const queue = QueueUtils.getOrCreateQueue(queueKey, this.queues);
+
+     QueueUtils.getOrCreateWorker(
+       queueKey,
+       this.workers,
+       this.processHttpRequest.bind(this),
+       this.jobResults
+     );
+
+     const job = await queue.add("http-request", jobData, {
+       delay,
+       attempts: 1,
+       removeOnComplete: { age: 300, count: 1 },
+       removeOnFail: { age: 300, count: 1 },
+     });

      await publishAudit({
-       eventType: isMaxRetries
-         ? "http.request.failed"
-         : "http.request.rateLimitExceeded",
+       eventType: "http.request.rateLimitQueued",
        properties: {
+         resource: microservice,
          connectionId,
          provider,
          endpoint: url,
          method,
          timestamp: Date.now(),
          queueId: job.id,
-         reason: isMaxRetries ? "max_retries_exceeded" : "rate_limit_exceeded",
-         ...(!isMaxRetries && {
-           retryCount: job.attemptsMade + 1,
-           maxRetries: 3,
-         }),
+         reason: "rate_limit_exceeded_queued",
+         delay,
+         estimatedProcessingTime: now + delay,
        },
      });

-     if (isMaxRetries) {
-       await this.queues
-         .get(QueueUtils.getQueueKey(microservice, connectionId, provider))
-         ?.obliterate({ force: true });
-       // Don't throw error for max retries - just return
-       return;
-     }
-
-     throw new Error("Rate limit exceeded");
+     // Return immediate response to controller
+     return {
+       success: true,
+       queued: true,
+       estimatedProcessingTime: now + delay,
+       jobId: job.id,
+     };
    }

    private async processHttpRequest(job: any): Promise<any> {
      const { connectionId, provider, url, method, options } = job.data;

-     // Check rate limit
-     if (
-       !(await RateLimitUtils.checkRateLimit(
-         connectionId,
-         provider,
-         this.rateLimitConfigs
-       ))
-     ) {
-       await this.handleRateLimit(job);
+     const allowed = await RateLimitUtils.isRateLimitAllowed(
+       connectionId,
+       provider,
+       this.rateLimitConfigs
+     );
+
+     if (!allowed) {
+       // This shouldn't happen since we check before queuing, but handle it gracefully
+       getConfig().LOGGER.warn(
+         `Job ${job.id} still rate limited after delay, skipping`
+       );
        return;
      }

+     await RateLimitUtils.recordRequest(connectionId, provider);
+
      try {
        getConfig().LOGGER.info(
          `Executing HTTP request: ${method} ${url} for ${provider}`
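
Note: the delay computed in handleRateLimitAndQueue above is just the time until the oldest timestamp in the current window expires, floored at one second. A standalone sketch of that calculation for reference (the function name is illustrative and not part of the package; it assumes the stored timestamps are oldest-first, which is how recordRequest appends them):

    // Illustrative only - mirrors the delay logic in handleRateLimitAndQueue.
    function computeQueueDelay(
      timestamps: number[], // raw timestamps under rate_limit:<provider>:<connectionId>
      windowMs = 60000,
      now = Date.now()
    ): number {
      const windowStart = now - windowMs;
      const recent = timestamps.filter((t) => t > windowStart);
      // A slot frees up when the oldest request leaves the window; otherwise retry shortly.
      const nextAvailableTime =
        recent.length > 0 ? recent[0] + windowMs : now + 1000;
      return Math.max(nextAvailableTime - now, 1000); // at least 1s delay
    }

    // Example: oldest request was 50s ago in a 60s window -> job delayed ~10s.
    console.log(computeQueueDelay([Date.now() - 50_000, Date.now() - 10_000]));
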
@@ -114,18 +166,20 @@ export class HybridHttpQueue {
      method: string;
      url: string;
      body?: any;
+     params?: Record<string, any>;
      headers?: Record<string, string>;
      queueOptions?: {
        connectionId: string;
        connectionProvider: string;
        microservice: string;
      };
-   }): Promise<any> {
-     const { method, url, body, headers, queueOptions } = options;
+   }): Promise<IQueueResponse> {
+     const { method, url, body, params, headers, queueOptions } = options;
      // Create HttpCallOption object
      const httpCallOption: HttpCallOption = {
        headers,
        body,
+       params,
        queueOptions,
      };
      // Call handleRequest with the constructed parameters
@@ -136,48 +190,72 @@ export class HybridHttpQueue {
      url: string,
      method: string,
      options: HttpCallOption
-   ): Promise<any> {
+   ): Promise<IQueueResponse> {
      const { connectionId, provider, microservice } =
        JobUtils.extractConnectionDetails(options);
-     const queueKey = QueueUtils.getQueueKey(
-       microservice,
+
+     // Check rate limit first
+     const allowed = await RateLimitUtils.isRateLimitAllowed(
        connectionId,
-       provider
+       provider,
+       this.rateLimitConfigs
      );

+     if (!allowed) {
+       // Rate limited - queue the request and return immediate response
+       return this.handleRateLimitAndQueue(url, method, options);
+     }
+
+     // Not rate limited - process immediately
      getConfig().LOGGER.info(
-       `Queueing: ${method} ${url} -> ${provider} [${connectionId}]`
+       `Processing immediately: ${method} ${url} -> ${provider} [${connectionId}]`
      );

-     QueueUtils.getOrCreateWorker(
-       queueKey,
-       this.workers,
-       this.processHttpRequest.bind(this),
-       this.jobResults
-     );
-     const queue = QueueUtils.getOrCreateQueue(queueKey, this.queues);
+     try {
+       // Record the request first
+       await RateLimitUtils.recordRequest(connectionId, provider);

-     const job = await queue.add(
-       "http-request",
-       {
-         microservice,
-         connectionId,
-         provider,
-         url,
-         method,
-         options,
-         timestamp: Date.now(),
-       },
-       {
-         attempts: 3,
-         backoff: { type: "exponential", delay: 5000 },
-         removeOnComplete: { age: 300, count: 1 },
-         removeOnFail: { age: 300, count: 1 },
-       }
-     );
+       // Execute the HTTP request
+       const response = await axios({
+         method: method.toLowerCase(),
+         url: url,
+         headers: options.headers || {},
+         timeout: 30000,
+         ...(options.body && { data: options.body }),
+         ...(options.params && { params: options.params }),
+       });
+
+       getConfig().LOGGER.info(
+         `HTTP request successful: ${method} ${url} for ${provider}`
+       );

-     getConfig().LOGGER.info(`Job ${job.id} queued, waiting for completion...`);
-     return JobUtils.waitForJobCompletion(job, queueKey, this.jobResults);
+       return {
+         success: true,
+         data: response.data,
+         queued: false,
+       };
+     } catch (error: any) {
+       getConfig().LOGGER.error(`HTTP request failed: ${error.message}`);
+
+       await publishAudit({
+         eventType: "http.request.error",
+         properties: {
+           connectionId,
+           provider,
+           endpoint: url,
+           method,
+           timestamp: Date.now(),
+           reason: "execution_error",
+           errorMessage: error.message,
+         },
+       });
+
+       return {
+         success: false,
+         error: `HTTP request failed: ${error.message}`,
+         queued: false,
+       };
+     }
    }

    async shutdown(): Promise<void> {
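
Note: with these changes handleRequest no longer blocks on job completion; it either executes the call inline or returns a queued acknowledgement. A rough caller-side sketch of handling the new IQueueResponse (the import path, the typedi Container usage and the Sensibo endpoint are illustrative, not prescribed by the package):

    import { Container } from "typedi";
    import { QueueService } from "dt-common-device"; // illustrative import path

    async function listPods(connectionId: string) {
      const queue = Container.get(QueueService);

      const result = await queue.request({
        method: "GET",
        url: "https://home.sensibo.com/api/v2/users/me/pods", // example endpoint
        params: { apiKey: process.env.SENSIBO_API_KEY },
        queueOptions: {
          connectionId,
          connectionProvider: "Sensibo",
          microservice: "device",
        },
      });

      if (result.queued) {
        // Rate limited: the request was deferred to a delayed BullMQ job.
        return { deferred: true, jobId: result.jobId, eta: result.estimatedProcessingTime };
      }
      if (!result.success) {
        throw new Error(result.error);
      }
      return result.data; // executed immediately
    }
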

package/src/queue/interfaces/IHybridHttpQueue.ts

@@ -1,23 +1,25 @@
  import { HttpCallOption } from "../types/http.types";
+ import { IQueueResponse } from "./IJobResult";

  export interface IHybridHttpQueue {
    request(options: {
      method: string;
      url: string;
      body?: any;
+     params?: Record<string, any>;
      headers?: Record<string, string>;
      queueOptions?: {
        connectionId: string;
        connectionProvider: string;
        microservice: string;
      };
-   }): Promise<any>;
+   }): Promise<IQueueResponse>;

    handleRequest(
      url: string,
      method: string,
      options: HttpCallOption
-   ): Promise<any>;
+   ): Promise<IQueueResponse>;

    shutdown(): Promise<void>;
  }

package/src/queue/interfaces/IJobResult.ts

@@ -4,3 +4,12 @@ export interface IJobResult {
    resolved: boolean;
    timestamp: number;
  }
+
+ export interface IQueueResponse {
+   success: boolean;
+   data?: any;
+   error?: string;
+   queued?: boolean;
+   estimatedProcessingTime?: number;
+   jobId?: string;
+ }
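
Note: IQueueResponse covers three cases: immediate success (data set), immediate failure (error set) and a queued acknowledgement (jobId and estimatedProcessingTime set). A small hypothetical type guard, not shipped by the package, that a consumer could use to narrow the immediate-success case:

    import type { IQueueResponse } from "dt-common-device"; // illustrative import path

    function isImmediateSuccess(
      res: IQueueResponse
    ): res is IQueueResponse & { success: true; data: unknown } {
      return res.success && !res.queued && res.data !== undefined;
    }
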

package/src/queue/services/QueueService.ts

@@ -1,6 +1,6 @@
  import { Service } from "typedi";
  import { HybridHttpQueue } from "../entities/HybridHttpQueue";
- import { IHybridHttpQueue } from "../interfaces";
+ import { IHybridHttpQueue, IQueueResponse } from "../interfaces";
  import { HttpCallOption } from "../types/http.types";

  @Service()
@@ -15,13 +15,14 @@ export class QueueService implements IHybridHttpQueue {
      method: string;
      url: string;
      body?: any;
+     params?: Record<string, any>;
      headers?: Record<string, string>;
      queueOptions?: {
        connectionId: string;
        connectionProvider: string;
        microservice: string;
      };
-   }): Promise<any> {
+   }): Promise<IQueueResponse> {
      return this.hybridQueue.request(options);
    }

@@ -29,7 +30,7 @@
      url: string,
      method: string,
      options: HttpCallOption
-   ): Promise<any> {
+   ): Promise<IQueueResponse> {
      return this.hybridQueue.handleRequest(url, method, options);
    }


package/src/queue/types/http.types.ts

@@ -13,6 +13,7 @@ export interface HttpRequestOptions {
    method: string;
    url: string;
    body?: any;
+   params?: Record<string, any>;
    headers?: Record<string, string>;
    queueOptions?: {
      connectionId: string;

package/src/queue/types/queue.types.ts

@@ -8,7 +8,6 @@ export interface QueueConfig {

  export interface JobOptions {
    attempts: number;
-   backoff: { type: string; delay: number };
    removeOnComplete: { age: number; count: number };
    removeOnFail: { age: number; count: number };
  }

package/src/queue/utils/queueUtils.ts

@@ -1,4 +1,5 @@
  import { getConfig } from "../../config/config";
+ import { getRedisClient } from "../../db/redis";

  export class QueueUtils {
    static getQueueKey(
@@ -16,7 +17,7 @@ export class QueueUtils {
        .set(
          queueKey,
          new (require("bullmq").Queue)(queueKey, {
-           connection: require("../../db/redis").getRedisClient(),
+           connection: getRedisClient(),
          })
        )
        .get(queueKey)!
@@ -33,7 +34,7 @@

      const { Worker } = require("bullmq");
      const worker = new Worker(queueKey, processFunction, {
-       connection: require("../../db/redis").getRedisClient(),
+       connection: getRedisClient(),
        concurrency: 1,
        removeOnComplete: { age: 300, count: 1 },
        removeOnFail: { age: 300, count: 1 },
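
Note: the two hunks above only swap the inline require for the shared getRedisClient import; the underlying pattern is a BullMQ queue and worker sharing one Redis connection. A minimal sketch of that pattern, assuming an ioredis client like the one the package's db/redis helper presumably returns (the queue name and processor here are illustrative):

    import { Queue, Worker } from "bullmq";
    import IORedis from "ioredis"; // stand-in for the package's ../../db/redis helper

    // BullMQ workers require maxRetriesPerRequest: null on the ioredis connection.
    const connection = new IORedis({ maxRetriesPerRequest: null });

    const queue = new Queue("http-request-demo", { connection });

    const worker = new Worker(
      "http-request-demo",
      async (job) => {
        // process job.data here and return a result
        return { ok: true, received: job.data };
      },
      { connection, concurrency: 1 }
    );

    worker.on("failed", (_job, err) => console.error("job failed:", err));
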

package/src/queue/utils/rateLimit.utils.ts

@@ -48,11 +48,84 @@ export class RateLimitUtils {

      // Configure rate limits for different providers
      configs.set("Sensibo", {
-       maxRequests: 40,
+       maxRequests: 5,
        windowMs: 60000,
        provider: "Sensibo",
      });

      return configs;
    }
+
+   static async isRateLimitAllowed(
+     connectionId: string,
+     provider: string,
+     rateLimitConfigs: Map<string, IRateLimitConfig>
+   ): Promise<boolean> {
+     const config = rateLimitConfigs.get(provider);
+     if (!config) {
+       getConfig().LOGGER.warn(
+         `No rate limit config found for provider: ${provider}`
+       );
+       return true;
+     }
+
+     const key = `rate_limit:${provider}:${connectionId}`;
+     const now = Date.now();
+     const windowStart = now - config.windowMs;
+
+     try {
+       const data = await this.redisClient.get(key);
+       const requests = data
+         ? JSON.parse(data).filter((t: number) => t > windowStart)
+         : [];
+
+       return requests.length < config.maxRequests;
+     } catch (error) {
+       getConfig().LOGGER.error(`Rate limit check error: ${error}`);
+       return true;
+     }
+   }
+   static async recordRequest(
+     connectionId: string,
+     provider: string
+   ): Promise<void> {
+     const config = this.getRateLimitConfig(provider);
+     if (!config) return;
+
+     const key = `rate_limit:${provider}:${connectionId}`;
+     const now = Date.now();
+     const windowStart = now - config.windowMs;
+
+     try {
+       const data = await this.redisClient.get(key);
+       const requests = data
+         ? JSON.parse(data).filter((t: number) => t > windowStart)
+         : [];
+
+       requests.push(now);
+       await this.redisClient.setex(
+         key,
+         Math.ceil(config.windowMs / 1000),
+         JSON.stringify(requests)
+       );
+     } catch (error) {
+       getConfig().LOGGER.error(`Rate limit record error: ${error}`);
+     }
+   }
+
+   static async getRawRequestTimestamps(key: string): Promise<number[]> {
+     try {
+       const data = await this.redisClient.get(key);
+       return data ? JSON.parse(data) : [];
+     } catch (error) {
+       getConfig().LOGGER.error(
+         `Error fetching raw request timestamps: ${error}`
+       );
+       return [];
+     }
+   }
+
+   static getRateLimitConfig(provider: string): IRateLimitConfig | undefined {
+     return this.initializeRateLimitConfigs().get(provider);
+   }
  }
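
Note: the new helpers implement a sliding-window limiter keyed by rate_limit:<provider>:<connectionId>, with the timestamp list persisted in Redis (setex with the window length as TTL). An in-memory sketch of the same algorithm, for illustration only (the package's actual implementation is the Redis-backed code above):

    // In-memory equivalent of isRateLimitAllowed/recordRequest, for illustration.
    class SlidingWindowLimiter {
      private requests = new Map<string, number[]>();

      constructor(private maxRequests: number, private windowMs: number) {}

      isAllowed(key: string, now = Date.now()): boolean {
        const recent = (this.requests.get(key) ?? []).filter(
          (t) => t > now - this.windowMs
        );
        this.requests.set(key, recent);
        return recent.length < this.maxRequests;
      }

      record(key: string, now = Date.now()): void {
        const recent = (this.requests.get(key) ?? []).filter(
          (t) => t > now - this.windowMs
        );
        recent.push(now);
        this.requests.set(key, recent);
      }
    }

    // e.g. the tightened Sensibo limit: 5 requests per 60s window
    const limiter = new SlidingWindowLimiter(5, 60_000);
    if (limiter.isAllowed("rate_limit:Sensibo:conn-123")) {
      limiter.record("rate_limit:Sensibo:conn-123");
    }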