dt-common-device 3.0.0 → 3.0.1
This diff shows the changes between publicly released versions of the package as published to its registry, and is provided for informational purposes only.
- package/README.md +47 -15
- package/dist/device/cloud/interface.d.ts +101 -0
- package/dist/device/cloud/interface.js +3 -0
- package/dist/device/cloud/interfaces/IDeviceConnectionService.d.ts +7 -0
- package/dist/device/cloud/interfaces/IDeviceConnectionService.js +3 -0
- package/dist/device/cloud/interfaces/IDevicesService.d.ts +9 -0
- package/dist/device/cloud/interfaces/IDevicesService.js +2 -0
- package/dist/device/cloud/services/Device.service.d.ts +39 -0
- package/dist/device/cloud/services/Device.service.js +9 -0
- package/dist/device/cloud/services/DeviceCloudService.d.ts +42 -0
- package/dist/device/cloud/services/DeviceCloudService.js +59 -0
- package/dist/device/cloud/services/DeviceHub.service.d.ts +3 -0
- package/dist/device/cloud/services/DeviceHub.service.js +6 -0
- package/dist/device/cloud/services/Hub.service.d.ts +25 -0
- package/dist/device/cloud/services/Hub.service.js +9 -0
- package/dist/device/cloud/services/SmartThingsDeviceService.d.ts +38 -0
- package/dist/device/cloud/services/SmartThingsDeviceService.js +52 -0
- package/dist/device/index.d.ts +4 -0
- package/dist/device/index.js +20 -0
- package/dist/device/local/events/EventHandler.js +6 -6
- package/dist/device/local/events/Events.d.ts +12 -33
- package/dist/device/local/events/Events.js +12 -33
- package/dist/device/local/interface.d.ts +0 -0
- package/dist/device/local/interface.js +1 -0
- package/dist/device/local/services/DeviceHub.service.d.ts +11 -0
- package/dist/device/local/services/DeviceHub.service.js +40 -0
- package/dist/queue/entities/HybridHttpQueue.d.ts +4 -3
- package/dist/queue/entities/HybridHttpQueue.js +95 -43
- package/dist/queue/interfaces/IHybridHttpQueue.d.ts +3 -2
- package/dist/queue/interfaces/IJobResult.d.ts +8 -0
- package/dist/queue/services/QueueService.d.ts +3 -3
- package/dist/queue/types/queue.types.d.ts +0 -4
- package/dist/queue/utils/queueUtils.js +3 -2
- package/dist/queue/utils/rateLimit.utils.d.ts +4 -0
- package/dist/queue/utils/rateLimit.utils.js +54 -1
- package/package.json +1 -1
- package/src/queue/entities/HybridHttpQueue.ts +140 -64
- package/src/queue/interfaces/IHybridHttpQueue.ts +3 -2
- package/src/queue/interfaces/IJobResult.ts +9 -0
- package/src/queue/services/QueueService.ts +3 -3
- package/src/queue/types/queue.types.ts +0 -1
- package/src/queue/utils/queueUtils.ts +3 -2
- package/src/queue/utils/rateLimit.utils.ts +74 -1
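The headline change in 3.0.1 is in the queue layer: instead of throwing `Error("Rate limit exceeded")`, `HybridHttpQueue` now defers rate-limited calls to BullMQ and resolves every call with a structured `IQueueResponse`. The new `src/queue/interfaces/IJobResult.ts` is not expanded in this diff, but judging from how `IQueueResponse` is consumed in the hunks below, its shape is presumably along these lines (field names inferred from usage, not copied from the package):

```ts
// Inferred from usage in HybridHttpQueue.ts below; the actual declaration lives in
// package/src/queue/interfaces/IJobResult.ts, which this diff does not expand.
export interface IQueueResponse {
  success: boolean; // the call executed (or was queued) without error
  queued: boolean; // true when the call was deferred to BullMQ instead of run inline
  data?: any; // upstream response body when executed immediately
  error?: string; // error message when immediate execution failed
  jobId?: string; // BullMQ job id when queued
  estimatedProcessingTime?: number; // epoch ms at which a queued job is expected to run
}
```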
package/src/queue/entities/HybridHttpQueue.ts

```diff
@@ -6,7 +6,13 @@ import { Service } from "typedi";
 import { RateLimitUtils } from "../utils/rateLimit.utils";
 import { JobUtils } from "../utils/jobUtils";
 import { QueueUtils } from "../utils/queueUtils";
-import {
+import {
+  IRateLimitConfig,
+  IJobResult,
+  IHttpRequestJob,
+  IQueueResponse,
+} from "../interfaces";
+import { Queue } from "bullmq";
 
 @Service()
 export class HybridHttpQueue {
```
```diff
@@ -19,56 +25,102 @@ export class HybridHttpQueue {
     this.rateLimitConfigs = RateLimitUtils.initializeRateLimitConfigs();
   }
 
-  private async
-
-
-
+  private async handleRateLimitAndQueue(
+    url: string,
+    method: string,
+    options: HttpCallOption
+  ): Promise<IQueueResponse> {
+    const { connectionId, provider, microservice } =
+      JobUtils.extractConnectionDetails(options);
+    const key = `rate_limit:${provider}:${connectionId}`;
+    const config = this.rateLimitConfigs.get(provider);
+    const windowMs = config?.windowMs ?? 60000;
+
+    const timestamps = await RateLimitUtils.getRawRequestTimestamps(key);
+    const now = Date.now();
+    const windowStart = now - windowMs;
+    const recentRequests = timestamps.filter((t) => t > windowStart);
+    const nextAvailableTime =
+      recentRequests.length > 0 ? recentRequests[0] + windowMs : now + 1000;
+    const delay = Math.max(nextAvailableTime - now, 1000); // at least 1s delay
+
+    // Create job data
+    const jobData = {
+      microservice,
+      connectionId,
+      provider,
+      url,
+      method,
+      options,
+      timestamp: Date.now(),
+    };
+
+    // Add job to queue with delay (background processing)
+    const queueKey = QueueUtils.getQueueKey(
+      microservice,
+      connectionId,
+      provider
+    );
+    const queue = QueueUtils.getOrCreateQueue(queueKey, this.queues);
+
+    QueueUtils.getOrCreateWorker(
+      queueKey,
+      this.workers,
+      this.processHttpRequest.bind(this),
+      this.jobResults
+    );
+
+    const job = await queue.add("http-request", jobData, {
+      delay,
+      attempts: 1,
+      removeOnComplete: { age: 300, count: 1 },
+      removeOnFail: { age: 300, count: 1 },
+    });
 
     await publishAudit({
-      eventType:
-        ? "http.request.failed"
-        : "http.request.rateLimitExceeded",
+      eventType: "http.request.rateLimitQueued",
       properties: {
+        resource: microservice,
         connectionId,
         provider,
         endpoint: url,
         method,
         timestamp: Date.now(),
         queueId: job.id,
-        reason:
-
-
-        maxRetries: 3,
-      }),
+        reason: "rate_limit_exceeded_queued",
+        delay,
+        estimatedProcessingTime: now + delay,
       },
     });
 
-
-
-
-
-
-
-    }
-
-    throw new Error("Rate limit exceeded");
+    // Return immediate response to controller
+    return {
+      success: true,
+      queued: true,
+      estimatedProcessingTime: now + delay,
+      jobId: job.id,
+    };
   }
 
   private async processHttpRequest(job: any): Promise<any> {
     const { connectionId, provider, url, method, options } = job.data;
 
-
-
-
-
-
-
-
-
-
+    const allowed = await RateLimitUtils.isRateLimitAllowed(
+      connectionId,
+      provider,
+      this.rateLimitConfigs
+    );
+
+    if (!allowed) {
+      // This shouldn't happen since we check before queuing, but handle it gracefully
+      getConfig().LOGGER.warn(
+        `Job ${job.id} still rate limited after delay, skipping`
+      );
       return;
     }
 
+    await RateLimitUtils.recordRequest(connectionId, provider);
+
     try {
       getConfig().LOGGER.info(
         `Executing HTTP request: ${method} ${url} for ${provider}`
```
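To make the queuing delay in the hunk above concrete: the job is scheduled for the moment the oldest request inside the window expires, never less than one second out. A standalone restatement of that arithmetic (illustrative helper, not a package export):

```ts
// Restates the delay math from handleRateLimitAndQueue above; not a package export.
function computeQueueDelay(
  timestamps: number[], // raw timestamps stored under rate_limit:<provider>:<connectionId>
  windowMs: number, // provider window, defaulting to 60000 when no config exists
  now: number = Date.now()
): number {
  const windowStart = now - windowMs;
  const recentRequests = timestamps.filter((t) => t > windowStart);
  const nextAvailableTime =
    recentRequests.length > 0 ? recentRequests[0] + windowMs : now + 1000;
  return Math.max(nextAvailableTime - now, 1000); // at least 1s delay
}

// Example: a 60s window whose oldest recent request was 10s ago
// -> the queued job is delayed roughly 50s.
// computeQueueDelay([Date.now() - 10_000], 60_000) ≈ 50_000
```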
```diff
@@ -120,7 +172,7 @@ export class HybridHttpQueue {
       connectionProvider: string;
       microservice: string;
     };
-  }): Promise<
+  }): Promise<IQueueResponse> {
     const { method, url, body, headers, queueOptions } = options;
     // Create HttpCallOption object
     const httpCallOption: HttpCallOption = {
```
```diff
@@ -136,48 +188,72 @@ export class HybridHttpQueue {
     url: string,
     method: string,
     options: HttpCallOption
-  ): Promise<
+  ): Promise<IQueueResponse> {
     const { connectionId, provider, microservice } =
       JobUtils.extractConnectionDetails(options);
-
-
+
+    // Check rate limit first
+    const allowed = await RateLimitUtils.isRateLimitAllowed(
       connectionId,
-      provider
+      provider,
+      this.rateLimitConfigs
     );
 
+    if (!allowed) {
+      // Rate limited - queue the request and return immediate response
+      return this.handleRateLimitAndQueue(url, method, options);
+    }
+
+    // Not rate limited - process immediately
     getConfig().LOGGER.info(
-      `
+      `Processing immediately: ${method} ${url} -> ${provider} [${connectionId}]`
     );
 
-
-
-
-      this.processHttpRequest.bind(this),
-      this.jobResults
-    );
-    const queue = QueueUtils.getOrCreateQueue(queueKey, this.queues);
+    try {
+      // Record the request first
+      await RateLimitUtils.recordRequest(connectionId, provider);
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-        backoff: { type: "exponential", delay: 5000 },
-        removeOnComplete: { age: 300, count: 1 },
-        removeOnFail: { age: 300, count: 1 },
-      }
-    );
+      // Execute the HTTP request
+      const response = await axios({
+        method: method.toLowerCase(),
+        url: url,
+        headers: options.headers || {},
+        timeout: 30000,
+        ...(options.body && { data: options.body }),
+        ...(options.params && { params: options.params }),
+      });
+
+      getConfig().LOGGER.info(
+        `HTTP request successful: ${method} ${url} for ${provider}`
+      );
 
-
-
+      return {
+        success: true,
+        data: response.data,
+        queued: false,
+      };
+    } catch (error: any) {
+      getConfig().LOGGER.error(`HTTP request failed: ${error.message}`);
+
+      await publishAudit({
+        eventType: "http.request.error",
+        properties: {
+          connectionId,
+          provider,
+          endpoint: url,
+          method,
+          timestamp: Date.now(),
+          reason: "execution_error",
+          errorMessage: error.message,
+        },
+      });
+
+      return {
+        success: false,
+        error: `HTTP request failed: ${error.message}`,
+        queued: false,
+      };
+    }
   }
 
   async shutdown(): Promise<void> {
```
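Since `handleRequest` now resolves with an `IQueueResponse` instead of throwing when a provider is rate limited, callers have to branch on the `queued` flag. A hypothetical consumer is sketched below; only the `handleRequest` signature comes from the diff, the surrounding handler and its status codes do not:

```ts
// Hypothetical caller; "queue" is any IHybridHttpQueue implementation (e.g. QueueService),
// and the result shape follows the IQueueResponse sketch near the top of this diff.
async function forwardToProvider(
  queue: { handleRequest(url: string, method: string, options: any): Promise<any> },
  url: string,
  options: any
) {
  const result = await queue.handleRequest(url, "GET", options);

  if (result.queued) {
    // Rate limited: the call was deferred to BullMQ. Respond 202-style and let
    // the client retry after estimatedProcessingTime or track jobId.
    return {
      status: 202,
      body: { jobId: result.jobId, retryAt: result.estimatedProcessingTime },
    };
  }

  if (!result.success) {
    // Executed immediately, but the upstream HTTP call failed.
    return { status: 502, body: { error: result.error } };
  }

  // Executed immediately and succeeded.
  return { status: 200, body: result.data };
}
```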
package/src/queue/interfaces/IHybridHttpQueue.ts

```diff
@@ -1,4 +1,5 @@
 import { HttpCallOption } from "../types/http.types";
+import { IQueueResponse } from "./IJobResult";
 
 export interface IHybridHttpQueue {
   request(options: {
@@ -11,13 +12,13 @@ export interface IHybridHttpQueue {
       connectionProvider: string;
       microservice: string;
     };
-  }): Promise<
+  }): Promise<IQueueResponse>;
 
   handleRequest(
     url: string,
     method: string,
     options: HttpCallOption
-  ): Promise<
+  ): Promise<IQueueResponse>;
 
   shutdown(): Promise<void>;
 }
```
package/src/queue/services/QueueService.ts

```diff
@@ -1,6 +1,6 @@
 import { Service } from "typedi";
 import { HybridHttpQueue } from "../entities/HybridHttpQueue";
-import { IHybridHttpQueue } from "../interfaces";
+import { IHybridHttpQueue, IQueueResponse } from "../interfaces";
 import { HttpCallOption } from "../types/http.types";
 
 @Service()
@@ -21,7 +21,7 @@ export class QueueService implements IHybridHttpQueue {
       connectionProvider: string;
       microservice: string;
     };
-  }): Promise<
+  }): Promise<IQueueResponse> {
     return this.hybridQueue.request(options);
   }
 
@@ -29,7 +29,7 @@ export class QueueService implements IHybridHttpQueue {
     url: string,
     method: string,
     options: HttpCallOption
-  ): Promise<
+  ): Promise<IQueueResponse> {
     return this.hybridQueue.handleRequest(url, method, options);
   }
 
```
package/src/queue/utils/queueUtils.ts

```diff
@@ -1,4 +1,5 @@
 import { getConfig } from "../../config/config";
+import { getRedisClient } from "../../db/redis";
 
 export class QueueUtils {
   static getQueueKey(
@@ -16,7 +17,7 @@ export class QueueUtils {
       .set(
         queueKey,
         new (require("bullmq").Queue)(queueKey, {
-          connection:
+          connection: getRedisClient(),
        })
      )
      .get(queueKey)!
@@ -33,7 +34,7 @@ export class QueueUtils {
 
    const { Worker } = require("bullmq");
    const worker = new Worker(queueKey, processFunction, {
-      connection:
+      connection: getRedisClient(),
      concurrency: 1,
      removeOnComplete: { age: 300, count: 1 },
      removeOnFail: { age: 300, count: 1 },
```
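Both the queue and the worker now take their connection from `getRedisClient()` in `../../db/redis`, a module this diff does not show. Assuming it hands out a shared ioredis client, a minimal sketch could look like the following; this is hypothetical, not the package's actual implementation:

```ts
// Hypothetical sketch of ../../db/redis (not shown in this diff). BullMQ expects
// the connection it is given to use maxRetriesPerRequest: null.
import IORedis from "ioredis";

let client: IORedis | undefined;

export function getRedisClient(): IORedis {
  if (!client) {
    client = new IORedis(process.env.REDIS_URL ?? "redis://localhost:6379", {
      maxRetriesPerRequest: null,
    });
  }
  return client;
}
```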
package/src/queue/utils/rateLimit.utils.ts

```diff
@@ -48,11 +48,84 @@ export class RateLimitUtils {
 
     // Configure rate limits for different providers
     configs.set("Sensibo", {
-      maxRequests:
+      maxRequests: 5,
       windowMs: 60000,
       provider: "Sensibo",
     });
 
     return configs;
   }
+
+  static async isRateLimitAllowed(
+    connectionId: string,
+    provider: string,
+    rateLimitConfigs: Map<string, IRateLimitConfig>
+  ): Promise<boolean> {
+    const config = rateLimitConfigs.get(provider);
+    if (!config) {
+      getConfig().LOGGER.warn(
+        `No rate limit config found for provider: ${provider}`
+      );
+      return true;
+    }
+
+    const key = `rate_limit:${provider}:${connectionId}`;
+    const now = Date.now();
+    const windowStart = now - config.windowMs;
+
+    try {
+      const data = await this.redisClient.get(key);
+      const requests = data
+        ? JSON.parse(data).filter((t: number) => t > windowStart)
+        : [];
+
+      return requests.length < config.maxRequests;
+    } catch (error) {
+      getConfig().LOGGER.error(`Rate limit check error: ${error}`);
+      return true;
+    }
+  }
+  static async recordRequest(
+    connectionId: string,
+    provider: string
+  ): Promise<void> {
+    const config = this.getRateLimitConfig(provider);
+    if (!config) return;
+
+    const key = `rate_limit:${provider}:${connectionId}`;
+    const now = Date.now();
+    const windowStart = now - config.windowMs;
+
+    try {
+      const data = await this.redisClient.get(key);
+      const requests = data
+        ? JSON.parse(data).filter((t: number) => t > windowStart)
+        : [];
+
+      requests.push(now);
+      await this.redisClient.setex(
+        key,
+        Math.ceil(config.windowMs / 1000),
+        JSON.stringify(requests)
+      );
+    } catch (error) {
+      getConfig().LOGGER.error(`Rate limit record error: ${error}`);
+    }
+  }
+
+  static async getRawRequestTimestamps(key: string): Promise<number[]> {
+    try {
+      const data = await this.redisClient.get(key);
+      return data ? JSON.parse(data) : [];
+    } catch (error) {
+      getConfig().LOGGER.error(
+        `Error fetching raw request timestamps: ${error}`
+      );
+      return [];
+    }
+  }
+
+  static getRateLimitConfig(provider: string): IRateLimitConfig | undefined {
+    return this.initializeRateLimitConfigs().get(provider);
+  }
 }
```
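The new `RateLimitUtils` methods implement a sliding-window counter: timestamps for the last `windowMs` are kept as a JSON array under `rate_limit:<provider>:<connectionId>`, a call is allowed while the array holds fewer than `maxRequests` entries, and `recordRequest` appends the current time with a TTL of one window. Below is an in-memory walkthrough of that logic with the Sensibo config (5 requests per 60 s); the helper is illustrative only, the real code goes through Redis `get`/`setex`:

```ts
// In-memory illustration of the sliding-window check; RateLimitUtils stores the
// same timestamp array in Redis instead of a local variable.
const maxRequests = 5; // Sensibo config from initializeRateLimitConfigs()
const windowMs = 60_000;
let timestamps: number[] = [];

function tryRequest(now: number): boolean {
  const windowStart = now - windowMs;
  timestamps = timestamps.filter((t) => t > windowStart); // drop expired entries
  if (timestamps.length >= maxRequests) return false; // isRateLimitAllowed -> false
  timestamps.push(now); // recordRequest equivalent
  return true;
}

// Five calls inside one minute pass, the sixth is rejected until the oldest
// timestamp ages out of the 60-second window.
const t0 = Date.now();
console.log([0, 1, 2, 3, 4, 5].map((i) => tryRequest(t0 + i * 1000)));
// -> [ true, true, true, true, true, false ]
```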