@sentio/runtime 2.59.0-rc.9 → 2.59.1-rc.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/{chunk-J23VJQSB.js → chunk-4UYFH4YB.js} +4387 -4321
- package/lib/{chunk-J23VJQSB.js.map → chunk-4UYFH4YB.js.map} +1 -1
- package/lib/{chunk-3T4PWGQU.js → chunk-LCS6SRJY.js} +1036 -122
- package/lib/chunk-LCS6SRJY.js.map +1 -0
- package/lib/{chunk-GD7EL7GH.js → chunk-U5UZYRQ4.js} +2 -2
- package/lib/index.d.ts +193 -3
- package/lib/index.js +4 -2
- package/lib/index.js.map +1 -1
- package/lib/processor-runner.js +116 -94
- package/lib/processor-runner.js.map +1 -1
- package/lib/service-worker.d.ts +5 -6
- package/lib/service-worker.js +49 -47
- package/lib/service-worker.js.map +1 -1
- package/lib/test-processor.test.js.map +1 -1
- package/package.json +1 -1
- package/src/db-context.ts +3 -5
- package/src/full-service.ts +59 -13
- package/src/gen/processor/protos/processor.ts +1077 -159
- package/src/gen/service/common/protos/common.ts +17 -0
- package/src/metrics.ts +8 -4
- package/src/plugin.ts +24 -0
- package/src/processor-runner.ts +10 -3
- package/src/service-manager.ts +72 -106
- package/src/service-worker.ts +49 -60
- package/src/service.ts +89 -46
- package/src/utils.ts +20 -4
- package/lib/chunk-3T4PWGQU.js.map +0 -1
- package/lib/{chunk-GD7EL7GH.js.map → chunk-U5UZYRQ4.js.map} +0 -0
package/src/gen/service/common/protos/common.ts
CHANGED
@@ -1592,6 +1592,7 @@ export interface Account {
   address: string;
   paymentMethod: PayMethod;
   usageOverCapLimit: string;
+  status: string;
 }
 
 export interface ImportedProject {
@@ -10316,6 +10317,7 @@ function createBaseAccount(): Account {
     address: "",
     paymentMethod: 0,
     usageOverCapLimit: "",
+    status: "",
   };
 }
 
@@ -10348,6 +10350,9 @@ export const Account = {
     if (message.usageOverCapLimit !== "") {
      writer.uint32(90).string(message.usageOverCapLimit);
     }
+    if (message.status !== "") {
+      writer.uint32(98).string(message.status);
+    }
     return writer;
   },
 
@@ -10421,6 +10426,13 @@ export const Account = {
 
           message.usageOverCapLimit = reader.string();
           continue;
+        case 12:
+          if (tag !== 98) {
+            break;
+          }
+
+          message.status = reader.string();
+          continue;
       }
       if ((tag & 7) === 4 || tag === 0) {
         break;
@@ -10441,6 +10453,7 @@ export const Account = {
       address: isSet(object.address) ? globalThis.String(object.address) : "",
       paymentMethod: isSet(object.paymentMethod) ? payMethodFromJSON(object.paymentMethod) : 0,
       usageOverCapLimit: isSet(object.usageOverCapLimit) ? globalThis.String(object.usageOverCapLimit) : "",
+      status: isSet(object.status) ? globalThis.String(object.status) : "",
     };
   },
 
@@ -10473,6 +10486,9 @@ export const Account = {
     if (message.usageOverCapLimit !== "") {
       obj.usageOverCapLimit = message.usageOverCapLimit;
     }
+    if (message.status !== "") {
+      obj.status = message.status;
+    }
     return obj;
   },
 
@@ -10490,6 +10506,7 @@ export const Account = {
     message.address = object.address ?? "";
     message.paymentMethod = object.paymentMethod ?? 0;
     message.usageOverCapLimit = object.usageOverCapLimit ?? "";
+    message.status = object.status ?? "";
     return message;
   },
 };
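The net effect of this file's change is a new string status field on the Account message, wired through the generated codec as field number 12 (wire tag 98). A minimal round-trip sketch, assuming the generated Account export is reachable from the package's generated protos; the import path and the 'ACTIVE' value are illustrative, not taken from this diff:

import { Account } from './gen/service/common/protos/common.js'

const account = Account.fromPartial({
  address: '0xabc',
  usageOverCapLimit: '0',
  status: 'ACTIVE' // new field, serialized with wire tag 98
})

const bytes = Account.encode(account).finish() // field 12 is emitted only when non-empty
const decoded = Account.decode(bytes)
console.log(decoded.status) // 'ACTIVE'; older payloads without field 12 decode to ''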
package/src/metrics.ts
CHANGED
@@ -77,25 +77,29 @@ export const dbMetrics = {
     get: new C('store_get_send'),
     upsert: new C('store_upsert_send'),
     list: new C('store_list_send'),
-    delete: new C('store_delete_send')
+    delete: new C('store_delete_send'),
+    update: new C('store_update_send')
   },
   recv_counts: {
     get: new C('store_get_recv'),
     upsert: new C('store_upsert_recv'),
     list: new C('store_list_recv'),
-    delete: new C('store_delete_recv')
+    delete: new C('store_delete_recv'),
+    update: new C('store_update_recv')
   },
   request_times: {
     get: new C('store_get_time'),
     upsert: new C('store_upsert_time'),
     list: new C('store_list_time'),
-    delete: new C('store_delete_time')
+    delete: new C('store_delete_time'),
+    update: new C('store_update_time')
   },
   request_errors: {
     get: new C('store_get_error'),
     upsert: new C('store_upsert_error'),
     list: new C('store_list_error'),
-    delete: new C('store_delete_error')
+    delete: new C('store_delete_error'),
+    update: new C('store_update_error')
   },
   batched_total_count: new C('batched_total_count'),
   batched_request_count: new C('batched_request_count'),
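The store metric groups gain an update counter alongside get/upsert/list/delete. A sketch of how a store update round trip might be recorded, assuming the first group shown above is named send_counts (its opening line falls outside the hunk) and that the counter wrapper C exposes the same add() method the process metrics use elsewhere in this diff:

import { dbMetrics } from './metrics.js'

function recordStoreUpdate(durationMs: number, failed: boolean) {
  dbMetrics.send_counts.update.add(1)       // update request sent to the store
  dbMetrics.recv_counts.update.add(1)       // response received
  dbMetrics.request_times.update.add(durationMs)
  if (failed) {
    dbMetrics.request_errors.update.add(1)
  }
}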
package/src/plugin.ts
CHANGED
@@ -5,6 +5,8 @@ import {
   PreprocessResult,
   ProcessConfigResponse,
   ProcessResult,
+  ProcessStreamResponse_Partitions,
+  ProcessStreamResponse_Partitions_Partition_SysValue,
   StartRequest
 } from '@sentio/protos'
 import { IStoreContext, StoreContext } from './db-context.js'
@@ -33,6 +35,20 @@ export abstract class Plugin {
     return PreprocessResult.create()
   }
 
+  async partition(request: DataBinding): Promise<ProcessStreamResponse_Partitions> {
+    return {
+      partitions: request.handlerIds.reduce(
+        (acc, id) => ({
+          ...acc,
+          [id]: {
+            sysValue: ProcessStreamResponse_Partitions_Partition_SysValue.UNRECOGNIZED
+          }
+        }),
+        {}
+      )
+    }
+  }
+
   /**
    * method used by action server only
    * @param port
@@ -104,6 +120,14 @@ export class PluginManager {
     })
   }
 
+  async partition(request: DataBinding): Promise<ProcessStreamResponse_Partitions> {
+    const plugin = this.typesToPlugin.get(request.handlerType)
+    if (!plugin) {
+      throw new Error(`No plugin for ${request.handlerType}`)
+    }
+    return plugin.partition(request)
+  }
+
   preprocessBinding(
     request: DataBinding,
     preprocessStore: { [k: string]: any },
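Plugin gains a default partition() hook and PluginManager routes a binding to the plugin registered for its handler type. A small sketch of calling the new hook through the manager; importing PluginManager from '@sentio/runtime' is an assumption here, while DataBinding and ProcessStreamResponse_Partitions come from '@sentio/protos' as in the diff:

import { DataBinding, ProcessStreamResponse_Partitions } from '@sentio/protos'
import { PluginManager } from '@sentio/runtime'

async function partitionFor(binding: DataBinding): Promise<ProcessStreamResponse_Partitions> {
  // Delegates to the plugin registered for binding.handlerType; unless that
  // plugin overrides partition(), the default shown above returns one entry per
  // handler id with sysValue = UNRECOGNIZED, i.e. no explicit partitioning.
  return PluginManager.INSTANCE.partition(binding)
}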
package/src/processor-runner.ts
CHANGED
@@ -49,7 +49,14 @@ export const optionDefinitions = [
   { name: 'debug', type: Boolean, defaultValue: false },
   { name: 'otlp-debug', type: Boolean, defaultValue: false },
   { name: 'start-action-server', type: Boolean, defaultValue: false },
-  { name: 'worker', type: Number, defaultValue: workerNum }
+  { name: 'worker', type: Number, defaultValue: workerNum },
+  { name: 'process-timeout', type: Number, defaultValue: 60 },
+  { name: 'worker-timeout', type: Number, defaultValue: 60 },
+  {
+    name: 'enable-partition',
+    type: Boolean,
+    defaultValue: process.env['SENTIO_ENABLE_BINDING_DATA_PARTITION'] === 'true'
+  }
 ]
 
 const options = commandLineArgs(optionDefinitions, { partial: true })
@@ -88,9 +95,9 @@ if (options['start-action-server']) {
     .use(errorDetailsServerMiddleware)
 
   if (options.worker > 1) {
-    baseService = new ServiceManager(
+    baseService = new ServiceManager(loader, options, server.shutdown)
   } else {
-    baseService = new ProcessorServiceImpl(loader, server.shutdown)
+    baseService = new ProcessorServiceImpl(loader, options, server.shutdown)
   }
 
   const service = new FullProcessorServiceImpl(baseService)
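The runner now accepts process-timeout, worker-timeout, and enable-partition options, the last defaulting from the SENTIO_ENABLE_BINDING_DATA_PARTITION environment variable. A sketch of how these flags resolve with command-line-args, mirroring the definitions above; the default values here are illustrative, and the real file defines additional options and derives the worker default from workerNum:

import commandLineArgs from 'command-line-args'

const optionDefinitions = [
  { name: 'worker', type: Number, defaultValue: 1 },
  { name: 'process-timeout', type: Number, defaultValue: 60 },
  { name: 'worker-timeout', type: Number, defaultValue: 60 },
  {
    name: 'enable-partition',
    type: Boolean,
    defaultValue: process.env['SENTIO_ENABLE_BINDING_DATA_PARTITION'] === 'true'
  }
]

// e.g. invoked as `node processor-runner.js --worker 4 --enable-partition ...`
const options = commandLineArgs(optionDefinitions, { partial: true })
console.log(options['worker-timeout'], options['enable-partition'])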
package/src/service-manager.ts
CHANGED
@@ -1,30 +1,21 @@
 import { CallContext } from 'nice-grpc'
 import { Piscina } from 'piscina'
 import {
-  DataBinding,
-  DBRequest,
-  DBResponse,
   DeepPartial,
   Empty,
-  HandlerType,
   ProcessConfigRequest,
   ProcessConfigResponse,
   ProcessResult,
   ProcessStreamRequest,
   ProcessStreamResponse,
+  ProcessStreamResponse_Partitions,
   StartRequest
 } from '@sentio/protos'
-
-import { IStoreContext } from './db-context.js'
 import { Subject } from 'rxjs'
 
-import { processMetrics } from './metrics.js'
 import { MessageChannel } from 'node:worker_threads'
 import { ProcessorServiceImpl } from './service.js'
 import { TemplateInstanceState } from './state.js'
-
-const { process_binding_count, process_binding_time, process_binding_error } = processMetrics
-
 ;(BigInt.prototype as any).toJSON = function () {
   return this.toString()
 }
@@ -34,11 +25,11 @@ export class ServiceManager extends ProcessorServiceImpl {
   private workerData: any = {}
 
   constructor(
-    readonly options: any,
     loader: () => Promise<any>,
+    readonly options: any,
     shutdownHandler?: () => void
   ) {
-    super(loader, shutdownHandler)
+    super(loader, options, shutdownHandler)
     this.workerData.options = options
   }
 
@@ -70,80 +61,76 @@ export class ServiceManager extends ProcessorServiceImpl {
     return await super.stop(request, context)
   }
 
-  async process(request: DataBinding, dbContext?: ChannelStoreContext): Promise<ProcessResult> {
-    if (!this.pool) {
-      await this.initPool()
-    }
-
-    return this.pool.run(
-      { request, workerPort: dbContext?.workerPort },
-      { transferList: dbContext?.workerPort ? [dbContext?.workerPort] : [] }
-    )
-  }
-
   private readonly contexts = new Contexts()
 
   protected async handleRequests(
     requests: AsyncIterable<ProcessStreamRequest>,
     subject: Subject<DeepPartial<ProcessStreamResponse>>
   ) {
+    if (!this.pool) {
+      await this.initPool()
+    }
     for await (const request of requests) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-      }
-
-      const binding = request.binding
-
-      const dbContext = this.contexts.new(request.processId, subject)
-
-      const start = Date.now()
-      this.process(binding, dbContext)
-        .then(async (result) => {
-          subject.next({
-            result,
-            processId: request.processId
-          })
-        })
-        .catch((e) => {
-          dbContext.error(request.processId, e)
-          process_binding_error.add(1)
-        })
-        .finally(() => {
-          const cost = Date.now() - start
-          process_binding_time.add(cost)
-          this.contexts.delete(request.processId)
-        })
-      }
-      if (request.dbResult) {
-        const dbContext = this.contexts.get(request.processId)
-        try {
-          dbContext?.result(request.dbResult)
-        } catch (e) {
-          subject.error(new Error('db result error, process should stop'))
-        }
+      this.handleSingleRequest(request, subject)
+    }
+  }
+
+  async handleSingleRequest(request: ProcessStreamRequest, subject: Subject<DeepPartial<ProcessStreamResponse>>) {
+    const processId = request.processId
+    if (request.binding) {
+      const context = this.contexts.new(processId)
+      context.mainPort.on('message', (resp: ProcessStreamResponse) => {
+        subject.next(resp)
+        if (resp.result) {
+          // last response
+          this.contexts.delete(processId)
         }
-      }
-
-
+      })
+      try {
+        await this.pool.run(
+          { request, workerPort: context.workerPort, processId },
+          { transferList: [context.workerPort] }
+        )
+      } catch (err) {
+        console.error('Error processing request:', err)
+        subject.error(err)
       }
+    } else {
+      const context = this.contexts.get(processId)
+      if (!context) {
+        console.error('No context found for processId:', processId)
+        throw new Error(`No context found for processId: ${processId}`)
+      }
+      context.sendRequest(request)
+    }
+  }
+
+  async process(processId: number, context: ChannelContext): Promise<ProcessResult | ProcessStreamResponse_Partitions> {
+    if (!this.pool) {
+      await this.initPool()
     }
+
+    return this.pool.run(
+      { workerPort: context?.workerPort, processId },
+      { transferList: context?.workerPort ? [context?.workerPort] : [] }
+    )
   }
 
   private async initPool() {
     if (this.pool) {
       await this.pool.close()
     }
+
+    if (this.enablePartition) {
+      const concurrent = parseInt(process.env['PROCESS_CONCURRENCY'] || '0')
+      if (this.options.worker < concurrent) {
+        console.warn(
+          `When partition is enabled, the worker count must >= 'PROCESS_CONCURRENCY', will set worker count to ${concurrent})`
+        )
        this.options.worker = concurrent
+      }
+    }
+
     console.info('Initializing worker pool with worker count:', this.options.worker)
     this.pool = new Piscina({
       maxThreads: this.options.worker,
@@ -161,17 +148,19 @@ export class ServiceManager extends ProcessorServiceImpl {
   }
 }
 
-export type WorkerMessage = DBRequest & { processId: number }
-
 class Contexts {
-  private contexts: Map<number,
+  private contexts: Map<number, ChannelContext> = new Map()
 
   get(processId: number) {
     return this.contexts.get(processId)
   }
 
-  new(processId: number
-
+  new(processId: number) {
+    let context = this.get(processId)
+    if (context) {
+      return context
+    }
+    context = new ChannelContext(processId)
     this.contexts.set(processId, context)
     return context
   }
@@ -181,25 +170,19 @@ class Contexts {
     context?.close()
     this.contexts.delete(processId)
   }
+
+  has(processId: number) {
+    return this.contexts.has(processId)
+  }
 }
 
-export class
+export class ChannelContext {
   channel = new MessageChannel()
 
-  constructor(
-    readonly subject: Subject<DeepPartial<ProcessStreamResponse>>,
-    readonly processId: number
-  ) {
-    this.mainPort.on('message', (req: ProcessStreamRequest) => {
-      subject.next({
-        ...req,
-        processId: processId
-      })
-    })
-  }
+  constructor(readonly processId: number) {}
 
-  sendRequest(request:
-
+  sendRequest(request: ProcessStreamRequest) {
+    this.mainPort.postMessage(request)
   }
 
   get workerPort() {
@@ -210,24 +193,7 @@ export class ChannelStoreContext implements IStoreContext {
     return this.channel.port1
   }
 
-  result(dbResult: DBResponse) {
-    this.mainPort.postMessage(dbResult)
-  }
-
   close(): void {
     this.mainPort.close()
   }
-
-  error(processId: number, e: any): void {
-    console.error('process error', processId, e)
-    const errorResult = ProcessResult.create({
-      states: {
-        error: e?.toString()
-      }
-    })
-    this.subject.next({
-      result: errorResult,
-      processId
-    })
-  }
 }
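ServiceManager now keeps one ChannelContext per processId: the worker receives channel.port1 through Piscina's transferList, and the main thread keeps the other port, forwarding worker responses to the gRPC stream and pushing follow-up requests down with sendRequest(). A reduced sketch of that MessageChannel plumbing, with plain object literals standing in for ProcessStreamRequest/Response and with the main-thread port assumed to be port2 (only the port1 side is visible in this diff):

import { MessageChannel } from 'node:worker_threads'

const channel = new MessageChannel()
const workerPort = channel.port1 // transferred to the Piscina worker
const mainPort = channel.port2   // assumed to stay with ChannelContext on the main thread

// worker side: handle follow-up requests (e.g. dbResult) pushed by sendRequest()
workerPort.on('message', (req) => {
  console.log('worker received request:', req)
  // reply with a final result, which the main thread forwards to the stream subject
  workerPort.postMessage({ processId: 1, result: { states: {} } })
})

// main thread: forward every worker response to the gRPC stream
mainPort.on('message', (resp) => {
  console.log('main thread received response:', resp)
  mainPort.close() // last response for this processId; closing either port ends the channel
})

// main thread: ChannelContext.sendRequest() reduces to a postMessage on its port
mainPort.postMessage({ processId: 1 })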
package/src/service-worker.ts
CHANGED
@@ -1,24 +1,14 @@
-import {
-  DataBinding,
-  DBResponse,
-  DeepPartial,
-  Empty,
-  ProcessConfigRequest,
-  ProcessConfigResponse,
-  ProcessStreamResponse,
-  StartRequest
-} from '@sentio/protos'
+import { DeepPartial, Empty, ProcessStreamRequest, ProcessStreamResponse, StartRequest } from '@sentio/protos'
 import { CallContext, ServerError, Status } from 'nice-grpc'
-import { PluginManager } from './plugin.js'
 import { errorString } from './utils.js'
 import { freezeGlobalConfig } from './global-config.js'
 import { DebugInfo, RichServerError } from 'nice-grpc-error-details'
-import {
-import {
+import { ProcessorServiceImpl } from './service.js'
+import { MessagePort, threadId } from 'worker_threads'
 import { Piscina } from 'piscina'
 import { configureEndpoints } from './endpoints.js'
 import { setupLogger } from './logger.js'
-import {
+import { Subject } from 'rxjs'
 
 let started = false
 
@@ -38,16 +28,11 @@ process
     unhandled = reason as Error
     // shutdownServers(1)
   })
+  .on('exit', () => {
+    console.info('Worker thread exiting, threadId:', threadId)
+  })
 
-
-  if (!started) {
-    throw new ServerError(Status.UNAVAILABLE, 'Service Not started.')
-  }
-
-  const newConfig = ProcessConfigResponse.fromPartial({})
-  await PluginManager.INSTANCE.configure(newConfig)
-  return newConfig
-}
+let service: ProcessorServiceImpl | undefined
 
 const loader = async (options: any) => {
   if (options.target) {
@@ -57,10 +42,7 @@ const loader = async (options: any) => {
   }
 }
 
-const
-configureChannel.onmessage = (request: ProcessConfigRequest) => {
-  getConfig(request)
-}
+const emptyCallContext = <CallContext>{}
 
 async function start(request: StartRequest, options: any): Promise<Empty> {
   if (started) {
@@ -69,24 +51,24 @@ async function start(request: StartRequest, options: any): Promise<Empty> {
   freezeGlobalConfig()
 
   try {
-
+    service = new ProcessorServiceImpl(() => loader(options), options)
   } catch (e) {
     throw new ServerError(Status.INVALID_ARGUMENT, 'Failed to load processor: ' + errorString(e))
   }
 
-  await
+  await service.start(request, emptyCallContext)
   started = true
   return {}
 }
 
 export default async function ({
-  request,
   processId,
+  request: firstRequest,
   workerPort
 }: {
-  request: DataBinding
   processId: number
-
+  request: ProcessStreamRequest
+  workerPort: MessagePort
 }) {
   const { startRequest, configRequest, options } = Piscina.workerData
   if (!started) {
@@ -97,18 +79,19 @@ export default async function ({
 
     if (startRequest) {
      await start(startRequest, options)
-      console.debug('worker started, template instance:', startRequest.templateInstances?.length)
+      console.debug('worker', threadId, ' started, template instance:', startRequest.templateInstances?.length)
     }
 
     if (configRequest) {
-      await getConfig(configRequest)
-      console.debug('worker configured')
+      await service?.getConfig(configRequest, emptyCallContext)
+      console.debug('worker', threadId, ' configured')
    }
   }
 
   if (unhandled) {
     const err = unhandled
     unhandled = undefined
+    console.error('Unhandled exception/rejection in previous request:', err)
     throw new RichServerError(
       Status.UNAVAILABLE,
       'Unhandled exception/rejection in previous request: ' + errorString(err),
@@ -120,31 +103,37 @@ export default async function ({
       ]
     )
   }
-
-const
-
-
-
-
-
-
-
-
-
-
-
-
-  ) {
-    super(processId)
-    this.port.on('message', (resp: DBResponse) => {
-      this.result(resp)
+  const timeout = options['worker-timeout'] || 0
+  const enablePartition = options['enable-partition'] || false
+  await new Promise<void>((resolve, reject) => {
+    const subject = new Subject<DeepPartial<ProcessStreamResponse>>()
+    let timeoutId: NodeJS.Timeout | undefined = undefined
+    subject.subscribe((resp: ProcessStreamResponse) => {
+      console.debug('Worker', threadId, 'send response:', resp.result ? 'result' : 'dbResult')
+      workerPort.postMessage(resp)
+      // receive the response from the processor , close and resolve the promise
+      if (resp.result) {
+        if (timeoutId) clearTimeout(timeoutId)
+        resolve()
+        workerPort.close()
+      }
     })
-
-
+    workerPort.on('message', (msg: ProcessStreamRequest) => {
+      const request = msg as ProcessStreamRequest
+      console.debug('Worker', threadId, 'received request:', request.start ? 'start' : 'dbResult')
+      service?.handleRequest(request, firstRequest.binding, subject)
+      if (enablePartition && request.start && timeout > 0) {
+        timeoutId = setTimeout(async () => {
+          reject(new RichServerError(Status.DEADLINE_EXCEEDED, 'Worker timeout exceeded'))
+        }, timeout)
+      }
    })
-
-
-
-
-
+    console.debug('Worker', threadId, 'handle request: binding')
+    service?.handleRequest(firstRequest, firstRequest.binding, subject)
+    if (!enablePartition && timeout > 0) {
+      timeoutId = setTimeout(() => {
+        reject(new RichServerError(Status.DEADLINE_EXCEEDED, 'Worker timeout exceeded'))
+      }, timeout)
+    }
+  })
 }