@temporalio/core-bridge 0.20.2 → 0.21.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +1 -0
- package/package.json +3 -3
- package/releases/aarch64-apple-darwin/index.node +0 -0
- package/releases/aarch64-unknown-linux-gnu/index.node +0 -0
- package/releases/x86_64-apple-darwin/index.node +0 -0
- package/releases/x86_64-pc-windows-msvc/index.node +0 -0
- package/releases/x86_64-unknown-linux-gnu/index.node +0 -0
- package/sdk-core/core/src/core_tests/queries.rs +107 -1
- package/sdk-core/core/src/workflow/workflow_tasks/mod.rs +58 -32
- package/src/errors.rs +9 -2
- package/src/lib.rs +39 -16
package/index.d.ts
CHANGED
@@ -162,6 +162,7 @@ export declare function newReplayWorker(
   callback: WorkerCallback
 ): void;
 export declare function workerShutdown(worker: Worker, callback: VoidCallback): void;
+export declare function clientClose(client: Client): void;
 export declare function runtimeShutdown(runtime: Runtime, callback: VoidCallback): void;
 export declare function pollLogs(runtime: Runtime, callback: LogsCallback): void;
 export declare function workerPollWorkflowActivation(
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@temporalio/core-bridge",
-  "version": "0.20.2",
+  "version": "0.21.0",
   "description": "Temporal.io SDK Core<>Node bridge",
   "main": "index.js",
   "types": "index.d.ts",
@@ -20,7 +20,7 @@
   "license": "MIT",
   "dependencies": {
     "@opentelemetry/api": "^1.0.3",
-    "@temporalio/internal-non-workflow-common": "^0.20.2",
+    "@temporalio/internal-non-workflow-common": "^0.21.0",
     "arg": "^5.0.1",
     "cargo-cp-artifact": "^0.1.4",
     "which": "^2.0.2"
@@ -43,5 +43,5 @@
   "publishConfig": {
     "access": "public"
   },
-  "gitHead": "
+  "gitHead": "eb5901f47e16f8c8fe36c1154d5176c5f3205efc"
 }
package/releases/aarch64-apple-darwin/index.node
CHANGED
Binary file
package/releases/aarch64-unknown-linux-gnu/index.node
CHANGED
Binary file
package/releases/x86_64-apple-darwin/index.node
CHANGED
Binary file
package/releases/x86_64-pc-windows-msvc/index.node
CHANGED
Binary file
package/releases/x86_64-unknown-linux-gnu/index.node
CHANGED
Binary file
package/sdk-core/core/src/core_tests/queries.rs
CHANGED
@@ -21,7 +21,8 @@ use temporal_sdk_core_protos::{
         history::v1::History,
         query::v1::WorkflowQuery,
         workflowservice::v1::{
-
+            GetWorkflowExecutionHistoryResponse, RespondQueryTaskCompletedResponse,
+            RespondWorkflowTaskCompletedResponse,
         },
     },
 };
@@ -381,3 +382,108 @@ async fn legacy_query_after_complete(#[values(false, true)] full_history: bool)
 
     core.shutdown().await;
 }
+
+#[tokio::test]
+async fn query_cache_miss_causes_page_fetch_dont_reply_wft_too_early() {
+    let wfid = "fake_wf_id";
+    let query_resp = "response";
+    let t = canned_histories::single_timer("1");
+    let full_hist = t.get_full_history_info().unwrap();
+    let tasks = VecDeque::from(vec![{
+        // Create a partial task
+        let mut pr = hist_to_poll_resp(
+            &t,
+            wfid.to_owned(),
+            ResponseType::OneTask(2),
+            TEST_Q.to_string(),
+        );
+        pr.queries = HashMap::new();
+        pr.queries.insert(
+            "the-query".to_string(),
+            WorkflowQuery {
+                query_type: "query-type".to_string(),
+                query_args: Some(b"hi".into()),
+                header: None,
+            },
+        );
+        pr
+    }]);
+    let mut mock_client = mock_workflow_client();
+    mock_client
+        .expect_get_workflow_execution_history()
+        .returning(move |_, _, _| {
+            Ok(GetWorkflowExecutionHistoryResponse {
+                history: Some(full_hist.clone().into()),
+                ..Default::default()
+            })
+        });
+    mock_client
+        .expect_complete_workflow_task()
+        .times(1)
+        .returning(|resp| {
+            // Verify both the complete command and the query response are sent
+            assert_eq!(resp.commands.len(), 1);
+            assert_eq!(resp.query_responses.len(), 1);
+
+            Ok(RespondWorkflowTaskCompletedResponse::default())
+        });
+
+    let mut mock = MocksHolder::from_client_with_responses(mock_client, tasks, vec![]);
+    mock.worker_cfg(|wc| wc.max_cached_workflows = 10);
+    let core = mock_worker(mock);
+    let task = core.poll_workflow_activation().await.unwrap();
+    // The first task should *only* start the workflow. It should *not* have a query in it, which
+    // was the bug. Query should only appear after we have caught up on replay.
+    assert_matches!(
+        task.jobs.as_slice(),
+        [WorkflowActivationJob {
+            variant: Some(workflow_activation_job::Variant::StartWorkflow(_)),
+        }]
+    );
+    core.complete_workflow_activation(WorkflowActivationCompletion::from_cmd(
+        task.run_id,
+        start_timer_cmd(1, Duration::from_secs(1)),
+    ))
+    .await
+    .unwrap();
+
+    let task = core.poll_workflow_activation().await.unwrap();
+    assert_matches!(
+        task.jobs.as_slice(),
+        [WorkflowActivationJob {
+            variant: Some(workflow_activation_job::Variant::FireTimer(_)),
+        }]
+    );
+    core.complete_workflow_activation(WorkflowActivationCompletion::from_cmd(
+        task.run_id,
+        CompleteWorkflowExecution { result: None }.into(),
+    ))
+    .await
+    .unwrap();
+
+    // Now the query shall arrive
+    let task = core.poll_workflow_activation().await.unwrap();
+    assert_matches!(
+        task.jobs[0],
+        WorkflowActivationJob {
+            variant: Some(workflow_activation_job::Variant::QueryWorkflow(_)),
+        }
+    );
+    core.complete_workflow_activation(WorkflowActivationCompletion::from_cmd(
+        task.run_id,
+        QueryResult {
+            query_id: "the-query".to_string(),
+            variant: Some(
+                QuerySuccess {
+                    response: Some(query_resp.into()),
+                }
+                .into(),
+            ),
+        }
+        .into(),
+    ))
+    .await
+    .unwrap();
+
+    core.shutdown().await;
+}
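Note on the test above: it pins down this release's core behavioral fix. When a poll response carries queries but the workflow is not in the sticky cache, sdk-core must fetch and replay history before answering, so the queries cannot ride along on the first activation. A minimal standalone sketch of that dispatch-or-buffer decision follows; every name in it is an illustrative stand-in, not the real sdk-core type or API.

// Illustrative sketch only: `Query`, `RoutedQueries`, and `route_queries`
// are hypothetical stand-ins for the real sdk-core machinery.
type Query = String;

struct RoutedQueries {
    jobs: Vec<Query>,     // queries attached to the outgoing activation
    buffered: Vec<Query>, // queries held until replay has caught up
}

fn route_queries(queries: Vec<Query>, did_miss_cache: bool) -> RoutedQueries {
    if did_miss_cache {
        // Cache miss: history must be re-fetched and replayed first, so the
        // queries are buffered instead of being answered too early.
        RoutedQueries { jobs: vec![], buffered: queries }
    } else {
        // Cache hit: workflow state is current; queries can be answered now.
        RoutedQueries { jobs: queries, buffered: vec![] }
    }
}

fn main() {
    let routed = route_queries(vec!["the-query".to_string()], true);
    assert!(routed.jobs.is_empty());
    assert_eq!(routed.buffered.len(), 1);
}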
package/sdk-core/core/src/workflow/workflow_tasks/mod.rs
CHANGED
@@ -57,7 +57,7 @@ pub struct WorkflowTaskManager {
     pending_activations: PendingActivations,
     /// Holds activations which are purely query activations needed to respond to legacy queries.
    /// Activations may only be added here for runs which do not have other pending activations.
-
+    pending_queries: SegQueue<WorkflowActivation>,
     /// Holds poll wft responses from the server that need to be applied
     ready_buffered_wft: SegQueue<ValidPollWFTQResponse>,
     /// Used to wake blocked workflow task polling
@@ -74,9 +74,8 @@ pub struct WorkflowTaskManager {
 #[derive(Clone, Debug)]
 pub(crate) struct OutstandingTask {
     pub info: WorkflowTaskInfo,
-    ///
-
-    pub legacy_query: Option<QueryWorkflow>,
+    /// Set if the outstanding task has quer(ies) which must be fulfilled upon finishing replay
+    pub pending_queries: Vec<QueryWorkflow>,
     start_time: Instant,
 }
 
@@ -179,7 +178,7 @@ impl WorkflowTaskManager {
         Self {
             workflow_machines: WorkflowConcurrencyManager::new(),
             pending_activations: Default::default(),
-
+            pending_queries: Default::default(),
             ready_buffered_wft: Default::default(),
             pending_activations_notifier,
             cache_manager: Mutex::new(WorkflowCacheManager::new(eviction_policy, metrics.clone())),
@@ -188,8 +187,8 @@
     }
 
     pub(crate) fn next_pending_activation(&self) -> Option<WorkflowActivation> {
-        // Dispatch pending
-        if let leg_q @ Some(_) = self.
+        // Dispatch pending queries first
+        if let leg_q @ Some(_) = self.pending_queries.pop() {
             return leg_q;
         }
         // It is important that we do not issue pending activations for any workflows which already
@@ -333,33 +332,45 @@
             .take()
             .map(|q| query_to_job(LEGACY_QUERY_ID.to_string(), q));
 
-        let (info, mut next_activation) =
+        let (info, mut next_activation, mut pending_queries) =
             match self.instantiate_or_update_workflow(work, client).await {
-                Ok(
+                Ok(res) => res,
                 Err(e) => {
                     return NewWfTaskOutcome::Evict(e);
                 }
             };
 
+        if !pending_queries.is_empty() && legacy_query.is_some() {
+            error!(
+                "Server issued both normal and legacy queries. This should not happen. Please \
+                 file a bug report."
+            );
+            return NewWfTaskOutcome::Evict(WorkflowUpdateError {
+                source: WFMachinesError::Fatal(
+                    "Server issued both normal and legacy query".to_string(),
+                ),
+                run_id: next_activation.run_id,
+            });
+        }
+
         // Immediately dispatch query activation if no other jobs
-        let
-        if
+        if let Some(lq) = legacy_query {
+            if next_activation.jobs.is_empty() {
                 debug!("Dispatching legacy query {}", &lq);
                 next_activation
                     .jobs
                     .push(workflow_activation_job::Variant::QueryWorkflow(lq).into());
+            } else {
+                pending_queries.push(lq);
             }
-
-        } else {
-            legacy_query
-        };
+        }
 
         self.workflow_machines
             .insert_wft(
                 &next_activation.run_id,
                 OutstandingTask {
                     info,
-
+                    pending_queries,
                     start_time: task_start_time,
                 },
             )
@@ -401,11 +412,11 @@
             return Ok(None);
         }
 
-        let (task_token,
+        let (task_token, has_pending_query, start_time) =
             if let Some(entry) = self.workflow_machines.get_task(run_id) {
                 (
                     entry.info.task_token.clone(),
-                    entry.
+                    !entry.pending_queries.is_empty(),
                     entry.start_time,
                 )
             } else {
@@ -506,7 +517,7 @@
         let must_heartbeat = self
             .wait_for_local_acts_or_heartbeat(run_id, wft_heartbeat_deadline)
             .await;
-        let is_query_playback =
+        let is_query_playback = has_pending_query && query_responses.is_empty();
 
         // We only actually want to send commands back to the server if there are no more
         // pending activations and we are caught up on replay. We don't want to complete a wft
@@ -592,7 +603,8 @@
         &self,
         poll_wf_resp: ValidPollWFTQResponse,
         client: Arc<WorkerClientBag>,
-    ) -> Result<(WorkflowTaskInfo, WorkflowActivation), WorkflowUpdateError>
+    ) -> Result<(WorkflowTaskInfo, WorkflowActivation, Vec<QueryWorkflow>), WorkflowUpdateError>
+    {
         let run_id = poll_wf_resp.workflow_execution.run_id.clone();
 
         let wft_info = WorkflowTaskInfo {
@@ -607,10 +619,12 @@
             .map(|ev| ev.event_id > 1)
             .unwrap_or_default();
 
+        let mut did_miss_cache = false;
         let page_token = if !self.workflow_machines.exists(&run_id) && poll_resp_is_incremental {
             debug!(run_id=?run_id, "Workflow task has partial history, but workflow is not in \
                    cache. Will fetch history");
             self.metrics.sticky_cache_miss();
+            did_miss_cache = true;
             NextPageToken::FetchFromStart
         } else {
             poll_wf_resp.next_page_token.into()
@@ -639,16 +653,26 @@
             .await
         {
             Ok(mut activation) => {
-                // If there are in-poll queries, insert jobs for those queries into the activation
+                // If there are in-poll queries, insert jobs for those queries into the activation,
+                // but only if we hit the cache. If we didn't, those queries will need to be dealt
+                // with once replay is over
+                let mut pending_queries = vec![];
                 if !poll_wf_resp.query_requests.is_empty() {
-
-
-
-
-
+                    if !did_miss_cache {
+                        let query_jobs = poll_wf_resp
+                            .query_requests
+                            .into_iter()
+                            .map(|q| workflow_activation_job::Variant::QueryWorkflow(q).into());
+                        activation.jobs.extend(query_jobs);
+                    } else {
+                        poll_wf_resp
+                            .query_requests
+                            .into_iter()
+                            .for_each(|q| pending_queries.push(q));
+                    }
                 }
 
-                Ok((wft_info, activation))
+                Ok((wft_info, activation, pending_queries))
             }
             Err(source) => Err(WorkflowUpdateError { source, run_id }),
         }
@@ -675,16 +699,18 @@
         // removed from the outstanding tasks map
        let retme = if !self.pending_activations.has_pending(run_id) {
             if !just_evicted {
-                // Check if there was a
-                // a new pending activation for it.
+                // Check if there was a pending query which must be fulfilled, and if there is
+                // create a new pending activation for it.
                 if let Some(ref mut ot) = &mut *self
                     .workflow_machines
                     .get_task_mut(run_id)
                     .expect("Machine must exist")
                 {
-                    if
-
-
+                    if !ot.pending_queries.is_empty() {
+                        for query in ot.pending_queries.drain(..) {
+                            let na = create_query_activation(run_id.to_string(), [query]);
+                            self.pending_queries.push(na);
+                        }
                         self.pending_activations_notifier.notify_waiters();
                         return false;
                     }
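The `pending_queries: SegQueue<WorkflowActivation>` field added above uses crossbeam's lock-free unbounded queue, and `next_pending_activation` drains it ahead of any other pending work. A small self-contained sketch of that push/pop flow (assuming a crossbeam-queue version where `pop` returns `Option<T>`; strings stand in for activations):

use crossbeam_queue::SegQueue;

fn main() {
    // Query-only activations get pushed here when a task finishes replay
    // with buffered queries still outstanding.
    let pending_queries: SegQueue<String> = SegQueue::new();
    pending_queries.push("query-activation for run-1".to_string());

    // next_pending_activation-style dispatch: queries are served first,
    // before ordinary pending activations are considered.
    if let Some(activation) = pending_queries.pop() {
        println!("dispatching {activation}");
    }
}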
package/src/errors.rs
CHANGED
@@ -10,6 +10,8 @@ pub static SHUTDOWN_ERROR: OnceCell<Root<JsFunction>> = OnceCell::new();
 pub static NO_WORKER_ERROR: OnceCell<Root<JsFunction>> = OnceCell::new();
 /// Something unexpected happened, considered fatal
 pub static UNEXPECTED_ERROR: OnceCell<Root<JsFunction>> = OnceCell::new();
+/// Used in different parts of the project to signal that something unexpected has happened
+pub static ILLEGAL_STATE_ERROR: OnceCell<Root<JsFunction>> = OnceCell::new();
 
 static ALREADY_REGISTERED_ERRORS: OnceCell<bool> = OnceCell::new();
 
@@ -70,9 +72,9 @@ pub fn register_errors(mut cx: FunctionContext) -> JsResult<JsUndefined> {
     let res = ALREADY_REGISTERED_ERRORS.set(true);
     if res.is_err() {
         // Don't do anything if errors are already registered
-        return Ok(cx.undefined())
+        return Ok(cx.undefined());
     }
-
+
     let mapping = cx.argument::<JsObject>(0)?;
     let shutdown_error = mapping
         .get(&mut cx, "ShutdownError")?
@@ -90,11 +92,16 @@ pub fn register_errors(mut cx: FunctionContext) -> JsResult<JsUndefined> {
         .get(&mut cx, "UnexpectedError")?
         .downcast_or_throw::<JsFunction, FunctionContext>(&mut cx)?
         .root(&mut cx);
+    let illegal_state_error = mapping
+        .get(&mut cx, "IllegalStateError")?
+        .downcast_or_throw::<JsFunction, FunctionContext>(&mut cx)?
+        .root(&mut cx);
 
     TRANSPORT_ERROR.get_or_try_init(|| Ok(transport_error))?;
     SHUTDOWN_ERROR.get_or_try_init(|| Ok(shutdown_error))?;
     NO_WORKER_ERROR.get_or_try_init(|| Ok(no_worker_error))?;
     UNEXPECTED_ERROR.get_or_try_init(|| Ok(unexpected_error))?;
+    ILLEGAL_STATE_ERROR.get_or_try_init(|| Ok(illegal_state_error))?;
 
     Ok(cx.undefined())
 }
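As with the other error constructors in this file, `ILLEGAL_STATE_ERROR` lives in a `once_cell::sync::OnceCell`, so registration is write-once per process. A stripped-down illustration of that pattern, with a plain `String` standing in for the rooted `JsFunction`:

use once_cell::sync::OnceCell;

// Stand-in for ILLEGAL_STATE_ERROR; the real cell holds a Root<JsFunction>.
static ILLEGAL_STATE_ERROR: OnceCell<String> = OnceCell::new();

fn register(ctor: String) -> Result<(), String> {
    // get_or_try_init only runs the closure while the cell is still empty,
    // which is why repeated register_errors calls are harmless no-ops.
    ILLEGAL_STATE_ERROR.get_or_try_init(|| Ok::<_, String>(ctor))?;
    Ok(())
}

fn main() {
    register("IllegalStateError".to_string()).unwrap();
    register("ignored".to_string()).unwrap(); // second call changes nothing
    assert_eq!(
        ILLEGAL_STATE_ERROR.get().map(String::as_str),
        Some("IllegalStateError")
    );
}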
package/src/lib.rs
CHANGED
@@ -8,6 +8,7 @@ use once_cell::sync::OnceCell;
 use opentelemetry::trace::{FutureExt, SpanContext, TraceContextExt};
 use prost::Message;
 use std::{
+    cell::RefCell,
     fmt::Display,
     future::Future,
     sync::Arc,
@@ -135,7 +136,7 @@ struct Client {
     core_client: Arc<RawClient>,
 }
 
-type BoxedClient = JsBox<Client
+type BoxedClient = JsBox<RefCell<Option<Client>>>;
 impl Finalize for Client {}
 
 /// Worker struct, hold a reference for the channel sender responsible for sending requests from
@@ -291,10 +292,10 @@ fn start_bridge_loop(event_queue: Arc<EventQueue>, receiver: &mut UnboundedRecei
             }
             Ok(client) => {
                 send_result(event_queue.clone(), callback, |cx| {
-                    Ok(cx.boxed(Client {
+                    Ok(cx.boxed(RefCell::new(Some(Client {
                         runtime,
                         core_client: Arc::new(client),
-                    }))
+                    }))))
                 });
             }
         }
@@ -590,15 +591,23 @@ fn worker_new(mut cx: FunctionContext) -> JsResult<JsUndefined> {
     let callback = cx.argument::<JsFunction>(2)?;
 
     let config = worker_options.as_worker_config(&mut cx)?;
-
-
-
-
-
-
-
-
-
+    match &*client.borrow() {
+        None => {
+            callback_with_error(&mut cx, callback, move |cx| {
+                UNEXPECTED_ERROR.from_string(cx, "Tried to use closed Client".to_string())
+            })?;
+        }
+        Some(client) => {
+            let request = Request::InitWorker {
+                client: client.core_client.clone(),
+                runtime: client.runtime.clone(),
+                config,
+                callback: callback.root(&mut cx),
+            };
+            if let Err(err) = client.runtime.sender.send(request) {
+                callback_with_unexpected_error(&mut cx, callback, err)?;
+            };
+        }
     };
 
     Ok(cx.undefined())
@@ -783,13 +792,26 @@ fn worker_record_activity_heartbeat(mut cx: FunctionContext) -> JsResult<JsUndef
 fn worker_shutdown(mut cx: FunctionContext) -> JsResult<JsUndefined> {
     let worker = cx.argument::<BoxedWorker>(0)?;
     let callback = cx.argument::<JsFunction>(1)?;
-
+    if let Err(err) = worker.runtime.sender.send(Request::ShutdownWorker {
         worker: worker.core_worker.clone(),
         callback: callback.root(&mut cx),
     }) {
-
-
-
+        UNEXPECTED_ERROR
+            .from_error(&mut cx, err)
+            .and_then(|err| cx.throw(err))?;
+    };
+    Ok(cx.undefined())
+}
+
+/// Drop a reference to a Client, once all references are dropped, the Client will be closed.
+fn client_close(mut cx: FunctionContext) -> JsResult<JsUndefined> {
+    let client = cx.argument::<BoxedClient>(0)?;
+    if client.replace(None).is_none() {
+        ILLEGAL_STATE_ERROR
+            .from_error(&mut cx, "Client already closed")
+            .and_then(|err| cx.throw(err))?;
+    };
+    Ok(cx.undefined())
 }
 
 /// Convert Rust SystemTime into a JS array with 2 numbers (seconds, nanos)
@@ -824,6 +846,7 @@ fn main(mut cx: ModuleContext) -> NeonResult<()> {
     cx.export_function("newWorker", worker_new)?;
     cx.export_function("newReplayWorker", replay_worker_new)?;
     cx.export_function("workerShutdown", worker_shutdown)?;
+    cx.export_function("clientClose", client_close)?;
     cx.export_function("runtimeShutdown", runtime_shutdown)?;
     cx.export_function("pollLogs", poll_logs)?;
     cx.export_function(