temporalio 0.0.1 → 0.1.0

Files changed (310)
  1. checksums.yaml +4 -4
  2. data/README.md +180 -7
  3. data/bridge/Cargo.lock +208 -76
  4. data/bridge/Cargo.toml +5 -2
  5. data/bridge/sdk-core/Cargo.toml +1 -1
  6. data/bridge/sdk-core/README.md +20 -10
  7. data/bridge/sdk-core/client/Cargo.toml +1 -1
  8. data/bridge/sdk-core/client/src/lib.rs +227 -59
  9. data/bridge/sdk-core/client/src/metrics.rs +17 -8
  10. data/bridge/sdk-core/client/src/raw.rs +13 -12
  11. data/bridge/sdk-core/client/src/retry.rs +132 -43
  12. data/bridge/sdk-core/core/Cargo.toml +28 -15
  13. data/bridge/sdk-core/core/benches/workflow_replay.rs +13 -10
  14. data/bridge/sdk-core/core/src/abstractions.rs +225 -36
  15. data/bridge/sdk-core/core/src/core_tests/activity_tasks.rs +217 -79
  16. data/bridge/sdk-core/core/src/core_tests/determinism.rs +165 -2
  17. data/bridge/sdk-core/core/src/core_tests/local_activities.rs +565 -34
  18. data/bridge/sdk-core/core/src/core_tests/queries.rs +247 -90
  19. data/bridge/sdk-core/core/src/core_tests/workers.rs +3 -5
  20. data/bridge/sdk-core/core/src/core_tests/workflow_cancels.rs +1 -1
  21. data/bridge/sdk-core/core/src/core_tests/workflow_tasks.rs +430 -67
  22. data/bridge/sdk-core/core/src/ephemeral_server/mod.rs +106 -12
  23. data/bridge/sdk-core/core/src/internal_flags.rs +136 -0
  24. data/bridge/sdk-core/core/src/lib.rs +148 -34
  25. data/bridge/sdk-core/core/src/protosext/mod.rs +1 -1
  26. data/bridge/sdk-core/core/src/replay/mod.rs +185 -41
  27. data/bridge/sdk-core/core/src/telemetry/log_export.rs +190 -0
  28. data/bridge/sdk-core/core/src/telemetry/metrics.rs +219 -140
  29. data/bridge/sdk-core/core/src/telemetry/mod.rs +326 -315
  30. data/bridge/sdk-core/core/src/telemetry/prometheus_server.rs +20 -14
  31. data/bridge/sdk-core/core/src/test_help/mod.rs +85 -21
  32. data/bridge/sdk-core/core/src/worker/activities/activity_heartbeat_manager.rs +112 -156
  33. data/bridge/sdk-core/core/src/worker/activities/activity_task_poller_stream.rs +89 -0
  34. data/bridge/sdk-core/core/src/worker/activities/local_activities.rs +364 -128
  35. data/bridge/sdk-core/core/src/worker/activities.rs +263 -170
  36. data/bridge/sdk-core/core/src/worker/client/mocks.rs +23 -3
  37. data/bridge/sdk-core/core/src/worker/client.rs +48 -6
  38. data/bridge/sdk-core/core/src/worker/mod.rs +186 -75
  39. data/bridge/sdk-core/core/src/worker/workflow/bridge.rs +1 -3
  40. data/bridge/sdk-core/core/src/worker/workflow/driven_workflow.rs +13 -24
  41. data/bridge/sdk-core/core/src/worker/workflow/history_update.rs +879 -226
  42. data/bridge/sdk-core/core/src/worker/workflow/machines/activity_state_machine.rs +101 -48
  43. data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_external_state_machine.rs +8 -12
  44. data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_workflow_state_machine.rs +6 -9
  45. data/bridge/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +90 -32
  46. data/bridge/sdk-core/core/src/worker/workflow/machines/complete_workflow_state_machine.rs +6 -9
  47. data/bridge/sdk-core/core/src/worker/workflow/machines/continue_as_new_workflow_state_machine.rs +7 -10
  48. data/bridge/sdk-core/core/src/worker/workflow/machines/fail_workflow_state_machine.rs +6 -9
  49. data/bridge/sdk-core/core/src/worker/workflow/machines/local_activity_state_machine.rs +160 -83
  50. data/bridge/sdk-core/core/src/worker/workflow/machines/mod.rs +36 -54
  51. data/bridge/sdk-core/core/src/worker/workflow/machines/modify_workflow_properties_state_machine.rs +179 -0
  52. data/bridge/sdk-core/core/src/worker/workflow/machines/patch_state_machine.rs +104 -157
  53. data/bridge/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +8 -12
  54. data/bridge/sdk-core/core/src/worker/workflow/machines/timer_state_machine.rs +9 -13
  55. data/bridge/sdk-core/core/src/worker/workflow/machines/transition_coverage.rs +10 -4
  56. data/bridge/sdk-core/core/src/worker/workflow/machines/upsert_search_attributes_state_machine.rs +14 -11
  57. data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines/local_acts.rs +6 -17
  58. data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +395 -299
  59. data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_task_state_machine.rs +12 -20
  60. data/bridge/sdk-core/core/src/worker/workflow/managed_run/managed_wf_test.rs +33 -18
  61. data/bridge/sdk-core/core/src/worker/workflow/managed_run.rs +1032 -374
  62. data/bridge/sdk-core/core/src/worker/workflow/mod.rs +525 -392
  63. data/bridge/sdk-core/core/src/worker/workflow/run_cache.rs +40 -57
  64. data/bridge/sdk-core/core/src/worker/workflow/wft_extraction.rs +125 -0
  65. data/bridge/sdk-core/core/src/worker/workflow/wft_poller.rs +3 -6
  66. data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/saved_wf_inputs.rs +117 -0
  67. data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/tonic_status_serde.rs +24 -0
  68. data/bridge/sdk-core/core/src/worker/workflow/workflow_stream.rs +456 -681
  69. data/bridge/sdk-core/core-api/Cargo.toml +6 -4
  70. data/bridge/sdk-core/core-api/src/errors.rs +1 -34
  71. data/bridge/sdk-core/core-api/src/lib.rs +7 -45
  72. data/bridge/sdk-core/core-api/src/telemetry.rs +141 -0
  73. data/bridge/sdk-core/core-api/src/worker.rs +27 -1
  74. data/bridge/sdk-core/etc/deps.svg +115 -140
  75. data/bridge/sdk-core/etc/regen-depgraph.sh +5 -0
  76. data/bridge/sdk-core/fsm/rustfsm_procmacro/src/lib.rs +18 -15
  77. data/bridge/sdk-core/fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.stderr +1 -1
  78. data/bridge/sdk-core/fsm/rustfsm_trait/src/lib.rs +8 -3
  79. data/bridge/sdk-core/histories/evict_while_la_running_no_interference-16_history.bin +0 -0
  80. data/bridge/sdk-core/histories/evict_while_la_running_no_interference-23_history.bin +0 -0
  81. data/bridge/sdk-core/histories/evict_while_la_running_no_interference-85_history.bin +0 -0
  82. data/bridge/sdk-core/protos/api_upstream/buf.yaml +0 -3
  83. data/bridge/sdk-core/protos/api_upstream/build/go.mod +7 -0
  84. data/bridge/sdk-core/protos/api_upstream/build/go.sum +5 -0
  85. data/bridge/sdk-core/protos/api_upstream/{temporal/api/enums/v1/cluster.proto → build/tools.go} +7 -18
  86. data/bridge/sdk-core/protos/api_upstream/go.mod +6 -0
  87. data/bridge/sdk-core/protos/api_upstream/temporal/api/batch/v1/message.proto +12 -9
  88. data/bridge/sdk-core/protos/api_upstream/temporal/api/command/v1/message.proto +15 -26
  89. data/bridge/sdk-core/protos/api_upstream/temporal/api/common/v1/message.proto +13 -2
  90. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/batch_operation.proto +3 -2
  91. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/command_type.proto +4 -9
  92. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/common.proto +3 -2
  93. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/event_type.proto +10 -8
  94. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +28 -2
  95. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/namespace.proto +2 -2
  96. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/query.proto +2 -2
  97. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/reset.proto +2 -2
  98. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/schedule.proto +2 -2
  99. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/task_queue.proto +2 -2
  100. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/update.proto +24 -19
  101. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/workflow.proto +2 -2
  102. data/bridge/sdk-core/protos/api_upstream/temporal/api/errordetails/v1/message.proto +2 -2
  103. data/bridge/sdk-core/protos/api_upstream/temporal/api/failure/v1/message.proto +2 -2
  104. data/bridge/sdk-core/protos/api_upstream/temporal/api/filter/v1/message.proto +2 -2
  105. data/bridge/sdk-core/protos/api_upstream/temporal/api/history/v1/message.proto +62 -26
  106. data/bridge/sdk-core/protos/api_upstream/temporal/api/namespace/v1/message.proto +4 -2
  107. data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto +24 -61
  108. data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/service.proto +2 -21
  109. data/bridge/sdk-core/protos/api_upstream/temporal/api/protocol/v1/message.proto +57 -0
  110. data/bridge/sdk-core/protos/api_upstream/temporal/api/query/v1/message.proto +2 -2
  111. data/bridge/sdk-core/protos/api_upstream/temporal/api/replication/v1/message.proto +2 -2
  112. data/bridge/sdk-core/protos/api_upstream/temporal/api/schedule/v1/message.proto +110 -31
  113. data/bridge/sdk-core/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto +63 -0
  114. data/bridge/sdk-core/protos/api_upstream/temporal/api/taskqueue/v1/message.proto +4 -4
  115. data/bridge/sdk-core/protos/api_upstream/temporal/api/update/v1/message.proto +71 -6
  116. data/bridge/sdk-core/protos/api_upstream/temporal/api/version/v1/message.proto +2 -2
  117. data/bridge/sdk-core/protos/api_upstream/temporal/api/workflow/v1/message.proto +3 -2
  118. data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +111 -36
  119. data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +19 -5
  120. data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_result/activity_result.proto +1 -0
  121. data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_task/activity_task.proto +1 -0
  122. data/bridge/sdk-core/protos/local/temporal/sdk/core/child_workflow/child_workflow.proto +1 -0
  123. data/bridge/sdk-core/protos/local/temporal/sdk/core/common/common.proto +1 -0
  124. data/bridge/sdk-core/protos/local/temporal/sdk/core/core_interface.proto +1 -0
  125. data/bridge/sdk-core/protos/local/temporal/sdk/core/external_data/external_data.proto +1 -0
  126. data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_activation/workflow_activation.proto +9 -0
  127. data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_commands/workflow_commands.proto +9 -1
  128. data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_completion/workflow_completion.proto +6 -0
  129. data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/request_response.proto +2 -2
  130. data/bridge/sdk-core/protos/testsrv_upstream/temporal/api/testservice/v1/service.proto +2 -2
  131. data/bridge/sdk-core/sdk/Cargo.toml +4 -3
  132. data/bridge/sdk-core/sdk/src/interceptors.rs +36 -3
  133. data/bridge/sdk-core/sdk/src/lib.rs +94 -25
  134. data/bridge/sdk-core/sdk/src/workflow_context.rs +13 -2
  135. data/bridge/sdk-core/sdk/src/workflow_future.rs +10 -13
  136. data/bridge/sdk-core/sdk-core-protos/Cargo.toml +5 -2
  137. data/bridge/sdk-core/sdk-core-protos/build.rs +36 -2
  138. data/bridge/sdk-core/sdk-core-protos/src/history_builder.rs +164 -104
  139. data/bridge/sdk-core/sdk-core-protos/src/history_info.rs +27 -23
  140. data/bridge/sdk-core/sdk-core-protos/src/lib.rs +252 -74
  141. data/bridge/sdk-core/sdk-core-protos/src/task_token.rs +12 -2
  142. data/bridge/sdk-core/test-utils/Cargo.toml +4 -1
  143. data/bridge/sdk-core/test-utils/src/canned_histories.rs +106 -296
  144. data/bridge/sdk-core/test-utils/src/histfetch.rs +1 -1
  145. data/bridge/sdk-core/test-utils/src/lib.rs +161 -50
  146. data/bridge/sdk-core/test-utils/src/wf_input_saver.rs +50 -0
  147. data/bridge/sdk-core/test-utils/src/workflows.rs +29 -0
  148. data/bridge/sdk-core/tests/fuzzy_workflow.rs +130 -0
  149. data/bridge/sdk-core/tests/{load_tests.rs → heavy_tests.rs} +125 -51
  150. data/bridge/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +25 -3
  151. data/bridge/sdk-core/tests/integ_tests/heartbeat_tests.rs +10 -5
  152. data/bridge/sdk-core/tests/integ_tests/metrics_tests.rs +239 -0
  153. data/bridge/sdk-core/tests/integ_tests/polling_tests.rs +4 -60
  154. data/bridge/sdk-core/tests/integ_tests/queries_tests.rs +5 -128
  155. data/bridge/sdk-core/tests/integ_tests/visibility_tests.rs +83 -25
  156. data/bridge/sdk-core/tests/integ_tests/workflow_tests/activities.rs +93 -69
  157. data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs +1 -0
  158. data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs +6 -13
  159. data/bridge/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs +1 -0
  160. data/bridge/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs +6 -2
  161. data/bridge/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +3 -10
  162. data/bridge/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +151 -116
  163. data/bridge/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs +54 -0
  164. data/bridge/sdk-core/tests/integ_tests/workflow_tests/patches.rs +7 -28
  165. data/bridge/sdk-core/tests/integ_tests/workflow_tests/replay.rs +115 -24
  166. data/bridge/sdk-core/tests/integ_tests/workflow_tests/resets.rs +1 -0
  167. data/bridge/sdk-core/tests/integ_tests/workflow_tests/signals.rs +18 -14
  168. data/bridge/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs +6 -20
  169. data/bridge/sdk-core/tests/integ_tests/workflow_tests/timers.rs +10 -21
  170. data/bridge/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +6 -4
  171. data/bridge/sdk-core/tests/integ_tests/workflow_tests.rs +27 -18
  172. data/bridge/sdk-core/tests/main.rs +8 -16
  173. data/bridge/sdk-core/tests/runner.rs +75 -36
  174. data/bridge/sdk-core/tests/wf_input_replay.rs +32 -0
  175. data/bridge/src/connection.rs +117 -82
  176. data/bridge/src/lib.rs +356 -42
  177. data/bridge/src/runtime.rs +10 -3
  178. data/bridge/src/test_server.rs +153 -0
  179. data/bridge/src/worker.rs +133 -9
  180. data/lib/gen/temporal/api/batch/v1/message_pb.rb +8 -6
  181. data/lib/gen/temporal/api/command/v1/message_pb.rb +10 -16
  182. data/lib/gen/temporal/api/common/v1/message_pb.rb +5 -1
  183. data/lib/gen/temporal/api/enums/v1/batch_operation_pb.rb +2 -1
  184. data/lib/gen/temporal/api/enums/v1/command_type_pb.rb +3 -3
  185. data/lib/gen/temporal/api/enums/v1/common_pb.rb +2 -1
  186. data/lib/gen/temporal/api/enums/v1/event_type_pb.rb +5 -4
  187. data/lib/gen/temporal/api/enums/v1/failed_cause_pb.rb +9 -1
  188. data/lib/gen/temporal/api/enums/v1/namespace_pb.rb +1 -1
  189. data/lib/gen/temporal/api/enums/v1/query_pb.rb +1 -1
  190. data/lib/gen/temporal/api/enums/v1/reset_pb.rb +1 -1
  191. data/lib/gen/temporal/api/enums/v1/schedule_pb.rb +1 -1
  192. data/lib/gen/temporal/api/enums/v1/task_queue_pb.rb +1 -1
  193. data/lib/gen/temporal/api/enums/v1/update_pb.rb +7 -10
  194. data/lib/gen/temporal/api/enums/v1/workflow_pb.rb +1 -1
  195. data/lib/gen/temporal/api/errordetails/v1/message_pb.rb +1 -1
  196. data/lib/gen/temporal/api/failure/v1/message_pb.rb +1 -1
  197. data/lib/gen/temporal/api/filter/v1/message_pb.rb +1 -1
  198. data/lib/gen/temporal/api/history/v1/message_pb.rb +34 -25
  199. data/lib/gen/temporal/api/namespace/v1/message_pb.rb +2 -1
  200. data/lib/gen/temporal/api/operatorservice/v1/request_response_pb.rb +14 -51
  201. data/lib/gen/temporal/api/operatorservice/v1/service_pb.rb +1 -1
  202. data/lib/gen/temporal/api/protocol/v1/message_pb.rb +30 -0
  203. data/lib/gen/temporal/api/query/v1/message_pb.rb +1 -1
  204. data/lib/gen/temporal/api/replication/v1/message_pb.rb +1 -1
  205. data/lib/gen/temporal/api/schedule/v1/message_pb.rb +22 -1
  206. data/lib/gen/temporal/api/sdk/v1/task_complete_metadata_pb.rb +23 -0
  207. data/lib/gen/temporal/api/taskqueue/v1/message_pb.rb +2 -2
  208. data/lib/gen/temporal/api/testservice/v1/request_response_pb.rb +49 -0
  209. data/lib/gen/temporal/api/testservice/v1/service_pb.rb +21 -0
  210. data/lib/gen/temporal/api/update/v1/message_pb.rb +49 -3
  211. data/lib/gen/temporal/api/version/v1/message_pb.rb +1 -1
  212. data/lib/gen/temporal/api/workflow/v1/message_pb.rb +2 -1
  213. data/lib/gen/temporal/api/workflowservice/v1/request_response_pb.rb +47 -20
  214. data/lib/gen/temporal/api/workflowservice/v1/service_pb.rb +1 -1
  215. data/lib/gen/temporal/sdk/core/activity_result/activity_result_pb.rb +13 -9
  216. data/lib/gen/temporal/sdk/core/activity_task/activity_task_pb.rb +10 -6
  217. data/lib/gen/temporal/sdk/core/child_workflow/child_workflow_pb.rb +13 -9
  218. data/lib/gen/temporal/sdk/core/common/common_pb.rb +7 -3
  219. data/lib/gen/temporal/sdk/core/core_interface_pb.rb +9 -3
  220. data/lib/gen/temporal/sdk/core/external_data/external_data_pb.rb +7 -3
  221. data/lib/gen/temporal/sdk/core/workflow_activation/workflow_activation_pb.rb +28 -21
  222. data/lib/gen/temporal/sdk/core/workflow_commands/workflow_commands_pb.rb +32 -24
  223. data/lib/gen/temporal/sdk/core/workflow_completion/workflow_completion_pb.rb +12 -5
  224. data/lib/temporalio/activity/context.rb +102 -0
  225. data/lib/temporalio/activity/info.rb +67 -0
  226. data/lib/temporalio/activity.rb +85 -0
  227. data/lib/temporalio/bridge/connect_options.rb +15 -0
  228. data/lib/temporalio/bridge/error.rb +8 -0
  229. data/lib/temporalio/bridge/retry_config.rb +24 -0
  230. data/lib/temporalio/bridge/tls_options.rb +19 -0
  231. data/lib/temporalio/bridge.rb +14 -0
  232. data/lib/{temporal → temporalio}/client/implementation.rb +57 -56
  233. data/lib/{temporal → temporalio}/client/workflow_handle.rb +35 -35
  234. data/lib/{temporal → temporalio}/client.rb +19 -32
  235. data/lib/temporalio/connection/retry_config.rb +44 -0
  236. data/lib/temporalio/connection/service.rb +20 -0
  237. data/lib/temporalio/connection/test_service.rb +92 -0
  238. data/lib/temporalio/connection/tls_options.rb +51 -0
  239. data/lib/temporalio/connection/workflow_service.rb +731 -0
  240. data/lib/temporalio/connection.rb +86 -0
  241. data/lib/{temporal → temporalio}/data_converter.rb +76 -35
  242. data/lib/{temporal → temporalio}/error/failure.rb +6 -6
  243. data/lib/{temporal → temporalio}/error/workflow_failure.rb +4 -2
  244. data/lib/{temporal → temporalio}/errors.rb +19 -1
  245. data/lib/{temporal → temporalio}/failure_converter/base.rb +5 -5
  246. data/lib/{temporal → temporalio}/failure_converter/basic.rb +58 -52
  247. data/lib/temporalio/failure_converter.rb +7 -0
  248. data/lib/temporalio/interceptor/activity_inbound.rb +22 -0
  249. data/lib/temporalio/interceptor/activity_outbound.rb +24 -0
  250. data/lib/{temporal → temporalio}/interceptor/chain.rb +7 -6
  251. data/lib/{temporal → temporalio}/interceptor/client.rb +27 -2
  252. data/lib/temporalio/interceptor.rb +22 -0
  253. data/lib/{temporal → temporalio}/payload_codec/base.rb +5 -5
  254. data/lib/{temporal → temporalio}/payload_converter/base.rb +3 -3
  255. data/lib/{temporal → temporalio}/payload_converter/bytes.rb +4 -3
  256. data/lib/{temporal → temporalio}/payload_converter/composite.rb +7 -5
  257. data/lib/{temporal → temporalio}/payload_converter/encoding_base.rb +4 -4
  258. data/lib/{temporal → temporalio}/payload_converter/json.rb +4 -3
  259. data/lib/{temporal → temporalio}/payload_converter/nil.rb +4 -3
  260. data/lib/temporalio/payload_converter.rb +14 -0
  261. data/lib/{temporal → temporalio}/retry_policy.rb +17 -7
  262. data/lib/{temporal → temporalio}/retry_state.rb +1 -1
  263. data/lib/temporalio/runtime.rb +25 -0
  264. data/lib/temporalio/testing/time_skipping_handle.rb +32 -0
  265. data/lib/temporalio/testing/time_skipping_interceptor.rb +23 -0
  266. data/lib/temporalio/testing/workflow_environment.rb +112 -0
  267. data/lib/temporalio/testing.rb +175 -0
  268. data/lib/{temporal → temporalio}/timeout_type.rb +2 -2
  269. data/lib/temporalio/version.rb +3 -0
  270. data/lib/temporalio/worker/activity_runner.rb +114 -0
  271. data/lib/temporalio/worker/activity_worker.rb +164 -0
  272. data/lib/temporalio/worker/reactor.rb +46 -0
  273. data/lib/temporalio/worker/runner.rb +63 -0
  274. data/lib/temporalio/worker/sync_worker.rb +124 -0
  275. data/lib/temporalio/worker/thread_pool_executor.rb +51 -0
  276. data/lib/temporalio/worker.rb +204 -0
  277. data/lib/temporalio/workflow/async.rb +46 -0
  278. data/lib/{temporal → temporalio}/workflow/execution_info.rb +4 -4
  279. data/lib/{temporal → temporalio}/workflow/execution_status.rb +1 -1
  280. data/lib/temporalio/workflow/future.rb +138 -0
  281. data/lib/{temporal → temporalio}/workflow/id_reuse_policy.rb +6 -6
  282. data/lib/temporalio/workflow/info.rb +76 -0
  283. data/lib/{temporal → temporalio}/workflow/query_reject_condition.rb +5 -5
  284. data/lib/temporalio.rb +12 -3
  285. data/temporalio.gemspec +11 -6
  286. metadata +137 -64
  287. data/bridge/sdk-core/Cargo.lock +0 -2606
  288. data/bridge/sdk-core/bridge-ffi/Cargo.toml +0 -24
  289. data/bridge/sdk-core/bridge-ffi/LICENSE.txt +0 -23
  290. data/bridge/sdk-core/bridge-ffi/build.rs +0 -25
  291. data/bridge/sdk-core/bridge-ffi/include/sdk-core-bridge.h +0 -249
  292. data/bridge/sdk-core/bridge-ffi/src/lib.rs +0 -825
  293. data/bridge/sdk-core/bridge-ffi/src/wrappers.rs +0 -211
  294. data/bridge/sdk-core/core/src/log_export.rs +0 -62
  295. data/bridge/sdk-core/core/src/worker/workflow/machines/mutable_side_effect_state_machine.rs +0 -127
  296. data/bridge/sdk-core/core/src/worker/workflow/machines/side_effect_state_machine.rs +0 -71
  297. data/bridge/sdk-core/protos/api_upstream/temporal/api/cluster/v1/message.proto +0 -83
  298. data/bridge/sdk-core/protos/local/temporal/sdk/core/bridge/bridge.proto +0 -210
  299. data/bridge/sdk-core/sdk/src/conversions.rs +0 -8
  300. data/lib/bridge.so +0 -0
  301. data/lib/gen/temporal/api/cluster/v1/message_pb.rb +0 -67
  302. data/lib/gen/temporal/api/enums/v1/cluster_pb.rb +0 -26
  303. data/lib/gen/temporal/sdk/core/bridge/bridge_pb.rb +0 -222
  304. data/lib/temporal/bridge.rb +0 -14
  305. data/lib/temporal/connection.rb +0 -736
  306. data/lib/temporal/failure_converter.rb +0 -8
  307. data/lib/temporal/payload_converter.rb +0 -14
  308. data/lib/temporal/runtime.rb +0 -22
  309. data/lib/temporal/version.rb +0 -3
  310. data/lib/temporal.rb +0 -8
data/bridge/sdk-core/core/src/worker/workflow/history_update.rs
@@ -1,57 +1,95 @@
 use crate::{
-    replay::{HistoryInfo, TestHistoryBuilder},
-    worker::client::WorkerClient,
+    protosext::ValidPollWFTQResponse,
+    worker::{
+        client::WorkerClient,
+        workflow::{CacheMissFetchReq, PermittedWFT, PreparedWFT},
+    },
 };
-use futures::{future::BoxFuture, stream, stream::BoxStream, FutureExt, Stream, StreamExt};
+use futures::{future::BoxFuture, FutureExt, Stream};
+use itertools::Itertools;
 use std::{
     collections::VecDeque,
     fmt::Debug,
     future::Future,
+    mem,
+    mem::transmute,
     pin::Pin,
     sync::Arc,
     task::{Context, Poll},
 };
 use temporal_sdk_core_protos::temporal::api::{
     enums::v1::EventType,
-    history::v1::{History, HistoryEvent},
-    workflowservice::v1::GetWorkflowExecutionHistoryResponse,
+    history::v1::{history_event, History, HistoryEvent, WorkflowTaskCompletedEventAttributes},
 };
 use tracing::Instrument;

-/// A slimmed down version of a poll workflow task response which includes just the info needed
-/// by [WorkflowManager]. History events are expected to be consumed from it and applied to the
-/// state machines.
+lazy_static::lazy_static! {
+    static ref EMPTY_FETCH_ERR: tonic::Status
+        = tonic::Status::data_loss("Fetched empty history page");
+    static ref EMPTY_TASK_ERR: tonic::Status
+        = tonic::Status::data_loss("Received an empty workflow task with no queries or history");
+}
+
+/// Represents one or more complete WFT sequences. History events are expected to be consumed from
+/// it and applied to the state machines via [HistoryUpdate::take_next_wft_sequence]
+#[cfg_attr(
+    feature = "save_wf_inputs",
+    derive(serde::Serialize, serde::Deserialize)
+)]
 pub struct HistoryUpdate {
-    events: BoxStream<'static, Result<HistoryEvent, tonic::Status>>,
-    /// It is useful to be able to look ahead up to one workflow task beyond the currently
-    /// requested one. The initial (possibly only) motivation for this being to be able to
-    /// pre-emptively notify lang about patch markers so that calls to `changed` do not need to
-    /// be async.
-    buffered: VecDeque<HistoryEvent>,
-    pub previous_started_event_id: i64,
+    events: Vec<HistoryEvent>,
+    /// The event ID of the last started WFT, as according to the WFT which this update was
+    /// extracted from. Hence, while processing multiple logical WFTs during replay which were part
+    /// of one large history fetched from server, multiple updates may have the same value here.
+    pub previous_wft_started_id: i64,
+    /// True if this update contains the final WFT in history, and no more attempts to extract
+    /// additional updates should be made.
+    has_last_wft: bool,
 }
 impl Debug for HistoryUpdate {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(
-            f,
-            "HistoryUpdate(previous_started_event_id: {})",
-            self.previous_started_event_id
-        )
+        if self.is_real() {
+            write!(
+                f,
+                "HistoryUpdate(previous_started_event_id: {}, length: {}, first_event_id: {:?})",
+                self.previous_wft_started_id,
+                self.events.len(),
+                self.events.first().map(|e| e.event_id)
+            )
+        } else {
+            write!(f, "DummyHistoryUpdate")
+        }
     }
 }

+#[derive(Debug)]
+pub enum NextWFT {
+    ReplayOver,
+    WFT(Vec<HistoryEvent>, bool),
+    NeedFetch,
+}
+
+#[derive(derive_more::DebugCustom)]
+#[debug(fmt = "HistoryPaginator(run_id: {run_id})")]
+#[cfg_attr(
+    feature = "save_wf_inputs",
+    derive(serde::Serialize, serde::Deserialize),
+    serde(default = "HistoryPaginator::fake_deserialized")
+)]
 pub struct HistoryPaginator {
-    // Potentially this could actually be a ref w/ lifetime here
+    pub(crate) wf_id: String,
+    pub(crate) run_id: String,
+    pub(crate) previous_wft_started_id: i64,
+
+    #[cfg_attr(feature = "save_wf_inputs", serde(skip))]
     client: Arc<dyn WorkerClient>,
+    #[cfg_attr(feature = "save_wf_inputs", serde(skip))]
     event_queue: VecDeque<HistoryEvent>,
-    wf_id: String,
-    run_id: String,
+    #[cfg_attr(feature = "save_wf_inputs", serde(skip))]
     next_page_token: NextPageToken,
-    open_history_request:
-        Option<BoxFuture<'static, Result<GetWorkflowExecutionHistoryResponse, tonic::Status>>>,
     /// These are events that should be returned once pagination has finished. This only happens
     /// during cache misses, where we got a partial task but need to fetch history from the start.
-    /// We use this to apply any
+    #[cfg_attr(feature = "save_wf_inputs", serde(skip))]
     final_events: Vec<HistoryEvent>,
 }

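Aside (illustration only, not part of the diff): the new `NextWFT` enum turns history consumption into a pull model; `take_next_wft_sequence` no longer fetches pages itself, it just reports whether a complete WFT sequence is available, more events must be fetched, or replay is over. A minimal, self-contained Rust sketch of that contract, using stand-in types (`Event`, `NextWft`, a scripted `take_next`) rather than the SDK's own:

#[derive(Debug, Clone)]
struct Event {
    event_id: i64,
}

// Mirrors the shape of `NextWFT` above; the bool is the "this was the last WFT" flag.
#[derive(Debug)]
enum NextWft {
    ReplayOver,
    Wft(Vec<Event>, bool),
    NeedFetch,
}

fn main() {
    // Pretend these are the outcomes an update would yield: one sequence, a
    // fetch miss, a final sequence, then end of replay.
    let mut canned = vec![
        NextWft::Wft(
            vec![Event { event_id: 1 }, Event { event_id: 2 }, Event { event_id: 3 }],
            false,
        ),
        NextWft::NeedFetch,
        NextWft::Wft(vec![Event { event_id: 4 }], true),
        NextWft::ReplayOver,
    ];
    canned.reverse();
    let mut take_next = move || canned.pop().expect("script exhausted");

    loop {
        match take_next() {
            NextWft::Wft(events, is_last) => {
                let ids: Vec<i64> = events.iter().map(|e| e.event_id).collect();
                println!("apply events {ids:?} to state machines (last WFT: {is_last})");
            }
            // In the real worker this is where the paginator would be asked for another page.
            NextWft::NeedFetch => println!("would ask the paginator for another history page"),
            NextWft::ReplayOver => break,
        }
    }
}

The design point is that all I/O stays in the paginator; the update itself only slices events that have already been fetched.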
@@ -77,8 +115,68 @@ impl From<Vec<u8>> for NextPageToken {
 }

 impl HistoryPaginator {
-    pub(crate) fn new(
+    /// Use a new poll response to create a new [WFTPaginator], returning it and the
+    /// [PreparedWFT] extracted from it that can be fed into workflow state.
+    pub(super) async fn from_poll(
+        wft: ValidPollWFTQResponse,
+        client: Arc<dyn WorkerClient>,
+    ) -> Result<(Self, PreparedWFT), tonic::Status> {
+        let empty_hist = wft.history.events.is_empty();
+        let npt = if empty_hist {
+            NextPageToken::FetchFromStart
+        } else {
+            wft.next_page_token.into()
+        };
+        let mut paginator = HistoryPaginator::new(
+            wft.history,
+            wft.previous_started_event_id,
+            wft.workflow_execution.workflow_id.clone(),
+            wft.workflow_execution.run_id.clone(),
+            npt,
+            client,
+        );
+        if empty_hist && wft.legacy_query.is_none() && wft.query_requests.is_empty() {
+            return Err(EMPTY_TASK_ERR.clone());
+        }
+        let update = if empty_hist {
+            HistoryUpdate::from_events([], wft.previous_started_event_id, true).0
+        } else {
+            paginator.extract_next_update().await?
+        };
+        let prepared = PreparedWFT {
+            task_token: wft.task_token,
+            attempt: wft.attempt,
+            execution: wft.workflow_execution,
+            workflow_type: wft.workflow_type,
+            legacy_query: wft.legacy_query,
+            query_requests: wft.query_requests,
+            update,
+        };
+        Ok((paginator, prepared))
+    }
+
+    pub(super) async fn from_fetchreq(
+        mut req: CacheMissFetchReq,
+        client: Arc<dyn WorkerClient>,
+    ) -> Result<PermittedWFT, tonic::Status> {
+        let mut paginator = Self {
+            wf_id: req.original_wft.work.execution.workflow_id.clone(),
+            run_id: req.original_wft.work.execution.run_id.clone(),
+            previous_wft_started_id: req.original_wft.work.update.previous_wft_started_id,
+            client,
+            event_queue: Default::default(),
+            next_page_token: NextPageToken::FetchFromStart,
+            final_events: vec![],
+        };
+        let first_update = paginator.extract_next_update().await?;
+        req.original_wft.work.update = first_update;
+        req.original_wft.paginator = paginator;
+        Ok(req.original_wft)
+    }
+
+    fn new(
         initial_history: History,
+        previous_wft_started_id: i64,
         wf_id: String,
         run_id: String,
         next_page_token: impl Into<NextPageToken>,
@@ -97,20 +195,107 @@ impl HistoryPaginator {
             wf_id,
             run_id,
             next_page_token,
-            open_history_request: None,
             final_events,
+            previous_wft_started_id,
         }
     }

-    fn extend_queue_with_new_page(&mut self, resp: GetWorkflowExecutionHistoryResponse) {
-        self.next_page_token = resp.next_page_token.into();
+    #[cfg(feature = "save_wf_inputs")]
+    pub(super) fn fake_deserialized() -> HistoryPaginator {
+        use crate::worker::client::mocks::mock_manual_workflow_client;
+        HistoryPaginator {
+            client: Arc::new(mock_manual_workflow_client()),
+            event_queue: Default::default(),
+            wf_id: "".to_string(),
+            run_id: "".to_string(),
+            next_page_token: NextPageToken::FetchFromStart,
+            final_events: vec![],
+            previous_wft_started_id: -2,
+        }
+    }
+
+    /// Return at least the next two WFT sequences (as determined by the passed-in ID) as a
+    /// [HistoryUpdate]. Two sequences supports the required peek-ahead during replay without
+    /// unnecessary back-and-forth.
+    ///
+    /// If there are already enough events buffered in memory, they will all be returned. Including
+    /// possibly (likely, during replay) more than just the next two WFTs.
+    ///
+    /// If there are insufficient events to constitute two WFTs, then we will fetch pages until
+    /// we have two, or until we are at the end of history.
+    pub(crate) async fn extract_next_update(&mut self) -> Result<HistoryUpdate, tonic::Status> {
+        loop {
+            self.get_next_page().await?;
+            let current_events = mem::take(&mut self.event_queue);
+            if current_events.is_empty() {
+                // If next page fetching happened, and we still ended up with no events, something
+                // is wrong. We're expecting there to be more events to be able to extract this
+                // update, but server isn't giving us any. We have no choice except to give up and
+                // evict.
+                error!(
+                    "We expected to be able to fetch more events but server says there are none"
+                );
+                return Err(EMPTY_FETCH_ERR.clone());
+            }
+            let first_event_id = current_events.front().unwrap().event_id;
+            // If there are some events at the end of the fetched events which represent only a
+            // portion of a complete WFT, retain them to be used in the next extraction.
+            let no_more = matches!(self.next_page_token, NextPageToken::Done);
+            let (update, extra) =
+                HistoryUpdate::from_events(current_events, self.previous_wft_started_id, no_more);
+            let extra_eid_same = extra
+                .first()
+                .map(|e| e.event_id == first_event_id)
+                .unwrap_or_default();
+            self.event_queue = extra.into();
+            if !no_more && extra_eid_same {
+                // There was not a meaningful WFT in the whole page. We must fetch more
+                continue;
+            }
+            return Ok(update);
+        }
+    }
+
+    /// Fetches the next page and adds it to the internal queue. Returns true if a fetch was
+    /// performed, false if there is no next page.
+    async fn get_next_page(&mut self) -> Result<bool, tonic::Status> {
+        let history = loop {
+            let npt = match mem::replace(&mut self.next_page_token, NextPageToken::Done) {
+                // If there's no open request and the last page token we got was empty, we're done.
+                NextPageToken::Done => return Ok(false),
+                NextPageToken::FetchFromStart => vec![],
+                NextPageToken::Next(v) => v,
+            };
+            debug!(run_id=%self.run_id, "Fetching new history page");
+            let fetch_res = self
+                .client
+                .get_workflow_execution_history(self.wf_id.clone(), Some(self.run_id.clone()), npt)
+                .instrument(span!(tracing::Level::TRACE, "fetch_history_in_paginator"))
+                .await?;
+
+            self.next_page_token = fetch_res.next_page_token.into();
+
+            let history_is_empty = fetch_res
+                .history
+                .as_ref()
+                .map(|h| h.events.is_empty())
+                .unwrap_or(true);
+            if history_is_empty && matches!(&self.next_page_token, NextPageToken::Next(_)) {
+                // If the fetch returned an empty history, but there *was* a next page token,
+                // immediately try to get that.
+                continue;
+            }
+            // Async doesn't love recursion so we do this instead.
+            break fetch_res.history;
+        };
+
         self.event_queue
-            .extend(resp.history.map(|h| h.events).unwrap_or_default());
+            .extend(history.map(|h| h.events).unwrap_or_default());
         if matches!(&self.next_page_token, NextPageToken::Done) {
             // If finished, we need to extend the queue with the final events, skipping any
             // which are already present.
             if let Some(last_event_id) = self.event_queue.back().map(|e| e.event_id) {
-                let final_events = std::mem::take(&mut self.final_events);
+                let final_events = mem::take(&mut self.final_events);
                 self.event_queue.extend(
                     final_events
                         .into_iter()
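Aside (illustration only, not part of the diff): `get_next_page` above consumes the page token with `mem::replace`, swapping `Done` into the field so a token is used at most once; only a fetched response can re-arm the loop by installing a fresh token. A runnable sketch of just that pattern, with illustrative names:

use std::mem;

enum NextPageToken {
    FetchFromStart,
    Next(Vec<u8>),
    Done,
}

fn next_request_token(slot: &mut NextPageToken) -> Option<Vec<u8>> {
    // Swap `Done` in: unless a later response installs a new token, the next
    // call sees `Done` and stops.
    match mem::replace(slot, NextPageToken::Done) {
        NextPageToken::Done => None,
        NextPageToken::FetchFromStart => Some(vec![]), // empty token = first page
        NextPageToken::Next(v) => Some(v),             // server continuation token
    }
}

fn main() {
    let mut tok = NextPageToken::FetchFromStart;
    assert_eq!(next_request_token(&mut tok), Some(vec![]));
    // No response installed a fresh token, so the token was consumed:
    assert_eq!(next_request_token(&mut tok), None);
    // A response carrying a non-empty next_page_token re-arms the loop:
    tok = NextPageToken::Next(b"page-2".to_vec());
    assert_eq!(next_request_token(&mut tok), Some(b"page-2".to_vec()));
}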
@@ -118,63 +303,143 @@ impl HistoryPaginator {
                 );
             }
         };
+        Ok(true)
+    }
+}
+
+#[pin_project::pin_project]
+struct StreamingHistoryPaginator {
+    inner: HistoryPaginator,
+    #[pin]
+    open_history_request: Option<BoxFuture<'static, Result<(), tonic::Status>>>,
+}
+
+impl StreamingHistoryPaginator {
+    // Kept since can be used for history downloading
+    #[cfg(test)]
+    pub fn new(inner: HistoryPaginator) -> Self {
+        Self {
+            inner,
+            open_history_request: None,
+        }
     }
 }

-impl Stream for HistoryPaginator {
+impl Stream for StreamingHistoryPaginator {
     type Item = Result<HistoryEvent, tonic::Status>;

-    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
-        if let Some(e) = self.event_queue.pop_front() {
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let mut this = self.project();
+
+        if let Some(e) = this.inner.event_queue.pop_front() {
             return Poll::Ready(Some(Ok(e)));
         }
-        let history_req = if let Some(req) = self.open_history_request.as_mut() {
-            req
-        } else {
-            let npt = match std::mem::replace(&mut self.next_page_token, NextPageToken::Done) {
-                // If there's no open request and the last page token we got was empty, we're done.
-                NextPageToken::Done => return Poll::Ready(None),
-                NextPageToken::FetchFromStart => vec![],
-                NextPageToken::Next(v) => v,
-            };
-            debug!(run_id=%self.run_id, "Fetching new history page");
-            let gw = self.client.clone();
-            let wid = self.wf_id.clone();
-            let rid = self.run_id.clone();
-            let resp_fut = async move {
-                gw.get_workflow_execution_history(wid, Some(rid), npt)
-                    .instrument(span!(tracing::Level::TRACE, "fetch_history_in_paginator"))
-                    .await
-            };
-            self.open_history_request.insert(resp_fut.boxed())
-        };
+        if this.open_history_request.is_none() {
+            // SAFETY: This is safe because the inner paginator cannot be dropped before the future,
+            // and the future won't be moved from out of this struct.
+            this.open_history_request.set(Some(unsafe {
+                transmute(HistoryPaginator::get_next_page(this.inner).boxed())
+            }));
+        }
+        let history_req = this.open_history_request.as_mut().as_pin_mut().unwrap();

-        return match Future::poll(history_req.as_mut(), cx) {
+        match Future::poll(history_req, cx) {
             Poll::Ready(resp) => {
-                self.open_history_request = None;
+                this.open_history_request.set(None);
                 match resp {
                     Err(neterr) => Poll::Ready(Some(Err(neterr))),
-                    Ok(resp) => {
-                        self.extend_queue_with_new_page(resp);
-                        Poll::Ready(self.event_queue.pop_front().map(Ok))
-                    }
+                    Ok(_) => Poll::Ready(this.inner.event_queue.pop_front().map(Ok)),
                 }
             }
             Poll::Pending => Poll::Pending,
-        };
+        }
     }
 }

 impl HistoryUpdate {
-    pub fn new(history_iterator: HistoryPaginator, previous_wft_started_id: i64) -> Self {
+    /// Sometimes it's useful to take an update out of something without needing to use an option
+    /// field. Use this to replace the field with an empty update.
+    pub fn dummy() -> Self {
         Self {
-            events: history_iterator.fuse().boxed(),
-            buffered: VecDeque::new(),
-            previous_started_event_id: previous_wft_started_id,
+            events: vec![],
+            previous_wft_started_id: -1,
+            has_last_wft: false,
         }
     }
+    pub fn is_real(&self) -> bool {
+        self.previous_wft_started_id >= 0
+    }
+    pub fn first_event_id(&self) -> Option<i64> {
+        self.events.get(0).map(|e| e.event_id)
+    }
+
+    /// Create an instance of an update directly from events. If the passed in event iterator has a
+    /// partial WFT sequence at the end, all events after the last complete WFT sequence (ending
+    /// with WFT started) are returned back to the caller, since the history update only works in
+    /// terms of complete WFT sequences.
+    pub fn from_events<I: IntoIterator<Item = HistoryEvent>>(
+        events: I,
+        previous_wft_started_id: i64,
+        has_last_wft: bool,
+    ) -> (Self, Vec<HistoryEvent>)
+    where
+        <I as IntoIterator>::IntoIter: Send + 'static,
+    {
+        let mut all_events: Vec<_> = events.into_iter().collect();
+        let mut last_end =
+            find_end_index_of_next_wft_seq(all_events.as_slice(), previous_wft_started_id);
+        if matches!(last_end, NextWFTSeqEndIndex::Incomplete(_)) {
+            return if has_last_wft {
+                (
+                    Self {
+                        events: all_events,
+                        previous_wft_started_id,
+                        has_last_wft,
+                    },
+                    vec![],
+                )
+            } else {
+                (
+                    Self {
+                        events: vec![],
+                        previous_wft_started_id,
+                        has_last_wft,
+                    },
+                    all_events,
+                )
+            };
+        }
+        while let NextWFTSeqEndIndex::Complete(next_end_ix) = last_end {
+            let next_end_eid = all_events[next_end_ix].event_id;
+            // To save skipping all events at the front of this slice, only pass the relevant
+            // portion, but that means the returned index must be adjusted, hence the addition.
+            let next_end = find_end_index_of_next_wft_seq(&all_events[next_end_ix..], next_end_eid)
+                .add(next_end_ix);
+            if matches!(next_end, NextWFTSeqEndIndex::Incomplete(_)) {
+                break;
+            }
+            last_end = next_end;
+        }
+        let remaining_events = if all_events.is_empty() {
+            vec![]
+        } else {
+            all_events.split_off(last_end.index() + 1)
+        };

-    /// Create an instance of an update directly from events - should only be used for replaying.
+        (
+            Self {
+                events: all_events,
+                previous_wft_started_id,
+                has_last_wft,
+            },
+            remaining_events,
+        )
+    }
+
+    /// Create an instance of an update directly from events. The passed in events *must* consist
+    /// of one or more complete WFT sequences. IE: The event iterator must not end in the middle
+    /// of a WFT sequence.
+    #[cfg(test)]
     pub fn new_from_events<I: IntoIterator<Item = HistoryEvent>>(
         events: I,
         previous_wft_started_id: i64,
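Aside (illustration only, not part of the diff): `from_events` keeps the longest prefix of complete WFT sequences and hands the partial tail back through `split_off`, so the paginator can retry once more pages arrive. A toy, runnable sketch of that split contract, where "ends a sequence" is faked as any id divisible by 3:

fn split_complete_prefix(events: Vec<i64>) -> (Vec<i64>, Vec<i64>) {
    // Index of the last element that closes a complete sequence, if any.
    let last_end = events.iter().rposition(|id| id % 3 == 0);
    match last_end {
        Some(ix) => {
            let mut head = events;
            let tail = head.split_off(ix + 1); // leftover partial sequence
            (head, tail)
        }
        None => (vec![], events), // nothing complete yet; hand everything back
    }
}

fn main() {
    let (update, leftover) = split_complete_prefix(vec![1, 2, 3, 4, 5, 6, 7]);
    assert_eq!(update, vec![1, 2, 3, 4, 5, 6]);
    assert_eq!(leftover, vec![7]); // retained for the next extraction
    println!("update: {update:?}, leftover: {leftover:?}");
}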
@@ -183,235 +448,417 @@ impl HistoryUpdate {
183
448
  <I as IntoIterator>::IntoIter: Send + 'static,
184
449
  {
185
450
  Self {
186
- events: stream::iter(events.into_iter().map(Ok)).boxed(),
187
- buffered: VecDeque::new(),
188
- previous_started_event_id: previous_wft_started_id,
451
+ events: events.into_iter().collect(),
452
+ previous_wft_started_id,
453
+ has_last_wft: true,
189
454
  }
190
455
  }
191
456
 
192
- /// Given a workflow task started id, return all events starting at that number (inclusive) to
193
- /// the next WFT started event (inclusive). If there is no subsequent WFT started event,
194
- /// remaining history is returned.
195
- ///
196
- /// Events are *consumed* by this process, to keep things efficient in workflow machines, and
197
- /// the function may call out to server to fetch more pages if they are known to exist and
198
- /// needed to complete the WFT sequence.
457
+ /// Given a workflow task started id, return all events starting at that number (exclusive) to
458
+ /// the next WFT started event (inclusive).
199
459
  ///
200
- /// Always buffers the WFT sequence *after* the returned one as well, if it is available.
460
+ /// Events are *consumed* by this process, to keep things efficient in workflow machines.
201
461
  ///
202
- /// Can return a tonic error in the event that fetching additional history was needed and failed
203
- pub async fn take_next_wft_sequence(
204
- &mut self,
205
- from_wft_started_id: i64,
206
- ) -> Result<Vec<HistoryEvent>, tonic::Status> {
207
- let (next_wft_events, maybe_bonus_event) = self
208
- .take_next_wft_sequence_impl(from_wft_started_id)
209
- .await?;
210
- if let Some(be) = maybe_bonus_event {
211
- self.buffered.push_back(be);
462
+ /// If we are out of WFT sequences that can be yielded by this update, it will return an empty
463
+ /// vec, indicating more pages will need to be fetched.
464
+ pub fn take_next_wft_sequence(&mut self, from_wft_started_id: i64) -> NextWFT {
465
+ // First, drop any events from the queue which are earlier than the passed-in id.
466
+ if let Some(ix_first_relevant) = self.starting_index_after_skipping(from_wft_started_id) {
467
+ self.events.drain(0..ix_first_relevant);
212
468
  }
213
-
214
- if let Some(last_event_id) = next_wft_events.last().map(|he| he.event_id) {
215
- // Always attempt to fetch the *next* WFT sequence as well, to buffer it for lookahead
216
- let (buffer_these_events, maybe_bonus_event) =
217
- self.take_next_wft_sequence_impl(last_event_id).await?;
218
- self.buffered.extend(buffer_these_events);
219
- if let Some(be) = maybe_bonus_event {
220
- self.buffered.push_back(be);
469
+ let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
470
+ match next_wft_ix {
471
+ NextWFTSeqEndIndex::Incomplete(siz) => {
472
+ if self.has_last_wft {
473
+ if siz == 0 {
474
+ NextWFT::ReplayOver
475
+ } else {
476
+ self.build_next_wft(siz)
477
+ }
478
+ } else {
479
+ if siz != 0 {
480
+ panic!(
481
+ "HistoryUpdate was created with an incomplete WFT. This is an SDK bug."
482
+ );
483
+ }
484
+ NextWFT::NeedFetch
485
+ }
221
486
  }
487
+ NextWFTSeqEndIndex::Complete(next_wft_ix) => self.build_next_wft(next_wft_ix),
222
488
  }
489
+ }
223
490
 
224
- Ok(next_wft_events)
491
+ fn build_next_wft(&mut self, drain_this_much: usize) -> NextWFT {
492
+ NextWFT::WFT(
493
+ self.events.drain(0..=drain_this_much).collect(),
494
+ self.events.is_empty() && self.has_last_wft,
495
+ )
225
496
  }
226
497
 
227
498
  /// Lets the caller peek ahead at the next WFT sequence that will be returned by
228
- /// [take_next_wft_sequence]. Will always return an empty iterator if that has not been called
229
- /// first. May also return an empty iterator or incomplete sequence if we are at the end of
230
- /// history.
231
- pub fn peek_next_wft_sequence(&self) -> impl Iterator<Item = &HistoryEvent> {
232
- self.buffered.iter()
233
- }
234
-
235
- async fn take_next_wft_sequence_impl(
236
- &mut self,
237
- from_event_id: i64,
238
- ) -> Result<(Vec<HistoryEvent>, Option<HistoryEvent>), tonic::Status> {
239
- let mut events_to_next_wft_started: Vec<HistoryEvent> = vec![];
240
-
241
- // This flag tracks if, while determining events to be returned, we have seen the next
242
- // logically significant WFT started event which follows the one that was passed in as a
243
- // parameter. If a WFT fails or times out, it is not significant. So we will stop returning
244
- // events (exclusive) as soon as we see an event following a WFT started that is *not*
245
- // failed or timed out.
246
- let mut saw_next_wft = false;
247
- let mut should_pop = |e: &HistoryEvent| {
248
- if e.event_id <= from_event_id {
249
- return true;
250
- } else if e.event_type == EventType::WorkflowTaskStarted as i32 {
251
- saw_next_wft = true;
252
- return true;
253
- }
499
+ /// [take_next_wft_sequence]. Will always return the first available WFT sequence if that has
500
+ /// not been called first. May also return an empty iterator or incomplete sequence if we are at
501
+ /// the end of history.
502
+ pub fn peek_next_wft_sequence(&self, from_wft_started_id: i64) -> &[HistoryEvent] {
503
+ let ix_first_relevant = self
504
+ .starting_index_after_skipping(from_wft_started_id)
505
+ .unwrap_or_default();
506
+ let relevant_events = &self.events[ix_first_relevant..];
507
+ if relevant_events.is_empty() {
508
+ return relevant_events;
509
+ }
510
+ let ix_end = find_end_index_of_next_wft_seq(relevant_events, from_wft_started_id).index();
511
+ &relevant_events[0..=ix_end]
512
+ }
254
513
 
255
- if saw_next_wft {
256
- // Must ignore failures and timeouts
257
- if e.event_type == EventType::WorkflowTaskFailed as i32
258
- || e.event_type == EventType::WorkflowTaskTimedOut as i32
259
- {
260
- saw_next_wft = false;
261
- return true;
262
- }
514
+ /// Returns true if this update has the next needed WFT sequence, false if events will need to
515
+ /// be fetched in order to create a complete update with the entire next WFT sequence.
516
+ pub fn can_take_next_wft_sequence(&self, from_wft_started_id: i64) -> bool {
517
+ let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
518
+ if let NextWFTSeqEndIndex::Incomplete(_) = next_wft_ix {
519
+ if !self.has_last_wft {
263
520
  return false;
264
521
  }
522
+ }
523
+ true
524
+ }
265
525
 
266
- true
267
- };
268
-
269
- // Fetch events from the buffer first, then from the network
270
- let mut event_q = stream::iter(self.buffered.drain(..).map(Ok)).chain(&mut self.events);
271
-
272
- let mut extra_e = None;
273
- let mut last_seen_id = None;
274
- while let Some(e) = event_q.next().await {
275
- let e = e?;
276
-
277
- // This little block prevents us from infinitely fetching work from the server in the
278
- // event that, for whatever reason, it keeps returning stuff we've already seen.
279
- if let Some(last_id) = last_seen_id {
280
- if e.event_id <= last_id {
281
- error!("Server returned history event IDs that went backwards!");
282
- break;
526
+ /// Returns the next WFT completed event attributes, if any, starting at (inclusive) the
527
+ /// `from_id`
528
+ pub fn peek_next_wft_completed(
529
+ &self,
530
+ from_id: i64,
531
+ ) -> Option<&WorkflowTaskCompletedEventAttributes> {
532
+ self.events
533
+ .iter()
534
+ .skip_while(|e| e.event_id < from_id)
535
+ .find_map(|e| match &e.attributes {
536
+ Some(history_event::Attributes::WorkflowTaskCompletedEventAttributes(ref a)) => {
537
+ Some(a)
283
538
  }
284
- }
285
- last_seen_id = Some(e.event_id);
286
-
287
- // It's possible to have gotten a new history update without eviction (ex: unhandled
288
- // command on completion), where we may need to skip events we already handled.
289
- if e.event_id > from_event_id {
290
- if !should_pop(&e) {
291
- extra_e = Some(e);
292
- break;
293
- }
294
- events_to_next_wft_started.push(e);
295
- }
296
- }
539
+ _ => None,
540
+ })
541
+ }
297
542
 
298
- Ok((events_to_next_wft_started, extra_e))
543
+ fn starting_index_after_skipping(&self, from_wft_started_id: i64) -> Option<usize> {
544
+ self.events
545
+ .iter()
546
+ .find_position(|e| e.event_id > from_wft_started_id)
547
+ .map(|(ix, _)| ix)
299
548
  }
300
549
  }
301
550
 
302
- impl From<HistoryInfo> for HistoryUpdate {
303
- fn from(v: HistoryInfo) -> Self {
304
- Self::new_from_events(v.events().to_vec(), v.previous_started_event_id())
551
+ #[derive(Debug, Copy, Clone)]
552
+ enum NextWFTSeqEndIndex {
553
+ /// The next WFT sequence is completely contained within the passed-in iterator
554
+ Complete(usize),
555
+ /// The next WFT sequence is not found within the passed-in iterator, and the contained
556
+ /// value is the last index of the iterator.
557
+ Incomplete(usize),
558
+ }
559
+ impl NextWFTSeqEndIndex {
560
+ fn index(self) -> usize {
561
+ match self {
562
+ NextWFTSeqEndIndex::Complete(ix) | NextWFTSeqEndIndex::Incomplete(ix) => ix,
563
+ }
564
+ }
565
+ fn add(self, val: usize) -> Self {
566
+ match self {
567
+ NextWFTSeqEndIndex::Complete(ix) => NextWFTSeqEndIndex::Complete(ix + val),
568
+ NextWFTSeqEndIndex::Incomplete(ix) => NextWFTSeqEndIndex::Incomplete(ix + val),
569
+ }
305
570
  }
306
571
  }
307
572
 
308
- pub trait TestHBExt {
309
- fn as_history_update(&self) -> HistoryUpdate;
310
- }
573
+ /// Discovers the index of the last event in next WFT sequence within the passed-in slice
574
+ fn find_end_index_of_next_wft_seq(
575
+ events: &[HistoryEvent],
576
+ from_event_id: i64,
577
+ ) -> NextWFTSeqEndIndex {
578
+ if events.is_empty() {
579
+ return NextWFTSeqEndIndex::Incomplete(0);
580
+ }
581
+ let mut last_index = 0;
582
+ let mut saw_any_non_wft_event = false;
583
+ for (ix, e) in events.iter().enumerate() {
584
+ last_index = ix;
585
+
586
+ // It's possible to have gotten a new history update without eviction (ex: unhandled
587
+ // command on completion), where we may need to skip events we already handled.
588
+ if e.event_id <= from_event_id {
589
+ continue;
590
+ }
591
+
592
+ if !matches!(
593
+ e.event_type(),
594
+ EventType::WorkflowTaskFailed
595
+ | EventType::WorkflowTaskTimedOut
596
+ | EventType::WorkflowTaskScheduled
597
+ | EventType::WorkflowTaskStarted
598
+ | EventType::WorkflowTaskCompleted
599
+ ) {
600
+ saw_any_non_wft_event = true;
601
+ }
602
+ if e.is_final_wf_execution_event() {
603
+ return NextWFTSeqEndIndex::Complete(last_index);
604
+ }
311
605
 
312
- impl TestHBExt for TestHistoryBuilder {
313
- fn as_history_update(&self) -> HistoryUpdate {
314
- self.get_full_history_info().unwrap().into()
606
+ if e.event_type() == EventType::WorkflowTaskStarted {
607
+ if let Some(next_event) = events.get(ix + 1) {
608
+ let et = next_event.event_type();
609
+ // If the next event is WFT timeout or fail, or abrupt WF execution end, that
610
+ // doesn't conclude a WFT sequence.
611
+ if matches!(
612
+ et,
613
+ EventType::WorkflowTaskFailed
614
+ | EventType::WorkflowTaskTimedOut
615
+ | EventType::WorkflowExecutionTimedOut
616
+ | EventType::WorkflowExecutionTerminated
617
+ | EventType::WorkflowExecutionCanceled
618
+ ) {
619
+ continue;
620
+ }
621
+ // If we've never seen an interesting event and the next two events are a completion
622
+ // followed immediately again by scheduled, then this is a WFT heartbeat and also
623
+ // doesn't conclude the sequence.
624
+ else if et == EventType::WorkflowTaskCompleted {
625
+ if let Some(next_next_event) = events.get(ix + 2) {
626
+ if next_next_event.event_type() == EventType::WorkflowTaskScheduled {
627
+ continue;
628
+ } else {
629
+ saw_any_non_wft_event = true;
630
+ }
631
+ }
632
+ }
633
+ }
634
+ if saw_any_non_wft_event {
635
+ return NextWFTSeqEndIndex::Complete(ix);
636
+ }
637
+ }
315
638
  }
639
+
640
+ NextWFTSeqEndIndex::Incomplete(last_index)
316
641
  }
317
642
 
318
643
  #[cfg(test)]
319
644
  pub mod tests {
320
645
  use super::*;
321
- use crate::{test_help::canned_histories, worker::client::mocks::mock_workflow_client};
646
+ use crate::{
647
+ replay::{HistoryInfo, TestHistoryBuilder},
648
+ test_help::canned_histories,
649
+ worker::client::mocks::mock_workflow_client,
650
+ };
651
+ use futures_util::TryStreamExt;
652
+ use temporal_sdk_core_protos::temporal::api::workflowservice::v1::GetWorkflowExecutionHistoryResponse;
322
653
 
323
- #[tokio::test]
324
- async fn consumes_standard_wft_sequence() {
654
+ impl From<HistoryInfo> for HistoryUpdate {
655
+ fn from(v: HistoryInfo) -> Self {
656
+ Self::new_from_events(v.events().to_vec(), v.previous_started_event_id())
657
+ }
658
+ }
659
+
660
+ pub trait TestHBExt {
661
+ fn as_history_update(&self) -> HistoryUpdate;
662
+ }
663
+
664
+ impl TestHBExt for TestHistoryBuilder {
665
+ fn as_history_update(&self) -> HistoryUpdate {
666
+ self.get_full_history_info().unwrap().into()
667
+ }
668
+ }
669
+
670
+ impl NextWFT {
671
+ fn unwrap_events(self) -> Vec<HistoryEvent> {
672
+ match self {
673
+ NextWFT::WFT(e, _) => e,
674
+ o => panic!("Must be complete WFT: {o:?}"),
675
+ }
676
+ }
677
+ }
678
+
679
+ fn next_check_peek(update: &mut HistoryUpdate, from_id: i64) -> Vec<HistoryEvent> {
680
+ let seq_peeked = update.peek_next_wft_sequence(from_id).to_vec();
681
+ let seq = update.take_next_wft_sequence(from_id).unwrap_events();
682
+ assert_eq!(seq, seq_peeked);
683
+ seq
684
+ }
685
+
+ #[test]
+ fn consumes_standard_wft_sequence() {
      let timer_hist = canned_histories::single_timer("t");
      let mut update = timer_hist.as_history_update();
-     let seq_1 = update.take_next_wft_sequence(0).await.unwrap();
+     let seq_1 = next_check_peek(&mut update, 0);
      assert_eq!(seq_1.len(), 3);
      assert_eq!(seq_1.last().unwrap().event_id, 3);
-     let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
+     let seq_2_peeked = update.peek_next_wft_sequence(0).to_vec();
+     let seq_2 = next_check_peek(&mut update, 3);
+     assert_eq!(seq_2, seq_2_peeked);
      assert_eq!(seq_2.len(), 5);
      assert_eq!(seq_2.last().unwrap().event_id, 8);
  }

- #[tokio::test]
- async fn skips_wft_failed() {
+ #[test]
+ fn skips_wft_failed() {
      let failed_hist = canned_histories::workflow_fails_with_reset_after_timer("t", "runid");
      let mut update = failed_hist.as_history_update();
-     let seq_1 = update.take_next_wft_sequence(0).await.unwrap();
+     let seq_1 = next_check_peek(&mut update, 0);
      assert_eq!(seq_1.len(), 3);
      assert_eq!(seq_1.last().unwrap().event_id, 3);
-     let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
+     let seq_2 = next_check_peek(&mut update, 3);
      assert_eq!(seq_2.len(), 8);
      assert_eq!(seq_2.last().unwrap().event_id, 11);
  }

- #[tokio::test]
- async fn skips_wft_timeout() {
+ #[test]
+ fn skips_wft_timeout() {
      let failed_hist = canned_histories::wft_timeout_repro();
      let mut update = failed_hist.as_history_update();
-     let seq_1 = update.take_next_wft_sequence(0).await.unwrap();
+     let seq_1 = next_check_peek(&mut update, 0);
      assert_eq!(seq_1.len(), 3);
      assert_eq!(seq_1.last().unwrap().event_id, 3);
-     let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
+     let seq_2 = next_check_peek(&mut update, 3);
      assert_eq!(seq_2.len(), 11);
      assert_eq!(seq_2.last().unwrap().event_id, 14);
  }

- #[tokio::test]
- async fn skips_events_before_desired_wft() {
+ #[test]
+ fn skips_events_before_desired_wft() {
      let timer_hist = canned_histories::single_timer("t");
      let mut update = timer_hist.as_history_update();
      // We haven't processed the first 3 events, but we should still only get the second sequence
-     let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
+     let seq_2 = update.take_next_wft_sequence(3).unwrap_events();
      assert_eq!(seq_2.len(), 5);
      assert_eq!(seq_2.last().unwrap().event_id, 8);
  }

- #[tokio::test]
- async fn paginator_fetches_new_pages() {
-     // Note that this test triggers the "event ids that went backwards" error, acceptably.
-     // Can be fixed by having mock not return earlier events.
-     let wft_count = 500;
-     let long_hist = canned_histories::long_sequential_timers(wft_count);
-     let initial_hist = long_hist.get_history_info(10).unwrap();
-     let prev_started = initial_hist.previous_started_event_id();
+ #[test]
+ fn history_ends_abruptly() {
+     let mut timer_hist = canned_histories::single_timer("t");
+     timer_hist.add_workflow_execution_terminated();
+     let mut update = timer_hist.as_history_update();
+     let seq_2 = update.take_next_wft_sequence(3).unwrap_events();
+     assert_eq!(seq_2.len(), 6);
+     assert_eq!(seq_2.last().unwrap().event_id, 9);
+ }
+
+ #[test]
+ fn heartbeats_skipped() {
+     let mut t = TestHistoryBuilder::default();
+     t.add_by_type(EventType::WorkflowExecutionStarted);
+     t.add_full_wf_task();
+     t.add_full_wf_task(); // wft started 6
+     t.add_by_type(EventType::TimerStarted);
+     t.add_full_wf_task(); // wft started 10
+     t.add_full_wf_task();
+     t.add_full_wf_task();
+     t.add_full_wf_task(); // wft started 19
+     t.add_by_type(EventType::TimerStarted);
+     t.add_full_wf_task(); // wft started 23
+     t.add_we_signaled("whee", vec![]);
+     t.add_full_wf_task();
+     t.add_workflow_execution_completed();
+
+     let mut update = t.as_history_update();
+     let seq = next_check_peek(&mut update, 0);
+     assert_eq!(seq.len(), 6);
+     let seq = next_check_peek(&mut update, 6);
+     assert_eq!(seq.len(), 13);
+     let seq = next_check_peek(&mut update, 19);
+     assert_eq!(seq.len(), 4);
+     let seq = next_check_peek(&mut update, 23);
+     assert_eq!(seq.len(), 4);
+     let seq = next_check_peek(&mut update, 27);
+     assert_eq!(seq.len(), 2);
+ }
+
+ #[test]
+ fn heartbeat_marker_end() {
+     let mut t = TestHistoryBuilder::default();
+     t.add_by_type(EventType::WorkflowExecutionStarted);
+     t.add_full_wf_task();
+     t.add_full_wf_task();
+     t.add_local_activity_result_marker(1, "1", "done".into());
+     t.add_workflow_execution_completed();
+
+     let mut update = t.as_history_update();
+     let seq = next_check_peek(&mut update, 3);
+     // completed, sched, started
+     assert_eq!(seq.len(), 3);
+     let seq = next_check_peek(&mut update, 6);
+     assert_eq!(seq.len(), 3);
+ }
+
+ fn paginator_setup(history: TestHistoryBuilder, chunk_size: usize) -> HistoryPaginator {
+     let full_hist = history.get_full_history_info().unwrap().into_events();
+     let initial_hist = full_hist.chunks(chunk_size).next().unwrap().to_vec();
      let mut mock_client = mock_workflow_client();

-     let mut npt = 2;
+     let mut npt = 1;
      mock_client
          .expect_get_workflow_execution_history()
          .returning(move |_, _, passed_npt| {
              assert_eq!(passed_npt, vec![npt]);
-             let history = long_hist.get_history_info(10 * npt as usize).unwrap();
+             let mut hist_chunks = full_hist.chunks(chunk_size).peekable();
+             let next_chunks = hist_chunks.nth(npt.into()).unwrap_or_default();
              npt += 1;
+             let next_page_token = if hist_chunks.peek().is_none() {
+                 vec![]
+             } else {
+                 vec![npt]
+             };
              Ok(GetWorkflowExecutionHistoryResponse {
-                 history: Some(history.into()),
+                 history: Some(History {
+                     events: next_chunks.into(),
+                 }),
                  raw_history: vec![],
-                 next_page_token: vec![npt],
+                 next_page_token,
                  archived: false,
              })
          });

-     let mut update = HistoryUpdate::new(
-         HistoryPaginator::new(
-             initial_hist.into(),
-             "wfid".to_string(),
-             "runid".to_string(),
-             vec![2], // Start at page "2"
-             Arc::new(mock_client),
-         ),
-         prev_started,
+     HistoryPaginator::new(
+         History {
+             events: initial_hist,
+         },
+         0,
+         "wfid".to_string(),
+         "runid".to_string(),
+         vec![1],
+         Arc::new(mock_client),
+     )
+ }
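`paginator_setup` serves the canned history in fixed-size chunks: chunk 0 seeds the paginator, single-byte tokens `vec![1]`, `vec![2]`, … address the following chunks, and an empty token accompanies the final chunk to end pagination. A standalone illustration of that chunking arithmetic (the numbers and the test name are hypothetical, not taken from the suite):

    #[test]
    fn chunked_paging_illustration() {
        // 25 stand-in events paged 10 at a time -> chunks of 10, 10, and 5.
        let events: Vec<i64> = (1..=25).collect();
        let pages: Vec<&[i64]> = events.chunks(10).collect();
        assert_eq!(pages.len(), 3);
        assert_eq!(pages[2].to_vec(), vec![21, 22, 23, 24, 25]);
        // The mock would serve pages[0] up front, hand out tokens vec![1] and
        // vec![2] for the rest, and return an empty token with the last page.
    }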
+
+ #[rstest::rstest]
+ #[tokio::test]
+ async fn paginator_extracts_updates(#[values(10, 11, 12, 13, 14)] chunk_size: usize) {
+     let wft_count = 100;
+     let mut paginator = paginator_setup(
+         canned_histories::long_sequential_timers(wft_count),
+         chunk_size,
      );
+     let mut update = paginator.extract_next_update().await.unwrap();

-     let seq = update.take_next_wft_sequence(0).await.unwrap();
+     let seq = update.take_next_wft_sequence(0).unwrap_events();
      assert_eq!(seq.len(), 3);

      let mut last_event_id = 3;
      let mut last_started_id = 3;
-     for _ in 1..wft_count {
-         let seq = update
-             .take_next_wft_sequence(last_started_id)
-             .await
-             .unwrap();
+     for i in 1..wft_count {
+         let seq = {
+             match update.take_next_wft_sequence(last_started_id) {
+                 NextWFT::WFT(seq, _) => seq,
+                 NextWFT::NeedFetch => {
+                     update = paginator.extract_next_update().await.unwrap();
+                     update
+                         .take_next_wft_sequence(last_started_id)
+                         .unwrap_events()
+                 }
+                 NextWFT::ReplayOver => {
+                     assert_eq!(i, wft_count - 1);
+                     break;
+                 }
+             }
+         };
          for e in &seq {
              last_event_id += 1;
              assert_eq!(e.event_id, last_event_id);
@@ -421,10 +868,124 @@ pub mod tests {
      }
  }

+ #[tokio::test]
+ async fn paginator_streams() {
+     let wft_count = 10;
+     let paginator = StreamingHistoryPaginator::new(paginator_setup(
+         canned_histories::long_sequential_timers(wft_count),
+         10,
+     ));
+     let everything: Vec<_> = paginator.try_collect().await.unwrap();
+     assert_eq!(everything.len(), (wft_count + 1) * 5);
+     everything.iter().fold(1, |event_id, e| {
+         assert_eq!(event_id, e.event_id);
+         e.event_id + 1
+     });
+ }
886
+ fn three_wfts_then_heartbeats() -> TestHistoryBuilder {
887
+ let mut t = TestHistoryBuilder::default();
888
+ // Start with two complete normal WFTs
889
+ t.add_by_type(EventType::WorkflowExecutionStarted);
890
+ t.add_full_wf_task(); // wft start - 3
891
+ t.add_by_type(EventType::TimerStarted);
892
+ t.add_full_wf_task(); // wft start - 7
893
+ t.add_by_type(EventType::TimerStarted);
894
+ t.add_full_wf_task(); // wft start - 11
895
+ for _ in 1..50 {
896
+ // Add a bunch of heartbeats with no commands, which count as one task
897
+ t.add_full_wf_task();
898
+ }
899
+ t.add_workflow_execution_completed();
900
+ t
901
+ }
902
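This builder is the source of the magic numbers 158 and 160 in the tests that follow: the command-less heartbeat chain absorbs the third WFT, so taking from event 7 runs all the way to the final heartbeat's WFT-started event. A quick tally, assuming each `add_full_wf_task` call contributes a scheduled/started/completed triple:

    // Illustration only; matches the builder above.
    let total = 1        // WorkflowExecutionStarted
        + 3 * 3 + 2      // three full WFTs interleaved with two TimerStarted events
        + 49 * 3         // forty-nine command-less heartbeat WFTs
        + 1;             // WorkflowExecutionCompleted
    assert_eq!(total, 160);
    assert_eq!(total - 2, 158); // the final heartbeat's WFT-started event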
+
+ #[tokio::test]
+ async fn needs_fetch_if_ending_in_middle_of_wft_seq() {
+     let t = three_wfts_then_heartbeats();
+     let mut ends_in_middle_of_seq = t.as_history_update().events;
+     ends_in_middle_of_seq.truncate(19);
+     // The update should contain the first two complete WFTs, ending on the seventh event,
+     // which is WFT started. The remaining events should be returned. The false flag means
+     // the creator knows there are more events, so we should return NeedFetch.
+     let (mut update, remaining) = HistoryUpdate::from_events(ends_in_middle_of_seq, 0, false);
+     assert_eq!(remaining[0].event_id, 8);
+     assert_eq!(remaining.last().unwrap().event_id, 19);
+     let seq = update.take_next_wft_sequence(0).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 3);
+     let seq = update.take_next_wft_sequence(3).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 7);
+     let next = update.take_next_wft_sequence(7);
+     assert_matches!(next, NextWFT::NeedFetch);
+ }
+
+ // Like the above, but if the history happens to be cut off at a WFT boundary (even though
+ // there may have been many heartbeats after it that we have no way of knowing about), it
+ // will count events 7-20 as one WFT, since they contain started, completed, a timer
+ // command, ..heartbeats..
+ #[tokio::test]
+ async fn needs_fetch_after_complete_seq_with_heartbeats() {
+     let t = three_wfts_then_heartbeats();
+     let mut ends_in_middle_of_seq = t.as_history_update().events;
+     ends_in_middle_of_seq.truncate(20);
+     let (mut update, remaining) = HistoryUpdate::from_events(ends_in_middle_of_seq, 0, false);
+     assert!(remaining.is_empty());
+     let seq = update.take_next_wft_sequence(0).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 3);
+     let seq = update.take_next_wft_sequence(3).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 7);
+     let seq = update.take_next_wft_sequence(7).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 20);
+     let next = update.take_next_wft_sequence(20);
+     assert_matches!(next, NextWFT::NeedFetch);
+ }
+
+ #[rstest::rstest]
+ #[tokio::test]
+ async fn paginator_works_with_wft_over_multiple_pages(
+     #[values(10, 11, 12, 13, 14)] chunk_size: usize,
+ ) {
+     let t = three_wfts_then_heartbeats();
+     let mut paginator = paginator_setup(t, chunk_size);
+     let mut update = paginator.extract_next_update().await.unwrap();
+     let mut last_id = 0;
+     loop {
+         let seq = update.take_next_wft_sequence(last_id);
+         match seq {
+             NextWFT::WFT(seq, _) => {
+                 last_id = seq.last().unwrap().event_id;
+             }
+             NextWFT::NeedFetch => {
+                 update = paginator.extract_next_update().await.unwrap();
+             }
+             NextWFT::ReplayOver => break,
+         }
+     }
+     assert_eq!(last_id, 160);
+ }
+
+ #[tokio::test]
+ async fn task_just_before_heartbeat_chain_is_taken() {
+     let t = three_wfts_then_heartbeats();
+     let mut update = t.as_history_update();
+     let seq = update.take_next_wft_sequence(0).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 3);
+     let seq = update.take_next_wft_sequence(3).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 7);
+     let seq = update.take_next_wft_sequence(7).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 158);
+     let seq = update.take_next_wft_sequence(158).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 160);
+     assert_eq!(
+         seq.last().unwrap().event_type(),
+         EventType::WorkflowExecutionCompleted
+     );
+ }
+
  #[tokio::test]
  async fn handles_cache_misses() {
      let timer_hist = canned_histories::single_timer("t");
      let partial_task = timer_hist.get_one_wft(2).unwrap();
+     let prev_started_wft_id = partial_task.previous_started_event_id();
      let mut history_from_get: GetWorkflowExecutionHistoryResponse =
          timer_hist.get_history_info(2).unwrap().into();
      // Chop off the last event, which is WFT started, which server doesn't return in get
@@ -435,24 +996,116 @@ pub mod tests {
          .expect_get_workflow_execution_history()
          .returning(move |_, _, _| Ok(history_from_get.clone()));

-     let mut update = HistoryUpdate::new(
-         HistoryPaginator::new(
-             partial_task.into(),
-             "wfid".to_string(),
-             "runid".to_string(),
-             // A cache miss means we'll try to fetch from start
-             NextPageToken::FetchFromStart,
-             Arc::new(mock_client),
-         ),
-         1,
+     let mut paginator = HistoryPaginator::new(
+         partial_task.into(),
+         prev_started_wft_id,
+         "wfid".to_string(),
+         "runid".to_string(),
+         // A cache miss means we'll try to fetch from start
+         NextPageToken::FetchFromStart,
+         Arc::new(mock_client),
      );
+     let mut update = paginator.extract_next_update().await.unwrap();
      // We expect if we try to take the first task sequence that the first event is the first
      // event in the sequence.
-     let seq = update.take_next_wft_sequence(0).await.unwrap();
+     let seq = update.take_next_wft_sequence(0).unwrap_events();
      assert_eq!(seq[0].event_id, 1);
-     let seq = update.take_next_wft_sequence(3).await.unwrap();
+     let seq = update.take_next_wft_sequence(3).unwrap_events();
      // Verify anything extra (which should only ever be WFT started) was re-appended to the
      // end of the event iteration after fetching the old history.
      assert_eq!(seq.last().unwrap().event_id, 8);
  }
+
+ #[test]
+ fn la_marker_chunking() {
+     let mut t = TestHistoryBuilder::default();
+     t.add_by_type(EventType::WorkflowExecutionStarted);
+     t.add_full_wf_task();
+     t.add_we_signaled("whatever", vec![]);
+     t.add_full_wf_task(); // started - 7
+     t.add_local_activity_result_marker(1, "hi", Default::default());
+     let act_s = t.add_activity_task_scheduled("1");
+     let act_st = t.add_activity_task_started(act_s);
+     t.add_activity_task_completed(act_s, act_st, Default::default());
+     t.add_workflow_task_scheduled_and_started();
+     t.add_workflow_task_timed_out();
+     t.add_workflow_task_scheduled_and_started();
+     t.add_workflow_task_timed_out();
+     t.add_workflow_task_scheduled_and_started();
+
+     let mut update = t.as_history_update();
+     let seq = next_check_peek(&mut update, 0);
+     assert_eq!(seq.len(), 3);
+     let seq = next_check_peek(&mut update, 3);
+     assert_eq!(seq.len(), 4);
+     let seq = next_check_peek(&mut update, 7);
+     assert_eq!(seq.len(), 13);
+ }
+
+ #[tokio::test]
+ async fn handles_blank_fetch_response() {
+     let timer_hist = canned_histories::single_timer("t");
+     let partial_task = timer_hist.get_one_wft(2).unwrap();
+     let prev_started_wft_id = partial_task.previous_started_event_id();
+     let mut mock_client = mock_workflow_client();
+     mock_client
+         .expect_get_workflow_execution_history()
+         .returning(move |_, _, _| Ok(Default::default()));
+
+     let mut paginator = HistoryPaginator::new(
+         partial_task.into(),
+         prev_started_wft_id,
+         "wfid".to_string(),
+         "runid".to_string(),
+         // A cache miss means we'll try to fetch from start
+         NextPageToken::FetchFromStart,
+         Arc::new(mock_client),
+     );
+     let err = paginator.extract_next_update().await.unwrap_err();
+     assert_matches!(err.code(), tonic::Code::DataLoss);
+ }
+
+ #[tokio::test]
+ async fn handles_empty_page_with_next_token() {
+     let timer_hist = canned_histories::single_timer("t");
+     let partial_task = timer_hist.get_one_wft(2).unwrap();
+     let prev_started_wft_id = partial_task.previous_started_event_id();
+     let full_resp: GetWorkflowExecutionHistoryResponse =
+         timer_hist.get_full_history_info().unwrap().into();
+     let mut mock_client = mock_workflow_client();
+     mock_client
+         .expect_get_workflow_execution_history()
+         .returning(move |_, _, _| {
+             Ok(GetWorkflowExecutionHistoryResponse {
+                 history: Some(History { events: vec![] }),
+                 raw_history: vec![],
+                 next_page_token: vec![2],
+                 archived: false,
+             })
+         })
+         .times(1);
+     mock_client
+         .expect_get_workflow_execution_history()
+         .returning(move |_, _, _| Ok(full_resp.clone()))
+         .times(1);
+
+     let mut paginator = HistoryPaginator::new(
+         partial_task.into(),
+         prev_started_wft_id,
+         "wfid".to_string(),
+         "runid".to_string(),
+         // A cache miss means we'll try to fetch from start
+         NextPageToken::FetchFromStart,
+         Arc::new(mock_client),
+     );
+     let mut update = paginator.extract_next_update().await.unwrap();
+     let seq = update.take_next_wft_sequence(0).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 3);
+     let seq = update.take_next_wft_sequence(3).unwrap_events();
+     assert_eq!(seq.last().unwrap().event_id, 8);
+     assert_matches!(update.take_next_wft_sequence(8), NextWFT::ReplayOver);
+ }
+
+ // TODO: Test we don't re-feed pointless updates if fetching returns <= events we already
+ // processed
  }