temporalio 0.0.2 → 0.1.1

Files changed (320)
  1. checksums.yaml +4 -4
  2. data/README.md +25 -23
  3. data/bridge/Cargo.lock +185 -76
  4. data/bridge/Cargo.toml +6 -4
  5. data/bridge/sdk-core/README.md +19 -6
  6. data/bridge/sdk-core/client/src/lib.rs +215 -39
  7. data/bridge/sdk-core/client/src/metrics.rs +17 -8
  8. data/bridge/sdk-core/client/src/raw.rs +4 -4
  9. data/bridge/sdk-core/client/src/retry.rs +32 -20
  10. data/bridge/sdk-core/core/Cargo.toml +22 -9
  11. data/bridge/sdk-core/core/src/abstractions.rs +203 -14
  12. data/bridge/sdk-core/core/src/core_tests/activity_tasks.rs +76 -41
  13. data/bridge/sdk-core/core/src/core_tests/determinism.rs +165 -2
  14. data/bridge/sdk-core/core/src/core_tests/local_activities.rs +204 -83
  15. data/bridge/sdk-core/core/src/core_tests/queries.rs +3 -4
  16. data/bridge/sdk-core/core/src/core_tests/workers.rs +1 -3
  17. data/bridge/sdk-core/core/src/core_tests/workflow_tasks.rs +397 -54
  18. data/bridge/sdk-core/core/src/ephemeral_server/mod.rs +106 -12
  19. data/bridge/sdk-core/core/src/internal_flags.rs +136 -0
  20. data/bridge/sdk-core/core/src/lib.rs +16 -9
  21. data/bridge/sdk-core/core/src/telemetry/log_export.rs +1 -1
  22. data/bridge/sdk-core/core/src/telemetry/metrics.rs +69 -35
  23. data/bridge/sdk-core/core/src/telemetry/mod.rs +29 -13
  24. data/bridge/sdk-core/core/src/telemetry/prometheus_server.rs +17 -12
  25. data/bridge/sdk-core/core/src/test_help/mod.rs +62 -12
  26. data/bridge/sdk-core/core/src/worker/activities/activity_heartbeat_manager.rs +112 -156
  27. data/bridge/sdk-core/core/src/worker/activities/activity_task_poller_stream.rs +89 -0
  28. data/bridge/sdk-core/core/src/worker/activities/local_activities.rs +352 -122
  29. data/bridge/sdk-core/core/src/worker/activities.rs +233 -157
  30. data/bridge/sdk-core/core/src/worker/client/mocks.rs +22 -2
  31. data/bridge/sdk-core/core/src/worker/client.rs +18 -2
  32. data/bridge/sdk-core/core/src/worker/mod.rs +165 -58
  33. data/bridge/sdk-core/core/src/worker/workflow/bridge.rs +1 -3
  34. data/bridge/sdk-core/core/src/worker/workflow/driven_workflow.rs +3 -5
  35. data/bridge/sdk-core/core/src/worker/workflow/history_update.rs +856 -277
  36. data/bridge/sdk-core/core/src/worker/workflow/machines/activity_state_machine.rs +100 -43
  37. data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_external_state_machine.rs +7 -7
  38. data/bridge/sdk-core/core/src/worker/workflow/machines/cancel_workflow_state_machine.rs +5 -4
  39. data/bridge/sdk-core/core/src/worker/workflow/machines/child_workflow_state_machine.rs +87 -27
  40. data/bridge/sdk-core/core/src/worker/workflow/machines/complete_workflow_state_machine.rs +5 -4
  41. data/bridge/sdk-core/core/src/worker/workflow/machines/continue_as_new_workflow_state_machine.rs +5 -4
  42. data/bridge/sdk-core/core/src/worker/workflow/machines/fail_workflow_state_machine.rs +5 -4
  43. data/bridge/sdk-core/core/src/worker/workflow/machines/local_activity_state_machine.rs +137 -62
  44. data/bridge/sdk-core/core/src/worker/workflow/machines/mod.rs +25 -17
  45. data/bridge/sdk-core/core/src/worker/workflow/machines/modify_workflow_properties_state_machine.rs +7 -6
  46. data/bridge/sdk-core/core/src/worker/workflow/machines/patch_state_machine.rs +103 -152
  47. data/bridge/sdk-core/core/src/worker/workflow/machines/signal_external_state_machine.rs +7 -7
  48. data/bridge/sdk-core/core/src/worker/workflow/machines/timer_state_machine.rs +9 -9
  49. data/bridge/sdk-core/core/src/worker/workflow/machines/transition_coverage.rs +2 -2
  50. data/bridge/sdk-core/core/src/worker/workflow/machines/upsert_search_attributes_state_machine.rs +14 -7
  51. data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines/local_acts.rs +5 -16
  52. data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_machines.rs +201 -121
  53. data/bridge/sdk-core/core/src/worker/workflow/machines/workflow_task_state_machine.rs +11 -14
  54. data/bridge/sdk-core/core/src/worker/workflow/managed_run/managed_wf_test.rs +30 -15
  55. data/bridge/sdk-core/core/src/worker/workflow/managed_run.rs +1026 -376
  56. data/bridge/sdk-core/core/src/worker/workflow/mod.rs +460 -384
  57. data/bridge/sdk-core/core/src/worker/workflow/run_cache.rs +40 -57
  58. data/bridge/sdk-core/core/src/worker/workflow/wft_extraction.rs +125 -0
  59. data/bridge/sdk-core/core/src/worker/workflow/wft_poller.rs +1 -4
  60. data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/saved_wf_inputs.rs +117 -0
  61. data/bridge/sdk-core/core/src/worker/workflow/workflow_stream/tonic_status_serde.rs +24 -0
  62. data/bridge/sdk-core/core/src/worker/workflow/workflow_stream.rs +448 -718
  63. data/bridge/sdk-core/core-api/Cargo.toml +2 -1
  64. data/bridge/sdk-core/core-api/src/errors.rs +1 -34
  65. data/bridge/sdk-core/core-api/src/lib.rs +6 -2
  66. data/bridge/sdk-core/core-api/src/telemetry.rs +0 -6
  67. data/bridge/sdk-core/core-api/src/worker.rs +14 -1
  68. data/bridge/sdk-core/fsm/rustfsm_procmacro/src/lib.rs +18 -15
  69. data/bridge/sdk-core/fsm/rustfsm_trait/src/lib.rs +8 -3
  70. data/bridge/sdk-core/histories/evict_while_la_running_no_interference-16_history.bin +0 -0
  71. data/bridge/sdk-core/protos/api_upstream/temporal/api/command/v1/message.proto +5 -17
  72. data/bridge/sdk-core/protos/api_upstream/temporal/api/common/v1/message.proto +11 -0
  73. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/command_type.proto +1 -6
  74. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/event_type.proto +6 -6
  75. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +5 -0
  76. data/bridge/sdk-core/protos/api_upstream/temporal/api/enums/v1/update.proto +22 -6
  77. data/bridge/sdk-core/protos/api_upstream/temporal/api/history/v1/message.proto +48 -19
  78. data/bridge/sdk-core/protos/api_upstream/temporal/api/namespace/v1/message.proto +2 -0
  79. data/bridge/sdk-core/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto +3 -0
  80. data/bridge/sdk-core/protos/api_upstream/temporal/api/{enums/v1/interaction_type.proto → protocol/v1/message.proto} +29 -11
  81. data/bridge/sdk-core/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto +63 -0
  82. data/bridge/sdk-core/protos/api_upstream/temporal/api/update/v1/message.proto +111 -0
  83. data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +59 -28
  84. data/bridge/sdk-core/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +2 -2
  85. data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_result/activity_result.proto +1 -0
  86. data/bridge/sdk-core/protos/local/temporal/sdk/core/activity_task/activity_task.proto +1 -0
  87. data/bridge/sdk-core/protos/local/temporal/sdk/core/child_workflow/child_workflow.proto +1 -0
  88. data/bridge/sdk-core/protos/local/temporal/sdk/core/common/common.proto +1 -0
  89. data/bridge/sdk-core/protos/local/temporal/sdk/core/core_interface.proto +1 -0
  90. data/bridge/sdk-core/protos/local/temporal/sdk/core/external_data/external_data.proto +1 -0
  91. data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_activation/workflow_activation.proto +7 -0
  92. data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_commands/workflow_commands.proto +1 -0
  93. data/bridge/sdk-core/protos/local/temporal/sdk/core/workflow_completion/workflow_completion.proto +6 -0
  94. data/bridge/sdk-core/sdk/Cargo.toml +3 -2
  95. data/bridge/sdk-core/sdk/src/lib.rs +87 -20
  96. data/bridge/sdk-core/sdk/src/workflow_future.rs +9 -8
  97. data/bridge/sdk-core/sdk-core-protos/Cargo.toml +5 -2
  98. data/bridge/sdk-core/sdk-core-protos/build.rs +36 -1
  99. data/bridge/sdk-core/sdk-core-protos/src/history_builder.rs +100 -87
  100. data/bridge/sdk-core/sdk-core-protos/src/history_info.rs +5 -1
  101. data/bridge/sdk-core/sdk-core-protos/src/lib.rs +175 -57
  102. data/bridge/sdk-core/sdk-core-protos/src/task_token.rs +12 -2
  103. data/bridge/sdk-core/test-utils/Cargo.toml +3 -1
  104. data/bridge/sdk-core/test-utils/src/canned_histories.rs +106 -296
  105. data/bridge/sdk-core/test-utils/src/histfetch.rs +1 -1
  106. data/bridge/sdk-core/test-utils/src/lib.rs +82 -23
  107. data/bridge/sdk-core/test-utils/src/wf_input_saver.rs +50 -0
  108. data/bridge/sdk-core/test-utils/src/workflows.rs +29 -0
  109. data/bridge/sdk-core/tests/fuzzy_workflow.rs +130 -0
  110. data/bridge/sdk-core/tests/{load_tests.rs → heavy_tests.rs} +125 -51
  111. data/bridge/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +25 -3
  112. data/bridge/sdk-core/tests/integ_tests/heartbeat_tests.rs +5 -3
  113. data/bridge/sdk-core/tests/integ_tests/metrics_tests.rs +218 -16
  114. data/bridge/sdk-core/tests/integ_tests/polling_tests.rs +4 -47
  115. data/bridge/sdk-core/tests/integ_tests/queries_tests.rs +5 -128
  116. data/bridge/sdk-core/tests/integ_tests/visibility_tests.rs +83 -25
  117. data/bridge/sdk-core/tests/integ_tests/workflow_tests/activities.rs +93 -69
  118. data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs +1 -0
  119. data/bridge/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs +6 -13
  120. data/bridge/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs +1 -0
  121. data/bridge/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs +6 -2
  122. data/bridge/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +3 -10
  123. data/bridge/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +72 -191
  124. data/bridge/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs +1 -0
  125. data/bridge/sdk-core/tests/integ_tests/workflow_tests/patches.rs +7 -28
  126. data/bridge/sdk-core/tests/integ_tests/workflow_tests/replay.rs +12 -7
  127. data/bridge/sdk-core/tests/integ_tests/workflow_tests/resets.rs +1 -0
  128. data/bridge/sdk-core/tests/integ_tests/workflow_tests/signals.rs +18 -14
  129. data/bridge/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs +6 -20
  130. data/bridge/sdk-core/tests/integ_tests/workflow_tests/timers.rs +10 -21
  131. data/bridge/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +6 -4
  132. data/bridge/sdk-core/tests/integ_tests/workflow_tests.rs +10 -11
  133. data/bridge/sdk-core/tests/main.rs +3 -13
  134. data/bridge/sdk-core/tests/runner.rs +75 -36
  135. data/bridge/sdk-core/tests/wf_input_replay.rs +32 -0
  136. data/bridge/src/connection.rs +41 -25
  137. data/bridge/src/lib.rs +269 -14
  138. data/bridge/src/runtime.rs +1 -1
  139. data/bridge/src/test_server.rs +153 -0
  140. data/bridge/src/worker.rs +89 -16
  141. data/lib/gen/temporal/api/command/v1/message_pb.rb +4 -18
  142. data/lib/gen/temporal/api/common/v1/message_pb.rb +4 -0
  143. data/lib/gen/temporal/api/enums/v1/command_type_pb.rb +1 -3
  144. data/lib/gen/temporal/api/enums/v1/event_type_pb.rb +3 -3
  145. data/lib/gen/temporal/api/enums/v1/failed_cause_pb.rb +2 -0
  146. data/lib/gen/temporal/api/enums/v1/update_pb.rb +6 -4
  147. data/lib/gen/temporal/api/history/v1/message_pb.rb +27 -19
  148. data/lib/gen/temporal/api/namespace/v1/message_pb.rb +1 -0
  149. data/lib/gen/temporal/api/operatorservice/v1/request_response_pb.rb +3 -0
  150. data/lib/gen/temporal/api/protocol/v1/message_pb.rb +30 -0
  151. data/lib/gen/temporal/api/sdk/v1/task_complete_metadata_pb.rb +23 -0
  152. data/lib/gen/temporal/api/testservice/v1/request_response_pb.rb +49 -0
  153. data/lib/gen/temporal/api/testservice/v1/service_pb.rb +21 -0
  154. data/lib/gen/temporal/api/update/v1/message_pb.rb +72 -0
  155. data/lib/gen/temporal/api/workflowservice/v1/request_response_pb.rb +26 -16
  156. data/lib/gen/temporal/sdk/core/activity_result/activity_result_pb.rb +13 -9
  157. data/lib/gen/temporal/sdk/core/activity_task/activity_task_pb.rb +10 -6
  158. data/lib/gen/temporal/sdk/core/child_workflow/child_workflow_pb.rb +13 -9
  159. data/lib/gen/temporal/sdk/core/common/common_pb.rb +7 -3
  160. data/lib/gen/temporal/sdk/core/core_interface_pb.rb +9 -3
  161. data/lib/gen/temporal/sdk/core/external_data/external_data_pb.rb +7 -3
  162. data/lib/gen/temporal/sdk/core/workflow_activation/workflow_activation_pb.rb +27 -21
  163. data/lib/gen/temporal/sdk/core/workflow_commands/workflow_commands_pb.rb +28 -24
  164. data/lib/gen/temporal/sdk/core/workflow_completion/workflow_completion_pb.rb +12 -5
  165. data/lib/temporalio/activity/context.rb +13 -8
  166. data/lib/temporalio/activity/info.rb +1 -1
  167. data/lib/temporalio/bridge/connect_options.rb +15 -0
  168. data/lib/temporalio/bridge/retry_config.rb +24 -0
  169. data/lib/temporalio/bridge/tls_options.rb +19 -0
  170. data/lib/temporalio/bridge.rb +1 -1
  171. data/lib/temporalio/client/implementation.rb +8 -8
  172. data/lib/temporalio/connection/retry_config.rb +44 -0
  173. data/lib/temporalio/connection/service.rb +20 -0
  174. data/lib/temporalio/connection/test_service.rb +92 -0
  175. data/lib/temporalio/connection/tls_options.rb +51 -0
  176. data/lib/temporalio/connection/workflow_service.rb +731 -0
  177. data/lib/temporalio/connection.rb +55 -720
  178. data/lib/temporalio/interceptor/activity_inbound.rb +22 -0
  179. data/lib/temporalio/interceptor/activity_outbound.rb +24 -0
  180. data/lib/temporalio/interceptor/chain.rb +5 -5
  181. data/lib/temporalio/interceptor/client.rb +8 -4
  182. data/lib/temporalio/interceptor.rb +22 -0
  183. data/lib/temporalio/retry_policy.rb +13 -3
  184. data/lib/temporalio/testing/time_skipping_handle.rb +32 -0
  185. data/lib/temporalio/testing/time_skipping_interceptor.rb +23 -0
  186. data/lib/temporalio/testing/workflow_environment.rb +112 -0
  187. data/lib/temporalio/testing.rb +175 -0
  188. data/lib/temporalio/version.rb +1 -1
  189. data/lib/temporalio/worker/activity_runner.rb +26 -4
  190. data/lib/temporalio/worker/activity_worker.rb +44 -18
  191. data/lib/temporalio/worker/sync_worker.rb +47 -11
  192. data/lib/temporalio/worker.rb +27 -21
  193. data/lib/temporalio/workflow/async.rb +46 -0
  194. data/lib/temporalio/workflow/future.rb +138 -0
  195. data/lib/temporalio/workflow/info.rb +76 -0
  196. data/lib/thermite_patch.rb +10 -0
  197. data/sig/async.rbs +17 -0
  198. data/sig/protobuf.rbs +16 -0
  199. data/sig/protos/dependencies/gogoproto/gogo.rbs +914 -0
  200. data/sig/protos/google/protobuf/any.rbs +157 -0
  201. data/sig/protos/google/protobuf/descriptor.rbs +2825 -0
  202. data/sig/protos/google/protobuf/duration.rbs +114 -0
  203. data/sig/protos/google/protobuf/empty.rbs +36 -0
  204. data/sig/protos/google/protobuf/timestamp.rbs +145 -0
  205. data/sig/protos/google/protobuf/wrappers.rbs +358 -0
  206. data/sig/protos/temporal/api/batch/v1/message.rbs +300 -0
  207. data/sig/protos/temporal/api/command/v1/message.rbs +1399 -0
  208. data/sig/protos/temporal/api/common/v1/message.rbs +528 -0
  209. data/sig/protos/temporal/api/enums/v1/batch_operation.rbs +79 -0
  210. data/sig/protos/temporal/api/enums/v1/command_type.rbs +68 -0
  211. data/sig/protos/temporal/api/enums/v1/common.rbs +118 -0
  212. data/sig/protos/temporal/api/enums/v1/event_type.rbs +264 -0
  213. data/sig/protos/temporal/api/enums/v1/failed_cause.rbs +277 -0
  214. data/sig/protos/temporal/api/enums/v1/namespace.rbs +108 -0
  215. data/sig/protos/temporal/api/enums/v1/query.rbs +81 -0
  216. data/sig/protos/temporal/api/enums/v1/reset.rbs +44 -0
  217. data/sig/protos/temporal/api/enums/v1/schedule.rbs +72 -0
  218. data/sig/protos/temporal/api/enums/v1/task_queue.rbs +92 -0
  219. data/sig/protos/temporal/api/enums/v1/update.rbs +64 -0
  220. data/sig/protos/temporal/api/enums/v1/workflow.rbs +371 -0
  221. data/sig/protos/temporal/api/errordetails/v1/message.rbs +551 -0
  222. data/sig/protos/temporal/api/failure/v1/message.rbs +581 -0
  223. data/sig/protos/temporal/api/filter/v1/message.rbs +171 -0
  224. data/sig/protos/temporal/api/history/v1/message.rbs +4609 -0
  225. data/sig/protos/temporal/api/namespace/v1/message.rbs +410 -0
  226. data/sig/protos/temporal/api/operatorservice/v1/request_response.rbs +643 -0
  227. data/sig/protos/temporal/api/operatorservice/v1/service.rbs +17 -0
  228. data/sig/protos/temporal/api/protocol/v1/message.rbs +84 -0
  229. data/sig/protos/temporal/api/query/v1/message.rbs +182 -0
  230. data/sig/protos/temporal/api/replication/v1/message.rbs +148 -0
  231. data/sig/protos/temporal/api/schedule/v1/message.rbs +1488 -0
  232. data/sig/protos/temporal/api/sdk/v1/task_complete_metadata.rbs +110 -0
  233. data/sig/protos/temporal/api/taskqueue/v1/message.rbs +486 -0
  234. data/sig/protos/temporal/api/testservice/v1/request_response.rbs +249 -0
  235. data/sig/protos/temporal/api/testservice/v1/service.rbs +15 -0
  236. data/sig/protos/temporal/api/update/v1/message.rbs +489 -0
  237. data/sig/protos/temporal/api/version/v1/message.rbs +184 -0
  238. data/sig/protos/temporal/api/workflow/v1/message.rbs +824 -0
  239. data/sig/protos/temporal/api/workflowservice/v1/request_response.rbs +7250 -0
  240. data/sig/protos/temporal/api/workflowservice/v1/service.rbs +22 -0
  241. data/sig/protos/temporal/sdk/core/activity_result/activity_result.rbs +380 -0
  242. data/sig/protos/temporal/sdk/core/activity_task/activity_task.rbs +386 -0
  243. data/sig/protos/temporal/sdk/core/child_workflow/child_workflow.rbs +323 -0
  244. data/sig/protos/temporal/sdk/core/common/common.rbs +62 -0
  245. data/sig/protos/temporal/sdk/core/core_interface.rbs +101 -0
  246. data/sig/protos/temporal/sdk/core/external_data/external_data.rbs +119 -0
  247. data/sig/protos/temporal/sdk/core/workflow_activation/workflow_activation.rbs +1473 -0
  248. data/sig/protos/temporal/sdk/core/workflow_commands/workflow_commands.rbs +1784 -0
  249. data/sig/protos/temporal/sdk/core/workflow_completion/workflow_completion.rbs +180 -0
  250. data/sig/ruby.rbs +12 -0
  251. data/sig/temporalio/activity/context.rbs +29 -0
  252. data/sig/temporalio/activity/info.rbs +43 -0
  253. data/sig/temporalio/activity.rbs +19 -0
  254. data/sig/temporalio/bridge/connect_options.rbs +19 -0
  255. data/sig/temporalio/bridge/error.rbs +8 -0
  256. data/sig/temporalio/bridge/retry_config.rbs +21 -0
  257. data/sig/temporalio/bridge/tls_options.rbs +17 -0
  258. data/sig/temporalio/bridge.rbs +71 -0
  259. data/sig/temporalio/client/implementation.rbs +38 -0
  260. data/sig/temporalio/client/workflow_handle.rbs +41 -0
  261. data/sig/temporalio/client.rbs +35 -0
  262. data/sig/temporalio/connection/retry_config.rbs +37 -0
  263. data/sig/temporalio/connection/service.rbs +14 -0
  264. data/sig/temporalio/connection/test_service.rbs +13 -0
  265. data/sig/temporalio/connection/tls_options.rbs +43 -0
  266. data/sig/temporalio/connection/workflow_service.rbs +48 -0
  267. data/sig/temporalio/connection.rbs +30 -0
  268. data/sig/temporalio/data_converter.rbs +35 -0
  269. data/sig/temporalio/error/failure.rbs +121 -0
  270. data/sig/temporalio/error/workflow_failure.rbs +9 -0
  271. data/sig/temporalio/errors.rbs +36 -0
  272. data/sig/temporalio/failure_converter/base.rbs +12 -0
  273. data/sig/temporalio/failure_converter/basic.rbs +86 -0
  274. data/sig/temporalio/failure_converter.rbs +5 -0
  275. data/sig/temporalio/interceptor/activity_inbound.rbs +21 -0
  276. data/sig/temporalio/interceptor/activity_outbound.rbs +10 -0
  277. data/sig/temporalio/interceptor/chain.rbs +24 -0
  278. data/sig/temporalio/interceptor/client.rbs +148 -0
  279. data/sig/temporalio/interceptor.rbs +6 -0
  280. data/sig/temporalio/payload_codec/base.rbs +12 -0
  281. data/sig/temporalio/payload_converter/base.rbs +12 -0
  282. data/sig/temporalio/payload_converter/bytes.rbs +9 -0
  283. data/sig/temporalio/payload_converter/composite.rbs +19 -0
  284. data/sig/temporalio/payload_converter/encoding_base.rbs +14 -0
  285. data/sig/temporalio/payload_converter/json.rbs +9 -0
  286. data/sig/temporalio/payload_converter/nil.rbs +9 -0
  287. data/sig/temporalio/payload_converter.rbs +5 -0
  288. data/sig/temporalio/retry_policy.rbs +25 -0
  289. data/sig/temporalio/retry_state.rbs +20 -0
  290. data/sig/temporalio/runtime.rbs +12 -0
  291. data/sig/temporalio/testing/time_skipping_handle.rbs +15 -0
  292. data/sig/temporalio/testing/time_skipping_interceptor.rbs +13 -0
  293. data/sig/temporalio/testing/workflow_environment.rbs +22 -0
  294. data/sig/temporalio/testing.rbs +35 -0
  295. data/sig/temporalio/timeout_type.rbs +15 -0
  296. data/sig/temporalio/version.rbs +3 -0
  297. data/sig/temporalio/worker/activity_runner.rbs +35 -0
  298. data/sig/temporalio/worker/activity_worker.rbs +44 -0
  299. data/sig/temporalio/worker/reactor.rbs +22 -0
  300. data/sig/temporalio/worker/runner.rbs +21 -0
  301. data/sig/temporalio/worker/sync_worker.rbs +23 -0
  302. data/sig/temporalio/worker/thread_pool_executor.rbs +23 -0
  303. data/sig/temporalio/worker.rbs +46 -0
  304. data/sig/temporalio/workflow/async.rbs +9 -0
  305. data/sig/temporalio/workflow/execution_info.rbs +55 -0
  306. data/sig/temporalio/workflow/execution_status.rbs +21 -0
  307. data/sig/temporalio/workflow/future.rbs +40 -0
  308. data/sig/temporalio/workflow/id_reuse_policy.rbs +15 -0
  309. data/sig/temporalio/workflow/info.rbs +55 -0
  310. data/sig/temporalio/workflow/query_reject_condition.rbs +14 -0
  311. data/sig/temporalio.rbs +2 -0
  312. data/sig/thermite_patch.rbs +15 -0
  313. data/temporalio.gemspec +6 -4
  314. metadata +183 -17
  315. data/bridge/sdk-core/Cargo.lock +0 -2606
  316. data/bridge/sdk-core/protos/api_upstream/temporal/api/interaction/v1/message.proto +0 -87
  317. data/lib/bridge.so +0 -0
  318. data/lib/gen/temporal/api/enums/v1/interaction_type_pb.rb +0 -25
  319. data/lib/gen/temporal/api/interaction/v1/message_pb.rb +0 -49
  320. data/lib/gen/temporal/sdk/core/bridge/bridge_pb.rb +0 -222
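
Most of this diff is vendored Rust from temporal-sdk-core plus generated protobuf and RBS files. The largest single rewrite is entry 35, data/bridge/sdk-core/core/src/worker/workflow/history_update.rs (+856 -277), excerpted below: `HistoryUpdate` changes from a lazily polled event stream into a plain buffer of complete workflow task (WFT) sequences, with server pagination split out into `HistoryPaginator`.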
@@ -1,57 +1,95 @@
 use crate::{
-    replay::{HistoryInfo, TestHistoryBuilder},
-    worker::client::WorkerClient,
+    protosext::ValidPollWFTQResponse,
+    worker::{
+        client::WorkerClient,
+        workflow::{CacheMissFetchReq, PermittedWFT, PreparedWFT},
+    },
 };
-use futures::{future::BoxFuture, stream, stream::BoxStream, FutureExt, Stream, StreamExt};
+use futures::{future::BoxFuture, FutureExt, Stream};
+use itertools::Itertools;
 use std::{
     collections::VecDeque,
     fmt::Debug,
     future::Future,
+    mem,
+    mem::transmute,
     pin::Pin,
     sync::Arc,
     task::{Context, Poll},
 };
 use temporal_sdk_core_protos::temporal::api::{
     enums::v1::EventType,
-    history::v1::{History, HistoryEvent},
-    workflowservice::v1::GetWorkflowExecutionHistoryResponse,
+    history::v1::{history_event, History, HistoryEvent, WorkflowTaskCompletedEventAttributes},
 };
 use tracing::Instrument;
 
-/// A slimmed down version of a poll workflow task response which includes just the info needed
-/// by [WorkflowManager]. History events are expected to be consumed from it and applied to the
-/// state machines.
+lazy_static::lazy_static! {
+    static ref EMPTY_FETCH_ERR: tonic::Status
+        = tonic::Status::data_loss("Fetched empty history page");
+    static ref EMPTY_TASK_ERR: tonic::Status
+        = tonic::Status::data_loss("Received an empty workflow task with no queries or history");
+}
+
+/// Represents one or more complete WFT sequences. History events are expected to be consumed from
+/// it and applied to the state machines via [HistoryUpdate::take_next_wft_sequence]
+#[cfg_attr(
+    feature = "save_wf_inputs",
+    derive(serde::Serialize, serde::Deserialize)
+)]
 pub struct HistoryUpdate {
-    events: BoxStream<'static, Result<HistoryEvent, tonic::Status>>,
-    /// It is useful to be able to look ahead up to one workflow task beyond the currently
-    /// requested one. The initial (possibly only) motivation for this being to be able to
-    /// pre-emptively notify lang about patch markers so that calls to `changed` do not need to
-    /// be async.
-    buffered: VecDeque<HistoryEvent>,
-    pub previous_started_event_id: i64,
+    events: Vec<HistoryEvent>,
+    /// The event ID of the last started WFT, as according to the WFT which this update was
+    /// extracted from. Hence, while processing multiple logical WFTs during replay which were part
+    /// of one large history fetched from server, multiple updates may have the same value here.
+    pub previous_wft_started_id: i64,
+    /// True if this update contains the final WFT in history, and no more attempts to extract
+    /// additional updates should be made.
+    has_last_wft: bool,
 }
 
 impl Debug for HistoryUpdate {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(
-            f,
-            "HistoryUpdate(previous_started_event_id: {})",
-            self.previous_started_event_id
-        )
+        if self.is_real() {
+            write!(
+                f,
+                "HistoryUpdate(previous_started_event_id: {}, length: {}, first_event_id: {:?})",
+                self.previous_wft_started_id,
+                self.events.len(),
+                self.events.first().map(|e| e.event_id)
+            )
+        } else {
+            write!(f, "DummyHistoryUpdate")
+        }
     }
 }
 
+#[derive(Debug)]
+pub enum NextWFT {
+    ReplayOver,
+    WFT(Vec<HistoryEvent>, bool),
+    NeedFetch,
+}
+
+#[derive(derive_more::DebugCustom)]
+#[debug(fmt = "HistoryPaginator(run_id: {run_id})")]
+#[cfg_attr(
+    feature = "save_wf_inputs",
+    derive(serde::Serialize, serde::Deserialize),
+    serde(default = "HistoryPaginator::fake_deserialized")
+)]
 pub struct HistoryPaginator {
-    // Potentially this could actually be a ref w/ lifetime here
+    pub(crate) wf_id: String,
+    pub(crate) run_id: String,
+    pub(crate) previous_wft_started_id: i64,
+
+    #[cfg_attr(feature = "save_wf_inputs", serde(skip))]
     client: Arc<dyn WorkerClient>,
+    #[cfg_attr(feature = "save_wf_inputs", serde(skip))]
     event_queue: VecDeque<HistoryEvent>,
-    wf_id: String,
-    run_id: String,
+    #[cfg_attr(feature = "save_wf_inputs", serde(skip))]
    next_page_token: NextPageToken,
-    open_history_request:
-        Option<BoxFuture<'static, Result<GetWorkflowExecutionHistoryResponse, tonic::Status>>>,
     /// These are events that should be returned once pagination has finished. This only happens
     /// during cache misses, where we got a partial task but need to fetch history from the start.
-    /// We use this to apply any
+    #[cfg_attr(feature = "save_wf_inputs", serde(skip))]
     final_events: Vec<HistoryEvent>,
 }
 
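With this first hunk applied, the shape of the new design is already visible: `HistoryUpdate` owns a `Vec<HistoryEvent>` rather than a boxed stream, and the `NextWFT` enum lets a now-synchronous consumer distinguish "replay is over" from "more history must be fetched". A minimal sketch (not from the gem; `HistoryEvent` and the driver function are stand-ins) of how a caller might handle the three-way result:

```rust
// Sketch only: stand-in types for the crate-internal HistoryEvent/NextWFT.
#[derive(Debug)]
struct HistoryEvent {
    event_id: i64,
}

#[derive(Debug)]
enum NextWFT {
    ReplayOver,
    // (events of one complete WFT sequence, whether it is the last in history)
    WFT(Vec<HistoryEvent>, bool),
    NeedFetch,
}

fn drive(mut take_next: impl FnMut(i64) -> NextWFT) {
    let mut last_started_id = 0;
    loop {
        match take_next(last_started_id) {
            // Replay is finished; switch to handling fresh tasks.
            NextWFT::ReplayOver => break,
            // Apply one complete WFT sequence to the state machines.
            NextWFT::WFT(events, is_last) => {
                if let Some(last) = events.last() {
                    last_started_id = last.event_id;
                }
                println!("applied {} events (last: {is_last})", events.len());
                if is_last {
                    break;
                }
            }
            // The buffer ran dry: the HistoryPaginator must fetch more pages.
            NextWFT::NeedFetch => break,
        }
    }
}

fn main() {
    let mut served = false;
    drive(|_| {
        if !served {
            served = true;
            NextWFT::WFT(vec![HistoryEvent { event_id: 3 }], false)
        } else {
            NextWFT::NeedFetch
        }
    });
}
```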
@@ -77,8 +115,68 @@ impl From<Vec<u8>> for NextPageToken {
 }
 
 impl HistoryPaginator {
-    pub(crate) fn new(
+    /// Use a new poll response to create a new [WFTPaginator], returning it and the
+    /// [PreparedWFT] extracted from it that can be fed into workflow state.
+    pub(super) async fn from_poll(
+        wft: ValidPollWFTQResponse,
+        client: Arc<dyn WorkerClient>,
+    ) -> Result<(Self, PreparedWFT), tonic::Status> {
+        let empty_hist = wft.history.events.is_empty();
+        let npt = if empty_hist {
+            NextPageToken::FetchFromStart
+        } else {
+            wft.next_page_token.into()
+        };
+        let mut paginator = HistoryPaginator::new(
+            wft.history,
+            wft.previous_started_event_id,
+            wft.workflow_execution.workflow_id.clone(),
+            wft.workflow_execution.run_id.clone(),
+            npt,
+            client,
+        );
+        if empty_hist && wft.legacy_query.is_none() && wft.query_requests.is_empty() {
+            return Err(EMPTY_TASK_ERR.clone());
+        }
+        let update = if empty_hist {
+            HistoryUpdate::from_events([], wft.previous_started_event_id, true).0
+        } else {
+            paginator.extract_next_update().await?
+        };
+        let prepared = PreparedWFT {
+            task_token: wft.task_token,
+            attempt: wft.attempt,
+            execution: wft.workflow_execution,
+            workflow_type: wft.workflow_type,
+            legacy_query: wft.legacy_query,
+            query_requests: wft.query_requests,
+            update,
+        };
+        Ok((paginator, prepared))
+    }
+
+    pub(super) async fn from_fetchreq(
+        mut req: CacheMissFetchReq,
+        client: Arc<dyn WorkerClient>,
+    ) -> Result<PermittedWFT, tonic::Status> {
+        let mut paginator = Self {
+            wf_id: req.original_wft.work.execution.workflow_id.clone(),
+            run_id: req.original_wft.work.execution.run_id.clone(),
+            previous_wft_started_id: req.original_wft.work.update.previous_wft_started_id,
+            client,
+            event_queue: Default::default(),
+            next_page_token: NextPageToken::FetchFromStart,
+            final_events: vec![],
+        };
+        let first_update = paginator.extract_next_update().await?;
+        req.original_wft.work.update = first_update;
+        req.original_wft.paginator = paginator;
+        Ok(req.original_wft)
+    }
+
+    fn new(
         initial_history: History,
+        previous_wft_started_id: i64,
         wf_id: String,
         run_id: String,
         next_page_token: impl Into<NextPageToken>,
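`from_poll` treats an empty history in the poll response as a sticky-cache miss (pagination restarts with `NextPageToken::FetchFromStart`), and rejects a task that is empty and carries no queries with `EMPTY_TASK_ERR`. A small sketch of that token selection, under the assumption (suggested by the old code's comment about empty page tokens) that an empty raw token converts to `Done`:

```rust
// Sketch with stand-in types; mirrors the `if empty_hist { FetchFromStart }`
// branch of `from_poll`, plus the assumed empty-token => Done conversion.
#[derive(Debug, PartialEq)]
enum NextPageToken {
    FetchFromStart,
    Next(Vec<u8>),
    Done,
}

fn token_for_poll(history_is_empty: bool, raw_token: Vec<u8>) -> NextPageToken {
    if history_is_empty {
        // Sticky cache miss: history must be re-fetched from the start.
        NextPageToken::FetchFromStart
    } else if raw_token.is_empty() {
        NextPageToken::Done
    } else {
        NextPageToken::Next(raw_token)
    }
}

fn main() {
    assert_eq!(token_for_poll(true, vec![1, 2]), NextPageToken::FetchFromStart);
    assert_eq!(token_for_poll(false, vec![]), NextPageToken::Done);
    assert_eq!(token_for_poll(false, vec![7]), NextPageToken::Next(vec![7]));
}
```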
@@ -97,20 +195,107 @@ impl HistoryPaginator {
             wf_id,
             run_id,
             next_page_token,
-            open_history_request: None,
             final_events,
+            previous_wft_started_id,
+        }
+    }
+
+    #[cfg(feature = "save_wf_inputs")]
+    pub(super) fn fake_deserialized() -> HistoryPaginator {
+        use crate::worker::client::mocks::mock_manual_workflow_client;
+        HistoryPaginator {
+            client: Arc::new(mock_manual_workflow_client()),
+            event_queue: Default::default(),
+            wf_id: "".to_string(),
+            run_id: "".to_string(),
+            next_page_token: NextPageToken::FetchFromStart,
+            final_events: vec![],
+            previous_wft_started_id: -2,
+        }
+    }
+
+    /// Return at least the next two WFT sequences (as determined by the passed-in ID) as a
+    /// [HistoryUpdate]. Two sequences supports the required peek-ahead during replay without
+    /// unnecessary back-and-forth.
+    ///
+    /// If there are already enough events buffered in memory, they will all be returned. Including
+    /// possibly (likely, during replay) more than just the next two WFTs.
+    ///
+    /// If there are insufficient events to constitute two WFTs, then we will fetch pages until
+    /// we have two, or until we are at the end of history.
+    pub(crate) async fn extract_next_update(&mut self) -> Result<HistoryUpdate, tonic::Status> {
+        loop {
+            self.get_next_page().await?;
+            let current_events = mem::take(&mut self.event_queue);
+            if current_events.is_empty() {
+                // If next page fetching happened, and we still ended up with no events, something
+                // is wrong. We're expecting there to be more events to be able to extract this
+                // update, but server isn't giving us any. We have no choice except to give up and
+                // evict.
+                error!(
+                    "We expected to be able to fetch more events but server says there are none"
+                );
+                return Err(EMPTY_FETCH_ERR.clone());
+            }
+            let first_event_id = current_events.front().unwrap().event_id;
+            // If there are some events at the end of the fetched events which represent only a
+            // portion of a complete WFT, retain them to be used in the next extraction.
+            let no_more = matches!(self.next_page_token, NextPageToken::Done);
+            let (update, extra) =
+                HistoryUpdate::from_events(current_events, self.previous_wft_started_id, no_more);
+            let extra_eid_same = extra
+                .first()
+                .map(|e| e.event_id == first_event_id)
+                .unwrap_or_default();
+            self.event_queue = extra.into();
+            if !no_more && extra_eid_same {
+                // There was not a meaningful WFT in the whole page. We must fetch more
+                continue;
+            }
+            return Ok(update);
         }
     }
 
-    fn extend_queue_with_new_page(&mut self, resp: GetWorkflowExecutionHistoryResponse) {
-        self.next_page_token = resp.next_page_token.into();
+    /// Fetches the next page and adds it to the internal queue. Returns true if a fetch was
+    /// performed, false if there is no next page.
+    async fn get_next_page(&mut self) -> Result<bool, tonic::Status> {
+        let history = loop {
+            let npt = match mem::replace(&mut self.next_page_token, NextPageToken::Done) {
+                // If there's no open request and the last page token we got was empty, we're done.
+                NextPageToken::Done => return Ok(false),
+                NextPageToken::FetchFromStart => vec![],
+                NextPageToken::Next(v) => v,
+            };
+            debug!(run_id=%self.run_id, "Fetching new history page");
+            let fetch_res = self
+                .client
+                .get_workflow_execution_history(self.wf_id.clone(), Some(self.run_id.clone()), npt)
+                .instrument(span!(tracing::Level::TRACE, "fetch_history_in_paginator"))
+                .await?;
+
+            self.next_page_token = fetch_res.next_page_token.into();
+
+            let history_is_empty = fetch_res
+                .history
+                .as_ref()
+                .map(|h| h.events.is_empty())
+                .unwrap_or(true);
+            if history_is_empty && matches!(&self.next_page_token, NextPageToken::Next(_)) {
+                // If the fetch returned an empty history, but there *was* a next page token,
+                // immediately try to get that.
+                continue;
+            }
+            // Async doesn't love recursion so we do this instead.
+            break fetch_res.history;
+        };
+
         self.event_queue
-            .extend(resp.history.map(|h| h.events).unwrap_or_default());
+            .extend(history.map(|h| h.events).unwrap_or_default());
         if matches!(&self.next_page_token, NextPageToken::Done) {
             // If finished, we need to extend the queue with the final events, skipping any
             // which are already present.
             if let Some(last_event_id) = self.event_queue.back().map(|e| e.event_id) {
-                let final_events = std::mem::take(&mut self.final_events);
+                let final_events = mem::take(&mut self.final_events);
                 self.event_queue.extend(
                     final_events
                        .into_iter()
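`extract_next_update` loops page fetches until the buffered events yield at least one complete WFT sequence: it splits the fetched events with `HistoryUpdate::from_events`, and if the leftover "extra" events begin at the same event ID the page began with, nothing was extracted and it fetches again. A tiny sketch of just that stopping condition (hypothetical helper, not in the diff):

```rust
// Hypothetical helper mirroring the `extra_eid_same` check above: a fetched
// page produced a usable update only if the split consumed at least one event.
fn must_fetch_more(
    no_more_pages: bool,
    first_event_id_of_page: i64,
    first_extra_event_id: Option<i64>,
) -> bool {
    let extra_eid_same = first_extra_event_id == Some(first_event_id_of_page);
    // Same condition as `if !no_more && extra_eid_same { continue; }`.
    !no_more_pages && extra_eid_same
}

fn main() {
    assert!(must_fetch_more(false, 10, Some(10))); // whole page was one partial WFT
    assert!(!must_fetch_more(false, 10, Some(14))); // got a complete sequence
    assert!(!must_fetch_more(true, 10, Some(10))); // no pages left: stop the loop
}
```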
@@ -118,63 +303,143 @@ impl HistoryPaginator {
                 );
             }
         };
+        Ok(true)
     }
 }
 
-impl Stream for HistoryPaginator {
+#[pin_project::pin_project]
+struct StreamingHistoryPaginator {
+    inner: HistoryPaginator,
+    #[pin]
+    open_history_request: Option<BoxFuture<'static, Result<(), tonic::Status>>>,
+}
+
+impl StreamingHistoryPaginator {
+    // Kept since can be used for history downloading
+    #[cfg(test)]
+    pub fn new(inner: HistoryPaginator) -> Self {
+        Self {
+            inner,
+            open_history_request: None,
+        }
+    }
+}
+
+impl Stream for StreamingHistoryPaginator {
     type Item = Result<HistoryEvent, tonic::Status>;
 
-    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
-        if let Some(e) = self.event_queue.pop_front() {
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let mut this = self.project();
+
+        if let Some(e) = this.inner.event_queue.pop_front() {
             return Poll::Ready(Some(Ok(e)));
         }
-        let history_req = if let Some(req) = self.open_history_request.as_mut() {
-            req
-        } else {
-            let npt = match std::mem::replace(&mut self.next_page_token, NextPageToken::Done) {
-                // If there's no open request and the last page token we got was empty, we're done.
-                NextPageToken::Done => return Poll::Ready(None),
-                NextPageToken::FetchFromStart => vec![],
-                NextPageToken::Next(v) => v,
-            };
-            debug!(run_id=%self.run_id, "Fetching new history page");
-            let gw = self.client.clone();
-            let wid = self.wf_id.clone();
-            let rid = self.run_id.clone();
-            let resp_fut = async move {
-                gw.get_workflow_execution_history(wid, Some(rid), npt)
-                    .instrument(span!(tracing::Level::TRACE, "fetch_history_in_paginator"))
-                    .await
-            };
-            self.open_history_request.insert(resp_fut.boxed())
-        };
+        if this.open_history_request.is_none() {
+            // SAFETY: This is safe because the inner paginator cannot be dropped before the future,
+            // and the future won't be moved from out of this struct.
+            this.open_history_request.set(Some(unsafe {
+                transmute(HistoryPaginator::get_next_page(this.inner).boxed())
+            }));
+        }
+        let history_req = this.open_history_request.as_mut().as_pin_mut().unwrap();
 
-        return match Future::poll(history_req.as_mut(), cx) {
+        match Future::poll(history_req, cx) {
             Poll::Ready(resp) => {
-                self.open_history_request = None;
+                this.open_history_request.set(None);
                 match resp {
                     Err(neterr) => Poll::Ready(Some(Err(neterr))),
-                    Ok(resp) => {
-                        self.extend_queue_with_new_page(resp);
-                        Poll::Ready(self.event_queue.pop_front().map(Ok))
-                    }
+                    Ok(_) => Poll::Ready(this.inner.event_queue.pop_front().map(Ok)),
                 }
             }
             Poll::Pending => Poll::Pending,
-        };
+        }
     }
 }
 
 impl HistoryUpdate {
-    pub fn new(history_iterator: HistoryPaginator, previous_wft_started_id: i64) -> Self {
+    /// Sometimes it's useful to take an update out of something without needing to use an option
+    /// field. Use this to replace the field with an empty update.
+    pub fn dummy() -> Self {
         Self {
-            events: history_iterator.fuse().boxed(),
-            buffered: VecDeque::new(),
-            previous_started_event_id: previous_wft_started_id,
+            events: vec![],
+            previous_wft_started_id: -1,
+            has_last_wft: false,
+        }
+    }
+    pub fn is_real(&self) -> bool {
+        self.previous_wft_started_id >= 0
+    }
+    pub fn first_event_id(&self) -> Option<i64> {
+        self.events.get(0).map(|e| e.event_id)
+    }
+
+    /// Create an instance of an update directly from events. If the passed in event iterator has a
+    /// partial WFT sequence at the end, all events after the last complete WFT sequence (ending
+    /// with WFT started) are returned back to the caller, since the history update only works in
+    /// terms of complete WFT sequences.
+    pub fn from_events<I: IntoIterator<Item = HistoryEvent>>(
+        events: I,
+        previous_wft_started_id: i64,
+        has_last_wft: bool,
+    ) -> (Self, Vec<HistoryEvent>)
+    where
+        <I as IntoIterator>::IntoIter: Send + 'static,
+    {
+        let mut all_events: Vec<_> = events.into_iter().collect();
+        let mut last_end =
+            find_end_index_of_next_wft_seq(all_events.as_slice(), previous_wft_started_id);
+        if matches!(last_end, NextWFTSeqEndIndex::Incomplete(_)) {
+            return if has_last_wft {
+                (
+                    Self {
+                        events: all_events,
+                        previous_wft_started_id,
+                        has_last_wft,
+                    },
+                    vec![],
+                )
+            } else {
+                (
+                    Self {
+                        events: vec![],
+                        previous_wft_started_id,
+                        has_last_wft,
+                    },
+                    all_events,
+                )
+            };
+        }
+        while let NextWFTSeqEndIndex::Complete(next_end_ix) = last_end {
+            let next_end_eid = all_events[next_end_ix].event_id;
+            // To save skipping all events at the front of this slice, only pass the relevant
+            // portion, but that means the returned index must be adjusted, hence the addition.
+            let next_end = find_end_index_of_next_wft_seq(&all_events[next_end_ix..], next_end_eid)
+                .add(next_end_ix);
+            if matches!(next_end, NextWFTSeqEndIndex::Incomplete(_)) {
+                break;
+            }
+            last_end = next_end;
         }
+        let remaining_events = if all_events.is_empty() {
+            vec![]
+        } else {
+            all_events.split_off(last_end.index() + 1)
+        };
+
+        (
+            Self {
+                events: all_events,
+                previous_wft_started_id,
+                has_last_wft,
+            },
+            remaining_events,
+        )
     }
 
-    /// Create an instance of an update directly from events - should only be used for replaying.
+    /// Create an instance of an update directly from events. The passed in events *must* consist
+    /// of one or more complete WFT sequences. IE: The event iterator must not end in the middle
+    /// of a WFT sequence.
+    #[cfg(test)]
     pub fn new_from_events<I: IntoIterator<Item = HistoryEvent>>(
         events: I,
         previous_wft_started_id: i64,
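`from_events` walks complete WFT sequences from the front of the buffer and hands any partial tail back to the caller for the next extraction. A simplified sketch of that split, reducing sequence detection to a plain predicate instead of the full `find_end_index_of_next_wft_seq` scan:

```rust
// Simplified sketch of the from_events split: everything up to the last
// complete sequence boundary stays in the update, the partial tail is returned.
fn split_complete_sequences<T>(
    mut events: Vec<T>,
    ends_a_sequence: impl Fn(&T) -> bool,
) -> (Vec<T>, Vec<T>) {
    // Index of the last event that closes a complete WFT sequence, if any.
    let last_boundary = events.iter().rposition(|e| ends_a_sequence(e));
    match last_boundary {
        Some(ix) => {
            // The partial sequence after the final boundary goes back to the caller.
            let tail = events.split_off(ix + 1);
            (events, tail)
        }
        // No complete sequence at all: the whole buffer is returned as "extra".
        None => (vec![], events),
    }
}

fn main() {
    // Pretend event 3 is a WorkflowTaskStarted that closes a sequence.
    let (update, extra) = split_complete_sequences(vec![1, 2, 3, 4, 5], |e| *e == 3);
    assert_eq!(update, vec![1, 2, 3]);
    assert_eq!(extra, vec![4, 5]);
}
```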
@@ -183,309 +448,417 @@ impl HistoryUpdate {
         <I as IntoIterator>::IntoIter: Send + 'static,
     {
         Self {
-            events: stream::iter(events.into_iter().map(Ok)).boxed(),
-            buffered: VecDeque::new(),
-            previous_started_event_id: previous_wft_started_id,
+            events: events.into_iter().collect(),
+            previous_wft_started_id,
+            has_last_wft: true,
         }
     }
 
-    /// Given a workflow task started id, return all events starting at that number (inclusive) to
-    /// the next WFT started event (inclusive). If there is no subsequent WFT started event,
-    /// remaining history is returned.
-    ///
-    /// Events are *consumed* by this process, to keep things efficient in workflow machines, and
-    /// the function may call out to server to fetch more pages if they are known to exist and
-    /// needed to complete the WFT sequence.
+    /// Given a workflow task started id, return all events starting at that number (exclusive) to
+    /// the next WFT started event (inclusive).
     ///
-    /// Always buffers the WFT sequence *after* the returned one as well, if it is available.
+    /// Events are *consumed* by this process, to keep things efficient in workflow machines.
     ///
-    /// Can return a tonic error in the event that fetching additional history was needed and failed
-    pub async fn take_next_wft_sequence(
-        &mut self,
-        from_wft_started_id: i64,
-    ) -> Result<Vec<HistoryEvent>, tonic::Status> {
-        let (next_wft_events, maybe_bonus_events) = self
-            .take_next_wft_sequence_impl(from_wft_started_id)
-            .await?;
-        if !maybe_bonus_events.is_empty() {
-            self.buffered.extend(maybe_bonus_events);
+    /// If we are out of WFT sequences that can be yielded by this update, it will return an empty
+    /// vec, indicating more pages will need to be fetched.
+    pub fn take_next_wft_sequence(&mut self, from_wft_started_id: i64) -> NextWFT {
+        // First, drop any events from the queue which are earlier than the passed-in id.
+        if let Some(ix_first_relevant) = self.starting_index_after_skipping(from_wft_started_id) {
+            self.events.drain(0..ix_first_relevant);
        }
-
-        if let Some(last_event_id) = next_wft_events.last().map(|he| he.event_id) {
-            // Always attempt to fetch the *next* WFT sequence as well, to buffer it for lookahead
-            let (buffer_these_events, maybe_bonus_events) =
-                self.take_next_wft_sequence_impl(last_event_id).await?;
-            self.buffered.extend(buffer_these_events);
-            if !maybe_bonus_events.is_empty() {
-                self.buffered.extend(maybe_bonus_events);
+        let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
+        match next_wft_ix {
+            NextWFTSeqEndIndex::Incomplete(siz) => {
+                if self.has_last_wft {
+                    if siz == 0 {
+                        NextWFT::ReplayOver
+                    } else {
+                        self.build_next_wft(siz)
+                    }
+                } else {
+                    if siz != 0 {
+                        panic!(
+                            "HistoryUpdate was created with an incomplete WFT. This is an SDK bug."
+                        );
+                    }
+                    NextWFT::NeedFetch
+                }
             }
+            NextWFTSeqEndIndex::Complete(next_wft_ix) => self.build_next_wft(next_wft_ix),
         }
+    }
 
-        Ok(next_wft_events)
+    fn build_next_wft(&mut self, drain_this_much: usize) -> NextWFT {
+        NextWFT::WFT(
+            self.events.drain(0..=drain_this_much).collect(),
+            self.events.is_empty() && self.has_last_wft,
+        )
     }
 
     /// Lets the caller peek ahead at the next WFT sequence that will be returned by
-    /// [take_next_wft_sequence]. Will always return an empty iterator if that has not been called
-    /// first. May also return an empty iterator or incomplete sequence if we are at the end of
-    /// history.
-    pub fn peek_next_wft_sequence(&self) -> impl Iterator<Item = &HistoryEvent> {
-        self.buffered.iter()
-    }
-
-    /// Retrieve the next WFT sequence, first from buffered events and then from the real stream.
-    /// Returns (events up to the next logical wft sequence, extra events that were taken but
-    /// should be re-appended to the end of the buffer).
-    async fn take_next_wft_sequence_impl(
-        &mut self,
-        from_event_id: i64,
-    ) -> Result<(Vec<HistoryEvent>, Vec<HistoryEvent>), tonic::Status> {
-        let mut events_to_next_wft_started: Vec<HistoryEvent> = vec![];
-
-        // This flag tracks if, while determining events to be returned, we have seen the next
-        // logically significant WFT started event which follows the one that was passed in as a
-        // parameter. If a WFT fails, times out, or is devoid of commands (ie: a heartbeat) it is
-        // not significant. So we will stop returning events (exclusive) as soon as we see an event
-        // following a WFT started that is *not* failed, timed out, or completed with a command.
-        let mut next_wft_state = NextWftState::NotSeen;
-        let mut should_pop = |e: &HistoryEvent| {
-            if e.event_id <= from_event_id {
-                return true;
-            } else if e.event_type() == EventType::WorkflowTaskStarted {
-                next_wft_state = NextWftState::Seen;
-                return true;
+    /// [take_next_wft_sequence]. Will always return the first available WFT sequence if that has
+    /// not been called first. May also return an empty iterator or incomplete sequence if we are at
+    /// the end of history.
+    pub fn peek_next_wft_sequence(&self, from_wft_started_id: i64) -> &[HistoryEvent] {
+        let ix_first_relevant = self
+            .starting_index_after_skipping(from_wft_started_id)
+            .unwrap_or_default();
+        let relevant_events = &self.events[ix_first_relevant..];
+        if relevant_events.is_empty() {
+            return relevant_events;
+        }
+        let ix_end = find_end_index_of_next_wft_seq(relevant_events, from_wft_started_id).index();
+        &relevant_events[0..=ix_end]
+    }
+
+    /// Returns true if this update has the next needed WFT sequence, false if events will need to
+    /// be fetched in order to create a complete update with the entire next WFT sequence.
+    pub fn can_take_next_wft_sequence(&self, from_wft_started_id: i64) -> bool {
+        let next_wft_ix = find_end_index_of_next_wft_seq(&self.events, from_wft_started_id);
+        if let NextWFTSeqEndIndex::Incomplete(_) = next_wft_ix {
+            if !self.has_last_wft {
+                return false;
             }
+        }
+        true
+    }
 
-            match next_wft_state {
-                NextWftState::Seen => {
-                    // Must ignore failures and timeouts
-                    if e.event_type() == EventType::WorkflowTaskFailed
-                        || e.event_type() == EventType::WorkflowTaskTimedOut
-                    {
-                        next_wft_state = NextWftState::NotSeen;
-                        return true;
-                    } else if e.event_type() == EventType::WorkflowTaskCompleted {
-                        next_wft_state = NextWftState::SeenCompleted;
-                        return true;
-                    }
-                    false
-                }
-                NextWftState::SeenCompleted => {
-                    // If we've seen the WFT be completed, and this event is another scheduled, then
-                    // this was an empty heartbeat we should ignore.
-                    if e.event_type() == EventType::WorkflowTaskScheduled {
-                        next_wft_state = NextWftState::NotSeen;
-                        return true;
-                    }
-                    // Otherwise, we're done here
-                    false
+    /// Returns the next WFT completed event attributes, if any, starting at (inclusive) the
+    /// `from_id`
+    pub fn peek_next_wft_completed(
+        &self,
+        from_id: i64,
+    ) -> Option<&WorkflowTaskCompletedEventAttributes> {
+        self.events
+            .iter()
+            .skip_while(|e| e.event_id < from_id)
+            .find_map(|e| match &e.attributes {
+                Some(history_event::Attributes::WorkflowTaskCompletedEventAttributes(ref a)) => {
+                    Some(a)
                 }
-                NextWftState::NotSeen => true,
-            }
-        };
+                _ => None,
+            })
+    }
 
-        // Fetch events from the buffer first, then from the network
-        let mut event_q = stream::iter(self.buffered.drain(..).map(Ok)).chain(&mut self.events);
+    fn starting_index_after_skipping(&self, from_wft_started_id: i64) -> Option<usize> {
+        self.events
+            .iter()
+            .find_position(|e| e.event_id > from_wft_started_id)
+            .map(|(ix, _)| ix)
+    }
+}
 
-        let mut extra_e = vec![];
-        let mut last_seen_id = None;
-        while let Some(e) = event_q.next().await {
-            let e = e?;
+#[derive(Debug, Copy, Clone)]
+enum NextWFTSeqEndIndex {
+    /// The next WFT sequence is completely contained within the passed-in iterator
+    Complete(usize),
+    /// The next WFT sequence is not found within the passed-in iterator, and the contained
+    /// value is the last index of the iterator.
+    Incomplete(usize),
+}
+impl NextWFTSeqEndIndex {
+    fn index(self) -> usize {
+        match self {
+            NextWFTSeqEndIndex::Complete(ix) | NextWFTSeqEndIndex::Incomplete(ix) => ix,
+        }
+    }
+    fn add(self, val: usize) -> Self {
+        match self {
+            NextWFTSeqEndIndex::Complete(ix) => NextWFTSeqEndIndex::Complete(ix + val),
+            NextWFTSeqEndIndex::Incomplete(ix) => NextWFTSeqEndIndex::Incomplete(ix + val),
+        }
+    }
+}
+
+/// Discovers the index of the last event in next WFT sequence within the passed-in slice
+fn find_end_index_of_next_wft_seq(
+    events: &[HistoryEvent],
+    from_event_id: i64,
+) -> NextWFTSeqEndIndex {
+    if events.is_empty() {
+        return NextWFTSeqEndIndex::Incomplete(0);
+    }
+    let mut last_index = 0;
+    let mut saw_any_non_wft_event = false;
+    for (ix, e) in events.iter().enumerate() {
+        last_index = ix;
+
+        // It's possible to have gotten a new history update without eviction (ex: unhandled
+        // command on completion), where we may need to skip events we already handled.
+        if e.event_id <= from_event_id {
+            continue;
+        }
+
+        if !matches!(
+            e.event_type(),
+            EventType::WorkflowTaskFailed
+                | EventType::WorkflowTaskTimedOut
+                | EventType::WorkflowTaskScheduled
+                | EventType::WorkflowTaskStarted
+                | EventType::WorkflowTaskCompleted
+        ) {
+            saw_any_non_wft_event = true;
+        }
+        if e.is_final_wf_execution_event() {
+            return NextWFTSeqEndIndex::Complete(last_index);
+        }
 
-            // This little block prevents us from infinitely fetching work from the server in the
-            // event that, for whatever reason, it keeps returning stuff we've already seen.
-            if let Some(last_id) = last_seen_id {
-                if e.event_id <= last_id {
-                    error!("Server returned history event IDs that went backwards!");
-                    break;
+        if e.event_type() == EventType::WorkflowTaskStarted {
+            if let Some(next_event) = events.get(ix + 1) {
+                let et = next_event.event_type();
+                // If the next event is WFT timeout or fail, or abrupt WF execution end, that
+                // doesn't conclude a WFT sequence.
+                if matches!(
+                    et,
+                    EventType::WorkflowTaskFailed
+                        | EventType::WorkflowTaskTimedOut
+                        | EventType::WorkflowExecutionTimedOut
+                        | EventType::WorkflowExecutionTerminated
+                        | EventType::WorkflowExecutionCanceled
+                ) {
+                    continue;
                 }
-            }
-            last_seen_id = Some(e.event_id);
-
-            // It's possible to have gotten a new history update without eviction (ex: unhandled
-            // command on completion), where we may need to skip events we already handled.
-            if e.event_id > from_event_id {
-                if !should_pop(&e) {
-                    if next_wft_state == NextWftState::SeenCompleted {
-                        // We have seen the wft completed event, but decided to exit. We don't
-                        // want to return that event as part of this sequence, so include it for
-                        // re-buffering along with the event we're currently on.
-                        extra_e.push(
-                            events_to_next_wft_started
-                                .pop()
-                                .expect("There is an element here by definition"),
-                        );
+                // If we've never seen an interesting event and the next two events are a completion
+                // followed immediately again by scheduled, then this is a WFT heartbeat and also
+                // doesn't conclude the sequence.
+                else if et == EventType::WorkflowTaskCompleted {
+                    if let Some(next_next_event) = events.get(ix + 2) {
+                        if next_next_event.event_type() == EventType::WorkflowTaskScheduled {
+                            continue;
+                        } else {
+                            saw_any_non_wft_event = true;
+                        }
                     }
-                    extra_e.push(e);
-                    break;
                 }
-                events_to_next_wft_started.push(e);
+            }
+            if saw_any_non_wft_event {
+                return NextWFTSeqEndIndex::Complete(ix);
             }
         }
-
-        Ok((events_to_next_wft_started, extra_e))
     }
-}
 
-#[derive(Eq, PartialEq, Debug)]
-enum NextWftState {
-    NotSeen,
-    Seen,
-    SeenCompleted,
+    NextWFTSeqEndIndex::Incomplete(last_index)
 }
 
-impl From<HistoryInfo> for HistoryUpdate {
-    fn from(v: HistoryInfo) -> Self {
-        Self::new_from_events(v.events().to_vec(), v.previous_started_event_id())
+#[cfg(test)]
+pub mod tests {
+    use super::*;
+    use crate::{
+        replay::{HistoryInfo, TestHistoryBuilder},
+        test_help::canned_histories,
+        worker::client::mocks::mock_workflow_client,
+    };
+    use futures_util::TryStreamExt;
+    use temporal_sdk_core_protos::temporal::api::workflowservice::v1::GetWorkflowExecutionHistoryResponse;
+
+    impl From<HistoryInfo> for HistoryUpdate {
+        fn from(v: HistoryInfo) -> Self {
+            Self::new_from_events(v.events().to_vec(), v.previous_started_event_id())
+        }
     }
-}
 
-pub trait TestHBExt {
-    fn as_history_update(&self) -> HistoryUpdate;
-}
+    pub trait TestHBExt {
+        fn as_history_update(&self) -> HistoryUpdate;
+    }
 
-impl TestHBExt for TestHistoryBuilder {
-    fn as_history_update(&self) -> HistoryUpdate {
-        self.get_full_history_info().unwrap().into()
+    impl TestHBExt for TestHistoryBuilder {
+        fn as_history_update(&self) -> HistoryUpdate {
+            self.get_full_history_info().unwrap().into()
+        }
     }
-}
 
-#[cfg(test)]
-pub mod tests {
-    use super::*;
-    use crate::{test_help::canned_histories, worker::client::mocks::mock_workflow_client};
+    impl NextWFT {
+        fn unwrap_events(self) -> Vec<HistoryEvent> {
+            match self {
+                NextWFT::WFT(e, _) => e,
+                o => panic!("Must be complete WFT: {o:?}"),
+            }
+        }
+    }
 
-    #[tokio::test]
-    async fn consumes_standard_wft_sequence() {
+    fn next_check_peek(update: &mut HistoryUpdate, from_id: i64) -> Vec<HistoryEvent> {
+        let seq_peeked = update.peek_next_wft_sequence(from_id).to_vec();
+        let seq = update.take_next_wft_sequence(from_id).unwrap_events();
+        assert_eq!(seq, seq_peeked);
+        seq
+    }
+
+    #[test]
+    fn consumes_standard_wft_sequence() {
         let timer_hist = canned_histories::single_timer("t");
         let mut update = timer_hist.as_history_update();
-        let seq_1 = update.take_next_wft_sequence(0).await.unwrap();
+        let seq_1 = next_check_peek(&mut update, 0);
         assert_eq!(seq_1.len(), 3);
         assert_eq!(seq_1.last().unwrap().event_id, 3);
-        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
+        let seq_2_peeked = update.peek_next_wft_sequence(0).to_vec();
+        let seq_2 = next_check_peek(&mut update, 3);
+        assert_eq!(seq_2, seq_2_peeked);
         assert_eq!(seq_2.len(), 5);
         assert_eq!(seq_2.last().unwrap().event_id, 8);
     }
 
-    #[tokio::test]
-    async fn skips_wft_failed() {
+    #[test]
+    fn skips_wft_failed() {
         let failed_hist = canned_histories::workflow_fails_with_reset_after_timer("t", "runid");
         let mut update = failed_hist.as_history_update();
-        let seq_1 = update.take_next_wft_sequence(0).await.unwrap();
+        let seq_1 = next_check_peek(&mut update, 0);
         assert_eq!(seq_1.len(), 3);
         assert_eq!(seq_1.last().unwrap().event_id, 3);
-        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
+        let seq_2 = next_check_peek(&mut update, 3);
         assert_eq!(seq_2.len(), 8);
         assert_eq!(seq_2.last().unwrap().event_id, 11);
     }
 
-    #[tokio::test]
-    async fn skips_wft_timeout() {
+    #[test]
+    fn skips_wft_timeout() {
         let failed_hist = canned_histories::wft_timeout_repro();
         let mut update = failed_hist.as_history_update();
-        let seq_1 = update.take_next_wft_sequence(0).await.unwrap();
+        let seq_1 = next_check_peek(&mut update, 0);
         assert_eq!(seq_1.len(), 3);
         assert_eq!(seq_1.last().unwrap().event_id, 3);
-        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
+        let seq_2 = next_check_peek(&mut update, 3);
         assert_eq!(seq_2.len(), 11);
         assert_eq!(seq_2.last().unwrap().event_id, 14);
     }
 
-    #[tokio::test]
-    async fn skips_events_before_desired_wft() {
+    #[test]
+    fn skips_events_before_desired_wft() {
         let timer_hist = canned_histories::single_timer("t");
         let mut update = timer_hist.as_history_update();
         // We haven't processed the first 3 events, but we should still only get the second sequence
-        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
+        let seq_2 = update.take_next_wft_sequence(3).unwrap_events();
         assert_eq!(seq_2.len(), 5);
         assert_eq!(seq_2.last().unwrap().event_id, 8);
     }
 
-    #[tokio::test]
-    async fn history_ends_abruptly() {
+    #[test]
+    fn history_ends_abruptly() {
         let mut timer_hist = canned_histories::single_timer("t");
         timer_hist.add_workflow_execution_terminated();
         let mut update = timer_hist.as_history_update();
-        let seq_2 = update.take_next_wft_sequence(3).await.unwrap();
-        assert_eq!(seq_2.len(), 5);
-        assert_eq!(seq_2.last().unwrap().event_id, 8);
+        let seq_2 = update.take_next_wft_sequence(3).unwrap_events();
+        assert_eq!(seq_2.len(), 6);
+        assert_eq!(seq_2.last().unwrap().event_id, 9);
     }
 
-    #[tokio::test]
-    async fn heartbeats_skipped() {
+    #[test]
+    fn heartbeats_skipped() {
         let mut t = TestHistoryBuilder::default();
         t.add_by_type(EventType::WorkflowExecutionStarted);
         t.add_full_wf_task();
-        t.add_full_wf_task();
-        t.add_get_event_id(EventType::TimerStarted, None);
-        t.add_full_wf_task();
-        t.add_full_wf_task();
+        t.add_full_wf_task(); // wft started 6
+        t.add_by_type(EventType::TimerStarted);
+        t.add_full_wf_task(); // wft started 10
         t.add_full_wf_task();
         t.add_full_wf_task();
-        t.add_get_event_id(EventType::TimerStarted, None);
-        t.add_full_wf_task();
+        t.add_full_wf_task(); // wft started 19
+        t.add_by_type(EventType::TimerStarted);
+        t.add_full_wf_task(); // wft started 23
         t.add_we_signaled("whee", vec![]);
         t.add_full_wf_task();
         t.add_workflow_execution_completed();
 
         let mut update = t.as_history_update();
-        let seq = update.take_next_wft_sequence(0).await.unwrap();
+        let seq = next_check_peek(&mut update, 0);
         assert_eq!(seq.len(), 6);
-        let seq = update.take_next_wft_sequence(6).await.unwrap();
+        let seq = next_check_peek(&mut update, 6);
         assert_eq!(seq.len(), 13);
-        let seq = update.take_next_wft_sequence(19).await.unwrap();
+        let seq = next_check_peek(&mut update, 19);
         assert_eq!(seq.len(), 4);
-        let seq = update.take_next_wft_sequence(23).await.unwrap();
+        let seq = next_check_peek(&mut update, 23);
         assert_eq!(seq.len(), 4);
-        let seq = update.take_next_wft_sequence(27).await.unwrap();
439
- let seq = update.take_next_wft_sequence(27).await.unwrap();
770
+ let seq = next_check_peek(&mut update, 27);
440
771
  assert_eq!(seq.len(), 2);
441
772
  }
442
773
 
-     #[tokio::test]
-     async fn paginator_fetches_new_pages() {
-         // Note that this test triggers the "event ids that went backwards" error, acceptably.
-         // Can be fixed by having mock not return earlier events.
-         let wft_count = 500;
-         let long_hist = canned_histories::long_sequential_timers(wft_count);
-         let initial_hist = long_hist.get_history_info(10).unwrap();
-         let prev_started = initial_hist.previous_started_event_id();
+     #[test]
+     fn heartbeat_marker_end() {
+         let mut t = TestHistoryBuilder::default();
+         t.add_by_type(EventType::WorkflowExecutionStarted);
+         t.add_full_wf_task();
+         t.add_full_wf_task();
+         t.add_local_activity_result_marker(1, "1", "done".into());
+         t.add_workflow_execution_completed();
+
+         let mut update = t.as_history_update();
+         let seq = next_check_peek(&mut update, 3);
+         // completed, sched, started
+         assert_eq!(seq.len(), 3);
+         let seq = next_check_peek(&mut update, 6);
+         assert_eq!(seq.len(), 3);
+     }
+
+     fn paginator_setup(history: TestHistoryBuilder, chunk_size: usize) -> HistoryPaginator {
+         let full_hist = history.get_full_history_info().unwrap().into_events();
+         let initial_hist = full_hist.chunks(chunk_size).next().unwrap().to_vec();
          let mut mock_client = mock_workflow_client();
 
-         let mut npt = 2;
+         let mut npt = 1;
          mock_client
              .expect_get_workflow_execution_history()
              .returning(move |_, _, passed_npt| {
                  assert_eq!(passed_npt, vec![npt]);
-                 let history = long_hist.get_history_info(10 * npt as usize).unwrap();
+                 let mut hist_chunks = full_hist.chunks(chunk_size).peekable();
+                 let next_chunks = hist_chunks.nth(npt.into()).unwrap_or_default();
                  npt += 1;
+                 let next_page_token = if hist_chunks.peek().is_none() {
+                     vec![]
+                 } else {
+                     vec![npt]
+                 };
                  Ok(GetWorkflowExecutionHistoryResponse {
-                     history: Some(history.into()),
+                     history: Some(History {
+                         events: next_chunks.into(),
+                     }),
                      raw_history: vec![],
-                     next_page_token: vec![npt],
+                     next_page_token,
                      archived: false,
                  })
              });
 
-         let mut update = HistoryUpdate::new(
-             HistoryPaginator::new(
-                 initial_hist.into(),
-                 "wfid".to_string(),
-                 "runid".to_string(),
-                 vec![2], // Start at page "2"
-                 Arc::new(mock_client),
-             ),
-             prev_started,
+         HistoryPaginator::new(
+             History {
+                 events: initial_hist,
+             },
+             0,
+             "wfid".to_string(),
+             "runid".to_string(),
+             vec![1],
+             Arc::new(mock_client),
+         )
+     }
+
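One easy-to-miss detail in `paginator_setup`: chunk 0 of the history seeds the paginator directly, so the first fetched token is `[1]` and each mocked response serves chunk `npt` via `nth(npt)`. A self-contained sketch of that indexing with plain slices (illustrative values only):

    // 25 items split into pages of 10: [1..=10], [11..=20], [21..=25].
    let full: Vec<i32> = (1..=25).collect();
    let chunks: Vec<_> = full.chunks(10).collect();
    let initial = chunks[0]; // handed straight to HistoryPaginator::new
    let first_fetched = chunks[1]; // what the mock serves for token [1]
    assert_eq!(initial.last(), Some(&10));
    assert_eq!(first_fetched.last(), Some(&20));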
+     #[rstest::rstest]
+     #[tokio::test]
+     async fn paginator_extracts_updates(#[values(10, 11, 12, 13, 14)] chunk_size: usize) {
+         let wft_count = 100;
+         let mut paginator = paginator_setup(
+             canned_histories::long_sequential_timers(wft_count),
+             chunk_size,
          );
+         let mut update = paginator.extract_next_update().await.unwrap();
 
-         let seq = update.take_next_wft_sequence(0).await.unwrap();
+         let seq = update.take_next_wft_sequence(0).unwrap_events();
          assert_eq!(seq.len(), 3);
 
          let mut last_event_id = 3;
          let mut last_started_id = 3;
-         for _ in 1..wft_count {
-             let seq = update
-                 .take_next_wft_sequence(last_started_id)
-                 .await
-                 .unwrap();
+         for i in 1..wft_count {
+             let seq = {
+                 match update.take_next_wft_sequence(last_started_id) {
+                     NextWFT::WFT(seq, _) => seq,
+                     NextWFT::NeedFetch => {
+                         update = paginator.extract_next_update().await.unwrap();
+                         update
+                             .take_next_wft_sequence(last_started_id)
+                             .unwrap_events()
+                     }
+                     NextWFT::ReplayOver => {
+                         assert_eq!(i, wft_count - 1);
+                         break;
+                     }
+                 }
+             };
              for e in &seq {
                  last_event_id += 1;
                  assert_eq!(e.event_id, last_event_id);
@@ -495,10 +868,124 @@ pub mod tests {
          }
      }
 
+     #[tokio::test]
+     async fn paginator_streams() {
+         let wft_count = 10;
+         let paginator = StreamingHistoryPaginator::new(paginator_setup(
+             canned_histories::long_sequential_timers(wft_count),
+             10,
+         ));
+         let everything: Vec<_> = paginator.try_collect().await.unwrap();
+         assert_eq!(everything.len(), (wft_count + 1) * 5);
+         everything.iter().fold(1, |event_id, e| {
+             assert_eq!(event_id, e.event_id);
+             e.event_id + 1
+         });
+     }
+
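The expected count in `paginator_streams` follows from the canned history's shape: 3 events open the first WFT, each timer round adds 5 (completed, TimerStarted, TimerFired, scheduled, started, matching the 5-event second sequence asserted in `consumes_standard_wft_sequence`), and completion adds 2 more. A quick arithmetic check (reading aid only):

    // 3 (first WFT) + 5 per timer round + 2 (completion) == (wft_count + 1) * 5
    let wft_count = 10;
    assert_eq!(3 + 5 * wft_count + 2, (wft_count + 1) * 5);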
+     fn three_wfts_then_heartbeats() -> TestHistoryBuilder {
+         let mut t = TestHistoryBuilder::default();
+         // Start with three complete normal WFTs
+         t.add_by_type(EventType::WorkflowExecutionStarted);
+         t.add_full_wf_task(); // wft start - 3
+         t.add_by_type(EventType::TimerStarted);
+         t.add_full_wf_task(); // wft start - 7
+         t.add_by_type(EventType::TimerStarted);
+         t.add_full_wf_task(); // wft start - 11
+         for _ in 1..50 {
+             // Add a bunch of heartbeats with no commands, which count as one task
+             t.add_full_wf_task();
+         }
+         t.add_workflow_execution_completed();
+         t
+     }
+
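The event ids asserted against this builder elsewhere in the file fall out of its shape; a small sanity check (a reading aid, derived from the inline `// wft start` comments above):

    // Started events for the three real WFTs land at 3, 7, 11. Each heartbeat
    // appends scheduled + started + completed, so heartbeat k starts at 11 + 3k.
    assert_eq!(11 + 3 * 49, 158); // the last heartbeat's WFT-started event
    // Its completion is 159, and WorkflowExecutionCompleted then lands at 160.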
+     #[tokio::test]
+     async fn needs_fetch_if_ending_in_middle_of_wft_seq() {
+         let t = three_wfts_then_heartbeats();
+         let mut ends_in_middle_of_seq = t.as_history_update().events;
+         ends_in_middle_of_seq.truncate(19);
+         // The update should contain the first two complete WFTs, ending with event 7 (a WFT
+         // started event); events 8 onward are handed back as remaining. A `false` flag means
+         // the creator knows there are more events, so we should return NeedFetch.
+         let (mut update, remaining) = HistoryUpdate::from_events(ends_in_middle_of_seq, 0, false);
+         assert_eq!(remaining[0].event_id, 8);
+         assert_eq!(remaining.last().unwrap().event_id, 19);
+         let seq = update.take_next_wft_sequence(0).unwrap_events();
+         assert_eq!(seq.last().unwrap().event_id, 3);
+         let seq = update.take_next_wft_sequence(3).unwrap_events();
+         assert_eq!(seq.last().unwrap().event_id, 7);
+         let next = update.take_next_wft_sequence(7);
+         assert_matches!(next, NextWFT::NeedFetch);
+     }
+
+     // Like the above, but if the history happens to be cut off at a WFT boundary (even though
+     // there may have been many heartbeats after it that we have no way of knowing about), events
+     // 7-20 will be counted as one WFT, since there is a started, completed, timer command, and
+     // then the heartbeats.
+     #[tokio::test]
+     async fn needs_fetch_after_complete_seq_with_heartbeats() {
+         let t = three_wfts_then_heartbeats();
+         let mut ends_in_middle_of_seq = t.as_history_update().events;
+         ends_in_middle_of_seq.truncate(20);
+         let (mut update, remaining) = HistoryUpdate::from_events(ends_in_middle_of_seq, 0, false);
+         assert!(remaining.is_empty());
+         let seq = update.take_next_wft_sequence(0).unwrap_events();
+         assert_eq!(seq.last().unwrap().event_id, 3);
+         let seq = update.take_next_wft_sequence(3).unwrap_events();
+         assert_eq!(seq.last().unwrap().event_id, 7);
+         let seq = update.take_next_wft_sequence(7).unwrap_events();
+         assert_eq!(seq.last().unwrap().event_id, 20);
+         let next = update.take_next_wft_sequence(20);
+         assert_matches!(next, NextWFT::NeedFetch);
+     }
+
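Taken together, the two tests pin down a boundary subtlety in `HistoryUpdate::from_events`: cut mid-sequence, the tail comes back as `remaining`; cut exactly on a WFT-started event, `remaining` is empty and the missing-history signal only surfaces later from `take_next_wft_sequence`. A condensed sketch of the contrast (assuming `HistoryEvent` is cloneable, which the tests' own vector handling already implies):

    let events = three_wfts_then_heartbeats().as_history_update().events;
    // Cut mid-sequence (19 events): events 8..=19 come back as `remaining`.
    let (_, remaining) = HistoryUpdate::from_events(events[..19].to_vec(), 0, false);
    assert_eq!(remaining.first().unwrap().event_id, 8);
    // Cut on the WFT-started event 20: nothing is left over yet.
    let (_, remaining) = HistoryUpdate::from_events(events[..20].to_vec(), 0, false);
    assert!(remaining.is_empty());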
+     #[rstest::rstest]
+     #[tokio::test]
+     async fn paginator_works_with_wft_over_multiple_pages(
+         #[values(10, 11, 12, 13, 14)] chunk_size: usize,
+     ) {
+         let t = three_wfts_then_heartbeats();
+         let mut paginator = paginator_setup(t, chunk_size);
+         let mut update = paginator.extract_next_update().await.unwrap();
+         let mut last_id = 0;
+         loop {
+             let seq = update.take_next_wft_sequence(last_id);
+             match seq {
+                 NextWFT::WFT(seq, _) => {
+                     last_id = seq.last().unwrap().event_id;
+                 }
+                 NextWFT::NeedFetch => {
+                     update = paginator.extract_next_update().await.unwrap();
+                 }
+                 NextWFT::ReplayOver => break,
+             }
+         }
+         assert_eq!(last_id, 160);
+     }
+
+     #[tokio::test]
+     async fn task_just_before_heartbeat_chain_is_taken() {
+         let t = three_wfts_then_heartbeats();
+         let mut update = t.as_history_update();
+         let seq = update.take_next_wft_sequence(0).unwrap_events();
+         assert_eq!(seq.last().unwrap().event_id, 3);
+         let seq = update.take_next_wft_sequence(3).unwrap_events();
+         assert_eq!(seq.last().unwrap().event_id, 7);
+         let seq = update.take_next_wft_sequence(7).unwrap_events();
+         assert_eq!(seq.last().unwrap().event_id, 158);
+         let seq = update.take_next_wft_sequence(158).unwrap_events();
+         assert_eq!(seq.last().unwrap().event_id, 160);
+         assert_eq!(
+             seq.last().unwrap().event_type(),
+             EventType::WorkflowExecutionCompleted
+         );
+     }
+
      #[tokio::test]
      async fn handles_cache_misses() {
          let timer_hist = canned_histories::single_timer("t");
          let partial_task = timer_hist.get_one_wft(2).unwrap();
+         let prev_started_wft_id = partial_task.previous_started_event_id();
          let mut history_from_get: GetWorkflowExecutionHistoryResponse =
              timer_hist.get_history_info(2).unwrap().into();
          // Chop off the last event, which is WFT started, which the server doesn't return in get
@@ -509,24 +996,116 @@ pub mod tests {
              .expect_get_workflow_execution_history()
              .returning(move |_, _, _| Ok(history_from_get.clone()));
 
-         let mut update = HistoryUpdate::new(
-             HistoryPaginator::new(
-                 partial_task.into(),
-                 "wfid".to_string(),
-                 "runid".to_string(),
-                 // A cache miss means we'll try to fetch from start
-                 NextPageToken::FetchFromStart,
-                 Arc::new(mock_client),
-             ),
-             1,
+         let mut paginator = HistoryPaginator::new(
+             partial_task.into(),
+             prev_started_wft_id,
+             "wfid".to_string(),
+             "runid".to_string(),
+             // A cache miss means we'll try to fetch from start
+             NextPageToken::FetchFromStart,
+             Arc::new(mock_client),
          );
+         let mut update = paginator.extract_next_update().await.unwrap();
          // We expect that, when we take the first task sequence, its first event is the first
          // event in history.
-         let seq = update.take_next_wft_sequence(0).await.unwrap();
+         let seq = update.take_next_wft_sequence(0).unwrap_events();
          assert_eq!(seq[0].event_id, 1);
-         let seq = update.take_next_wft_sequence(3).await.unwrap();
+         let seq = update.take_next_wft_sequence(3).unwrap_events();
          // Verify anything extra (which should only ever be WFT started) was re-appended to the
          // end of the event iteration after fetching the old history.
          assert_eq!(seq.last().unwrap().event_id, 8);
      }
+
+
1019
+ #[test]
1020
+ fn la_marker_chunking() {
1021
+ let mut t = TestHistoryBuilder::default();
1022
+ t.add_by_type(EventType::WorkflowExecutionStarted);
1023
+ t.add_full_wf_task();
1024
+ t.add_we_signaled("whatever", vec![]);
1025
+ t.add_full_wf_task(); // started - 7
1026
+ t.add_local_activity_result_marker(1, "hi", Default::default());
1027
+ let act_s = t.add_activity_task_scheduled("1");
1028
+ let act_st = t.add_activity_task_started(act_s);
1029
+ t.add_activity_task_completed(act_s, act_st, Default::default());
1030
+ t.add_workflow_task_scheduled_and_started();
1031
+ t.add_workflow_task_timed_out();
1032
+ t.add_workflow_task_scheduled_and_started();
1033
+ t.add_workflow_task_timed_out();
1034
+ t.add_workflow_task_scheduled_and_started();
1035
+
1036
+ let mut update = t.as_history_update();
1037
+ let seq = next_check_peek(&mut update, 0);
1038
+ assert_eq!(seq.len(), 3);
1039
+ let seq = next_check_peek(&mut update, 3);
1040
+ assert_eq!(seq.len(), 4);
1041
+ let seq = next_check_peek(&mut update, 7);
1042
+ assert_eq!(seq.len(), 13);
1043
+ }
1044
+
1045
+ #[tokio::test]
1046
+ async fn handles_blank_fetch_response() {
1047
+ let timer_hist = canned_histories::single_timer("t");
1048
+ let partial_task = timer_hist.get_one_wft(2).unwrap();
1049
+ let prev_started_wft_id = partial_task.previous_started_event_id();
1050
+ let mut mock_client = mock_workflow_client();
1051
+ mock_client
1052
+ .expect_get_workflow_execution_history()
1053
+ .returning(move |_, _, _| Ok(Default::default()));
1054
+
1055
+ let mut paginator = HistoryPaginator::new(
1056
+ partial_task.into(),
1057
+ prev_started_wft_id,
1058
+ "wfid".to_string(),
1059
+ "runid".to_string(),
1060
+ // A cache miss means we'll try to fetch from start
1061
+ NextPageToken::FetchFromStart,
1062
+ Arc::new(mock_client),
1063
+ );
1064
+ let err = paginator.extract_next_update().await.unwrap_err();
1065
+ assert_matches!(err.code(), tonic::Code::DataLoss);
1066
+ }
1067
+
1068
+ #[tokio::test]
1069
+ async fn handles_empty_page_with_next_token() {
1070
+ let timer_hist = canned_histories::single_timer("t");
1071
+ let partial_task = timer_hist.get_one_wft(2).unwrap();
1072
+ let prev_started_wft_id = partial_task.previous_started_event_id();
1073
+ let full_resp: GetWorkflowExecutionHistoryResponse =
1074
+ timer_hist.get_full_history_info().unwrap().into();
1075
+ let mut mock_client = mock_workflow_client();
1076
+ mock_client
1077
+ .expect_get_workflow_execution_history()
1078
+ .returning(move |_, _, _| {
1079
+ Ok(GetWorkflowExecutionHistoryResponse {
1080
+ history: Some(History { events: vec![] }),
1081
+ raw_history: vec![],
1082
+ next_page_token: vec![2],
1083
+ archived: false,
1084
+ })
1085
+ })
1086
+ .times(1);
1087
+ mock_client
1088
+ .expect_get_workflow_execution_history()
1089
+ .returning(move |_, _, _| Ok(full_resp.clone()))
1090
+ .times(1);
1091
+
1092
+ let mut paginator = HistoryPaginator::new(
1093
+ partial_task.into(),
1094
+ prev_started_wft_id,
1095
+ "wfid".to_string(),
1096
+ "runid".to_string(),
1097
+ // A cache miss means we'll try to fetch from start
1098
+ NextPageToken::FetchFromStart,
1099
+ Arc::new(mock_client),
1100
+ );
1101
+ let mut update = paginator.extract_next_update().await.unwrap();
1102
+ let seq = update.take_next_wft_sequence(0).unwrap_events();
1103
+ assert_eq!(seq.last().unwrap().event_id, 3);
1104
+ let seq = update.take_next_wft_sequence(3).unwrap_events();
1105
+ assert_eq!(seq.last().unwrap().event_id, 8);
1106
+ assert_matches!(update.take_next_wft_sequence(8), NextWFT::ReplayOver);
1107
+ }
1108
+
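The last two tests cover opposite fetch edge cases: a completely blank response surfaces as data loss, while an empty page that still carries a token is simply followed to the next page. If more such cases are added, the error half could be factored into a helper along these lines (hypothetical, composed only of calls the tests above already make):

    async fn expect_data_loss(paginator: &mut HistoryPaginator) {
        // A fetch that yields no usable history must surface as DataLoss.
        let err = paginator.extract_next_update().await.unwrap_err();
        assert_matches!(err.code(), tonic::Code::DataLoss);
    }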
+     // TODO: Test we don't re-feed pointless updates if fetching returns <= events we already
+     // processed
  }