temporalio 0.2.0-x86_64-darwin

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (130)
  1. checksums.yaml +7 -0
  2. data/Gemfile +23 -0
  3. data/Rakefile +387 -0
  4. data/lib/temporalio/activity/complete_async_error.rb +11 -0
  5. data/lib/temporalio/activity/context.rb +107 -0
  6. data/lib/temporalio/activity/definition.rb +77 -0
  7. data/lib/temporalio/activity/info.rb +63 -0
  8. data/lib/temporalio/activity.rb +69 -0
  9. data/lib/temporalio/api/batch/v1/message.rb +31 -0
  10. data/lib/temporalio/api/cloud/cloudservice/v1/request_response.rb +93 -0
  11. data/lib/temporalio/api/cloud/cloudservice/v1/service.rb +25 -0
  12. data/lib/temporalio/api/cloud/cloudservice.rb +3 -0
  13. data/lib/temporalio/api/cloud/identity/v1/message.rb +36 -0
  14. data/lib/temporalio/api/cloud/namespace/v1/message.rb +35 -0
  15. data/lib/temporalio/api/cloud/operation/v1/message.rb +27 -0
  16. data/lib/temporalio/api/cloud/region/v1/message.rb +23 -0
  17. data/lib/temporalio/api/command/v1/message.rb +46 -0
  18. data/lib/temporalio/api/common/v1/grpc_status.rb +23 -0
  19. data/lib/temporalio/api/common/v1/message.rb +41 -0
  20. data/lib/temporalio/api/enums/v1/batch_operation.rb +22 -0
  21. data/lib/temporalio/api/enums/v1/command_type.rb +21 -0
  22. data/lib/temporalio/api/enums/v1/common.rb +26 -0
  23. data/lib/temporalio/api/enums/v1/event_type.rb +21 -0
  24. data/lib/temporalio/api/enums/v1/failed_cause.rb +26 -0
  25. data/lib/temporalio/api/enums/v1/namespace.rb +23 -0
  26. data/lib/temporalio/api/enums/v1/query.rb +22 -0
  27. data/lib/temporalio/api/enums/v1/reset.rb +23 -0
  28. data/lib/temporalio/api/enums/v1/schedule.rb +21 -0
  29. data/lib/temporalio/api/enums/v1/task_queue.rb +25 -0
  30. data/lib/temporalio/api/enums/v1/update.rb +22 -0
  31. data/lib/temporalio/api/enums/v1/workflow.rb +30 -0
  32. data/lib/temporalio/api/errordetails/v1/message.rb +42 -0
  33. data/lib/temporalio/api/export/v1/message.rb +24 -0
  34. data/lib/temporalio/api/failure/v1/message.rb +35 -0
  35. data/lib/temporalio/api/filter/v1/message.rb +27 -0
  36. data/lib/temporalio/api/history/v1/message.rb +90 -0
  37. data/lib/temporalio/api/namespace/v1/message.rb +31 -0
  38. data/lib/temporalio/api/nexus/v1/message.rb +40 -0
  39. data/lib/temporalio/api/operatorservice/v1/request_response.rb +49 -0
  40. data/lib/temporalio/api/operatorservice/v1/service.rb +23 -0
  41. data/lib/temporalio/api/operatorservice.rb +3 -0
  42. data/lib/temporalio/api/protocol/v1/message.rb +23 -0
  43. data/lib/temporalio/api/query/v1/message.rb +27 -0
  44. data/lib/temporalio/api/replication/v1/message.rb +26 -0
  45. data/lib/temporalio/api/schedule/v1/message.rb +42 -0
  46. data/lib/temporalio/api/sdk/v1/enhanced_stack_trace.rb +25 -0
  47. data/lib/temporalio/api/sdk/v1/task_complete_metadata.rb +21 -0
  48. data/lib/temporalio/api/sdk/v1/user_metadata.rb +23 -0
  49. data/lib/temporalio/api/sdk/v1/workflow_metadata.rb +23 -0
  50. data/lib/temporalio/api/taskqueue/v1/message.rb +45 -0
  51. data/lib/temporalio/api/update/v1/message.rb +33 -0
  52. data/lib/temporalio/api/version/v1/message.rb +26 -0
  53. data/lib/temporalio/api/workflow/v1/message.rb +43 -0
  54. data/lib/temporalio/api/workflowservice/v1/request_response.rb +189 -0
  55. data/lib/temporalio/api/workflowservice/v1/service.rb +23 -0
  56. data/lib/temporalio/api/workflowservice.rb +3 -0
  57. data/lib/temporalio/api.rb +13 -0
  58. data/lib/temporalio/cancellation.rb +150 -0
  59. data/lib/temporalio/client/activity_id_reference.rb +32 -0
  60. data/lib/temporalio/client/async_activity_handle.rb +110 -0
  61. data/lib/temporalio/client/connection/cloud_service.rb +648 -0
  62. data/lib/temporalio/client/connection/operator_service.rb +249 -0
  63. data/lib/temporalio/client/connection/service.rb +41 -0
  64. data/lib/temporalio/client/connection/workflow_service.rb +1218 -0
  65. data/lib/temporalio/client/connection.rb +270 -0
  66. data/lib/temporalio/client/interceptor.rb +316 -0
  67. data/lib/temporalio/client/workflow_execution.rb +103 -0
  68. data/lib/temporalio/client/workflow_execution_count.rb +36 -0
  69. data/lib/temporalio/client/workflow_execution_status.rb +18 -0
  70. data/lib/temporalio/client/workflow_handle.rb +446 -0
  71. data/lib/temporalio/client/workflow_query_reject_condition.rb +14 -0
  72. data/lib/temporalio/client/workflow_update_handle.rb +67 -0
  73. data/lib/temporalio/client/workflow_update_wait_stage.rb +17 -0
  74. data/lib/temporalio/client.rb +404 -0
  75. data/lib/temporalio/common_enums.rb +24 -0
  76. data/lib/temporalio/converters/data_converter.rb +102 -0
  77. data/lib/temporalio/converters/failure_converter.rb +200 -0
  78. data/lib/temporalio/converters/payload_codec.rb +26 -0
  79. data/lib/temporalio/converters/payload_converter/binary_null.rb +34 -0
  80. data/lib/temporalio/converters/payload_converter/binary_plain.rb +35 -0
  81. data/lib/temporalio/converters/payload_converter/binary_protobuf.rb +42 -0
  82. data/lib/temporalio/converters/payload_converter/composite.rb +62 -0
  83. data/lib/temporalio/converters/payload_converter/encoding.rb +35 -0
  84. data/lib/temporalio/converters/payload_converter/json_plain.rb +44 -0
  85. data/lib/temporalio/converters/payload_converter/json_protobuf.rb +41 -0
  86. data/lib/temporalio/converters/payload_converter.rb +73 -0
  87. data/lib/temporalio/converters.rb +9 -0
  88. data/lib/temporalio/error/failure.rb +219 -0
  89. data/lib/temporalio/error.rb +147 -0
  90. data/lib/temporalio/internal/bridge/3.1/temporalio_bridge.bundle +0 -0
  91. data/lib/temporalio/internal/bridge/3.2/temporalio_bridge.bundle +0 -0
  92. data/lib/temporalio/internal/bridge/3.3/temporalio_bridge.bundle +0 -0
  93. data/lib/temporalio/internal/bridge/api/activity_result/activity_result.rb +34 -0
  94. data/lib/temporalio/internal/bridge/api/activity_task/activity_task.rb +31 -0
  95. data/lib/temporalio/internal/bridge/api/child_workflow/child_workflow.rb +33 -0
  96. data/lib/temporalio/internal/bridge/api/common/common.rb +26 -0
  97. data/lib/temporalio/internal/bridge/api/core_interface.rb +36 -0
  98. data/lib/temporalio/internal/bridge/api/external_data/external_data.rb +27 -0
  99. data/lib/temporalio/internal/bridge/api/workflow_activation/workflow_activation.rb +52 -0
  100. data/lib/temporalio/internal/bridge/api/workflow_commands/workflow_commands.rb +54 -0
  101. data/lib/temporalio/internal/bridge/api/workflow_completion/workflow_completion.rb +30 -0
  102. data/lib/temporalio/internal/bridge/api.rb +3 -0
  103. data/lib/temporalio/internal/bridge/client.rb +90 -0
  104. data/lib/temporalio/internal/bridge/runtime.rb +53 -0
  105. data/lib/temporalio/internal/bridge/testing.rb +46 -0
  106. data/lib/temporalio/internal/bridge/worker.rb +83 -0
  107. data/lib/temporalio/internal/bridge.rb +36 -0
  108. data/lib/temporalio/internal/client/implementation.rb +525 -0
  109. data/lib/temporalio/internal/proto_utils.rb +54 -0
  110. data/lib/temporalio/internal/worker/activity_worker.rb +345 -0
  111. data/lib/temporalio/internal/worker/multi_runner.rb +169 -0
  112. data/lib/temporalio/internal.rb +7 -0
  113. data/lib/temporalio/retry_policy.rb +51 -0
  114. data/lib/temporalio/runtime.rb +271 -0
  115. data/lib/temporalio/scoped_logger.rb +96 -0
  116. data/lib/temporalio/search_attributes.rb +300 -0
  117. data/lib/temporalio/testing/activity_environment.rb +132 -0
  118. data/lib/temporalio/testing/workflow_environment.rb +137 -0
  119. data/lib/temporalio/testing.rb +10 -0
  120. data/lib/temporalio/version.rb +5 -0
  121. data/lib/temporalio/worker/activity_executor/fiber.rb +49 -0
  122. data/lib/temporalio/worker/activity_executor/thread_pool.rb +254 -0
  123. data/lib/temporalio/worker/activity_executor.rb +55 -0
  124. data/lib/temporalio/worker/interceptor.rb +88 -0
  125. data/lib/temporalio/worker/tuner.rb +151 -0
  126. data/lib/temporalio/worker.rb +426 -0
  127. data/lib/temporalio/workflow_history.rb +22 -0
  128. data/lib/temporalio.rb +7 -0
  129. data/temporalio.gemspec +28 -0
  130. metadata +191 -0
data/lib/temporalio/worker/interceptor.rb
@@ -0,0 +1,88 @@
+ # frozen_string_literal: true
+
+ module Temporalio
+   class Worker
+     # Mixin for intercepting worker work. Classes that `include` this module may implement their own
+     # {intercept_activity} that returns their own instance of {ActivityInbound}.
+     #
+     # @note Input classes herein may get new required fields added and therefore the constructors of the Input
+     #   classes may change in backwards-incompatible ways. Users should not try to construct Input classes
+     #   themselves.
+     module Interceptor
+       # Method called when intercepting an activity. This is called when starting an activity attempt.
+       #
+       # @param next_interceptor [ActivityInbound] Next interceptor in the chain that should be called. This is
+       #   usually passed to the {ActivityInbound} constructor.
+       # @return [ActivityInbound] Interceptor to be called for activity calls.
+       def intercept_activity(next_interceptor)
+         next_interceptor
+       end
+
+       # Input for {ActivityInbound.execute}.
+       ExecuteActivityInput = Struct.new(
+         :proc,
+         :args,
+         :headers,
+         keyword_init: true
+       )
+
+       # Input for {ActivityOutbound.heartbeat}.
+       HeartbeatActivityInput = Struct.new(
+         :details,
+         keyword_init: true
+       )
+
+       # Inbound interceptor for intercepting inbound activity calls. This should be extended by users needing to
+       # intercept activities.
+       class ActivityInbound
+         # @return [ActivityInbound] Next interceptor in the chain.
+         attr_reader :next_interceptor
+
+         # Initialize inbound with the next interceptor in the chain.
+         #
+         # @param next_interceptor [ActivityInbound] Next interceptor in the chain.
+         def initialize(next_interceptor)
+           @next_interceptor = next_interceptor
+         end
+
+         # Initialize the outbound interceptor. This should be extended by users to return their own
+         # {ActivityOutbound} implementation that wraps the parameter here.
+         #
+         # @param outbound [ActivityOutbound] Next outbound interceptor in the chain.
+         # @return [ActivityOutbound] Outbound activity interceptor.
+         def init(outbound)
+           @next_interceptor.init(outbound)
+         end
+
+         # Execute an activity and return its result or raise an exception. The next interceptor in the chain
+         # (i.e. `super`) will perform the execution.
+         #
+         # @param input [ExecuteActivityInput] Input information.
+         # @return [Object] Activity result.
+         def execute(input)
+           @next_interceptor.execute(input)
+         end
+       end
+
+       # Outbound interceptor for intercepting outbound activity calls. This should be extended by users needing to
+       # intercept activity calls.
+       class ActivityOutbound
+         # @return [ActivityOutbound] Next interceptor in the chain.
+         attr_reader :next_interceptor
+
+         # Initialize outbound with the next interceptor in the chain.
+         #
+         # @param next_interceptor [ActivityOutbound] Next interceptor in the chain.
+         def initialize(next_interceptor)
+           @next_interceptor = next_interceptor
+         end
+
+         # Issue a heartbeat.
+         #
+         # @param input [HeartbeatActivityInput] Input information.
+         def heartbeat(input)
+           @next_interceptor.heartbeat(input)
+         end
+       end
+     end
+   end
+ end
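
As a usage sketch of the interceptor chain above: a worker interceptor includes the Interceptor module, returns its own ActivityInbound from intercept_activity, and may wrap the outbound side in init so it also sees heartbeats. The class names and puts-based logging below are illustrative assumptions; only the Interceptor module, ActivityInbound, and ActivityOutbound come from the file above.

    # Hypothetical logging interceptor built on Temporalio::Worker::Interceptor (sketch only).
    class LoggingInterceptor
      include Temporalio::Worker::Interceptor

      # Wrap the next inbound interceptor with our own implementation.
      def intercept_activity(next_interceptor)
        ActivityInbound.new(next_interceptor)
      end

      class ActivityInbound < Temporalio::Worker::Interceptor::ActivityInbound
        # Wrap the outbound interceptor too so heartbeats pass through us.
        def init(outbound)
          super(ActivityOutbound.new(outbound))
        end

        # Log around execution; `super` delegates to the next interceptor in the chain.
        def execute(input)
          puts "Starting activity with args #{input.args.inspect}"
          result = super
          puts 'Activity completed'
          result
        end
      end

      class ActivityOutbound < Temporalio::Worker::Interceptor::ActivityOutbound
        # Log each heartbeat before delegating to the next interceptor.
        def heartbeat(input)
          puts "Heartbeat: #{input.details.inspect}"
          super
        end
      end
    end

An instance of such a class can then be passed to a worker through its interceptors option (see data/lib/temporalio/worker.rb below).
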
data/lib/temporalio/worker/tuner.rb
@@ -0,0 +1,151 @@
+ # frozen_string_literal: true
+
+ module Temporalio
+   class Worker
+     # Worker tuner that allows for dynamic customization of some aspects of worker configuration.
+     class Tuner
+       # Slot supplier used for reserving slots for execution. Currently the only allowed implementations are
+       # {Fixed} and {ResourceBased}.
+       class SlotSupplier
+         # A fixed-size slot supplier that will never issue more than a fixed number of slots.
+         class Fixed < SlotSupplier
+           # @return [Integer] The maximum number of slots that can be issued.
+           attr_reader :slots
+
+           # Create a fixed-size slot supplier.
+           #
+           # @param slots [Integer] The maximum number of slots that can be issued.
+           def initialize(slots) # rubocop:disable Lint/MissingSuper
+             @slots = slots
+           end
+         end
+
+         # A slot supplier that will dynamically adjust the number of slots based on resource usage.
+         #
+         # @note WARNING: This API is experimental.
+         class ResourceBased < SlotSupplier
+           attr_reader :tuner_options, :slot_options
+
+           # Create a resource-based slot supplier.
+           #
+           # @param tuner_options [ResourceBasedTunerOptions] General tuner options.
+           # @param slot_options [ResourceBasedSlotOptions] Slot-supplier-specific tuner options.
+           def initialize(tuner_options:, slot_options:) # rubocop:disable Lint/MissingSuper
+             @tuner_options = tuner_options
+             @slot_options = slot_options
+           end
+         end
+       end
+
+       # Options for {create_resource_based} or {SlotSupplier::ResourceBased}.
+       #
+       # @!attribute target_memory_usage
+       #   @return [Float] A value between 0 and 1 that represents the target (system) memory usage. It's not
+       #     recommended to set this higher than 0.8, since how much memory a workflow may use is not predictable,
+       #     and you don't want to encounter OOM errors.
+       # @!attribute target_cpu_usage
+       #   @return [Float] A value between 0 and 1 that represents the target (system) CPU usage. This can be set to
+       #     1.0 if desired, but it's recommended to leave some headroom for other processes.
+       ResourceBasedTunerOptions = Struct.new(
+         :target_memory_usage,
+         :target_cpu_usage,
+         keyword_init: true
+       )
+
+       # Options for a specific slot type being used with {SlotSupplier::ResourceBased}.
+       #
+       # @!attribute min_slots
+       #   @return [Integer, nil] Number of slots that will be issued regardless of any other checks. Defaults to 5
+       #     for workflows and 1 for activities.
+       # @!attribute max_slots
+       #   @return [Integer, nil] Maximum number of slots permitted. Defaults to 500.
+       # @!attribute ramp_throttle
+       #   @return [Float, nil] Minimum time, in seconds, that we will wait (after passing the minimum slot count)
+       #     between handing out new slots. Defaults to 0 for workflows and 0.05 for activities.
+       #
+       #     This value matters because how many resources a task will use cannot be determined ahead of time, and
+       #     thus the system should wait to see how many resources are used before issuing more slots.
+       ResourceBasedSlotOptions = Struct.new(
+         :min_slots,
+         :max_slots,
+         :ramp_throttle,
+         keyword_init: true
+       )
+
+       # Create a fixed-size tuner with the provided number of slots.
+       #
+       # @param workflow_slots [Integer] Maximum number of workflow task slots.
+       # @param activity_slots [Integer] Maximum number of activity slots.
+       # @param local_activity_slots [Integer] Maximum number of local activity slots.
+       # @return [Tuner] Created tuner.
+       def self.create_fixed(
+         workflow_slots: 100,
+         activity_slots: 100,
+         local_activity_slots: 100
+       )
+         new(
+           workflow_slot_supplier: SlotSupplier::Fixed.new(workflow_slots),
+           activity_slot_supplier: SlotSupplier::Fixed.new(activity_slots),
+           local_activity_slot_supplier: SlotSupplier::Fixed.new(local_activity_slots)
+         )
+       end
+
+       # Create a resource-based tuner with the provided options.
+       #
+       # @param target_memory_usage [Float] A value between 0 and 1 that represents the target (system) memory usage.
+       #   It's not recommended to set this higher than 0.8, since how much memory a workflow may use is not
+       #   predictable, and you don't want to encounter OOM errors.
+       # @param target_cpu_usage [Float] A value between 0 and 1 that represents the target (system) CPU usage. This
+       #   can be set to 1.0 if desired, but it's recommended to leave some headroom for other processes.
+       # @param workflow_options [ResourceBasedSlotOptions] Resource-based options for the workflow slot supplier.
+       # @param activity_options [ResourceBasedSlotOptions] Resource-based options for the activity slot supplier.
+       # @param local_activity_options [ResourceBasedSlotOptions] Resource-based options for the local activity slot
+       #   supplier.
+       # @return [Tuner] Created tuner.
+       def self.create_resource_based(
+         target_memory_usage:,
+         target_cpu_usage:,
+         workflow_options: ResourceBasedSlotOptions.new(min_slots: 5, max_slots: 500, ramp_throttle: 0.0),
+         activity_options: ResourceBasedSlotOptions.new(min_slots: 1, max_slots: 500, ramp_throttle: 0.05),
+         local_activity_options: ResourceBasedSlotOptions.new(min_slots: 1, max_slots: 500, ramp_throttle: 0.05)
+       )
+         tuner_options = ResourceBasedTunerOptions.new(target_memory_usage:, target_cpu_usage:)
+         new(
+           workflow_slot_supplier: SlotSupplier::ResourceBased.new(
+             tuner_options:, slot_options: workflow_options
+           ),
+           activity_slot_supplier: SlotSupplier::ResourceBased.new(
+             tuner_options:, slot_options: activity_options
+           ),
+           local_activity_slot_supplier: SlotSupplier::ResourceBased.new(
+             tuner_options:, slot_options: local_activity_options
+           )
+         )
+       end
+
+       # @return [SlotSupplier] Slot supplier for workflows.
+       attr_reader :workflow_slot_supplier
+
+       # @return [SlotSupplier] Slot supplier for activities.
+       attr_reader :activity_slot_supplier
+
+       # @return [SlotSupplier] Slot supplier for local activities.
+       attr_reader :local_activity_slot_supplier
+
+       # Create a tuner from three slot suppliers.
+       #
+       # @param workflow_slot_supplier [SlotSupplier] Slot supplier for workflows.
+       # @param activity_slot_supplier [SlotSupplier] Slot supplier for activities.
+       # @param local_activity_slot_supplier [SlotSupplier] Slot supplier for local activities.
+       def initialize(
+         workflow_slot_supplier:,
+         activity_slot_supplier:,
+         local_activity_slot_supplier:
+       )
+         @workflow_slot_supplier = workflow_slot_supplier
+         @activity_slot_supplier = activity_slot_supplier
+         @local_activity_slot_supplier = local_activity_slot_supplier
+       end
+     end
+   end
+ end
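
A brief usage sketch of the two tuner factory methods above; the numeric values are illustrative, not recommendations.

    require 'temporalio/worker/tuner'

    # Fixed-size tuner: never more than the given number of slots per type
    # (the documented defaults are shown explicitly here).
    fixed_tuner = Temporalio::Worker::Tuner.create_fixed(
      workflow_slots: 100,
      activity_slots: 100,
      local_activity_slots: 100
    )

    # Resource-based tuner: adjust slots to target 80% memory and 70% CPU usage,
    # using the default per-slot-type options.
    resource_tuner = Temporalio::Worker::Tuner.create_resource_based(
      target_memory_usage: 0.8,
      target_cpu_usage: 0.7
    )

Either tuner is then passed to Temporalio::Worker.new via its tuner: option (see data/lib/temporalio/worker.rb below).
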
data/lib/temporalio/worker.rb
@@ -0,0 +1,426 @@
+ # frozen_string_literal: true
+
+ require 'temporalio/activity'
+ require 'temporalio/cancellation'
+ require 'temporalio/client'
+ require 'temporalio/error'
+ require 'temporalio/internal/bridge'
+ require 'temporalio/internal/bridge/worker'
+ require 'temporalio/internal/worker/activity_worker'
+ require 'temporalio/internal/worker/multi_runner'
+ require 'temporalio/worker/activity_executor'
+ require 'temporalio/worker/interceptor'
+ require 'temporalio/worker/tuner'
+
+ module Temporalio
+   # Worker for processing activities and workflows on a task queue.
+   #
+   # Workers are created for a task queue and the items they can run. Then {run} is used for running a single worker,
+   # or {run_all} is used for a collection of workers. These can wait until a block is complete or a {Cancellation}
+   # is canceled.
+   class Worker
+     # Options as returned from {options} for `**to_h` splat use in {initialize}. See {initialize} for details.
+     Options = Struct.new(
+       :client,
+       :task_queue,
+       :activities,
+       :activity_executors,
+       :tuner,
+       :interceptors,
+       :build_id,
+       :identity,
+       :logger,
+       :max_cached_workflows,
+       :max_concurrent_workflow_task_polls,
+       :nonsticky_to_sticky_poll_ratio,
+       :max_concurrent_activity_task_polls,
+       :no_remote_activities,
+       :sticky_queue_schedule_to_start_timeout,
+       :max_heartbeat_throttle_interval,
+       :default_heartbeat_throttle_interval,
+       :max_activities_per_second,
+       :max_task_queue_activities_per_second,
+       :graceful_shutdown_period,
+       :use_worker_versioning,
+       keyword_init: true
+     )
+
+     # @return [String] Memoized default build ID. This default value is built as a checksum of all of the loaded
+     #   Ruby source files in `$LOADED_FEATURES`. Users may prefer to set the build ID to a better representation of
+     #   the source.
+     def self.default_build_id
+       @default_build_id ||= _load_default_build_id
+     end
+
+     # @!visibility private
+     def self._load_default_build_id
+       # The goal is to get a hash of runtime code, both Temporal's and the
+       # user's. After all options were explored, we have decided to default to
+       # hashing all bytecode of required files. This means later/dynamic require
+       # won't be accounted for because this is memoized. It also means the
+       # tiniest code change will affect this, which is what we want since this
+       # is meant to be a "binary checksum". We have chosen to use MD5 for speed,
+       # similarity with other SDKs, and because security is not a factor.
+       require 'digest'
+
+       saw_bridge = false
+       build_id = $LOADED_FEATURES.each_with_object(Digest::MD5.new) do |file, digest|
+         saw_bridge = true if file.include?('temporalio_bridge.')
+         digest.update(File.read(file)) if File.file?(file)
+       end.hexdigest
+       raise 'Temporal bridge library not in $LOADED_FEATURES, unable to calculate default build ID' unless saw_bridge
+
+       build_id
+     end
+
+     # Run all workers until cancellation or the optional block completes. When the cancellation or block is
+     # complete, the workers are shut down. This will return the block result if everything is successful or raise an
+     # error if not. See {run} for details on how worker shutdown works.
+     #
+     # @param workers [Array<Worker>] Workers to run.
+     # @param cancellation [Cancellation] Cancellation that can be canceled to shut down all workers.
+     # @param shutdown_signals [Array] Signals to trap and cause worker shutdown.
+     # @param raise_in_block_on_shutdown [Exception, nil] Exception to {::Thread.raise} or {::Fiber.raise} if a block
+     #   is present and still running on shutdown. If nil, `raise` is not used.
+     # @param wait_block_complete [Boolean] If a block is given and shutdown is caused by something else (e.g. the
+     #   cancellation being canceled), whether to wait on the block to complete before returning.
+     # @yield Optional block. This will be run in a new background thread or fiber. Workers will shut down upon
+     #   completion of this and, assuming no other failures, return/bubble success/exception of the block.
+     # @return [Object] Return value of the block, or nil if no block is given.
+     def self.run_all(
+       *workers,
+       cancellation: Cancellation.new,
+       shutdown_signals: [],
+       raise_in_block_on_shutdown: Error::CanceledError.new('Workers finished'),
+       wait_block_complete: true,
+       &block
+     )
+       # Confirm there is at least one and they are all workers
+       raise ArgumentError, 'At least one worker required' if workers.empty?
+       raise ArgumentError, 'Not all parameters are workers' unless workers.all? { |w| w.is_a?(Worker) }
+
+       Internal::Bridge.assert_fiber_compatibility!
+
+       # Start the multi runner
+       runner = Internal::Worker::MultiRunner.new(workers:, shutdown_signals:)
+
+       # Apply block
+       runner.apply_thread_or_fiber_block(&block)
+
+       # Reuse first worker logger
+       logger = workers.first&.options&.logger or raise # Help steep
+
+       # On cancel, initiate shutdown
+       cancellation.add_cancel_callback do
+         logger.info('Cancel invoked, beginning worker shutdown')
+         runner.initiate_shutdown
+       end
+
+       # Poller loop, run until all pollers shut down
+       first_error = nil
+       block_result = nil
+       loop do
+         event = runner.next_event
+         case event
+         when Internal::Worker::MultiRunner::Event::PollSuccess
+           # Successful poll
+           event.worker._on_poll_bytes(event.worker_type, event.bytes)
+         when Internal::Worker::MultiRunner::Event::PollFailure
+           # Poll failure, this causes shutdown of all workers
+           logger.error('Poll failure (beginning worker shutdown if not already occurring)')
+           logger.error(event.error)
+           first_error ||= event.error
+           runner.initiate_shutdown
+         when Internal::Worker::MultiRunner::Event::PollerShutDown
+           # Individual poller shut down. Nothing to do here until we support
+           # worker status or something.
+         when Internal::Worker::MultiRunner::Event::AllPollersShutDown
+           # This is where we break the loop, no more polling can happen
+           break
+         when Internal::Worker::MultiRunner::Event::BlockSuccess
+           logger.info('Block completed, beginning worker shutdown')
+           block_result = event
+           runner.initiate_shutdown
+         when Internal::Worker::MultiRunner::Event::BlockFailure
+           logger.error('Block failure (beginning worker shutdown)')
+           logger.error(event.error)
+           block_result = event
+           first_error ||= event.error
+           runner.initiate_shutdown
+         when Internal::Worker::MultiRunner::Event::ShutdownSignalReceived
+           logger.info('Signal received, beginning worker shutdown')
+           runner.initiate_shutdown
+         else
+           raise "Unexpected event: #{event}"
+         end
+       end
+
+       # Now that all pollers have stopped, let's wait for all to complete
+       begin
+         runner.wait_complete_and_finalize_shutdown
+       rescue StandardError => e
+         logger.warn('Failed waiting and finalizing')
+         logger.warn(e)
+       end
+
+       # If there was a block but not a result yet, we want to raise if that is
+       # wanted, and wait if that is wanted
+       if block_given? && block_result.nil?
+         runner.raise_in_thread_or_fiber_block(raise_in_block_on_shutdown) unless raise_in_block_on_shutdown.nil?
+         if wait_block_complete
+           event = runner.next_event
+           case event
+           when Internal::Worker::MultiRunner::Event::BlockSuccess
+             logger.info('Block completed (after worker shutdown)')
+             block_result = event
+           when Internal::Worker::MultiRunner::Event::BlockFailure
+             logger.error('Block failure (after worker shutdown)')
+             logger.error(event.error)
+             block_result = event
+             first_error ||= event.error
+           when Internal::Worker::MultiRunner::Event::ShutdownSignalReceived
+             # Do nothing, waiting for block
+           else
+             raise "Unexpected event: #{event}"
+           end
+         end
+       end
+
+       # If there was a shutdown-causing error, we raise that
+       if !first_error.nil?
+         raise first_error
+       elsif block_result.is_a?(Internal::Worker::MultiRunner::Event::BlockSuccess)
+         block_result.result
+       end
+     end
+
+     # @return [Options] Frozen options for this worker, which have the same attributes as {initialize}.
+     attr_reader :options
+
+     # Create a new worker. At least one activity or workflow must be present.
+     #
+     # @param client [Client] Client for this worker.
+     # @param task_queue [String] Task queue for this worker.
+     # @param activities [Array<Activity, Class<Activity>, Activity::Definition>] Activities for this worker.
+     # @param activity_executors [Hash<Symbol, Worker::ActivityExecutor>] Executors that activities can run within.
+     # @param tuner [Tuner] Tuner that controls the number of concurrent activities/workflows that run at a time.
+     # @param interceptors [Array<Interceptor>] Interceptors specific to this worker. Note, interceptors set on the
+     #   client that include the {Interceptor} module are automatically included here, so no need to specify them
+     #   again.
+     # @param build_id [String] Unique identifier for the current runtime. This is best set as a unique value
+     #   representing all code and should change only when code does. This can be something like a git commit hash.
+     #   If unset, the default is a hash of known Ruby code.
+     # @param identity [String, nil] Override the identity for this worker. If unset, the client identity is used.
+     # @param max_cached_workflows [Integer] Number of workflows held in cache for use by the sticky task queue. If
+     #   set to 0, workflow caching and sticky queuing are disabled.
+     # @param max_concurrent_workflow_task_polls [Integer] Maximum number of concurrent poll workflow task requests
+     #   we will perform at a time on this worker's task queue.
+     # @param nonsticky_to_sticky_poll_ratio [Float] `max_concurrent_workflow_task_polls` * this number = the maximum
+     #   number of pollers that will be allowed for the nonsticky queue when sticky tasks are enabled. If both
+     #   defaults are used, the sticky queue will allow 4 max pollers while the nonsticky queue will allow one. The
+     #   minimum for either poller is 1, so if `max_concurrent_workflow_task_polls` is 1 and sticky queues are
+     #   enabled, there will be 2 concurrent polls.
+     # @param max_concurrent_activity_task_polls [Integer] Maximum number of concurrent poll activity task requests
+     #   we will perform at a time on this worker's task queue.
+     # @param no_remote_activities [Boolean] If true, this worker will only handle workflow tasks and local
+     #   activities; it will not poll for activity tasks.
+     # @param sticky_queue_schedule_to_start_timeout [Float] How long a workflow task is allowed to sit on the sticky
+     #   queue before it is timed out and moved to the non-sticky queue where it may be picked up by any worker.
+     # @param max_heartbeat_throttle_interval [Float] Longest interval for throttling activity heartbeats.
+     # @param default_heartbeat_throttle_interval [Float] Default interval for throttling activity heartbeats in case
+     #   the per-activity heartbeat timeout is unset. Otherwise, it's the per-activity heartbeat timeout * 0.8.
+     # @param max_activities_per_second [Float, nil] Limits the number of activities per second that this worker will
+     #   process. The worker will not poll for new activities if by doing so it might receive and execute an activity
+     #   which would cause it to exceed this limit.
+     # @param max_task_queue_activities_per_second [Float, nil] Sets the maximum number of activities per second the
+     #   task queue will dispatch, controlled server-side. Note that this only takes effect upon an activity poll
+     #   request. If multiple workers on the same queue have different values set, they will thrash with the last
+     #   poller winning.
+     # @param graceful_shutdown_period [Float] Amount of time after shutdown is called that activities are given to
+     #   complete before their tasks are canceled.
+     # @param use_worker_versioning [Boolean] If true, the `build_id` argument must be specified, and this worker
+     #   opts into the worker versioning feature. This ensures it only receives workflow tasks for workflows which it
+     #   claims to be compatible with. For more information, see https://docs.temporal.io/workers#worker-versioning.
+     def initialize(
+       client:,
+       task_queue:,
+       activities: [],
+       activity_executors: ActivityExecutor.defaults,
+       tuner: Tuner.create_fixed,
+       interceptors: [],
+       build_id: Worker.default_build_id,
+       identity: nil,
+       logger: client.options.logger,
+       max_cached_workflows: 1000,
+       max_concurrent_workflow_task_polls: 5,
+       nonsticky_to_sticky_poll_ratio: 0.2,
+       max_concurrent_activity_task_polls: 5,
+       no_remote_activities: false,
+       sticky_queue_schedule_to_start_timeout: 10,
+       max_heartbeat_throttle_interval: 60,
+       default_heartbeat_throttle_interval: 30,
+       max_activities_per_second: nil,
+       max_task_queue_activities_per_second: nil,
+       graceful_shutdown_period: 0,
+       use_worker_versioning: false
+     )
+       # TODO(cretz): Remove when workflows come about
+       raise ArgumentError, 'Must have at least one activity' if activities.empty?
+
+       @options = Options.new(
+         client:,
+         task_queue:,
+         activities:,
+         activity_executors:,
+         tuner:,
+         interceptors:,
+         build_id:,
+         identity:,
+         logger:,
+         max_cached_workflows:,
+         max_concurrent_workflow_task_polls:,
+         nonsticky_to_sticky_poll_ratio:,
+         max_concurrent_activity_task_polls:,
+         no_remote_activities:,
+         sticky_queue_schedule_to_start_timeout:,
+         max_heartbeat_throttle_interval:,
+         default_heartbeat_throttle_interval:,
+         max_activities_per_second:,
+         max_task_queue_activities_per_second:,
+         graceful_shutdown_period:,
+         use_worker_versioning:
+       ).freeze
+
+       # Create the bridge worker
+       @bridge_worker = Internal::Bridge::Worker.new(
+         client.connection._core_client,
+         Internal::Bridge::Worker::Options.new(
+           activity: !activities.empty?,
+           workflow: false,
+           namespace: client.namespace,
+           task_queue:,
+           tuner: Internal::Bridge::Worker::TunerOptions.new(
+             workflow_slot_supplier: to_bridge_slot_supplier_options(tuner.workflow_slot_supplier),
+             activity_slot_supplier: to_bridge_slot_supplier_options(tuner.activity_slot_supplier),
+             local_activity_slot_supplier: to_bridge_slot_supplier_options(tuner.local_activity_slot_supplier)
+           ),
+           build_id:,
+           identity_override: identity,
+           max_cached_workflows:,
+           max_concurrent_workflow_task_polls:,
+           nonsticky_to_sticky_poll_ratio:,
+           max_concurrent_activity_task_polls:,
+           no_remote_activities:,
+           sticky_queue_schedule_to_start_timeout:,
+           max_heartbeat_throttle_interval:,
+           default_heartbeat_throttle_interval:,
+           max_worker_activities_per_second: max_activities_per_second,
+           max_task_queue_activities_per_second:,
+           graceful_shutdown_period:,
+           use_worker_versioning:
+         )
+       )
+
+       # Collect interceptors from client and params
+       @all_interceptors = client.options.interceptors.select { |i| i.is_a?(Interceptor) } + interceptors
+
+       # Cancellation for the whole worker
+       @worker_shutdown_cancellation = Cancellation.new
+
+       # Create workers
+       # TODO(cretz): Make conditional when workflows appear
+       @activity_worker = Internal::Worker::ActivityWorker.new(self, @bridge_worker)
+
+       # Validate worker
+       @bridge_worker.validate
+     end
+
+     # @return [String] Task queue set on the worker options.
+     def task_queue
+       @options.task_queue
+     end
+
+     # Run this worker until cancellation or the optional block completes. When the cancellation or block is
+     # complete, the worker is shut down. This will return the block result if everything is successful or raise an
+     # error if not.
+     #
+     # Upon shutdown (either via cancellation, block completion, or worker fatal error), the worker immediately stops
+     # accepting new work. Then, after an optional grace period, all activities are canceled. This call then waits
+     # for every activity and workflow task to complete before returning.
+     #
+     # @param cancellation [Cancellation] Cancellation that can be canceled to shut down this worker.
+     # @param shutdown_signals [Array] Signals to trap and cause worker shutdown.
+     # @param raise_in_block_on_shutdown [Exception, nil] Exception to {::Thread.raise} or {::Fiber.raise} if a block
+     #   is present and still running on shutdown. If nil, `raise` is not used.
+     # @param wait_block_complete [Boolean] If a block is given and shutdown is caused by something else (e.g. the
+     #   cancellation being canceled), whether to wait on the block to complete before returning.
+     # @yield Optional block. This will be run in a new background thread or fiber. The worker will shut down upon
+     #   completion of this and, assuming no other failures, return/bubble success/exception of the block.
+     # @return [Object] Return value of the block, or nil if no block is given.
+     def run(
+       cancellation: Cancellation.new,
+       shutdown_signals: [],
+       raise_in_block_on_shutdown: Error::CanceledError.new('Workers finished'),
+       wait_block_complete: true,
+       &block
+     )
+       Worker.run_all(self, cancellation:, shutdown_signals:, raise_in_block_on_shutdown:, wait_block_complete:, &block)
+     end
+
+     # @!visibility private
+     def _worker_shutdown_cancellation
+       @worker_shutdown_cancellation
+     end
+
+     # @!visibility private
+     def _initiate_shutdown
+       _bridge_worker.initiate_shutdown
+       _, cancel_proc = _worker_shutdown_cancellation
+       cancel_proc.call
+     end
+
+     # @!visibility private
+     def _wait_all_complete
+       @activity_worker&.wait_all_complete
+     end
+
+     # @!visibility private
+     def _bridge_worker
+       @bridge_worker
+     end
+
+     # @!visibility private
+     def _all_interceptors
+       @all_interceptors
+     end
+
+     # @!visibility private
+     def _on_poll_bytes(worker_type, bytes)
+       # TODO(cretz): Workflow workers
+       raise "Unrecognized worker type #{worker_type}" unless worker_type == :activity
+
+       @activity_worker.handle_task(Internal::Bridge::Api::ActivityTask::ActivityTask.decode(bytes))
+     end
+
+     private
+
+     def to_bridge_slot_supplier_options(slot_supplier)
+       if slot_supplier.is_a?(Tuner::SlotSupplier::Fixed)
+         Internal::Bridge::Worker::TunerSlotSupplierOptions.new(
+           fixed_size: slot_supplier.slots,
+           resource_based: nil
+         )
+       elsif slot_supplier.is_a?(Tuner::SlotSupplier::ResourceBased)
+         Internal::Bridge::Worker::TunerSlotSupplierOptions.new(
+           fixed_size: nil,
+           resource_based: Internal::Bridge::Worker::TunerResourceBasedSlotSupplierOptions.new(
+             target_mem_usage: slot_supplier.tuner_options.target_memory_usage,
+             target_cpu_usage: slot_supplier.tuner_options.target_cpu_usage,
+             min_slots: slot_supplier.slot_options.min_slots,
+             max_slots: slot_supplier.slot_options.max_slots,
+             ramp_throttle: slot_supplier.slot_options.ramp_throttle
+           )
+         )
+       else
+         raise ArgumentError, 'Tuner slot suppliers must be instances of Fixed or ResourceBased'
+       end
+     end
+   end
+ end
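
A minimal end-to-end sketch of the worker API above. It assumes client is an already-connected Temporalio::Client (data/lib/temporalio/client.rb, not shown in this excerpt) and MyActivity is an activity defined per data/lib/temporalio/activity.rb; both names are illustrative.

    require 'temporalio/worker'

    # Build a worker for one task queue with a single activity.
    worker = Temporalio::Worker.new(
      client: client,            # assumed: previously connected Temporalio::Client
      task_queue: 'my-task-queue',
      activities: [MyActivity]   # assumed: activity class defined elsewhere
    )

    # Run the worker while the block executes in a background thread/fiber. When the
    # block returns, the worker shuts down gracefully and `run` returns the block's value.
    result = worker.run do
      # Application-specific work while the worker polls; illustrative only.
      :done
    end

    # Several workers can be run together and shut down as a group:
    #   Temporalio::Worker.run_all(worker_a, worker_b) { ... }
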
data/lib/temporalio/workflow_history.rb
@@ -0,0 +1,22 @@
+ # frozen_string_literal: true
+
+ module Temporalio
+   # Representation of a workflow's history.
+   class WorkflowHistory
+     # History events for the workflow.
+     attr_reader :events
+
+     # @!visibility private
+     def initialize(events)
+       @events = events
+     end
+
+     # @return [String] ID of the workflow, extracted from the first event.
+     def workflow_id
+       start = events.first&.workflow_execution_started_event_attributes
+       raise 'First event not a start event' if start.nil?
+
+       start.workflow_id
+     end
+   end
+ end
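
A small usage sketch for WorkflowHistory. How a history is obtained (for example, from a client workflow handle) is outside this file, so the history variable below is assumed.

    # `history` is assumed to be a Temporalio::WorkflowHistory obtained elsewhere;
    # `events` is assumed to be array-like, per the `events.first` usage above.
    puts "Workflow #{history.workflow_id} has #{history.events.size} history events"
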