circuit_breaker-wf 0.1.0

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (68)
  1. checksums.yaml +7 -0
  2. data/.gitignore +4 -0
  3. data/CHANGELOG.md +52 -0
  4. data/Gemfile +10 -0
  5. data/Gemfile.lock +116 -0
  6. data/LICENSE +21 -0
  7. data/README.md +324 -0
  8. data/examples/document/README.md +150 -0
  9. data/examples/document/document_assistant.rb +535 -0
  10. data/examples/document/document_rules.rb +60 -0
  11. data/examples/document/document_token.rb +83 -0
  12. data/examples/document/document_workflow.rb +114 -0
  13. data/examples/document/mock_executor.rb +80 -0
  14. data/lib/circuit_breaker/executors/README.md +664 -0
  15. data/lib/circuit_breaker/executors/agent_executor.rb +187 -0
  16. data/lib/circuit_breaker/executors/assistant_executor.rb +245 -0
  17. data/lib/circuit_breaker/executors/base_executor.rb +56 -0
  18. data/lib/circuit_breaker/executors/docker_executor.rb +56 -0
  19. data/lib/circuit_breaker/executors/dsl.rb +97 -0
  20. data/lib/circuit_breaker/executors/llm/memory.rb +82 -0
  21. data/lib/circuit_breaker/executors/llm/tools.rb +94 -0
  22. data/lib/circuit_breaker/executors/nats_executor.rb +230 -0
  23. data/lib/circuit_breaker/executors/serverless_executor.rb +25 -0
  24. data/lib/circuit_breaker/executors/step_executor.rb +47 -0
  25. data/lib/circuit_breaker/history.rb +81 -0
  26. data/lib/circuit_breaker/rules.rb +251 -0
  27. data/lib/circuit_breaker/templates/mermaid.html.erb +51 -0
  28. data/lib/circuit_breaker/templates/plantuml.html.erb +55 -0
  29. data/lib/circuit_breaker/token.rb +486 -0
  30. data/lib/circuit_breaker/visualizer.rb +173 -0
  31. data/lib/circuit_breaker/workflow_dsl.rb +359 -0
  32. data/lib/circuit_breaker.rb +236 -0
  33. data/workflow-editor/.gitignore +24 -0
  34. data/workflow-editor/README.md +106 -0
  35. data/workflow-editor/eslint.config.js +28 -0
  36. data/workflow-editor/index.html +13 -0
  37. data/workflow-editor/package-lock.json +6864 -0
  38. data/workflow-editor/package.json +50 -0
  39. data/workflow-editor/postcss.config.js +6 -0
  40. data/workflow-editor/public/vite.svg +1 -0
  41. data/workflow-editor/src/App.css +42 -0
  42. data/workflow-editor/src/App.tsx +365 -0
  43. data/workflow-editor/src/assets/react.svg +1 -0
  44. data/workflow-editor/src/components/AddNodeButton.tsx +68 -0
  45. data/workflow-editor/src/components/EdgeDetails.tsx +175 -0
  46. data/workflow-editor/src/components/NodeDetails.tsx +177 -0
  47. data/workflow-editor/src/components/ResizablePanel.tsx +74 -0
  48. data/workflow-editor/src/components/SaveButton.tsx +45 -0
  49. data/workflow-editor/src/config/change_workflow.yaml +59 -0
  50. data/workflow-editor/src/config/constants.ts +11 -0
  51. data/workflow-editor/src/config/flowConfig.ts +189 -0
  52. data/workflow-editor/src/config/uiConfig.ts +77 -0
  53. data/workflow-editor/src/config/workflow.yaml +58 -0
  54. data/workflow-editor/src/hooks/useKeyPress.ts +29 -0
  55. data/workflow-editor/src/index.css +34 -0
  56. data/workflow-editor/src/main.tsx +10 -0
  57. data/workflow-editor/src/server/saveWorkflow.ts +81 -0
  58. data/workflow-editor/src/utils/saveWorkflow.ts +92 -0
  59. data/workflow-editor/src/utils/workflowLoader.ts +26 -0
  60. data/workflow-editor/src/utils/workflowTransformer.ts +91 -0
  61. data/workflow-editor/src/vite-env.d.ts +1 -0
  62. data/workflow-editor/src/yaml.d.ts +4 -0
  63. data/workflow-editor/tailwind.config.js +15 -0
  64. data/workflow-editor/tsconfig.app.json +26 -0
  65. data/workflow-editor/tsconfig.json +7 -0
  66. data/workflow-editor/tsconfig.node.json +24 -0
  67. data/workflow-editor/vite.config.ts +8 -0
  68. metadata +267 -0
data/lib/circuit_breaker/executors/agent_executor.rb
@@ -0,0 +1,187 @@
+ require_relative 'base_executor'
+ require_relative 'llm/memory'
+ require_relative 'llm/tools'
+
+ module CircuitBreaker
+   module Executors
+     class AgentExecutor < BaseExecutor
+       MAX_ITERATIONS = 10
+
+       executor_config do
+         parameter :agent_type, type: :string, description: 'Type of agent'
+         parameter :task, type: :string, description: 'Task for the agent to perform'
+         parameter :model, type: :string, default: 'gpt-4', description: 'LLM model to use'
+         parameter :model_provider, type: :string, description: 'Model provider (ollama/openai)'
+         parameter :ollama_base_url, type: :string, default: 'http://localhost:11434', description: 'Ollama server URL'
+         parameter :system_prompt, type: :string, description: 'System prompt for the agent'
+         parameter :tools, type: :array, default: [], description: 'List of tools available to the agent'
+         parameter :parameters, type: :hash, default: {}, description: 'Additional parameters'
+       end
+
+       def initialize(context = {})
+         super
+         @agent_type = context[:agent_type]
+         @task = context[:task]
+         @model = context[:model] || 'gpt-4'
+         @model_provider = context[:model_provider] || detect_model_provider(@model)
+         @ollama_base_url = context[:ollama_base_url] || 'http://localhost:11434'
+         @system_prompt = context[:system_prompt]
+         @memory = LLM::ChainMemory.new
+         @toolkit = setup_toolkit(context[:tools] || [])
+         @parameters = context[:parameters] || {}
+       end
+
+       def execute
+         return unless @task
+
+         iteration = 0
+         final_output = nil
+
+         while iteration < MAX_ITERATIONS
+           # Get current state and plan next action
+           current_state = prepare_state(iteration)
+           action_plan = plan_next_action(current_state)
+
+           break if action_plan[:status] == 'complete'
+
+           # Execute planned action
+           action_result = execute_action(action_plan)
+
+           # Store intermediate results
+           @memory.add_step_result(
+             step_name: action_plan[:action],
+             input: action_plan[:input],
+             output: action_result,
+             metadata: { iteration: iteration }
+           )
+
+           final_output = action_result
+           iteration += 1
+         end
+
+         @result = {
+           task: @task,
+           iterations: iteration,
+           final_output: final_output,
+           memory: @memory.to_h,
+           status: iteration < MAX_ITERATIONS ? 'completed' : 'max_iterations_reached'
+         }
+       end
+
+       private
+
+       def detect_model_provider(model)
+         return 'ollama' if model.start_with?('llama', 'codellama', 'mistral', 'dolphin')
+         'openai'
+       end
+
+       def setup_toolkit(tools)
+         toolkit = LLM::ToolKit.new
+         tools.each do |tool|
+           toolkit.add_tool(tool)
+         end
+         toolkit
+       end
+
+       def prepare_state(iteration)
+         {
+           task: @task,
+           iteration: iteration,
+           tools: @toolkit.tool_descriptions,
+           memory: @memory.get_step_history,
+           parameters: @parameters
+         }
+       end
+
+       def plan_next_action(state)
+         prompt = generate_planning_prompt(state)
+
+         case @model_provider
+         when 'ollama'
+           response = make_ollama_request(prompt)
+           parse_llm_response(response)
+         when 'openai'
+           # Existing OpenAI logic here
+           if state[:iteration] == 0
+             {
+               status: 'in_progress',
+               action: 'search',
+               input: { query: state[:task] }
+             }
+           else
+             { status: 'complete' }
+           end
+         end
+       end
+
+       def generate_planning_prompt(state)
+         # Generate a structured prompt for the LLM
+         system_context = @system_prompt || "You are an AI agent tasked with solving problems step by step."
+         available_tools = state[:tools].map { |t| "- #{t[:name]}: #{t[:description]}" }.join("\n")
+         memory_context = state[:memory].map { |m| "Step #{m[:step]}: #{m[:result]}" }.join("\n")
+
+         <<~PROMPT
+           #{system_context}
+
+           TASK: #{state[:task]}
+           ITERATION: #{state[:iteration]}
+
+           AVAILABLE TOOLS:
+           #{available_tools}
+
+           PREVIOUS STEPS:
+           #{memory_context}
+
+           Based on the above context, determine the next action:
+           1. If the task is complete, respond with: {"status": "complete"}
+           2. If more work is needed, respond with: {"status": "in_progress", "action": "[tool_name]", "input": {[tool parameters]}}
+         PROMPT
+       end
+
+       def make_ollama_request(prompt)
+         require 'net/http'
+         require 'json'
+
+         uri = URI("#{@ollama_base_url}/api/generate")
+         http = Net::HTTP.new(uri.host, uri.port)
+
+         request = Net::HTTP::Post.new(uri)
+         request['Content-Type'] = 'application/json'
+         request.body = {
+           model: @model,
+           prompt: prompt,
+           stream: false
+         }.to_json
+
+         response = http.request(request)
+         JSON.parse(response.body)
+       rescue => e
+         { error: "Ollama request failed: #{e.message}" }
+       end
+
+       def parse_llm_response(response)
+         return { status: 'error', message: response[:error] } if response[:error]
+
+         begin
+           # Extract the JSON response from the LLM output
+           json_str = response['response'].match(/\{.*\}/m)&.[](0)
+           return { status: 'error', message: 'No valid JSON found in response' } unless json_str
+
+           JSON.parse(json_str, symbolize_names: true)
+         rescue JSON::ParserError => e
+           { status: 'error', message: "Failed to parse LLM response: #{e.message}" }
+         end
+       end
+
+       def execute_action(action_plan)
+         return unless action_plan[:action]
+
+         begin
+           @toolkit.execute_tool(action_plan[:action], **action_plan[:input])
+         rescue => e
+           { error: e.message }
+         end
+       end
+     end
+   end
+ end
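For orientation, here is a minimal usage sketch of the plan/act/record loop above, assuming a locally running Ollama server. The task string, model name, and empty tool list are illustrative; real tool objects must satisfy whatever interface LLM::ToolKit#add_tool and #execute_tool expect (defined in llm/tools.rb, not shown in this hunk), and the require assumes the gem's lib directory is on the load path.

# Hypothetical driver script; all values are illustrative, not part of the gem.
require 'circuit_breaker'

agent = CircuitBreaker::Executors::AgentExecutor.new(
  task: 'Summarize the open workflow tickets',
  model: 'llama2',            # starts with 'llama', so detect_model_provider picks Ollama
  system_prompt: 'You are a helpful workflow agent.',
  tools: []                   # tool objects compatible with LLM::ToolKit
)

result = agent.execute
puts result[:status]          # 'completed' or 'max_iterations_reached'
puts result[:final_output]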
data/lib/circuit_breaker/executors/assistant_executor.rb
@@ -0,0 +1,245 @@
+ require_relative 'base_executor'
+ require_relative 'llm/memory'
+ require_relative 'llm/tools'
+ require_relative 'dsl'
+
+ module CircuitBreaker
+   module Executors
+     class AssistantExecutor < BaseExecutor
+       include DSL
+
+       def initialize(context = {})
+         super
+         @memory = LLM::ConversationMemory.new(system_prompt: @context[:system_prompt])
+         @toolkit = LLM::ToolKit.new
+       end
+
+       executor_config do
+         parameter :model, type: :string, default: 'gpt-4', description: 'LLM model to use'
+         parameter :model_provider, type: :string, description: 'Model provider (ollama/openai)'
+         parameter :ollama_base_url, type: :string, default: 'http://localhost:11434', description: 'Ollama server URL'
+         parameter :system_prompt, type: :string, description: 'System prompt for the assistant'
+         parameter :tools, type: :array, default: [], description: 'List of tools available to the assistant'
+         parameter :parameters, type: :hash, default: {}, description: 'Additional parameters'
+         parameter :input, type: :string, description: 'Input message for the assistant'
+
+         validate do |context|
+           if context[:model_provider].nil?
+             context[:model_provider] = if context[:model].to_s.start_with?('llama', 'codellama', 'mistral', 'dolphin', 'qwen')
+               'ollama'
+             else
+               'openai'
+             end
+           end
+         end
+
+         before_execute do |context|
+           @memory.system_prompt = context[:system_prompt] if context[:system_prompt]
+           add_tools(context[:tools]) if context[:tools]
+         end
+       end
+
+       class << self
+         def define(&block)
+           new.tap do |executor|
+             executor.instance_eval(&block) if block_given?
+             executor.validate_parameters
+           end
+         end
+       end
+
+       def use_model(model_name)
+         @context[:model] = model_name
+         @context[:model_provider] = if model_name.to_s.start_with?('llama', 'codellama', 'mistral', 'dolphin', 'qwen')
+           'ollama'
+         else
+           'openai'
+         end
+         self
+       end
+
+       def with_system_prompt(prompt)
+         @context[:system_prompt] = prompt
+         @memory = LLM::ConversationMemory.new(system_prompt: prompt)
+         self
+       end
+
+       def with_parameters(params)
+         @context[:parameters] = (@context[:parameters] || {}).merge(params)
+         self
+       end
+
+       def add_tool(tool)
+         @toolkit.add_tool(tool)
+         self
+       end
+
+       def add_tools(tools)
+         tools.each { |tool| add_tool(tool) }
+         self
+       end
+
+       def update_context(new_context)
+         @context.merge!(new_context)
+         validate_parameters
+         self
+       end
+
+       def execute
+         input = @context[:input]
+         return unless input
+
+         @memory.add_user_message(input)
+         conversation_context = prepare_context
+         response = make_llm_call(conversation_context)
+         processed_response = process_response(response)
+         @memory.add_assistant_message(processed_response[:content])
+
+         @result = {
+           input: input,
+           output: processed_response,
+           conversation_history: @memory.to_h,
+           status: 'completed'
+         }
+       end
+
+       private
+
+       def prepare_context
+         {
+           messages: @memory.messages,
+           tools: @toolkit.tool_descriptions,
+           parameters: @context[:parameters]
+         }
+       end
+
+       def make_llm_call(context)
+         case @context[:model_provider]
+         when 'ollama'
+           make_ollama_request(context)
+         when 'openai'
+           make_openai_request(context)
+         end
+       end
+
+       def make_ollama_request(context, retries = 3)
+         require 'net/http'
+         require 'json'
+
+         messages = format_messages_for_ollama(context[:messages])
+         prompt = generate_ollama_prompt(messages, context[:tools])
+
+         uri = URI("#{@context[:ollama_base_url]}/api/generate")
+         http = Net::HTTP.new(uri.host, uri.port)
+         http.read_timeout = 120 # Increase timeout to 120 seconds
+
+         request = Net::HTTP::Post.new(uri)
+         request['Content-Type'] = 'application/json'
+         request.body = {
+           model: @context[:model],
+           prompt: prompt,
+           stream: false,
+           options: @context[:parameters]
+         }.to_json
+
+         begin
+           response = http.request(request)
+
+           if response.code == '200'
+             result = JSON.parse(response.body)
+             full_response = ""
+
+             if result.is_a?(Array)
+               result.each { |chunk| full_response += chunk['response'].to_s }
+             else
+               full_response = result['response']
+             end
+
+             {
+               content: full_response,
+               tool_calls: extract_tool_calls(full_response)
+             }
+           else
+             raise "HTTP Error: #{response.message}"
+           end
+         rescue => e
+           if retries > 0
+             puts "Retrying Ollama request (#{retries} attempts left)..."
+             sleep(2) # Wait 2 seconds before retrying
+             make_ollama_request(context, retries - 1)
+           else
+             {
+               content: "Error: #{e.message}. Please try again later.",
+               tool_calls: []
+             }
+           end
+         end
+       end
+
+       def make_openai_request(context)
+         # Implement OpenAI API call here
+         {
+           content: "OpenAI integration not implemented",
+           tool_calls: []
+         }
+       end
+
+       def format_messages_for_ollama(messages)
+         messages.map do |msg|
+           {
+             role: msg[:role],
+             content: msg[:content]
+           }
+         end
+       end
+
+       def generate_ollama_prompt(messages, tools)
+         system_msg = messages.find { |m| m[:role] == 'system' }
+         user_msgs = messages.select { |m| m[:role] != 'system' }
+
+         prompt = []
+         prompt << "System: #{system_msg[:content]}" if system_msg
+         prompt << "\nAvailable Tools:\n#{format_tools_for_ollama(tools)}" unless tools.empty?
+
+         user_msgs.each do |msg|
+           prompt << "\n#{msg[:role].capitalize}: #{msg[:content]}"
+         end
+
+         prompt << "\nAssistant: "
+         prompt.join("\n")
+       end
+
+       def format_tools_for_ollama(tools)
+         return "" if tools.nil? || tools.empty?
+         tools.map do |tool|
+           "#{tool[:name]}: #{tool[:description]}\nParameters: #{tool[:parameters].to_json}"
+         end.join("\n\n")
+       end
+
+       def extract_tool_calls(content)
+         tool_calls = content.scan(/@(\w+)\((.*?)\)/)
+         tool_calls.map do |name, args_str|
+           begin
+             {
+               name: name,
+               arguments: JSON.parse(args_str)
+             }
+           rescue JSON::ParserError
+             nil
+           end
+         end.compact
+       end
+
+       def process_response(response)
+         return response unless response[:tool_calls]&.any?
+
+         tool_results = response[:tool_calls].map do |tool_call|
+           result = @toolkit.execute_tool(tool_call[:name], **tool_call[:arguments])
+           { tool: tool_call[:name], result: result }
+         end
+
+         response.merge(tool_results: tool_results)
+       end
+     end
+   end
+ end
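The class-level define plus the chainable use_model/with_system_prompt/with_parameters methods form a small builder DSL. A hedged sketch of one conversation turn, assuming a reachable Ollama server; the model name, prompt text, and input are illustrative:

# Hypothetical usage of the builder DSL above; assumes `require 'circuit_breaker'`
# has already loaded the executors.
assistant = CircuitBreaker::Executors::AssistantExecutor.define do
  use_model 'mistral'                       # start_with? check routes this to Ollama
  with_system_prompt 'You are a concise documentation assistant.'
  with_parameters(temperature: 0.2)         # forwarded as Ollama `options`
end

assistant.update_context(input: 'What does the workflow editor save?')
result = assistant.execute
puts result[:output][:content]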
data/lib/circuit_breaker/executors/base_executor.rb
@@ -0,0 +1,56 @@
+ require_relative 'dsl'
+ require 'json' # Hash#to_json, used by #to_json below
+
+ module CircuitBreaker
+   module Executors
+     class BaseExecutor
+       include DSL
+
+       attr_reader :context, :result
+
+       def initialize(context = {})
+         @context = context
+         @result = nil
+         validate_parameters
+       end
+
+       def execute
+         run_before_hooks
+         execute_internal
+         run_after_hooks
+         @result
+       end
+
+       protected
+
+       def execute_internal
+         raise NotImplementedError, "#{self.class} must implement #execute_internal"
+       end
+
+       private
+
+       def run_before_hooks
+         # Subclasses without an executor_config block have no hook lists,
+         # so fall back to an empty array rather than calling nil.each.
+         (self.class.get_config[:before_execute] || []).each do |hook|
+           instance_exec(@context, &hook)
+         end
+       end
+
+       def run_after_hooks
+         (self.class.get_config[:after_execute] || []).each do |hook|
+           instance_exec(@result, &hook)
+         end
+       end
+
+       def to_h
+         {
+           executor: self.class.name,
+           context: @context,
+           result: @result
+         }
+       end
+
+       def to_json(*args)
+         to_h.to_json(*args)
+       end
+     end
+   end
+ end
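Since BaseExecutor#execute wraps execute_internal with the before/after hook lists collected by the DSL, a custom executor only needs to implement execute_internal. A minimal sketch (the EchoExecutor class and its :message parameter are invented for illustration). Note that AgentExecutor and AssistantExecutor above override #execute directly and so bypass these hooks, while DockerExecutor below goes through the execute_internal path:

# Hypothetical subclass demonstrating the execute_internal contract.
class EchoExecutor < CircuitBreaker::Executors::BaseExecutor
  executor_config do
    parameter :message, type: :string, required: true, description: 'Text to echo'

    before_execute { |context| puts "echoing #{context[:message]}" }
  end

  protected

  def execute_internal
    @result = { message: @context[:message], status: 'completed' }
  end
end

EchoExecutor.new(message: 'hello').execute
# => { message: 'hello', status: 'completed' }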
data/lib/circuit_breaker/executors/docker_executor.rb
@@ -0,0 +1,56 @@
+ require_relative 'base_executor'
+
+ module CircuitBreaker
+   module Executors
+     class DockerExecutor < BaseExecutor
+       executor_config do
+         parameter :image,
+                   type: :string,
+                   required: true,
+                   description: 'Docker image to run'
+
+         parameter :command,
+                   type: :string,
+                   description: 'Command to run in the container'
+
+         parameter :environment,
+                   type: :hash,
+                   default: {},
+                   description: 'Environment variables to set in the container'
+
+         parameter :volumes,
+                   type: :array,
+                   default: [],
+                   description: 'Volumes to mount in the container'
+
+         validate do |context|
+           if context[:command] && !context[:command].is_a?(String)
+             raise ArgumentError, 'Command must be a string'
+           end
+         end
+
+         before_execute do |context|
+           puts "Preparing to run Docker container with image: #{context[:image]}"
+         end
+
+         after_execute do |result|
+           puts "Docker container execution completed with status: #{result[:status]}"
+         end
+       end
+
+       protected
+
+       def execute_internal
+         # Implementation for Docker execution would go here
+         # This would typically involve running a Docker container
+         @result = {
+           image: @context[:image],
+           command: @context[:command],
+           environment: @context[:environment],
+           volumes: @context[:volumes],
+           status: 'completed'
+         }
+       end
+     end
+   end
+ end
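A usage sketch for the stub above. Since execute_internal only echoes its parameters back (no container actually runs in this 0.1.0 release), the result mirrors the context; the image, command, and mount values are illustrative:

# Hypothetical invocation; no Docker daemon is contacted by this stub.
executor = CircuitBreaker::Executors::DockerExecutor.new(
  image: 'ruby:3.2',
  command: 'ruby -v',
  environment: { 'RACK_ENV' => 'test' },
  volumes: ['./data:/data']
)

result = executor.execute
# before_execute prints the image; after_execute prints the result status
puts result[:status]  # => 'completed'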
data/lib/circuit_breaker/executors/dsl.rb
@@ -0,0 +1,97 @@
+ module CircuitBreaker
+   module Executors
+     module DSL
+       def self.included(base)
+         base.extend(ClassMethods)
+       end
+
+       module ClassMethods
+         def executor_config(&block)
+           @config_builder ||= ConfigBuilder.new
+           @config_builder.instance_eval(&block) if block_given?
+           @config_builder
+         end
+
+         def get_config
+           @config_builder&.to_h || {}
+         end
+       end
+
+       class ConfigBuilder
+         def initialize
+           @config = {
+             parameters: {},
+             validations: [],
+             before_execute: [],
+             after_execute: []
+           }
+         end
+
+         def parameter(name, type: nil, required: false, default: nil, description: nil)
+           @config[:parameters][name] = {
+             type: type,
+             required: required,
+             default: default,
+             description: description
+           }
+         end
+
+         def validate(&block)
+           @config[:validations] << block
+         end
+
+         def before_execute(&block)
+           @config[:before_execute] << block
+         end
+
+         def after_execute(&block)
+           @config[:after_execute] << block
+         end
+
+         def to_h
+           @config
+         end
+       end
+
+       def validate_parameters
+         config = self.class.get_config
+         parameters = config[:parameters] || {}
+
+         parameters.each do |name, opts|
+           if opts[:required] && !@context.key?(name)
+             raise ArgumentError, "Missing required parameter: #{name}"
+           end
+
+           if @context.key?(name)
+             validate_parameter_type(name, @context[name], opts[:type]) if opts[:type]
+           elsif !opts[:default].nil?
+             # Check .nil? rather than truthiness so falsy defaults like `false` are still applied
+             @context[name] = opts[:default]
+           end
+         end
+
+         (config[:validations] || []).each do |validation|
+           instance_exec(@context, &validation)
+         end
+       end
+
+       private
+
+       def validate_parameter_type(name, value, expected_type)
+         case expected_type
+         when :string
+           raise TypeError, "#{name} must be a String" unless value.is_a?(String)
+         when :integer
+           raise TypeError, "#{name} must be an Integer" unless value.is_a?(Integer)
+         when :array
+           raise TypeError, "#{name} must be an Array" unless value.is_a?(Array)
+         when :hash
+           raise TypeError, "#{name} must be a Hash" unless value.is_a?(Hash)
+         when :boolean
+           unless [true, false].include?(value)
+             raise TypeError, "#{name} must be a Boolean"
+           end
+         end
+       end
+     end
+   end
+ end