language-operator 0.1.31 → 0.1.35
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop.yml +7 -8
- data/CHANGELOG.md +14 -0
- data/CI_STATUS.md +56 -0
- data/Gemfile.lock +2 -2
- data/Makefile +22 -6
- data/lib/language_operator/agent/base.rb +10 -6
- data/lib/language_operator/agent/executor.rb +19 -97
- data/lib/language_operator/agent/safety/ast_validator.rb +62 -43
- data/lib/language_operator/agent/safety/safe_executor.rb +27 -2
- data/lib/language_operator/agent/scheduler.rb +60 -0
- data/lib/language_operator/agent/task_executor.rb +548 -0
- data/lib/language_operator/agent.rb +90 -27
- data/lib/language_operator/cli/base_command.rb +117 -0
- data/lib/language_operator/cli/commands/agent.rb +339 -407
- data/lib/language_operator/cli/commands/cluster.rb +274 -290
- data/lib/language_operator/cli/commands/install.rb +110 -119
- data/lib/language_operator/cli/commands/model.rb +284 -184
- data/lib/language_operator/cli/commands/persona.rb +218 -284
- data/lib/language_operator/cli/commands/quickstart.rb +4 -5
- data/lib/language_operator/cli/commands/status.rb +31 -35
- data/lib/language_operator/cli/commands/system.rb +221 -233
- data/lib/language_operator/cli/commands/tool.rb +356 -422
- data/lib/language_operator/cli/commands/use.rb +19 -22
- data/lib/language_operator/cli/helpers/resource_dependency_checker.rb +0 -18
- data/lib/language_operator/cli/wizards/quickstart_wizard.rb +0 -1
- data/lib/language_operator/client/config.rb +20 -21
- data/lib/language_operator/config.rb +115 -3
- data/lib/language_operator/constants.rb +54 -0
- data/lib/language_operator/dsl/agent_context.rb +7 -7
- data/lib/language_operator/dsl/agent_definition.rb +111 -26
- data/lib/language_operator/dsl/config.rb +30 -66
- data/lib/language_operator/dsl/main_definition.rb +114 -0
- data/lib/language_operator/dsl/schema.rb +84 -43
- data/lib/language_operator/dsl/task_definition.rb +315 -0
- data/lib/language_operator/dsl.rb +0 -1
- data/lib/language_operator/instrumentation/task_tracer.rb +285 -0
- data/lib/language_operator/logger.rb +4 -4
- data/lib/language_operator/synthesis_test_harness.rb +324 -0
- data/lib/language_operator/templates/examples/agent_synthesis.tmpl +26 -8
- data/lib/language_operator/templates/schema/CHANGELOG.md +26 -0
- data/lib/language_operator/templates/schema/agent_dsl_openapi.yaml +1 -1
- data/lib/language_operator/templates/schema/agent_dsl_schema.json +84 -42
- data/lib/language_operator/type_coercion.rb +250 -0
- data/lib/language_operator/ux/base.rb +81 -0
- data/lib/language_operator/ux/concerns/README.md +155 -0
- data/lib/language_operator/ux/concerns/headings.rb +90 -0
- data/lib/language_operator/ux/concerns/input_validation.rb +146 -0
- data/lib/language_operator/ux/concerns/provider_helpers.rb +167 -0
- data/lib/language_operator/ux/create_agent.rb +252 -0
- data/lib/language_operator/ux/create_model.rb +267 -0
- data/lib/language_operator/ux/quickstart.rb +594 -0
- data/lib/language_operator/version.rb +1 -1
- data/lib/language_operator.rb +2 -0
- data/requirements/ARCHITECTURE.md +1 -0
- data/requirements/SCRATCH.md +153 -0
- data/requirements/dsl.md +0 -0
- data/requirements/features +1 -0
- data/requirements/personas +1 -0
- data/requirements/proposals +1 -0
- data/requirements/tasks/iterate.md +14 -15
- data/requirements/tasks/optimize.md +13 -4
- data/synth/001/Makefile +90 -0
- data/synth/001/agent.rb +26 -0
- data/synth/001/agent.yaml +7 -0
- data/synth/001/output.log +44 -0
- data/synth/Makefile +39 -0
- data/synth/README.md +342 -0
- metadata +37 -10
- data/lib/language_operator/dsl/workflow_definition.rb +0 -259
- data/test_agent_dsl.rb +0 -108
data/lib/language_operator/agent/safety/safe_executor.rb

@@ -34,10 +34,32 @@ module LanguageOperator
           # Step 2: Execute in sandboxed context
           sandbox = SandboxProxy.new(@context, self)
 
-          # Step 3:
+          # Step 3: Prepend safe constant definitions to the code
+          # This makes Ruby type constants available in the evaluated scope
+          safe_constants_code = <<~RUBY
+            Numeric = ::Numeric
+            Integer = ::Integer
+            Float = ::Float
+            String = ::String
+            Array = ::Array
+            Hash = ::Hash
+            TrueClass = ::TrueClass
+            FalseClass = ::FalseClass
+            Time = ::Time
+            Date = ::Date
+          RUBY
+
+          # Step 4: Execute using instance_eval with safe constants prepended
           # Note: We still use instance_eval but with validated code
           # and wrapped context
-
+          #
+          # The string interpolation below evaluates to:
+          # sandbox.instance_eval("Numeric = ::Numeric\nInteger = ::Integer\nFloat = ::Float\n
+          # String = ::String\nArray = ::Array\nHash = ::Hash\nTrueClass = ::TrueClass\n
+          # FalseClass = ::FalseClass\nTime = ::Time\nDate = ::Date\n<user code>", __FILE__, __LINE__)
+          # rubocop:disable Style/DocumentDynamicEvalDefinition
+          sandbox.instance_eval("#{safe_constants_code}\n#{code}", __FILE__, __LINE__)
+          # rubocop:enable Style/DocumentDynamicEvalDefinition
         rescue ASTValidator::SecurityError => e
           # Re-raise validation errors as executor errors for clarity
           raise SecurityError, "Code validation failed: #{e.message}"

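To make the effect of the new preamble concrete, here is a minimal sketch in plain Ruby; a bare `Object` stands in for `SandboxProxy`, whose implementation is not part of this hunk:

```ruby
# Illustrative only: a bare object stands in for SandboxProxy. Without the
# preamble, constant lookups inside the eval'd string would fall through to
# the proxy's const_missing; with it, the whitelisted names resolve locally.
preamble = <<~RUBY
  Integer = ::Integer
  Hash    = ::Hash
RUBY

user_code = <<~RUBY
  value = 21 * 2
  { doubled: value, valid: value.is_a?(Integer) }
RUBY

sandbox = Object.new
sandbox.instance_eval("#{preamble}\n#{user_code}", __FILE__, __LINE__)
# => { doubled: 42, valid: true }
```
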
data/lib/language_operator/agent/safety/safe_executor.rb

@@ -104,6 +126,9 @@ module LanguageOperator
             return ::LanguageOperator::Dsl::Shell
           end
 
+          # Ruby type constants are now injected at eval time (see SafeExecutor#eval)
+          # but keep this as fallback for dynamic constant access
+
           # Otherwise delegate to the context's module
           @__context__.class.const_get(name)
         rescue ::NameError

data/lib/language_operator/agent/scheduler.rb

@@ -84,6 +84,51 @@ module LanguageOperator
         @rufus_scheduler.join
       end
 
+      # Start the scheduler with a main block (DSL v1)
+      #
+      # @param agent_def [LanguageOperator::Dsl::AgentDefinition] The agent definition with main block
+      # @return [void]
+      def start_with_main(agent_def)
+        logger.info('Agent starting in scheduled mode with main block',
+                    agent_name: agent_def.name,
+                    task_count: agent_def.tasks.size)
+        logger.info("Workspace: #{@agent.workspace_path}")
+        logger.info("Connected to #{@agent.servers_info.length} MCP server(s)")
+
+        # Extract schedule from agent definition or use default
+        cron_schedule = agent_def.schedule&.cron || '0 6 * * *'
+
+        logger.info('Scheduling main block execution', cron: cron_schedule, agent: agent_def.name)
+
+        # Create task executor with constraints config
+        require_relative 'task_executor'
+        config = build_executor_config(agent_def)
+        task_executor = TaskExecutor.new(@agent, agent_def.tasks, config)
+
+        @rufus_scheduler.cron(cron_schedule) do
+          with_span('agent.scheduler.execute', attributes: {
+                      'scheduler.cron_expression' => cron_schedule,
+                      'agent.name' => agent_def.name,
+                      'scheduler.task_type' => 'main_block'
+                    }) do
+            logger.timed('Scheduled main block execution') do
+              logger.info('Executing scheduled main block', agent: agent_def.name)
+
+              # Get inputs from environment or default to empty hash
+              inputs = {}
+
+              # Execute main block
+              result = agent_def.main.call(inputs, task_executor)
+
+              logger.info('Main block completed', result: result)
+            end
+          end
+        end
+
+        logger.info('Scheduler started, waiting for scheduled tasks')
+        @rufus_scheduler.join
+      end
+
       # Stop the scheduler
       #
       # @return [void]

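Note that `start_with_main` invokes the stored block as `agent_def.main.call(inputs, task_executor)`, so whatever the DSL registers as `main` must accept those two arguments. A hand-rolled stand-in (bypassing the DSL loader, which is not shown in this diff) would be:

```ruby
# Stand-in for a DSL-defined main block: the scheduler calls it with
# (inputs, task_executor). execute_task is defined in task_executor.rb below,
# and the task names here are the placeholders used in its documentation.
main_block = lambda do |inputs, executor|
  data = executor.execute_task(:fetch_data, inputs: inputs)
  executor.execute_task(:process_data, inputs: data)
end

# With the default schedule ('0 6 * * *'), the block runs daily at 06:00.
```
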
data/lib/language_operator/agent/scheduler.rb

@@ -178,6 +223,21 @@ module LanguageOperator
 
         logger.info('Scheduled: Daily at 6:00 AM')
       end
+
+      # Build executor configuration from agent definition constraints
+      #
+      # @param agent_def [LanguageOperator::Dsl::AgentDefinition] The agent definition
+      # @return [Hash] Executor configuration
+      def build_executor_config(agent_def)
+        config = {}
+
+        if agent_def.constraints
+          config[:timeout] = agent_def.constraints[:timeout] if agent_def.constraints[:timeout]
+          config[:max_retries] = agent_def.constraints[:max_retries] if agent_def.constraints[:max_retries]
+        end
+
+        config
+      end
     end
   end
 end

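The constraint mapping is a plain key-by-key copy; an illustrative call (hypothetical values, with a Struct standing in for the real AgentDefinition) behaves like:

```ruby
# Illustrative: constraints flow straight through to the TaskExecutor config.
# A Struct stands in for LanguageOperator::Dsl::AgentDefinition here.
agent_def = Struct.new(:constraints).new({ timeout: 120, max_retries: 5 })
build_executor_config(agent_def)
# => { timeout: 120, max_retries: 5 }
# Omitted keys fall back to TaskExecutor's defaults (30.0 s timeout, 3 retries).
```
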
data/lib/language_operator/agent/task_executor.rb (new file)

@@ -0,0 +1,548 @@
+# frozen_string_literal: true
+
+require 'timeout'
+require 'socket'
+require_relative '../loggable'
+require_relative 'instrumentation'
+require_relative '../instrumentation/task_tracer'
+
+module LanguageOperator
+  module Agent
+    # Custom error classes for task execution
+    class TaskExecutionError < StandardError
+      attr_reader :task_name, :original_error
+
+      def initialize(task_name, message, original_error = nil)
+        @task_name = task_name
+        @original_error = original_error
+        super("Task '#{task_name}' execution failed: #{message}")
+      end
+    end
+
+    class TaskValidationError < TaskExecutionError
+    end
+
+    class TaskTimeoutError < TaskExecutionError
+    end
+
+    # Task Executor for DSL v1 organic functions
+    #
+    # Executes both neural (LLM-based) and symbolic (code-based) tasks.
+    # Provides the `execute_task` method that MainDefinition blocks use
+    # to invoke tasks transparently regardless of implementation type.
+    #
+    # @example Executing a task
+    #   executor = TaskExecutor.new(agent, tasks_registry)
+    #   result = executor.execute_task(:fetch_data, inputs: { user_id: 123 })
+    #
+    # @example In a main block
+    #   main do |inputs|
+    #     data = execute_task(:fetch_data, inputs: inputs)
+    #     execute_task(:process_data, inputs: data)
+    #   end
+    class TaskExecutor
+      include LanguageOperator::Loggable
+      include Instrumentation
+      include LanguageOperator::Instrumentation::TaskTracer
+
+      # Error types that should be retried
+      RETRYABLE_ERRORS = [
+        Timeout::Error,
+        Errno::ECONNREFUSED,
+        Errno::ECONNRESET,
+        Errno::ETIMEDOUT,
+        SocketError
+      ].freeze
+
+      # Error categories for logging and operator integration
+      ERROR_CATEGORIES = {
+        validation: 'VALIDATION',
+        execution: 'EXECUTION',
+        timeout: 'TIMEOUT',
+        network: 'NETWORK',
+        system: 'SYSTEM'
+      }.freeze
+
+      attr_reader :agent, :tasks, :config
+
+      # Initialize the task executor
+      #
+      # @param agent [LanguageOperator::Agent::Base] The agent instance (provides LLM client, tools)
+      # @param tasks [Hash<Symbol, TaskDefinition>] Registry of task definitions
+      # @param config [Hash] Execution configuration
+      def initialize(agent, tasks = {}, config = {})
+        @agent = agent
+        @tasks = tasks
+        @config = default_config.merge(config)
+        logger.debug('TaskExecutor initialized',
+                     task_count: @tasks.size,
+                     timeout: @config[:timeout],
+                     max_retries: @config[:max_retries])
+      end
+
+      # Execute a task by name with given inputs
+      #
+      # This is the main entry point called from MainDefinition blocks.
+      # Routes to neural or symbolic execution based on task implementation.
+      # Includes timeout, retry logic, and comprehensive error handling.
+      #
+      # @param task_name [Symbol] Name of the task to execute
+      # @param inputs [Hash] Input parameters for the task
+      # @param timeout [Numeric] Override timeout for this task (seconds)
+      # @param max_retries [Integer] Override max retries for this task
+      # @return [Hash] Validated output from the task
+      # @raise [ArgumentError] If task not found or inputs invalid
+      # @raise [TaskExecutionError] If task execution fails after retries
+      def execute_task(task_name, inputs: {}, timeout: nil, max_retries: nil)
+        execution_start = Time.now
+        timeout ||= @config[:timeout]
+        max_retries ||= @config[:max_retries]
+
+        with_span('task_executor.execute_task', attributes: {
+                    'task.name' => task_name.to_s,
+                    'task.inputs' => inputs.keys.map(&:to_s).join(','),
+                    'task.timeout' => timeout,
+                    'task.max_retries' => max_retries
+                  }) do
+          # Find task definition
+          task = @tasks[task_name.to_sym]
+          raise ArgumentError, "Task not found: #{task_name}. Available tasks: #{@tasks.keys.join(', ')}" unless task
+
+          task_type = determine_task_type(task)
+          logger.info('Executing task',
+                      task: task_name,
+                      type: task_type,
+                      timeout: timeout,
+                      max_retries: max_retries)
+
+          # Execute with retry logic
+          execute_with_retry(task, task_name, inputs, timeout, max_retries, execution_start)
+        end
+      rescue ArgumentError => e
+        # Validation errors should not be retried - re-raise immediately
+        log_task_error(task_name, e, :validation, execution_start)
+        raise TaskValidationError.new(task_name, e.message, e)
+      rescue StandardError => e
+        # Catch any unexpected errors that escaped retry logic
+        log_task_error(task_name, e, :system, execution_start)
+        raise create_appropriate_error(task_name, e)
+      end
+
+      # Execute a neural task (instructions-based, LLM-driven)
+      #
+      # @param task [TaskDefinition] The task definition
+      # @param inputs [Hash] Input parameters
+      # @return [Hash] Validated outputs
+      # @raise [StandardError] If LLM execution fails or output validation fails
+      def execute_neural(task, inputs)
+        # Validate inputs first
+        validated_inputs = task.validate_inputs(inputs)
+
+        logger.debug('Executing neural task',
+                     task: task.name,
+                     instructions: task.instructions_text,
+                     inputs: validated_inputs)
+
+        # Build prompt for LLM
+        prompt = build_neural_prompt(task, validated_inputs)
+
+        # Execute LLM call within traced span
+        outputs = tracer.in_span('gen_ai.chat', attributes: neural_task_attributes(task, prompt, validated_inputs)) do |span|
+          # Call LLM with full tool access
+          response = @agent.send_message(prompt)
+          response_text = response.is_a?(String) ? response : response.content
+
+          logger.debug('Neural task response received',
+                       task: task.name,
+                       response_length: response_text.length)
+
+          # Record token usage and response metadata
+          record_token_usage(response, span)
+
+          # Record tool calls if available
+          record_tool_calls(response, span)
+
+          # Parse response within child span
+          parsed_outputs = tracer.in_span('task_executor.parse_response') do |parse_span|
+            record_parse_metadata(response_text, parse_span)
+            parse_neural_response(response_text, task)
+          end
+
+          # Record output metadata
+          record_output_metadata(parsed_outputs, span)
+
+          parsed_outputs
+        end
+
+        # Validate outputs against schema
+        task.validate_outputs(outputs)
+      end
+
+      # Helper method for symbolic tasks to execute tools
+      #
+      # This is a simplified interface - symbolic tasks should primarily use
+      # execute_llm to leverage tools through the LLM interface, or call tools
+      # directly through the MCP client if needed.
+      #
+      # @param tool_name [String] Name of the tool
+      # @param action [String] Tool action/method
+      # @param params [Hash] Tool parameters
+      # @return [Object] Tool response
+      # @note For DSL v1, tools are accessed via LLM tool calling, not direct invocation
+      def execute_tool(tool_name, action, params = {})
+        # Build prompt to use the tool via LLM
+        prompt = "Use the #{tool_name} tool to perform #{action} with parameters: #{params.inspect}"
+        execute_llm(prompt)
+        # Parse response - for now just return the text
+        # TODO: More sophisticated tool result extraction
+      end
+
+      # Helper method for symbolic tasks to call LLM directly
+      #
+      # @param prompt [String] Prompt to send to LLM
+      # @return [String] LLM response
+      def execute_llm(prompt)
+        response = @agent.send_message(prompt)
+        response.is_a?(String) ? response : response.content
+      end
+
+      # Execute multiple tasks in parallel
+      #
+      # Provides explicit parallelism for task execution. Users specify which tasks
+      # should run in parallel, and this method handles the concurrent execution.
+      #
+      # @param tasks [Array<Hash>] Array of task specifications
+      # @param in_threads [Integer] Number of threads to use (default: 4)
+      # @return [Array] Results from all tasks in the same order as input
+      # @raise [RuntimeError] If any task fails
+      #
+      # @example Execute multiple independent tasks
+      #   results = execute_parallel([
+      #     { name: :fetch_source1 },
+      #     { name: :fetch_source2 }
+      #   ])
+      #   # => [result1, result2]
+      #
+      # @example With inputs
+      #   results = execute_parallel([
+      #     { name: :process, inputs: { data: data1 } },
+      #     { name: :analyze, inputs: { data: data2 } }
+      #   ])
+      #
+      def execute_parallel(tasks, in_threads: 4)
+        require 'parallel'
+
+        logger.info('Executing tasks in parallel', count: tasks.size, threads: in_threads)
+
+        results = Parallel.map(tasks, in_threads: in_threads) do |task_spec|
+          task_name = task_spec[:name]
+          task_inputs = task_spec[:inputs] || {}
+
+          execute_task(task_name, inputs: task_inputs)
+        end
+
+        logger.info('Parallel execution complete', results_count: results.size)
+        results
+      rescue Parallel::DeadWorker => e
+        logger.error('Parallel execution failed - worker died', error: e.message)
+        raise "Parallel task execution failed: #{e.message}"
+      rescue StandardError => e
+        logger.error('Parallel execution failed', error: e.class.name, message: e.message)
+        raise
+      end
+
+      private
+
+      def logger_component
+        'Agent::TaskExecutor'
+      end
+
+      # Build prompt for neural task execution
+      #
+      # @param task [TaskDefinition] The task definition
+      # @param inputs [Hash] Validated input parameters
+      # @return [String] Prompt for LLM
+      def build_neural_prompt(task, inputs)
+        prompt = "# Task: #{task.name}\n\n"
+        prompt += "## Instructions\n#{task.instructions_text}\n\n"
+
+        if inputs.any?
+          prompt += "## Inputs\n"
+          inputs.each do |key, value|
+            prompt += "- #{key}: #{value.inspect}\n"
+          end
+          prompt += "\n"
+        end
+
+        prompt += "## Output Schema\n"
+        prompt += "You must return a JSON object with the following fields:\n"
+        task.outputs_schema.each do |key, type|
+          prompt += "- #{key} (#{type})\n"
+        end
+        prompt += "\n"
+
+        prompt += 'Return ONLY valid JSON matching the output schema. '
+        prompt += "Use available tools as needed to complete the task.\n"
+
+        prompt
+      end
+
+      # Parse LLM response to extract output values
+      #
+      # @param response_text [String] LLM response
+      # @param task [TaskDefinition] Task definition for schema
+      # @return [Hash] Parsed outputs
+      # @raise [RuntimeError] If parsing fails
+      def parse_neural_response(response_text, task)
+        # Try to extract JSON from response
+        # Look for JSON code blocks first
+        json_match = response_text.match(/```json\s*\n(.*?)\n```/m)
+        json_text = if json_match
+                      json_match[1]
+                    else
+                      # Try to find raw JSON object
+                      json_object_match = response_text.match(/\{.*\}/m)
+                      json_object_match ? json_object_match[0] : response_text
+                    end
+
+        # Parse JSON
+        parsed = JSON.parse(json_text)
+
+        # Deep convert all string keys to symbols (including nested hashes and arrays)
+        deep_symbolize_keys(parsed)
+      rescue JSON::ParserError => e
+        logger.error('Failed to parse neural task response as JSON',
+                     task: task.name,
+                     response: response_text[0..200],
+                     error: e.message)
+        raise "Neural task '#{task.name}' returned invalid JSON: #{e.message}"
+      end
+
+      # Recursively convert all hash keys to symbols
+      def deep_symbolize_keys(obj)
+        case obj
+        when Hash
+          obj.transform_keys(&:to_sym).transform_values { |v| deep_symbolize_keys(v) }
+        when Array
+          obj.map { |item| deep_symbolize_keys(item) }
+        else
+          obj
+        end
+      end
+
+      # Default configuration for task execution
+      #
+      # @return [Hash] Default configuration
+      def default_config
+        {
+          timeout: 30.0, # Default timeout in seconds
+          max_retries: 3, # Default max retry attempts
+          retry_delay_base: 1.0, # Base delay for exponential backoff
+          retry_delay_max: 10.0 # Maximum delay between retries
+        }
+      end
+
+      # Determine task type for logging and telemetry
+      #
+      # @param task [TaskDefinition] The task definition
+      # @return [String] Task type
+      def determine_task_type(task)
+        if task.neural? && task.symbolic?
+          'hybrid'
+        elsif task.neural?
+          'neural'
+        elsif task.symbolic?
+          'symbolic'
+        else
+          'undefined'
+        end
+      end
+
+      # Execute task with retry logic and timeout
+      #
+      # @param task [TaskDefinition] The task definition
+      # @param task_name [Symbol] Name of the task
+      # @param inputs [Hash] Input parameters
+      # @param timeout [Numeric] Timeout in seconds
+      # @param max_retries [Integer] Maximum retry attempts
+      # @param execution_start [Time] When execution started
+      # @return [Hash] Task outputs
+      def execute_with_retry(task, task_name, inputs, timeout, max_retries, execution_start)
+        attempt = 0
+        last_error = nil
+
+        while attempt <= max_retries
+          begin
+            return execute_single_attempt(task, task_name, inputs, timeout, attempt, execution_start)
+          rescue StandardError => e
+            last_error = e
+            attempt += 1
+
+            # Don't retry validation errors or non-retryable errors
+            unless retryable_error?(e) && attempt <= max_retries
+              # Re-raise ArgumentError so it gets caught by the ArgumentError rescue block
+              raise e if e.is_a?(ArgumentError)
+
+              log_task_error(task_name, e, categorize_error(e), execution_start, attempt - 1)
+              raise create_appropriate_error(task_name, e)
+            end
+
+            # Calculate delay for exponential backoff
+            delay = calculate_retry_delay(attempt - 1)
+            logger.warn('Task execution failed, retrying',
+                        task: task_name,
+                        attempt: attempt,
+                        max_retries: max_retries,
+                        error: e.class.name,
+                        message: e.message,
+                        retry_delay: delay)
+
+            sleep(delay) if delay.positive?
+          end
+        end
+
+        # If we get here, we've exhausted all retries
+        log_task_error(task_name, last_error, categorize_error(last_error), execution_start, max_retries)
+        raise create_appropriate_error(task_name, last_error)
+      end
+
+      # Execute a single attempt of a task with timeout
+      #
+      # @param task [TaskDefinition] The task definition
+      # @param task_name [Symbol] Name of the task
+      # @param inputs [Hash] Input parameters
+      # @param timeout [Numeric] Timeout in seconds
+      # @param attempt [Integer] Current attempt number
+      # @param execution_start [Time] When execution started
+      # @return [Hash] Task outputs
+      def execute_single_attempt(task, task_name, inputs, timeout, attempt, _execution_start)
+        attempt_start = Time.now
+
+        result = if timeout.positive?
+                   Timeout.timeout(timeout) do
+                     execute_task_implementation(task, inputs)
+                   end
+                 else
+                   execute_task_implementation(task, inputs)
+                 end
+
+        execution_time = Time.now - attempt_start
+        logger.debug('Task execution completed',
+                     task: task_name,
+                     attempt: attempt + 1,
+                     execution_time: execution_time.round(3))
+
+        result
+      rescue Timeout::Error => e
+        execution_time = Time.now - attempt_start
+        logger.warn('Task execution timed out',
+                    task: task_name,
+                    attempt: attempt + 1,
+                    timeout: timeout,
+                    execution_time: execution_time.round(3))
+        raise TaskTimeoutError.new(task_name, "timed out after #{timeout}s", e)
+      end
+
+      # Execute the actual task implementation (neural or symbolic)
+      #
+      # @param task [TaskDefinition] The task definition
+      # @param inputs [Hash] Input parameters
+      # @return [Hash] Task outputs
+      def execute_task_implementation(task, inputs)
+        if task.neural?
+          # Neural execution: LLM with tool access
+          execute_neural(task, inputs)
+        else
+          # Symbolic execution: Direct Ruby code within traced span
+          tracer.in_span('task_executor.symbolic', attributes: symbolic_task_attributes(task)) do |span|
+            validated_inputs = task.validate_inputs(inputs)
+            span.set_attribute('task.input.keys', validated_inputs.keys.map(&:to_s).join(','))
+            span.set_attribute('task.input.count', validated_inputs.size)
+
+            # Pass self as context so symbolic tasks can call execute_task, execute_tool, etc.
+            outputs = task.call(validated_inputs, self)
+
+            record_output_metadata(outputs, span) if outputs.is_a?(Hash)
+            outputs
+          end
+        end
+      end
+
+      # Check if an error should be retried
+      #
+      # @param error [Exception] The error that occurred
+      # @return [Boolean] Whether the error should be retried
+      def retryable_error?(error)
+        RETRYABLE_ERRORS.any? { |error_class| error.is_a?(error_class) }
+      end
+
+      # Categorize error for logging and operator integration
+      #
+      # @param error [Exception] The error that occurred
+      # @return [Symbol] Error category
+      def categorize_error(error)
+        case error
+        when ArgumentError, TaskValidationError
+          :validation
+        when Timeout::Error, TaskTimeoutError
+          :timeout
+        when TaskExecutionError
+          # Check the original error for categorization
+          error.original_error ? categorize_error(error.original_error) : :execution
+        when *RETRYABLE_ERRORS
+          :network
+        else
+          :execution
+        end
+      end
+
+      # Calculate retry delay with exponential backoff
+      #
+      # @param attempt [Integer] Current attempt number (0-based)
+      # @return [Float] Delay in seconds
+      def calculate_retry_delay(attempt)
+        delay = @config[:retry_delay_base] * (2**attempt)
+        [delay, @config[:retry_delay_max]].min
+      end
+
+      # Create appropriate error type based on original error
+      #
+      # @param task_name [Symbol] Name of the task
+      # @param original_error [Exception] The original error
+      # @return [TaskExecutionError] Appropriate error type
+      def create_appropriate_error(task_name, original_error)
+        case original_error
+        when TaskTimeoutError
+          original_error
+        when Timeout::Error
+          TaskTimeoutError.new(task_name, 'timed out', original_error)
+        when ArgumentError
+          TaskValidationError.new(task_name, original_error.message, original_error)
+        else
+          TaskExecutionError.new(task_name, original_error.message, original_error)
+        end
+      end
+
+      # Log task error with comprehensive context
+      #
+      # @param task_name [Symbol] Name of the task
+      # @param error [Exception] The error that occurred
+      # @param category [Symbol] Error category
+      # @param execution_start [Time] When execution started
+      # @param retry_count [Integer] Number of retries attempted
+      def log_task_error(task_name, error, category, execution_start, retry_count = 0)
+        execution_time = Time.now - execution_start
+
+        logger.error('Task execution failed',
+                     task: task_name,
+                     error_category: ERROR_CATEGORIES[category],
+                     error_class: error.class.name,
+                     error_message: error.message,
+                     execution_time: execution_time.round(3),
+                     retry_count: retry_count,
+                     retryable: retryable_error?(error),
+                     backtrace: error.backtrace&.first(5))
+      end
+    end
+  end
+end
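
With the defaults above, a retryable failure (network or timeout) is attempted up to four times in total (the initial attempt plus `max_retries: 3`), with backoff delays of 1.0 s, 2.0 s, and 4.0 s, each attempt bounded by the 30 s timeout. A short driver sketch follows; `agent` and the `tasks` registry are assumed to come from the DSL loader (dsl/task_definition.rb, not shown here), and the task names are placeholders:

```ruby
# Sketch: driving TaskExecutor directly. `agent` and `tasks` are assumed to be
# produced by the DSL loader; :summarize and the URL are hypothetical.
config = { timeout: 60.0, max_retries: 2 }
executor = LanguageOperator::Agent::TaskExecutor.new(agent, tasks, config)

# Neural and symbolic tasks are invoked identically:
summary = executor.execute_task(:summarize, inputs: { url: 'https://example.com' })

# Independent tasks can be fanned out; results return in input order:
reports = executor.execute_parallel([
                                      { name: :fetch_source1 },
                                      { name: :fetch_source2 }
                                    ], in_threads: 2)
```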