language-operator 0.1.58 → 0.1.59
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile.lock +1 -1
- data/lib/language_operator/agent/base.rb +19 -0
- data/lib/language_operator/agent/task_executor.rb +60 -21
- data/lib/language_operator/agent/telemetry.rb +22 -11
- data/lib/language_operator/agent.rb +3 -0
- data/lib/language_operator/cli/base_command.rb +7 -1
- data/lib/language_operator/cli/commands/agent.rb +575 -0
- data/lib/language_operator/cli/formatters/optimization_formatter.rb +226 -0
- data/lib/language_operator/cli/formatters/progress_formatter.rb +1 -1
- data/lib/language_operator/client/base.rb +72 -2
- data/lib/language_operator/client/mcp_connector.rb +4 -6
- data/lib/language_operator/learning/adapters/base_adapter.rb +147 -0
- data/lib/language_operator/learning/adapters/jaeger_adapter.rb +218 -0
- data/lib/language_operator/learning/adapters/signoz_adapter.rb +432 -0
- data/lib/language_operator/learning/adapters/tempo_adapter.rb +236 -0
- data/lib/language_operator/learning/optimizer.rb +318 -0
- data/lib/language_operator/learning/pattern_detector.rb +260 -0
- data/lib/language_operator/learning/task_synthesizer.rb +261 -0
- data/lib/language_operator/learning/trace_analyzer.rb +280 -0
- data/lib/language_operator/templates/schema/agent_dsl_openapi.yaml +1 -1
- data/lib/language_operator/templates/schema/agent_dsl_schema.json +1 -1
- data/lib/language_operator/templates/task_synthesis.tmpl +97 -0
- data/lib/language_operator/ux/concerns/provider_helpers.rb +2 -2
- data/lib/language_operator/version.rb +1 -1
- data/synth/003/Makefile +8 -1
- data/synth/003/output.log +68 -0
- data/synth/README.md +1 -3
- metadata +12 -1
data/lib/language_operator/learning/adapters/tempo_adapter.rb
@@ -0,0 +1,236 @@
# frozen_string_literal: true

require 'net/http'
require 'json'
require 'uri'
require_relative 'base_adapter'

module LanguageOperator
  module Learning
    module Adapters
      # Grafana Tempo backend adapter for trace queries
      #
      # Queries Tempo's Parquet-backed trace storage via the /api/search
      # HTTP endpoint with TraceQL query language support.
      #
      # TraceQL provides powerful span filtering with structural operators:
      # - { span.attribute = "value" } - Basic attribute filtering
      # - { span.foo = "bar" && span.baz > 100 } - Multiple conditions
      # - { span.parent } >> { span.child } - Structural relationships
      #
      # @example Basic usage
      #   adapter = TempoAdapter.new('http://tempo:3200')
      #
      #   spans = adapter.query_spans(
      #     filter: { task_name: 'fetch_data' },
      #     time_range: (Time.now - 3600)..Time.now,
      #     limit: 100
      #   )
      class TempoAdapter < BaseAdapter
        # Tempo search endpoint
        SEARCH_PATH = '/api/search'

        # Check if Tempo is available at endpoint
        #
        # @param endpoint [String] Tempo endpoint URL
        # @param _api_key [String, nil] API key (unused, Tempo typically doesn't require auth)
        # @return [Boolean] True if Tempo API is reachable
        def self.available?(endpoint, _api_key = nil)
          uri = URI.join(endpoint, SEARCH_PATH)
          # Test with minimal query
          uri.query = URI.encode_www_form(q: '{ }', limit: 1)

          response = Net::HTTP.start(uri.host, uri.port, use_ssl: uri.scheme == 'https', open_timeout: 2, read_timeout: 2) do |http|
            request = Net::HTTP::Get.new(uri.request_uri)
            http.request(request)
          end

          response.is_a?(Net::HTTPSuccess)
        rescue StandardError
          false
        end

        # Query spans from Tempo using TraceQL
        #
        # @param filter [Hash] Filter criteria
        # @option filter [String] :task_name Task name to filter by
        # @param time_range [Range<Time>] Time range for query
        # @param limit [Integer] Maximum traces to return
        # @return [Array<Hash>] Normalized span data
        def query_spans(filter:, time_range:, limit:)
          times = parse_time_range(time_range)
          traceql_query = build_traceql_query(filter)
          traces = search_traces(traceql_query, times, limit)
          extract_spans_from_traces(traces)
        end

        private

        # Build TraceQL query from filter
        #
        # @param filter [Hash] Filter criteria
        # @return [String] TraceQL query string
        def build_traceql_query(filter)
          conditions = []

          # Filter by task name
          conditions << "span.\"task.name\" = \"#{escape_traceql_value(filter[:task_name])}\"" if filter[:task_name]

          # Additional attribute filters
          if filter[:attributes].is_a?(Hash)
            filter[:attributes].each do |key, value|
              conditions << "span.\"#{escape_traceql_key(key)}\" = \"#{escape_traceql_value(value)}\""
            end
          end

          # Combine conditions with AND
          query = conditions.any? ? conditions.join(' && ') : ''
          "{ #{query} }"
        end

        # Escape TraceQL attribute key
        #
        # @param key [String, Symbol] Attribute key
        # @return [String] Escaped key
        def escape_traceql_key(key)
          key.to_s.gsub('"', '\"')
        end

        # Escape TraceQL value
        #
        # @param value [Object] Attribute value
        # @return [String] Escaped value
        def escape_traceql_value(value)
          value.to_s.gsub('"', '\"').gsub('\\', '\\\\')
        end

        # Search traces via Tempo HTTP API
        #
        # @param traceql_query [String] TraceQL query
        # @param times [Hash] Start and end times
        # @param limit [Integer] Result limit
        # @return [Array<Hash>] Trace data
        def search_traces(traceql_query, times, limit)
          uri = build_search_uri(traceql_query, times, limit)

          Net::HTTP.start(uri.host, uri.port, use_ssl: uri.scheme == 'https', open_timeout: 5, read_timeout: 30) do |http|
            request = Net::HTTP::Get.new(uri.request_uri)
            request['Accept'] = 'application/json'

            response = http.request(request)

            raise "Tempo query failed: #{response.code} #{response.message}" unless response.is_a?(Net::HTTPSuccess)

            result = JSON.parse(response.body, symbolize_names: true)
            result[:traces] || []
          end
        end

        # Build Tempo search URI with query parameters
        #
        # @param traceql_query [String] TraceQL query
        # @param times [Hash] Start and end times
        # @param limit [Integer] Result limit
        # @return [URI] Complete URI with query params
        def build_search_uri(traceql_query, times, limit)
          params = {
            q: traceql_query,
            limit: limit,
            start: times[:start].to_i, # Unix seconds
            end: times[:end].to_i
          }

          uri = URI.join(@endpoint, SEARCH_PATH)
          uri.query = URI.encode_www_form(params)
          uri
        end

        # Extract all spans from traces
        #
        # @param traces [Array<Hash>] Tempo trace data
        # @return [Array<Hash>] Normalized spans
        def extract_spans_from_traces(traces)
          spans = []

          traces.each do |trace|
            trace_id = trace[:traceID]

            # Tempo returns spanSets (matched span groups)
            (trace[:spanSets] || []).each do |span_set|
              (span_set[:spans] || []).each do |span_data|
                spans << normalize_span(span_data, trace_id)
              end
            end
          end

          spans
        end

        # Normalize Tempo span to common format
        #
        # @param span_data [Hash] Raw Tempo span
        # @param trace_id [String] Trace ID
        # @return [Hash] Normalized span
        def normalize_span(span_data, trace_id)
          {
            span_id: span_data[:spanID],
            trace_id: trace_id,
            name: span_data[:name] || 'unknown',
            timestamp: parse_timestamp(span_data[:startTimeUnixNano]),
            duration_ms: parse_duration(span_data[:durationNanos]),
            attributes: parse_attributes(span_data[:attributes])
          }
        end

        # Parse Tempo timestamp (nanoseconds) to Time
        #
        # @param timestamp [String, Integer] Timestamp in nanoseconds
        # @return [Time] Parsed time
        def parse_timestamp(timestamp)
          return Time.now unless timestamp

          nanos = timestamp.is_a?(String) ? timestamp.to_i : timestamp
          Time.at(nanos / 1_000_000_000.0)
        end

        # Parse Tempo duration (nanoseconds) to milliseconds
        #
        # @param duration [Integer] Duration in nanoseconds
        # @return [Float] Duration in milliseconds
        def parse_duration(duration)
          return 0.0 unless duration

          duration / 1_000_000.0
        end

        # Parse Tempo attributes into flat hash
        #
        # Tempo attributes format:
        #   [
        #     { key: "http.method", value: { stringValue: "GET" } },
        #     { key: "http.status_code", value: { intValue: 200 } }
        #   ]
        #
        # @param attributes [Array<Hash>] Attribute array
        # @return [Hash] Flat attributes
        def parse_attributes(attributes)
          return {} unless attributes.is_a?(Array)

          attributes.each_with_object({}) do |attr, hash|
            key = attr[:key].to_s
            value_obj = attr[:value] || {}

            # Extract value based on type
            value = value_obj[:stringValue] ||
                    value_obj[:intValue] ||
                    value_obj[:doubleValue] ||
                    value_obj[:boolValue] ||
                    value_obj[:bytesValue]

            hash[key] = value if value
          end
        end
      end
    end
  end
end
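For orientation, a minimal sketch of what the private helpers above return for typical inputs, assuming the gem is loaded and that BaseAdapter stores the endpoint passed to new (as the class example implies); send is used because the methods are private, and the endpoint URL is a placeholder:

    adapter = LanguageOperator::Learning::Adapters::TempoAdapter.new('http://tempo:3200')

    # build_traceql_query wraps each filter entry in a span-attribute condition and joins them with &&
    adapter.send(:build_traceql_query, { task_name: 'fetch_data', attributes: { 'http.method' => 'GET' } })
    # => '{ span."task.name" = "fetch_data" && span."http.method" = "GET" }'

    # parse_attributes flattens Tempo's typed key/value pairs into a plain hash
    adapter.send(:parse_attributes, [
                   { key: 'http.method', value: { stringValue: 'GET' } },
                   { key: 'http.status_code', value: { intValue: 200 } }
                 ])
    # => { "http.method" => "GET", "http.status_code" => 200 }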
data/lib/language_operator/learning/optimizer.rb
@@ -0,0 +1,318 @@
# frozen_string_literal: true

require 'logger'

module LanguageOperator
  module Learning
    # Orchestrates the optimization of neural tasks to symbolic implementations
    #
    # The Optimizer analyzes running agents, identifies optimization opportunities,
    # proposes code changes, and applies them with user approval. It integrates
    # TraceAnalyzer (pattern detection) and PatternDetector (code generation) to
    # provide a complete optimization workflow.
    #
    # @example Basic usage
    #   optimizer = Optimizer.new(
    #     agent_name: 'github-monitor',
    #     agent_definition: agent_def,
    #     trace_analyzer: TraceAnalyzer.new(endpoint: ENV['OTEL_QUERY_ENDPOINT']),
    #     pattern_detector: PatternDetector.new(...)
    #   )
    #
    #   opportunities = optimizer.analyze
    #   opportunities.each do |opp|
    #     proposal = optimizer.propose(task_name: opp[:task_name])
    #     # Show to user, get approval
    #     optimizer.apply(proposal) if approved
    #   end
    class Optimizer
      # Minimum consistency score required for optimization
      DEFAULT_MIN_CONSISTENCY = 0.85

      # Minimum executions required for optimization
      DEFAULT_MIN_EXECUTIONS = 10

      # Initialize optimizer
      #
      # @param agent_name [String] Name of the agent to optimize
      # @param agent_definition [Dsl::AgentDefinition] Agent definition object
      # @param trace_analyzer [TraceAnalyzer] Analyzer for querying execution traces
      # @param pattern_detector [PatternDetector] Detector for generating symbolic code
      # @param task_synthesizer [TaskSynthesizer, nil] Optional LLM-based synthesizer
      # @param logger [Logger, nil] Logger instance (creates default if nil)
      def initialize(agent_name:, agent_definition:, trace_analyzer:, pattern_detector:, task_synthesizer: nil,
                     logger: nil)
        @agent_name = agent_name
        @agent_definition = agent_definition
        @trace_analyzer = trace_analyzer
        @pattern_detector = pattern_detector
        @task_synthesizer = task_synthesizer
        @logger = logger || ::Logger.new($stdout, level: ::Logger::WARN)
      end

      # Analyze agent for optimization opportunities
      #
      # Queries execution traces for each neural task and determines which tasks
      # are eligible for optimization based on consistency and execution count.
      #
      # @param min_consistency [Float] Minimum consistency threshold (0.0-1.0)
      # @param min_executions [Integer] Minimum execution count required
      # @param time_range [Integer, Range<Time>, nil] Time range for trace queries
      # @return [Array<Hash>] Array of optimization opportunities
      def analyze(min_consistency: DEFAULT_MIN_CONSISTENCY, min_executions: DEFAULT_MIN_EXECUTIONS, time_range: nil)
        opportunities = []

        # Find all neural tasks in the agent
        neural_tasks = find_neural_tasks

        if neural_tasks.empty?
          @logger.info("No neural tasks found in agent '#{@agent_name}'")
          return opportunities
        end

        # Analyze each neural task
        neural_tasks.each do |task|
          analysis = @trace_analyzer.analyze_patterns(
            task_name: task[:name],
            min_executions: min_executions,
            consistency_threshold: min_consistency,
            time_range: time_range
          )

          next unless analysis

          opportunities << {
            task_name: task[:name],
            task_definition: task[:definition],
            execution_count: analysis[:execution_count],
            consistency_score: analysis[:consistency_score],
            ready_for_learning: analysis[:ready_for_learning],
            common_pattern: analysis[:common_pattern],
            reason: analysis[:reason]
          }
        end

        opportunities
      end

      # Generate optimization proposal for a specific task
      #
      # Uses PatternDetector to generate symbolic code and calculates
      # the performance impact of the optimization. Falls back to TaskSynthesizer
      # (LLM-based) if pattern detection fails and synthesizer is available.
      #
      # @param task_name [String] Name of task to optimize
      # @param use_synthesis [Boolean] Force use of LLM synthesis instead of pattern detection
      # @return [Hash] Optimization proposal with code, metrics, and metadata
      def propose(task_name:, use_synthesis: false)
        task_def = find_task_definition(task_name)
        raise ArgumentError, "Task '#{task_name}' not found" unless task_def

        analysis = @trace_analyzer.analyze_patterns(task_name: task_name)
        raise ArgumentError, "No execution data found for task '#{task_name}'" unless analysis

        traces = @trace_analyzer.query_task_traces(task_name: task_name, limit: 20)
        detection_result = @pattern_detector.detect_pattern(analysis_result: analysis) unless use_synthesis

        return propose_via_synthesis(task_name, task_def, analysis, traces) if should_use_synthesis?(use_synthesis, detection_result)

        unless detection_result&.dig(:success)
          raise ArgumentError, "Cannot optimize task '#{task_name}': #{detection_result&.dig(:reason) || 'No common pattern found'}"
        end

        build_pattern_proposal(task_name, task_def, analysis, detection_result)
      end

      # Apply optimization proposal
      #
      # This method would update the agent definition in Kubernetes.
      # For now, it returns the updated agent code that would be applied.
      #
      # @param proposal [Hash] Proposal from #propose
      # @return [Hash] Result with updated agent definition
      def apply(proposal:)
        # In a real implementation, this would:
        # 1. Update the agent CRD with new task definition
        # 2. Create new ConfigMap version
        # 3. Trigger pod restart
        # For now, we return what would be applied

        {
          success: true,
          task_name: proposal[:task_name],
          updated_code: proposal[:proposed_code],
          action: 'would_update_agent_definition',
          message: "Optimization for '#{proposal[:task_name]}' ready to apply"
        }
      end

      private

      def should_use_synthesis?(use_synthesis, detection_result)
        (use_synthesis || !detection_result&.dig(:success)) && @task_synthesizer
      end

      def propose_via_synthesis(task_name, task_def, analysis, traces)
        @logger.info("Using LLM synthesis for task '#{task_name}'")
        synthesis_result = @task_synthesizer.synthesize(
          task_definition: task_def,
          traces: traces,
          available_tools: detect_available_tools,
          consistency_score: analysis[:consistency_score],
          common_pattern: analysis[:common_pattern]
        )

        raise ArgumentError, "Cannot optimize task '#{task_name}': #{synthesis_result[:explanation]}" unless synthesis_result[:is_deterministic]

        build_synthesis_proposal(task_name: task_name, task_def: task_def, analysis: analysis,
                                 synthesis_result: synthesis_result)
      end

      def build_pattern_proposal(task_name, task_def, analysis, detection_result)
        impact = calculate_impact(execution_count: analysis[:execution_count],
                                  consistency_score: analysis[:consistency_score])
        {
          task_name: task_name, current_code: format_current_code(task_def),
          proposed_code: extract_task_code(detection_result[:generated_code]),
          full_generated_code: detection_result[:generated_code],
          consistency_score: analysis[:consistency_score], execution_count: analysis[:execution_count],
          pattern: analysis[:common_pattern], performance_impact: impact,
          validation_violations: detection_result[:validation_violations],
          ready_to_deploy: detection_result[:ready_to_deploy], synthesis_method: :pattern_detection
        }
      end

      # Find all neural tasks in the agent definition
      #
      # @return [Array<Hash>] Array of neural task info
      def find_neural_tasks
        return [] unless @agent_definition.respond_to?(:tasks)

        neural_tasks = @agent_definition.tasks.select do |_name, task_def|
          # Neural tasks have instructions but no code block
          task_def.neural?
        end

        neural_tasks.map do |name, task_def|
          {
            name: name.to_s,
            definition: task_def
          }
        end
      end

      # Find a specific task definition by name
      #
      # @param task_name [String] Task name
      # @return [Dsl::TaskDefinition, nil] Task definition or nil
      def find_task_definition(task_name)
        return nil unless @agent_definition.respond_to?(:tasks)

        @agent_definition.tasks[task_name.to_sym]
      end

      # Format current task code for display
      #
      # @param task_def [Dsl::TaskDefinition] Task definition
      # @return [String] Formatted code
      def format_current_code(task_def)
        inputs_str = (task_def.inputs || {}).map { |k, v| "#{k}: '#{v}'" }.join(', ')
        outputs_str = (task_def.outputs || {}).map { |k, v| "#{k}: '#{v}'" }.join(', ')

        <<~RUBY
          task :#{task_def.name},
            instructions: "#{task_def.instructions}",
            inputs: { #{inputs_str} },
            outputs: { #{outputs_str} }
        RUBY
      end

      # Extract task code from full agent definition
      #
      # @param full_code [String] Complete agent definition
      # @return [String] Just the task definition portion
      def extract_task_code(full_code)
        # Extract just the task definition from the full agent code
        lines = full_code.lines
        task_start = lines.index { |l| l.strip.start_with?('task :') }
        task_end = lines.index { |l| l.strip == 'end' && l.start_with?('  end') }

        return full_code unless task_start && task_end

        lines[task_start..task_end].join
      end

      # Calculate performance impact of optimization
      #
      # @param execution_count [Integer] Number of executions observed
      # @param consistency_score [Float] Pattern consistency
      # @return [Hash] Impact metrics
      def calculate_impact(execution_count:, consistency_score:)
        # Estimates based on typical LLM vs symbolic execution
        avg_neural_time = 2.5 # seconds
        avg_neural_cost = 0.003 # dollars
        avg_symbolic_time = 0.1 # seconds
        avg_symbolic_cost = 0.0 # dollars

        time_saved = avg_neural_time - avg_symbolic_time
        cost_saved = avg_neural_cost - avg_symbolic_cost

        {
          current_avg_time: avg_neural_time,
          optimized_avg_time: avg_symbolic_time,
          time_reduction_pct: ((time_saved / avg_neural_time) * 100).round(1),
          current_avg_cost: avg_neural_cost,
          optimized_avg_cost: avg_symbolic_cost,
          cost_reduction_pct: ((cost_saved / avg_neural_cost) * 100).round(1),
          projected_monthly_savings: (cost_saved * execution_count * 30).round(2)
        }
      end

      # Build proposal from synthesis result
      #
      # @param task_name [String] Task name
      # @param task_def [Dsl::TaskDefinition] Task definition
      # @param analysis [Hash] Pattern analysis result
      # @param synthesis_result [Hash] LLM synthesis result
      # @return [Hash] Optimization proposal
      def build_synthesis_proposal(task_name:, task_def:, analysis:, synthesis_result:)
        impact = calculate_impact(
          execution_count: analysis[:execution_count],
          consistency_score: synthesis_result[:confidence]
        )

        {
          task_name: task_name,
          current_code: format_current_code(task_def),
          proposed_code: synthesis_result[:code],
          full_generated_code: synthesis_result[:code],
          consistency_score: analysis[:consistency_score],
          execution_count: analysis[:execution_count],
          pattern: analysis[:common_pattern],
          performance_impact: impact,
          validation_violations: synthesis_result[:validation_errors] || [],
          ready_to_deploy: synthesis_result[:validation_errors].nil?,
          synthesis_method: :llm_synthesis,
          synthesis_confidence: synthesis_result[:confidence],
          synthesis_explanation: synthesis_result[:explanation]
        }
      end

      # Detect available tools from agent definition
      #
      # @return [Array<String>] Tool names
      def detect_available_tools
        return [] unless @agent_definition.respond_to?(:mcp_servers)

        # Extract tool names from MCP server configurations
        tools = []
        @agent_definition.mcp_servers.each_value do |server|
          tools.concat(server[:tools] || []) if server.is_a?(Hash)
        end
        tools.uniq
      rescue StandardError
        []
      end
    end
  end
end
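To make the estimates in calculate_impact concrete, a short worked example using its hard-coded constants; the execution count of 200 is illustrative, and note that projected_monthly_savings effectively treats execution_count as a per-day figure:

    # With avg_neural_time = 2.5s, avg_symbolic_time = 0.1s,
    # avg_neural_cost = $0.003 and avg_symbolic_cost = $0.0:
    time_reduction_pct = ((2.5 - 0.1) / 2.5 * 100).round(1)      # => 96.0
    cost_reduction_pct = ((0.003 - 0.0) / 0.003 * 100).round(1)  # => 100.0
    projected_monthly_savings = (0.003 * 200 * 30).round(2)      # => 18.0 (dollars)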