language-operator 0.1.58 → 0.1.59

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. checksums.yaml +4 -4
  2. data/Gemfile.lock +1 -1
  3. data/lib/language_operator/agent/base.rb +19 -0
  4. data/lib/language_operator/agent/task_executor.rb +60 -21
  5. data/lib/language_operator/agent/telemetry.rb +22 -11
  6. data/lib/language_operator/agent.rb +3 -0
  7. data/lib/language_operator/cli/base_command.rb +7 -1
  8. data/lib/language_operator/cli/commands/agent.rb +575 -0
  9. data/lib/language_operator/cli/formatters/optimization_formatter.rb +226 -0
  10. data/lib/language_operator/cli/formatters/progress_formatter.rb +1 -1
  11. data/lib/language_operator/client/base.rb +72 -2
  12. data/lib/language_operator/client/mcp_connector.rb +4 -6
  13. data/lib/language_operator/learning/adapters/base_adapter.rb +147 -0
  14. data/lib/language_operator/learning/adapters/jaeger_adapter.rb +218 -0
  15. data/lib/language_operator/learning/adapters/signoz_adapter.rb +432 -0
  16. data/lib/language_operator/learning/adapters/tempo_adapter.rb +236 -0
  17. data/lib/language_operator/learning/optimizer.rb +318 -0
  18. data/lib/language_operator/learning/pattern_detector.rb +260 -0
  19. data/lib/language_operator/learning/task_synthesizer.rb +261 -0
  20. data/lib/language_operator/learning/trace_analyzer.rb +280 -0
  21. data/lib/language_operator/templates/schema/agent_dsl_openapi.yaml +1 -1
  22. data/lib/language_operator/templates/schema/agent_dsl_schema.json +1 -1
  23. data/lib/language_operator/templates/task_synthesis.tmpl +97 -0
  24. data/lib/language_operator/ux/concerns/provider_helpers.rb +2 -2
  25. data/lib/language_operator/version.rb +1 -1
  26. data/synth/003/Makefile +8 -1
  27. data/synth/003/output.log +68 -0
  28. data/synth/README.md +1 -3
  29. metadata +12 -1
@@ -0,0 +1,280 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'logger'
4
+ require_relative 'adapters/base_adapter'
5
+
6
+ module LanguageOperator
7
+ module Learning
8
+ # Analyzes OpenTelemetry traces to detect patterns in task execution
9
+ #
10
+ # The TraceAnalyzer queries OTLP backends (SigNoz, Jaeger, Tempo) to retrieve
11
+ # execution traces for neural tasks, then analyzes them to determine if they
12
+ # exhibit consistent patterns that can be codified into symbolic implementations.
13
+ #
14
+ # Auto-detects available backends in order: SigNoz → Jaeger → Tempo
15
+ # Falls back gracefully if no backend is available (learning disabled).
16
+ #
17
+ # @example Basic usage
18
+ # analyzer = TraceAnalyzer.new(
19
+ # endpoint: ENV['OTEL_QUERY_ENDPOINT'],
20
+ # api_key: ENV['OTEL_QUERY_API_KEY']
21
+ # )
22
+ #
23
+ # analysis = analyzer.analyze_patterns(task_name: 'fetch_user_data')
24
+ # if analysis && analysis[:consistency] >= 0.85
25
+ # puts "Task is ready for learning!"
26
+ # puts "Tool sequence: #{analysis[:common_pattern]}"
27
+ # end
28
+ #
29
+ # @example Explicit backend selection
30
+ # ENV['OTEL_QUERY_BACKEND'] = 'signoz'
31
+ # analyzer = TraceAnalyzer.new(endpoint: 'https://signoz.example.com')
32
+ class TraceAnalyzer
33
+ # Minimum pattern consistency required for learning (configurable)
34
+ DEFAULT_CONSISTENCY_THRESHOLD = 0.85
35
+
36
+ # Default time range for queries (24 hours)
37
+ DEFAULT_TIME_RANGE = 24 * 60 * 60
38
+
39
+ # Initialize trace analyzer with backend connection
40
+ #
41
+ # @param endpoint [String, nil] OTLP backend endpoint (auto-detected from ENV if nil)
42
+ # @param api_key [String, nil] API key for authentication (if required)
43
+ # @param backend [String, nil] Explicit backend type ('signoz', 'jaeger', 'tempo')
44
+ # @param logger [Logger, nil] Logger instance (creates default if nil)
45
+ def initialize(endpoint: nil, api_key: nil, backend: nil, logger: nil)
46
+ @endpoint = endpoint || ENV.fetch('OTEL_QUERY_ENDPOINT', nil)
47
+ @api_key = api_key || ENV.fetch('OTEL_QUERY_API_KEY', nil)
48
+ @backend_type = backend || ENV.fetch('OTEL_QUERY_BACKEND', nil)
49
+ @logger = logger || ::Logger.new($stdout, level: ::Logger::WARN)
50
+ @adapter = detect_backend_adapter
51
+ end
52
+
53
+ # Check if learning is available (backend connected)
54
+ #
55
+ # @return [Boolean] True if a backend adapter is available
56
+ def available?
57
+ !@adapter.nil?
58
+ end
59
+
60
+ # Query task execution traces from backend
61
+ #
62
+ # @param task_name [String] Name of task to query
63
+ # @param limit [Integer] Maximum number of traces to return
64
+ # @param time_range [Integer, Range<Time>] Time range in seconds or explicit range
65
+ # @return [Array<Hash>] Task execution data
66
+ def query_task_traces(task_name:, limit: 100, time_range: DEFAULT_TIME_RANGE)
67
+ unless available?
68
+ @logger.warn('No OTLP backend available, learning disabled')
69
+ return []
70
+ end
71
+
72
+ range = normalize_time_range(time_range)
73
+
74
+ spans = @adapter.query_spans(
75
+ filter: { task_name: task_name },
76
+ time_range: range,
77
+ limit: limit
78
+ )
79
+
80
+ @adapter.extract_task_data(spans)
81
+ rescue StandardError => e
82
+ @logger.error("Failed to query task traces: #{e.message}")
83
+ @logger.debug(e.backtrace.join("\n"))
84
+ []
85
+ end
86
+
87
+ # Analyze task execution patterns for consistency
88
+ #
89
+ # Determines if a neural task exhibits consistent behavior that can be
90
+ # learned and converted to a symbolic implementation.
91
+ #
92
+ # @param task_name [String] Name of task to analyze
93
+ # @param min_executions [Integer] Minimum executions required for analysis
94
+ # @param consistency_threshold [Float] Required consistency (0.0-1.0)
95
+ # @param time_range [Integer, Range<Time>, nil] Time range for query (seconds or explicit range)
96
+ # @return [Hash, nil] Analysis results or nil if insufficient data
97
+ def analyze_patterns(task_name:, min_executions: 10, consistency_threshold: DEFAULT_CONSISTENCY_THRESHOLD,
98
+ time_range: nil)
99
+ executions = query_task_traces(task_name: task_name, limit: 1000, time_range: time_range || DEFAULT_TIME_RANGE)
100
+
101
+ if executions.empty?
102
+ @logger.info("No executions found for task '#{task_name}'")
103
+ return nil
104
+ end
105
+
106
+ if executions.size < min_executions
107
+ @logger.info("Insufficient executions for task '#{task_name}': #{executions.size}/#{min_executions}")
108
+ return {
109
+ task_name: task_name,
110
+ execution_count: executions.size,
111
+ required_count: min_executions,
112
+ ready_for_learning: false,
113
+ reason: "Need #{min_executions - executions.size} more executions"
114
+ }
115
+ end
116
+
117
+ consistency_data = calculate_consistency(executions)
118
+
119
+ # Task is ready for learning only if:
120
+ # 1. Consistency meets threshold
121
+ # 2. There's an actual tool pattern to learn (not empty/pure LLM)
122
+ has_pattern = !consistency_data[:common_pattern].nil? && !consistency_data[:common_pattern].empty?
123
+ ready = consistency_data[:score] >= consistency_threshold && has_pattern
124
+
125
+ {
126
+ task_name: task_name,
127
+ execution_count: executions.size,
128
+ consistency_score: consistency_data[:score],
129
+ consistency_threshold: consistency_threshold,
130
+ ready_for_learning: ready,
131
+ reason: has_pattern ? nil : 'No tool calls to learn (pure LLM task)',
132
+ common_pattern: consistency_data[:common_pattern],
133
+ input_signatures: consistency_data[:input_signatures],
134
+ analysis_timestamp: Time.now.iso8601
135
+ }
136
+ end
137
+
138
+ # Calculate pattern consistency across executions
139
+ #
140
+ # Groups executions by input signature and analyzes tool call sequences
141
+ # to determine how often the same pattern is used for the same inputs.
142
+ #
143
+ # @param executions [Array<Hash>] Task execution data
144
+ # @return [Hash] Consistency analysis with score and common pattern
145
+ def calculate_consistency(executions)
146
+ # Group by input signature
147
+ by_inputs = executions.group_by { |ex| normalize_inputs(ex[:inputs]) }
148
+
149
+ # For each input signature, find the most common tool call pattern
150
+ signature_patterns = by_inputs.map do |input_sig, execs|
151
+ patterns = execs.map { |ex| normalize_tool_calls(ex[:tool_calls]) }
152
+ pattern_counts = patterns.tally
153
+ most_common = pattern_counts.max_by { |_, count| count }
154
+
155
+ {
156
+ input_signature: input_sig,
157
+ total_executions: execs.size,
158
+ most_common_pattern: most_common[0],
159
+ pattern_count: most_common[1],
160
+ consistency: most_common[1].to_f / execs.size
161
+ }
162
+ end
163
+
164
+ # Overall consistency is weighted average across input signatures
165
+ total_execs = executions.size
166
+ weighted_consistency = signature_patterns.sum do |sig_data|
167
+ weight = sig_data[:total_executions].to_f / total_execs
168
+ weight * sig_data[:consistency]
169
+ end
170
+
171
+ # Find the globally most common pattern
172
+ all_patterns = signature_patterns.map { |s| s[:most_common_pattern] }
173
+ common_pattern = all_patterns.max_by { |p| all_patterns.count(p) }
174
+
175
+ {
176
+ score: weighted_consistency.round(3),
177
+ common_pattern: common_pattern,
178
+ input_signatures: signature_patterns.size
179
+ }
180
+ end
181
+
182
+ private
183
+
184
+ # Detect and initialize the appropriate backend adapter
185
+ #
186
+ # Auto-detection order: SigNoz → Jaeger → Tempo
187
+ # Falls back to nil if no backend is available
188
+ #
189
+ # @return [BaseAdapter, nil] Initialized adapter or nil
190
+ def detect_backend_adapter
191
+ return nil unless @endpoint
192
+
193
+ # Explicit backend selection
194
+ if @backend_type
195
+ adapter = create_adapter(@backend_type)
196
+ return adapter if adapter
197
+
198
+ @logger.warn("Requested backend '#{@backend_type}' not available, trying auto-detection")
199
+ end
200
+
201
+ # Auto-detect with fallback chain
202
+ %w[signoz jaeger tempo].each do |backend|
203
+ adapter = create_adapter(backend)
204
+ if adapter
205
+ @logger.info("Detected OTLP backend: #{backend} at #{@endpoint}")
206
+ return adapter
207
+ end
208
+ end
209
+
210
+ @logger.warn("No OTLP backend available at #{@endpoint}, learning disabled")
211
+ nil
212
+ end
213
+
214
+ # Create adapter instance for specified backend
215
+ #
216
+ # @param backend_type [String] Backend type
217
+ # @return [BaseAdapter, nil] Adapter instance or nil if unavailable
218
+ def create_adapter(backend_type)
219
+ require_relative "adapters/#{backend_type}_adapter"
220
+
221
+ adapter_class = case backend_type.downcase
222
+ when 'signoz'
223
+ Adapters::SignozAdapter
224
+ when 'jaeger'
225
+ Adapters::JaegerAdapter
226
+ when 'tempo'
227
+ Adapters::TempoAdapter
228
+ else
229
+ @logger.error("Unknown backend type: #{backend_type}")
230
+ return nil
231
+ end
232
+
233
+ return nil unless adapter_class.available?(@endpoint, @api_key)
234
+
235
+ adapter_class.new(@endpoint, @api_key, logger: @logger)
236
+ rescue LoadError => e
237
+ @logger.debug("Adapter #{backend_type} not available: #{e.message}")
238
+ nil
239
+ rescue StandardError => e
240
+ @logger.error("Failed to create #{backend_type} adapter: #{e.message}")
241
+ nil
242
+ end
243
+
244
+ # Normalize time range to Range<Time>
245
+ #
246
+ # @param time_range [Integer, Range<Time>] Time range
247
+ # @return [Range<Time>] Normalized time range
248
+ def normalize_time_range(time_range)
249
+ case time_range
250
+ when Range
251
+ time_range
252
+ when Integer
253
+ (Time.now - time_range)..Time.now
254
+ else
255
+ (Time.now - DEFAULT_TIME_RANGE)..Time.now
256
+ end
257
+ end
258
+
259
+ # Normalize inputs for comparison
260
+ #
261
+ # @param inputs [Hash] Task inputs
262
+ # @return [String] Normalized input signature
263
+ def normalize_inputs(inputs)
264
+ return '' unless inputs.is_a?(Hash)
265
+
266
+ inputs.sort.to_h.to_s
267
+ end
268
+
269
+ # Normalize tool calls for pattern matching
270
+ #
271
+ # @param tool_calls [Array<Hash>] Tool call sequence
272
+ # @return [String] Normalized pattern signature
273
+ def normalize_tool_calls(tool_calls)
274
+ return '' unless tool_calls.is_a?(Array)
275
+
276
+ tool_calls.map { |tc| tc[:tool_name] }.join(' → ')
277
+ end
278
+ end
279
+ end
280
+ end
@@ -2,7 +2,7 @@
2
2
  :openapi: 3.0.3
3
3
  :info:
4
4
  :title: Language Operator Agent API
5
- :version: 0.1.58
5
+ :version: 0.1.59
6
6
  :description: HTTP API endpoints exposed by Language Operator reactive agents
7
7
  :contact:
8
8
  :name: Language Operator
@@ -3,7 +3,7 @@
3
3
  "$id": "https://github.com/language-operator/language-operator-gem/schema/agent-dsl.json",
4
4
  "title": "Language Operator Agent DSL",
5
5
  "description": "Schema for defining autonomous AI agents using the Language Operator DSL",
6
- "version": "0.1.58",
6
+ "version": "0.1.59",
7
7
  "type": "object",
8
8
  "properties": {
9
9
  "name": {
@@ -0,0 +1,97 @@
1
+ You are analyzing whether an agentic task can be converted to symbolic Ruby code.
2
+
3
+ ## What "Symbolic" Means
4
+
5
+ A task is **symbolic** (can be optimized) if it follows a predictable algorithm:
6
+ - Reading files, calling APIs, transforming data = SYMBOLIC (even though outputs vary based on data)
7
+ - The same code logic applies regardless of the actual data values
8
+ - Conditional branches based on data (if file exists, if value > threshold) are fine
9
+
10
+ A task is **neural** (cannot be optimized) only if it requires:
11
+ - Creative text generation (writing stories, poems, marketing copy)
12
+ - Subjective reasoning or judgment calls
13
+ - Understanding nuanced human intent that varies per request
14
+
15
+ **Key insight:** File I/O, API calls, and data transformation are deterministic CODE even if their outputs depend on external state. "Read a file and count lines" is symbolic - the algorithm is fixed.
16
+
17
+ ## Task Definition
18
+
19
+ **Name:** {{.TaskName}}
20
+ **Instructions:** {{.Instructions}}
21
+
22
+ **Inputs:**
23
+ {{.Inputs}}
24
+
25
+ **Outputs:**
26
+ {{.Outputs}}
27
+
28
+ ## Current Task Code
29
+
30
+ ```ruby
31
+ {{.TaskCode}}
32
+ ```
33
+
34
+ ## Execution Traces ({{.TraceCount}} samples)
35
+
36
+ {{.Traces}}
37
+
38
+ ## Pattern Analysis
39
+
40
+ - **Most Common Pattern:** {{.CommonPattern}}
41
+ - **Pattern Consistency:** {{.ConsistencyScore}}%
42
+ - **Unique Patterns Observed:** {{.UniquePatternCount}}
43
+
44
+ ## Available Tools
45
+
46
+ {{.ToolsList}}
47
+
48
+ ---
49
+
50
+ ## Your Task
51
+
52
+ Analyze whether this neural task can be converted to symbolic Ruby code.
53
+
54
+ Questions to ask:
55
+ 1. Is there a clear algorithm implied by the instructions?
56
+ 2. Do the tool call patterns show a logical sequence?
57
+ 3. Can conditional logic handle the variations seen in traces?
58
+
59
+ Tasks that ARE symbolic (optimize these):
60
+ - "Read file X and return its contents" → read_file, return content
61
+ - "Check if file exists, create if not" → get_file_info, conditional write_file
62
+ - "Fetch data from API and transform it" → api_call, data transformation
63
+
64
+ Tasks that are NOT symbolic (don't optimize):
65
+ - "Write a creative story continuation"
66
+ - "Decide what the user probably meant"
67
+ - "Generate marketing copy for this product"
68
+
69
+ ## Output Format
70
+
71
+ Respond with valid JSON:
72
+
73
+ ```json
74
+ {
75
+ "is_deterministic": true/false,
76
+ "confidence": 0.0-1.0,
77
+ "explanation": "Brief explanation",
78
+ "code": "Ruby code if symbolic, null otherwise"
79
+ }
80
+ ```
81
+
82
+ **Code Requirements (if symbolic):**
83
+ - Use the DSL task format with a do block:
84
+ ```ruby
85
+ task :task_name,
86
+ instructions: "Keep the original instructions for documentation",
87
+ inputs: { ... },
88
+ outputs: { ... } do |inputs|
89
+ # Your code here
90
+ execute_tool(:tool_name, { arg: value })
91
+ { output_key: result }
92
+ end
93
+ ```
94
+ - Use `execute_tool(:tool_name, { arg: value })` for tool calls
95
+ - Access inputs via the `inputs` hash parameter
96
+ - Return a hash matching the output schema
97
+ - Do NOT use system(), eval(), or other unsafe methods
@@ -156,8 +156,8 @@ module LanguageOperator
156
156
  data = JSON.parse(response.body)
157
157
  models = data['data']&.map { |m| m['id'] } || []
158
158
 
159
- # Filter out fine-tuned/snapshot models for better UX
160
- models.reject { |m| m.include?('ft-') || m.include?(':') }
159
+ # Filter out fine-tuned models for better UX
160
+ models.reject { |m| m.include?('ft-') }
161
161
  rescue StandardError
162
162
  nil
163
163
  end
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LanguageOperator
4
- VERSION = '0.1.58'
4
+ VERSION = '0.1.59'
5
5
  end
data/synth/003/Makefile CHANGED
@@ -8,11 +8,18 @@ create:
8
8
  cat agent.txt | $(AICTL) create --name $(AGENT) --tools "$(TOOLS)"
9
9
 
10
10
  run:
11
- kubectl create job --from=cronjob/$(AGENT) $(AGENT)-manual
11
+ @JOB_NAME=$(AGENT)-$(shell date +%s); \
12
+ kubectl create job --from=cronjob/$(AGENT) $$JOB_NAME && \
13
+ trap "kubectl delete job $$JOB_NAME" EXIT; \
14
+ kubectl wait --for=condition=ready pod -l job-name=$$JOB_NAME --timeout=60s && \
15
+ kubectl logs -f job/$$JOB_NAME
12
16
 
13
17
  code:
14
18
  $(AICTL) code $(AGENT)
15
19
 
20
+ optimize:
21
+ $(AICTL) optimize $(AGENT)
22
+
16
23
  logs:
17
24
  $(AICTL) logs $(AGENT)
18
25
 
@@ -0,0 +1,68 @@
1
+ ⚬ OpenTelemetry enabled
2
+ Waiting for tool at localhost:80...
3
+ Tool ready at localhost:80
4
+ ⚬ Configuring LLM (provider=openai_compatible, model=mistralai/magistral-small-2509, timeout=300)
5
+ ⚬ LLM configuration complete
6
+ ⚬ Connecting to MCP servers (count=1)
7
+ ⚬ Successfully connected to MCP server (server=default-tools-0)
8
+ ⚬ MCP server connected (server=default-tools-0, tool_count=6, tools=["read_file", "write_file", "list_directory", "create_directory", "get_file_info", "search_files"])
9
+ ⚬ MCP connection summary (connected_servers=1, total_tools=6)
10
+ ⚬ Chat session initialized (with_tools=true)
11
+ ⚬ Agent running in scheduled mode - executing once (agent_name=synth-003, dsl_version=v1)
12
+ ⚬ Executing main block (agent=synth-003, task_count=3)
13
+ ⚬ Executing main block (inputs_keys=[])
14
+ ⚬ Executing task (task=read_existing_story, type=neural, timeout=360.0, max_retries=3)
15
+ ⚬ Sending prompt to LLM (task=read_existing_story, prompt_length=572, available_tools=["read_file", "write_file", "list_directory", "create_directory", "get_file_info", "search_files"])
16
+ ⚬ Tool call initiated by LLM (event=tool_call_initiated, tool_name=get_file_info, tool_id=617083832, arguments={"path" => "story.txt"}, arguments_json={"path":"story.txt"})
17
+ ⚬ Tool call result received (event=tool_result_received, result=Path: story.txt
18
+ Type: file
19
+ Size: 200 B
20
+ Permissions: 664
21
+ Owner UID: 1000
22
+ Owner GID: 101
23
+ Created: 2025-11-19 02:00:00 +0000
24
+ Modified: 2025-11-19 01:45:12 +0000
25
+ Accessed: 2025-11-19 02:07:30 +0000, result_preview=Path: story.txt
26
+ Type: file
27
+ Size: 200 B
28
+ Permissions: 664
29
+ Owner UID: 1000
30
+ Owner GID: 101
31
+ Created: 20...)
32
+ ⚬ Tool call initiated by LLM (event=tool_call_initiated, tool_name=read_file, tool_id=783124383, arguments={"path" => "story.txt"}, arguments_json={"path":"story.txt"})
33
+ ⚬ Tool call result received (event=tool_result_received, result=Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named Lily.
34
+ One day, while playing near the edge of the forest, she discovered a mysterious glowing flower., result_preview=Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named...)
35
+ ⚬ LLM response received, extracting content (task=read_existing_story, response_class=RubyLLM::Message, has_tool_calls=, tool_call_count=0)
36
+ ⚬ Neural task response received (task=read_existing_story, response_length=592)
37
+ ⚬ Parsing neural task response (task=read_existing_story)
38
+ ⚬ LLM thinking captured (event=llm_thinking, task=read_existing_story, thinking_steps=1, thinking=["Now that I have the content, I need to count the number of sentences by splitting it by newline. The content is a single line, so there's only one sentence.\n\nBut wait, the problem says to split by newline. The content is a single line, so sentence_count should be 1.\n\nNow, I'll prepare the JSON response with the content and sentence count."], thinking_preview=Now that I have the content, I need to count the number of sentences by splitting it by newline. T...)
39
+ ⚬ Response parsed successfully (task=read_existing_story, output_keys=[:content, :sentence_count])
40
+ ⚬ Validating task outputs (task=read_existing_story)
41
+ ⚬ Executing task (task=generate_next_sentence, type=neural, timeout=360.0, max_retries=3)
42
+ ⚬ Sending prompt to LLM (task=generate_next_sentence, prompt_length=773, available_tools=["read_file", "write_file", "list_directory", "create_directory", "get_file_info", "search_files"])
43
+ E, [2025-11-19T02:10:54.380136 #7] ERROR -- : OpenTelemetry error: Unable to export 4 spans
44
+ ⚬ LLM response received, extracting content (task=generate_next_sentence, response_class=RubyLLM::Message, has_tool_calls=, tool_call_count=0)
45
+ ⚬ Neural task response received (task=generate_next_sentence, response_length=824)
46
+ ⚬ Parsing neural task response (task=generate_next_sentence)
47
+ ⚬ LLM thinking captured (event=llm_thinking, task=generate_next_sentence, thinking_steps=1, thinking=["The existing content has two sentences. The story is in a fairy tale style, so the next sentence should continue in that tone.\n\nThe first sentence sets up the setting and character. The second introduces a key event (the glowing flower). A natural continuation might be that Lily decides to pick the flower or something happens when she touches it.\n\nLet me draft a possible next sentence: \"As she reached out to touch the flower, it suddenly let out a soft humming sound.\"\n\nNow, I need to ensure that this is exactly one sentence and maintains the tone.\n\nLet me confirm it's a complete sentence with proper punctuation. It is, and it fits the fairy tale style.\n\nNow, prepare the JSON response with this sentence."], thinking_preview=The existing content has two sentences. The story is in a fairy tale style, so the next sentence s...)
48
+ ⚬ Response parsed successfully (task=generate_next_sentence, output_keys=[:sentence])
49
+ ⚬ Validating task outputs (task=generate_next_sentence)
50
+ ⚬ Executing task (task=append_to_story, type=neural, timeout=360.0, max_retries=3)
51
+ ⚬ Sending prompt to LLM (task=append_to_story, prompt_length=684, available_tools=["read_file", "write_file", "list_directory", "create_directory", "get_file_info", "search_files"])
52
+ E, [2025-11-19T02:11:09.382616 #7] ERROR -- : OpenTelemetry error: Unable to export 4 spans
53
+ ⚬ Tool call initiated by LLM (event=tool_call_initiated, tool_name=read_file, tool_id=982459012, arguments={"path" => "story.txt"}, arguments_json={"path":"story.txt"})
54
+ ⚬ Tool call result received (event=tool_result_received, result=Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named Lily.
55
+ One day, while playing near the edge of the forest, she discovered a mysterious glowing flower., result_preview=Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named...)
56
+ ⚬ Tool call initiated by LLM (event=tool_call_initiated, tool_name=write_file, tool_id=433722466, arguments={"path" => "story.txt", "content" => "Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named Lily.\nOne day, while playing near the edge of the forest, she discovered a mysterious glowing flower.\nAs she reached out to touch the flower, it suddenly let out a soft humming sound."}, arguments_json={"path":"story.txt","content":"Once upon a time, in a quiet village nestled between rolling hills,...)
57
+ ⚬ Tool call result received (event=tool_result_received, result=Successfully wrote 282 bytes to story.txt, result_preview=Successfully wrote 282 bytes to story.txt)
58
+ ⚬ LLM response received, extracting content (task=append_to_story, response_class=RubyLLM::Message, has_tool_calls=, tool_call_count=0)
59
+ ⚬ Neural task response received (task=append_to_story, response_length=39)
60
+ ⚬ Parsing neural task response (task=append_to_story)
61
+ ⚬ Response parsed successfully (task=append_to_story, output_keys=[:success, :total_sentences])
62
+ ⚬ Validating task outputs (task=append_to_story)
63
+ ⚬ Main execution (71.111s)
64
+ ⚬ Main block completed
65
+ ⚬ Main block execution completed (result={added_sentence: "As she reached out to touch the flower, it suddenly let out a soft humming sound.", total_sentences: 3})
66
+ Added sentence: As she reached out to touch the flower, it suddenly let out a soft humming sound.
67
+ Story now has 3 sentences
68
+ ⚬ Scheduled execution completed - exiting (agent_name=synth-003)
data/synth/README.md CHANGED
@@ -1,5 +1,3 @@
1
1
  # Synthesis Tests
2
2
 
3
- Canonical "hey this might actually work" synthesis suite.
4
-
5
- Model: [Magistral-Small-2509](https://huggingface.co/mistralai/Magistral-Small-2509) 4bit MLX
3
+ Canonical "hey this might actually work" synthesis suite.
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: language-operator
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.58
4
+ version: 0.1.59
5
5
  platform: ruby
6
6
  authors:
7
7
  - James Ryan
@@ -474,6 +474,7 @@ files:
474
474
  - lib/language_operator/cli/formatters/code_formatter.rb
475
475
  - lib/language_operator/cli/formatters/log_formatter.rb
476
476
  - lib/language_operator/cli/formatters/log_style.rb
477
+ - lib/language_operator/cli/formatters/optimization_formatter.rb
477
478
  - lib/language_operator/cli/formatters/progress_formatter.rb
478
479
  - lib/language_operator/cli/formatters/status_formatter.rb
479
480
  - lib/language_operator/cli/formatters/table_formatter.rb
@@ -524,6 +525,14 @@ files:
524
525
  - lib/language_operator/instrumentation/task_tracer.rb
525
526
  - lib/language_operator/kubernetes/client.rb
526
527
  - lib/language_operator/kubernetes/resource_builder.rb
528
+ - lib/language_operator/learning/adapters/base_adapter.rb
529
+ - lib/language_operator/learning/adapters/jaeger_adapter.rb
530
+ - lib/language_operator/learning/adapters/signoz_adapter.rb
531
+ - lib/language_operator/learning/adapters/tempo_adapter.rb
532
+ - lib/language_operator/learning/optimizer.rb
533
+ - lib/language_operator/learning/pattern_detector.rb
534
+ - lib/language_operator/learning/task_synthesizer.rb
535
+ - lib/language_operator/learning/trace_analyzer.rb
527
536
  - lib/language_operator/loggable.rb
528
537
  - lib/language_operator/logger.rb
529
538
  - lib/language_operator/retry.rb
@@ -535,6 +544,7 @@ files:
535
544
  - lib/language_operator/templates/schema/CHANGELOG.md
536
545
  - lib/language_operator/templates/schema/agent_dsl_openapi.yaml
537
546
  - lib/language_operator/templates/schema/agent_dsl_schema.json
547
+ - lib/language_operator/templates/task_synthesis.tmpl
538
548
  - lib/language_operator/tool_loader.rb
539
549
  - lib/language_operator/type_coercion.rb
540
550
  - lib/language_operator/ux/base.rb
@@ -561,6 +571,7 @@ files:
561
571
  - synth/003/README.md
562
572
  - synth/003/agent.rb
563
573
  - synth/003/agent.txt
574
+ - synth/003/output.log
564
575
  - synth/README.md
565
576
  homepage: https://github.com/language-operator/language-operator
566
577
  licenses: