language-operator 0.1.58 → 0.1.61

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. checksums.yaml +4 -4
  2. data/Gemfile.lock +1 -1
  3. data/components/agent/Gemfile +1 -1
  4. data/lib/language_operator/agent/base.rb +22 -0
  5. data/lib/language_operator/agent/task_executor.rb +80 -23
  6. data/lib/language_operator/agent/telemetry.rb +22 -11
  7. data/lib/language_operator/agent.rb +3 -0
  8. data/lib/language_operator/cli/base_command.rb +7 -1
  9. data/lib/language_operator/cli/commands/agent.rb +575 -0
  10. data/lib/language_operator/cli/formatters/optimization_formatter.rb +226 -0
  11. data/lib/language_operator/cli/formatters/progress_formatter.rb +1 -1
  12. data/lib/language_operator/client/base.rb +74 -2
  13. data/lib/language_operator/client/mcp_connector.rb +4 -6
  14. data/lib/language_operator/dsl/task_definition.rb +7 -6
  15. data/lib/language_operator/learning/adapters/base_adapter.rb +149 -0
  16. data/lib/language_operator/learning/adapters/jaeger_adapter.rb +221 -0
  17. data/lib/language_operator/learning/adapters/signoz_adapter.rb +435 -0
  18. data/lib/language_operator/learning/adapters/tempo_adapter.rb +239 -0
  19. data/lib/language_operator/learning/optimizer.rb +319 -0
  20. data/lib/language_operator/learning/pattern_detector.rb +260 -0
  21. data/lib/language_operator/learning/task_synthesizer.rb +288 -0
  22. data/lib/language_operator/learning/trace_analyzer.rb +285 -0
  23. data/lib/language_operator/templates/schema/agent_dsl_openapi.yaml +1 -1
  24. data/lib/language_operator/templates/schema/agent_dsl_schema.json +1 -1
  25. data/lib/language_operator/templates/task_synthesis.tmpl +98 -0
  26. data/lib/language_operator/ux/concerns/provider_helpers.rb +2 -2
  27. data/lib/language_operator/version.rb +1 -1
  28. data/synth/003/Makefile +10 -3
  29. data/synth/003/output.log +68 -0
  30. data/synth/README.md +1 -3
  31. metadata +12 -1
data/lib/language_operator/learning/trace_analyzer.rb ADDED
@@ -0,0 +1,285 @@
+ # frozen_string_literal: true
+
+ require 'logger'
+ require_relative 'adapters/base_adapter'
+
+ module LanguageOperator
+   module Learning
+     # Analyzes OpenTelemetry traces to detect patterns in task execution
+     #
+     # The TraceAnalyzer queries OTLP backends (SigNoz, Jaeger, Tempo) to retrieve
+     # execution traces for neural tasks, then analyzes them to determine if they
+     # exhibit consistent patterns that can be codified into symbolic implementations.
+     #
+     # Auto-detects available backends in order: SigNoz → Jaeger → Tempo
+     # Falls back gracefully if no backend is available (learning disabled).
+     #
+     # @example Basic usage
+     #   analyzer = TraceAnalyzer.new(
+     #     endpoint: ENV['OTEL_QUERY_ENDPOINT'],
+     #     api_key: ENV['OTEL_QUERY_API_KEY']
+     #   )
+     #
+     #   analysis = analyzer.analyze_patterns(task_name: 'fetch_user_data')
+     #   if analysis && analysis[:consistency] >= 0.85
+     #     puts "Task is ready for learning!"
+     #     puts "Tool sequence: #{analysis[:common_pattern]}"
+     #   end
+     #
+     # @example Explicit backend selection
+     #   ENV['OTEL_QUERY_BACKEND'] = 'signoz'
+     #   analyzer = TraceAnalyzer.new(endpoint: 'https://signoz.example.com')
+     class TraceAnalyzer
+       # Minimum pattern consistency required for learning (configurable)
+       DEFAULT_CONSISTENCY_THRESHOLD = 0.85
+
+       # Default time range for queries (24 hours)
+       DEFAULT_TIME_RANGE = 24 * 60 * 60
+
+       # Initialize trace analyzer with backend connection
+       #
+       # @param endpoint [String, nil] OTLP backend endpoint (auto-detected from ENV if nil)
+       # @param api_key [String, nil] API key for authentication (if required)
+       # @param backend [String, nil] Explicit backend type ('signoz', 'jaeger', 'tempo')
+       # @param logger [Logger, nil] Logger instance (creates default if nil)
+       def initialize(endpoint: nil, api_key: nil, backend: nil, logger: nil)
+         @endpoint = endpoint || ENV.fetch('OTEL_QUERY_ENDPOINT', nil)
+         @api_key = api_key || ENV.fetch('OTEL_QUERY_API_KEY', nil)
+         @backend_type = backend || ENV.fetch('OTEL_QUERY_BACKEND', nil)
+         @logger = logger || ::Logger.new($stdout, level: ::Logger::WARN)
+         @adapter = detect_backend_adapter
+       end
+
+       # Check if learning is available (backend connected)
+       #
+       # @return [Boolean] True if a backend adapter is available
+       def available?
+         !@adapter.nil?
+       end
+
+       # Query task execution traces from backend
+       #
+       # @param task_name [String] Name of task to query
+       # @param agent_name [String, nil] Optional agent name to filter by
+       # @param limit [Integer] Maximum number of traces to return
+       # @param time_range [Integer, Range<Time>] Time range in seconds or explicit range
+       # @return [Array<Hash>] Task execution data
+       def query_task_traces(task_name:, agent_name: nil, limit: 100, time_range: DEFAULT_TIME_RANGE)
+         unless available?
+           @logger.warn('No OTLP backend available, learning disabled')
+           return []
+         end
+
+         range = normalize_time_range(time_range)
+
+         filter = { task_name: task_name }
+         filter[:agent_name] = agent_name if agent_name
+
+         spans = @adapter.query_spans(
+           filter: filter,
+           time_range: range,
+           limit: limit
+         )
+
+         @adapter.extract_task_data(spans)
+       rescue StandardError => e
+         @logger.error("Failed to query task traces: #{e.message}")
+         @logger.debug(e.backtrace.join("\n"))
+         []
+       end
+
+       # Analyze task execution patterns for consistency
+       #
+       # Determines if a neural task exhibits consistent behavior that can be
+       # learned and converted to a symbolic implementation.
+       #
+       # @param task_name [String] Name of task to analyze
+       # @param agent_name [String, nil] Optional agent name to filter by
+       # @param min_executions [Integer] Minimum executions required for analysis
+       # @param consistency_threshold [Float] Required consistency (0.0-1.0)
+       # @param time_range [Integer, Range<Time>, nil] Time range for query (seconds or explicit range)
+       # @return [Hash, nil] Analysis results or nil if insufficient data
+       def analyze_patterns(task_name:, agent_name: nil, min_executions: 10, consistency_threshold: DEFAULT_CONSISTENCY_THRESHOLD,
+                            time_range: nil)
+         executions = query_task_traces(task_name: task_name, agent_name: agent_name, limit: 1000, time_range: time_range || DEFAULT_TIME_RANGE)
+
+         if executions.empty?
+           @logger.info("No executions found for task '#{task_name}'")
+           return nil
+         end
+
+         if executions.size < min_executions
+           @logger.info("Insufficient executions for task '#{task_name}': #{executions.size}/#{min_executions}")
+           return {
+             task_name: task_name,
+             execution_count: executions.size,
+             required_count: min_executions,
+             ready_for_learning: false,
+             reason: "Need #{min_executions - executions.size} more executions"
+           }
+         end
+
+         consistency_data = calculate_consistency(executions)
+
+         # Task is ready for learning only if:
+         # 1. Consistency meets threshold
+         # 2. There's an actual tool pattern to learn (not empty/pure LLM)
+         has_pattern = !consistency_data[:common_pattern].nil? && !consistency_data[:common_pattern].empty?
+         ready = consistency_data[:score] >= consistency_threshold && has_pattern
+
+         {
+           task_name: task_name,
+           execution_count: executions.size,
+           consistency_score: consistency_data[:score],
+           consistency_threshold: consistency_threshold,
+           ready_for_learning: ready,
+           reason: has_pattern ? nil : 'No tool calls to learn (pure LLM task)',
+           common_pattern: consistency_data[:common_pattern],
+           input_signatures: consistency_data[:input_signatures],
+           analysis_timestamp: Time.now.iso8601
+         }
+       end
+
+       # Calculate pattern consistency across executions
+       #
+       # Groups executions by input signature and analyzes tool call sequences
+       # to determine how often the same pattern is used for the same inputs.
+       #
+       # @param executions [Array<Hash>] Task execution data
+       # @return [Hash] Consistency analysis with score and common pattern
+       def calculate_consistency(executions)
+         # Group by input signature
+         by_inputs = executions.group_by { |ex| normalize_inputs(ex[:inputs]) }
+
+         # For each input signature, find the most common tool call pattern
+         signature_patterns = by_inputs.map do |input_sig, execs|
+           patterns = execs.map { |ex| normalize_tool_calls(ex[:tool_calls]) }
+           pattern_counts = patterns.tally
+           most_common = pattern_counts.max_by { |_, count| count }
+
+           {
+             input_signature: input_sig,
+             total_executions: execs.size,
+             most_common_pattern: most_common[0],
+             pattern_count: most_common[1],
+             consistency: most_common[1].to_f / execs.size
+           }
+         end
+
+         # Overall consistency is weighted average across input signatures
+         total_execs = executions.size
+         weighted_consistency = signature_patterns.sum do |sig_data|
+           weight = sig_data[:total_executions].to_f / total_execs
+           weight * sig_data[:consistency]
+         end
+
+         # Find the globally most common pattern
+         all_patterns = signature_patterns.map { |s| s[:most_common_pattern] }
+         common_pattern = all_patterns.max_by { |p| all_patterns.count(p) }
+
+         {
+           score: weighted_consistency.round(3),
+           common_pattern: common_pattern,
+           input_signatures: signature_patterns.size
+         }
+       end
+
+       private
+
+       # Detect and initialize the appropriate backend adapter
+       #
+       # Auto-detection order: SigNoz → Jaeger → Tempo
+       # Falls back to nil if no backend is available
+       #
+       # @return [BaseAdapter, nil] Initialized adapter or nil
+       def detect_backend_adapter
+         return nil unless @endpoint
+
+         # Explicit backend selection
+         if @backend_type
+           adapter = create_adapter(@backend_type)
+           return adapter if adapter
+
+           @logger.warn("Requested backend '#{@backend_type}' not available, trying auto-detection")
+         end
+
+         # Auto-detect with fallback chain
+         %w[signoz jaeger tempo].each do |backend|
+           adapter = create_adapter(backend)
+           if adapter
+             @logger.info("Detected OTLP backend: #{backend} at #{@endpoint}")
+             return adapter
+           end
+         end
+
+         @logger.warn("No OTLP backend available at #{@endpoint}, learning disabled")
+         nil
+       end
+
+       # Create adapter instance for specified backend
+       #
+       # @param backend_type [String] Backend type
+       # @return [BaseAdapter, nil] Adapter instance or nil if unavailable
+       def create_adapter(backend_type)
+         require_relative "adapters/#{backend_type}_adapter"
+
+         adapter_class = case backend_type.downcase
+                         when 'signoz'
+                           Adapters::SignozAdapter
+                         when 'jaeger'
+                           Adapters::JaegerAdapter
+                         when 'tempo'
+                           Adapters::TempoAdapter
+                         else
+                           @logger.error("Unknown backend type: #{backend_type}")
+                           return nil
+                         end
+
+         return nil unless adapter_class.available?(@endpoint, @api_key)
+
+         adapter_class.new(@endpoint, @api_key, logger: @logger)
+       rescue LoadError => e
+         @logger.debug("Adapter #{backend_type} not available: #{e.message}")
+         nil
+       rescue StandardError => e
+         @logger.error("Failed to create #{backend_type} adapter: #{e.message}")
+         nil
+       end
+
+       # Normalize time range to Range<Time>
+       #
+       # @param time_range [Integer, Range<Time>] Time range
+       # @return [Range<Time>] Normalized time range
+       def normalize_time_range(time_range)
+         case time_range
+         when Range
+           time_range
+         when Integer
+           (Time.now - time_range)..Time.now
+         else
+           (Time.now - DEFAULT_TIME_RANGE)..Time.now
+         end
+       end
+
+       # Normalize inputs for comparison
+       #
+       # @param inputs [Hash] Task inputs
+       # @return [String] Normalized input signature
+       def normalize_inputs(inputs)
+         return '' unless inputs.is_a?(Hash)
+
+         inputs.sort.to_h.to_s
+       end
+
+       # Normalize tool calls for pattern matching
+       #
+       # @param tool_calls [Array<Hash>] Tool call sequence
+       # @return [String] Normalized pattern signature
+       def normalize_tool_calls(tool_calls)
+         return '' unless tool_calls.is_a?(Array)
+
+         tool_calls.map { |tc| tc[:tool_name] }.join(' → ')
+       end
+     end
+   end
+ end
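
The consistency math above is easiest to see on a tiny hand-built sample. The sketch below is not part of the diff; it calls `calculate_consistency` directly (which needs no OTLP backend) and assumes the gem is installed, that the require path mirrors the `data/lib/` layout, and that `OTEL_QUERY_ENDPOINT` is unset so no backend detection is attempted. The execution hashes are made up to mimic the `inputs`/`tool_calls` shape the adapters return.

```ruby
# Illustrative only: exercising TraceAnalyzer#calculate_consistency on hand-built data.
require 'language_operator/learning/trace_analyzer'

analyzer = LanguageOperator::Learning::TraceAnalyzer.new

# Three executions with the same inputs; two share the tool sequence
# get_file_info → read_file, one used read_file alone.
executions = [
  { inputs: { path: 'story.txt' }, tool_calls: [{ tool_name: 'get_file_info' }, { tool_name: 'read_file' }] },
  { inputs: { path: 'story.txt' }, tool_calls: [{ tool_name: 'get_file_info' }, { tool_name: 'read_file' }] },
  { inputs: { path: 'story.txt' }, tool_calls: [{ tool_name: 'read_file' }] }
]

result = analyzer.calculate_consistency(executions)
puts result[:score]          # => 0.667 (2 of 3 executions share the dominant pattern)
puts result[:common_pattern] # => "get_file_info → read_file"
```

With only one input signature, the weighted average reduces to 2/3; a score at or above the 0.85 default threshold (plus a non-empty pattern) is what `analyze_patterns` treats as ready for learning.
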
data/lib/language_operator/templates/schema/agent_dsl_openapi.yaml CHANGED
@@ -2,7 +2,7 @@
  :openapi: 3.0.3
  :info:
    :title: Language Operator Agent API
-   :version: 0.1.58
+   :version: 0.1.61
    :description: HTTP API endpoints exposed by Language Operator reactive agents
    :contact:
      :name: Language Operator
data/lib/language_operator/templates/schema/agent_dsl_schema.json CHANGED
@@ -3,7 +3,7 @@
    "$id": "https://github.com/language-operator/language-operator-gem/schema/agent-dsl.json",
    "title": "Language Operator Agent DSL",
    "description": "Schema for defining autonomous AI agents using the Language Operator DSL",
-   "version": "0.1.58",
+   "version": "0.1.61",
    "type": "object",
    "properties": {
      "name": {
data/lib/language_operator/templates/task_synthesis.tmpl ADDED
@@ -0,0 +1,98 @@
+ You are analyzing whether an agentic task can be converted to symbolic Ruby code.
+
+ ## What "Symbolic" Means
+
+ A task is **symbolic** (can be optimized) if it follows a predictable algorithm:
+ - Reading files, calling APIs, transforming data = SYMBOLIC (even though outputs vary based on data)
+ - The same code logic applies regardless of the actual data values
+ - Conditional branches based on data (if file exists, if value > threshold) are fine
+
+ A task is **neural** (cannot be optimized) only if it requires:
+ - Creative text generation (writing stories, poems, marketing copy)
+ - Subjective reasoning or judgment calls
+ - Understanding nuanced human intent that varies per request
+
+ **Key insight:** File I/O, API calls, and data transformation are deterministic CODE even if their outputs depend on external state. "Read a file and count lines" is symbolic - the algorithm is fixed.
+
+ ## Task Definition
+
+ **Name:** {{.TaskName}}
+ **Instructions:** {{.Instructions}}
+
+ **Inputs:**
+ {{.Inputs}}
+
+ **Outputs:**
+ {{.Outputs}}
+
+ ## Current Task Code
+
+ ```ruby
+ {{.TaskCode}}
+ ```
+
+ ## Execution Traces ({{.TraceCount}} samples)
+
+ {{.Traces}}
+
+ ## Pattern Analysis
+
+ - **Most Common Pattern:** {{.CommonPattern}}
+ - **Pattern Consistency:** {{.ConsistencyScore}}%
+ - **Unique Patterns Observed:** {{.UniquePatternCount}}
+
+ ## Available Tools
+
+ {{.ToolsList}}
+
+ ---
+
+ ## Your Task
+
+ Analyze whether this neural task can be converted to symbolic Ruby code.
+
+ Questions to ask:
+ 1. Is there a clear algorithm implied by the instructions?
+ 2. Do the tool call patterns show a logical sequence?
+ 3. Can conditional logic handle the variations seen in traces?
+
+ Tasks that ARE symbolic (optimize these):
+ - "Read file X and return its contents" → read_file, return content
+ - "Check if file exists, create if not" → get_file_info, conditional write_file
+ - "Fetch data from API and transform it" → api_call, data transformation
+
+ Tasks that are NOT symbolic (don't optimize):
+ - "Write a creative story continuation"
+ - "Decide what the user probably meant"
+ - "Generate marketing copy for this product"
+
+ ## Output Format
+
+ Respond with valid JSON:
+
+ ```json
+ {
+   "is_deterministic": true/false,
+   "confidence": 0.0-1.0,
+   "explanation": "Brief explanation",
+   "code": "Ruby code if symbolic, null otherwise"
+ }
+ ```
+
+ **Code Requirements (if symbolic):**
+ - Use the DSL task format with a do block:
+   ```ruby
+   task :task_name,
+     instructions: "Keep the original instructions for documentation",
+     inputs: { ... },
+     outputs: { ... } do |inputs|
+     # Helper methods available: execute_tool, execute_task, execute_llm
+     result = execute_tool(:tool_name, { arg: value })
+     { output_key: result }
+   end
+   ```
+ - Use `execute_tool(:tool_name, { arg: value })` for MCP tool calls
+ - Use `execute_task(:task_name, inputs: { ... })` to call other tasks
+ - Access inputs via the `inputs` hash parameter
+ - Return a hash matching the output schema
+ - Do NOT use system(), eval(), or other unsafe methods
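
For orientation, the sketch below shows roughly the kind of symbolic rewrite the template's "Code Requirements" ask the model to produce. It is not synthesized output from the gem: the task name, instructions, and input/output schema shapes are hypothetical, and only the tool names (`read_file`, `write_file`) and the `execute_tool` helper come from the diff and the `synth/003/output.log` trace.

```ruby
# Hypothetical example of a symbolic rewrite in the DSL task format the template requests.
task :append_to_story,
     instructions: 'Append the given sentence to story.txt and report the new sentence count',
     inputs: { sentence: 'string' },                              # schema shape assumed for illustration
     outputs: { success: 'boolean', total_sentences: 'integer' } do |inputs|
  # Fixed algorithm: read, append, write back — no LLM call required.
  content = execute_tool(:read_file, { path: 'story.txt' }).to_s
  updated = "#{content.chomp}\n#{inputs[:sentence]}"
  execute_tool(:write_file, { path: 'story.txt', content: updated })
  { success: true, total_sentences: updated.lines.count }
end
```
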
data/lib/language_operator/ux/concerns/provider_helpers.rb CHANGED
@@ -156,8 +156,8 @@ module LanguageOperator
    data = JSON.parse(response.body)
    models = data['data']&.map { |m| m['id'] } || []

-   # Filter out fine-tuned/snapshot models for better UX
-   models.reject { |m| m.include?('ft-') || m.include?(':') }
+   # Filter out fine-tuned models for better UX
+   models.reject { |m| m.include?('ft-') }
  rescue StandardError
    nil
  end
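
The behavioural change in that filter is small but worth spelling out: model ids containing a colon are no longer dropped, only ids containing `ft-`. A quick illustration with made-up ids:

```ruby
# Made-up ids, for illustration only.
models = ['gpt-4o', 'mixtral-8x7b:nitro', 'ft-custom-2024', 'llama3:8b']

models.reject { |m| m.include?('ft-') || m.include?(':') }
# => ["gpt-4o"]                                      (old filter: every ':' id is dropped too)

models.reject { |m| m.include?('ft-') }
# => ["gpt-4o", "mixtral-8x7b:nitro", "llama3:8b"]   (new filter: only 'ft-' ids are dropped)
```
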
data/lib/language_operator/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module LanguageOperator
-   VERSION = '0.1.58'
+   VERSION = '0.1.61'
  end
data/synth/003/Makefile CHANGED
@@ -1,6 +1,6 @@
  .PHONY: create code logs clean

- AGENT := synth-003
+ AGENT := s003
  AICTL := bundle exec ../../bin/aictl agent
  TOOLS := workspace

@@ -8,11 +8,18 @@ create:
  	cat agent.txt | $(AICTL) create --name $(AGENT) --tools "$(TOOLS)"

  run:
- 	kubectl create job --from=cronjob/$(AGENT) $(AGENT)-manual
+ 	@JOB_NAME=$(AGENT)-$(shell date +%s); \
+ 	kubectl create job --from=cronjob/$(AGENT) $$JOB_NAME && \
+ 	trap "kubectl delete job $$JOB_NAME" EXIT; \
+ 	kubectl wait --for=condition=ready pod -l job-name=$$JOB_NAME --timeout=60s && \
+ 	kubectl logs -f job/$$JOB_NAME

  code:
  	$(AICTL) code $(AGENT)

+ optimize:
+ 	$(AICTL) optimize $(AGENT)
+
  logs:
  	$(AICTL) logs $(AGENT)

@@ -21,4 +28,4 @@ clean:

  save:
  	$(AICTL) code $(AGENT) --raw > agent.rb
- 	$(AICTL) logs $(AGENT) > output.log
+ 	$(AICTL) logs $(AGENT) > output.log
data/synth/003/output.log ADDED
@@ -0,0 +1,68 @@
+ ⚬ OpenTelemetry enabled
+ Waiting for tool at localhost:80...
+ Tool ready at localhost:80
+ ⚬ Configuring LLM (provider=openai_compatible, model=mistralai/magistral-small-2509, timeout=300)
+ ⚬ LLM configuration complete
+ ⚬ Connecting to MCP servers (count=1)
+ ⚬ Successfully connected to MCP server (server=default-tools-0)
+ ⚬ MCP server connected (server=default-tools-0, tool_count=6, tools=["read_file", "write_file", "list_directory", "create_directory", "get_file_info", "search_files"])
+ ⚬ MCP connection summary (connected_servers=1, total_tools=6)
+ ⚬ Chat session initialized (with_tools=true)
+ ⚬ Agent running in scheduled mode - executing once (agent_name=synth-003, dsl_version=v1)
+ ⚬ Executing main block (agent=synth-003, task_count=3)
+ ⚬ Executing main block (inputs_keys=[])
+ ⚬ Executing task (task=read_existing_story, type=neural, timeout=360.0, max_retries=3)
+ ⚬ Sending prompt to LLM (task=read_existing_story, prompt_length=572, available_tools=["read_file", "write_file", "list_directory", "create_directory", "get_file_info", "search_files"])
+ ⚬ Tool call initiated by LLM (event=tool_call_initiated, tool_name=get_file_info, tool_id=617083832, arguments={"path" => "story.txt"}, arguments_json={"path":"story.txt"})
+ ⚬ Tool call result received (event=tool_result_received, result=Path: story.txt
+ Type: file
+ Size: 200 B
+ Permissions: 664
+ Owner UID: 1000
+ Owner GID: 101
+ Created: 2025-11-19 02:00:00 +0000
+ Modified: 2025-11-19 01:45:12 +0000
+ Accessed: 2025-11-19 02:07:30 +0000, result_preview=Path: story.txt
+ Type: file
+ Size: 200 B
+ Permissions: 664
+ Owner UID: 1000
+ Owner GID: 101
+ Created: 20...)
+ ⚬ Tool call initiated by LLM (event=tool_call_initiated, tool_name=read_file, tool_id=783124383, arguments={"path" => "story.txt"}, arguments_json={"path":"story.txt"})
+ ⚬ Tool call result received (event=tool_result_received, result=Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named Lily.
+ One day, while playing near the edge of the forest, she discovered a mysterious glowing flower., result_preview=Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named...)
+ ⚬ LLM response received, extracting content (task=read_existing_story, response_class=RubyLLM::Message, has_tool_calls=, tool_call_count=0)
+ ⚬ Neural task response received (task=read_existing_story, response_length=592)
+ ⚬ Parsing neural task response (task=read_existing_story)
+ ⚬ LLM thinking captured (event=llm_thinking, task=read_existing_story, thinking_steps=1, thinking=["Now that I have the content, I need to count the number of sentences by splitting it by newline. The content is a single line, so there's only one sentence.\n\nBut wait, the problem says to split by newline. The content is a single line, so sentence_count should be 1.\n\nNow, I'll prepare the JSON response with the content and sentence count."], thinking_preview=Now that I have the content, I need to count the number of sentences by splitting it by newline. T...)
+ ⚬ Response parsed successfully (task=read_existing_story, output_keys=[:content, :sentence_count])
+ ⚬ Validating task outputs (task=read_existing_story)
+ ⚬ Executing task (task=generate_next_sentence, type=neural, timeout=360.0, max_retries=3)
+ ⚬ Sending prompt to LLM (task=generate_next_sentence, prompt_length=773, available_tools=["read_file", "write_file", "list_directory", "create_directory", "get_file_info", "search_files"])
+ E, [2025-11-19T02:10:54.380136 #7] ERROR -- : OpenTelemetry error: Unable to export 4 spans
+ ⚬ LLM response received, extracting content (task=generate_next_sentence, response_class=RubyLLM::Message, has_tool_calls=, tool_call_count=0)
+ ⚬ Neural task response received (task=generate_next_sentence, response_length=824)
+ ⚬ Parsing neural task response (task=generate_next_sentence)
+ ⚬ LLM thinking captured (event=llm_thinking, task=generate_next_sentence, thinking_steps=1, thinking=["The existing content has two sentences. The story is in a fairy tale style, so the next sentence should continue in that tone.\n\nThe first sentence sets up the setting and character. The second introduces a key event (the glowing flower). A natural continuation might be that Lily decides to pick the flower or something happens when she touches it.\n\nLet me draft a possible next sentence: \"As she reached out to touch the flower, it suddenly let out a soft humming sound.\"\n\nNow, I need to ensure that this is exactly one sentence and maintains the tone.\n\nLet me confirm it's a complete sentence with proper punctuation. It is, and it fits the fairy tale style.\n\nNow, prepare the JSON response with this sentence."], thinking_preview=The existing content has two sentences. The story is in a fairy tale style, so the next sentence s...)
+ ⚬ Response parsed successfully (task=generate_next_sentence, output_keys=[:sentence])
+ ⚬ Validating task outputs (task=generate_next_sentence)
+ ⚬ Executing task (task=append_to_story, type=neural, timeout=360.0, max_retries=3)
+ ⚬ Sending prompt to LLM (task=append_to_story, prompt_length=684, available_tools=["read_file", "write_file", "list_directory", "create_directory", "get_file_info", "search_files"])
+ E, [2025-11-19T02:11:09.382616 #7] ERROR -- : OpenTelemetry error: Unable to export 4 spans
+ ⚬ Tool call initiated by LLM (event=tool_call_initiated, tool_name=read_file, tool_id=982459012, arguments={"path" => "story.txt"}, arguments_json={"path":"story.txt"})
+ ⚬ Tool call result received (event=tool_result_received, result=Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named Lily.
+ One day, while playing near the edge of the forest, she discovered a mysterious glowing flower., result_preview=Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named...)
+ ⚬ Tool call initiated by LLM (event=tool_call_initiated, tool_name=write_file, tool_id=433722466, arguments={"path" => "story.txt", "content" => "Once upon a time, in a quiet village nestled between rolling hills, there lived a young girl named Lily.\nOne day, while playing near the edge of the forest, she discovered a mysterious glowing flower.\nAs she reached out to touch the flower, it suddenly let out a soft humming sound."}, arguments_json={"path":"story.txt","content":"Once upon a time, in a quiet village nestled between rolling hills,...)
+ ⚬ Tool call result received (event=tool_result_received, result=Successfully wrote 282 bytes to story.txt, result_preview=Successfully wrote 282 bytes to story.txt)
+ ⚬ LLM response received, extracting content (task=append_to_story, response_class=RubyLLM::Message, has_tool_calls=, tool_call_count=0)
+ ⚬ Neural task response received (task=append_to_story, response_length=39)
+ ⚬ Parsing neural task response (task=append_to_story)
+ ⚬ Response parsed successfully (task=append_to_story, output_keys=[:success, :total_sentences])
+ ⚬ Validating task outputs (task=append_to_story)
+ ⚬ Main execution (71.111s)
+ ⚬ Main block completed
+ ⚬ Main block execution completed (result={added_sentence: "As she reached out to touch the flower, it suddenly let out a soft humming sound.", total_sentences: 3})
+ Added sentence: As she reached out to touch the flower, it suddenly let out a soft humming sound.
+ Story now has 3 sentences
+ ⚬ Scheduled execution completed - exiting (agent_name=synth-003)
data/synth/README.md CHANGED
@@ -1,5 +1,3 @@
  # Synthesis Tests

- Canonical "hey this might actually work" synthesis suite.
-
- Model: [Magistral-Small-2509](https://huggingface.co/mistralai/Magistral-Small-2509) 4bit MLX
+ Canonical "hey this might actually work" synthesis suite.
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: language-operator
  version: !ruby/object:Gem::Version
-   version: 0.1.58
+   version: 0.1.61
  platform: ruby
  authors:
  - James Ryan
@@ -474,6 +474,7 @@ files:
  - lib/language_operator/cli/formatters/code_formatter.rb
  - lib/language_operator/cli/formatters/log_formatter.rb
  - lib/language_operator/cli/formatters/log_style.rb
+ - lib/language_operator/cli/formatters/optimization_formatter.rb
  - lib/language_operator/cli/formatters/progress_formatter.rb
  - lib/language_operator/cli/formatters/status_formatter.rb
  - lib/language_operator/cli/formatters/table_formatter.rb
@@ -524,6 +525,14 @@ files:
  - lib/language_operator/instrumentation/task_tracer.rb
  - lib/language_operator/kubernetes/client.rb
  - lib/language_operator/kubernetes/resource_builder.rb
+ - lib/language_operator/learning/adapters/base_adapter.rb
+ - lib/language_operator/learning/adapters/jaeger_adapter.rb
+ - lib/language_operator/learning/adapters/signoz_adapter.rb
+ - lib/language_operator/learning/adapters/tempo_adapter.rb
+ - lib/language_operator/learning/optimizer.rb
+ - lib/language_operator/learning/pattern_detector.rb
+ - lib/language_operator/learning/task_synthesizer.rb
+ - lib/language_operator/learning/trace_analyzer.rb
  - lib/language_operator/loggable.rb
  - lib/language_operator/logger.rb
  - lib/language_operator/retry.rb
@@ -535,6 +544,7 @@ files:
  - lib/language_operator/templates/schema/CHANGELOG.md
  - lib/language_operator/templates/schema/agent_dsl_openapi.yaml
  - lib/language_operator/templates/schema/agent_dsl_schema.json
+ - lib/language_operator/templates/task_synthesis.tmpl
  - lib/language_operator/tool_loader.rb
  - lib/language_operator/type_coercion.rb
  - lib/language_operator/ux/base.rb
@@ -561,6 +571,7 @@ files:
  - synth/003/README.md
  - synth/003/agent.rb
  - synth/003/agent.txt
+ - synth/003/output.log
  - synth/README.md
  homepage: https://github.com/language-operator/language-operator
  licenses: