language-operator 0.1.58 → 0.1.61
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile.lock +1 -1
- data/components/agent/Gemfile +1 -1
- data/lib/language_operator/agent/base.rb +22 -0
- data/lib/language_operator/agent/task_executor.rb +80 -23
- data/lib/language_operator/agent/telemetry.rb +22 -11
- data/lib/language_operator/agent.rb +3 -0
- data/lib/language_operator/cli/base_command.rb +7 -1
- data/lib/language_operator/cli/commands/agent.rb +575 -0
- data/lib/language_operator/cli/formatters/optimization_formatter.rb +226 -0
- data/lib/language_operator/cli/formatters/progress_formatter.rb +1 -1
- data/lib/language_operator/client/base.rb +74 -2
- data/lib/language_operator/client/mcp_connector.rb +4 -6
- data/lib/language_operator/dsl/task_definition.rb +7 -6
- data/lib/language_operator/learning/adapters/base_adapter.rb +149 -0
- data/lib/language_operator/learning/adapters/jaeger_adapter.rb +221 -0
- data/lib/language_operator/learning/adapters/signoz_adapter.rb +435 -0
- data/lib/language_operator/learning/adapters/tempo_adapter.rb +239 -0
- data/lib/language_operator/learning/optimizer.rb +319 -0
- data/lib/language_operator/learning/pattern_detector.rb +260 -0
- data/lib/language_operator/learning/task_synthesizer.rb +288 -0
- data/lib/language_operator/learning/trace_analyzer.rb +285 -0
- data/lib/language_operator/templates/schema/agent_dsl_openapi.yaml +1 -1
- data/lib/language_operator/templates/schema/agent_dsl_schema.json +1 -1
- data/lib/language_operator/templates/task_synthesis.tmpl +98 -0
- data/lib/language_operator/ux/concerns/provider_helpers.rb +2 -2
- data/lib/language_operator/version.rb +1 -1
- data/synth/003/Makefile +10 -3
- data/synth/003/output.log +68 -0
- data/synth/README.md +1 -3
- metadata +12 -1
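
The headline change in this release is a new `aictl agent optimize` command backed by the `LanguageOperator::Learning` modules listed above (trace analyzer, pattern detector, optimizer, task synthesizer, and the SigNoz/Jaeger/Tempo adapters). As a rough orientation before the full hunks below, the wiring looks approximately like this; it is a condensed sketch assembled from the added code, not an excerpt of it, and it omits the CLI option handling, error paths, and user prompts:

# Condensed sketch of the optimize flow (class names and keyword arguments
# are taken from the diff below; concrete values are illustrative only).
trace_analyzer = LanguageOperator::Learning::TraceAnalyzer.new(
  endpoint: ENV['OTEL_QUERY_ENDPOINT'],
  api_key: ENV['OTEL_QUERY_API_KEY'],
  backend: ENV['OTEL_QUERY_BACKEND']      # signoz, jaeger, or tempo; nil lets it auto-detect
)
validator = LanguageOperator::Agent::Safety::ASTValidator.new
pattern_detector = LanguageOperator::Learning::PatternDetector.new(
  trace_analyzer: trace_analyzer,
  validator: validator
)
optimizer = LanguageOperator::Learning::Optimizer.new(
  agent_name: 'my-agent',                 # illustrative agent name
  agent_definition: agent_definition,     # parsed from the agent's "<name>-code" ConfigMap
  trace_analyzer: trace_analyzer,
  pattern_detector: pattern_detector,
  task_synthesizer: nil                   # optional LLM-backed fallback
)
opportunities = optimizer.analyze(time_range: 86_400)   # seconds, derived from --since
proposal = optimizer.propose(task_name: opportunities.first[:task_name])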
@@ -491,6 +491,188 @@ module LanguageOperator
         end
       end
 
+      desc 'optimize NAME', 'Optimize neural tasks to symbolic based on learned patterns'
+      long_desc <<-DESC
+        Analyze agent execution patterns and propose optimizations to convert
+        neural (LLM-based) tasks into symbolic (code-based) implementations.
+
+        This command queries OpenTelemetry traces to detect deterministic patterns
+        in task execution, then generates optimized symbolic code that runs faster
+        and costs less while maintaining the same behavior.
+
+        Requirements:
+          • OpenTelemetry backend configured (SigNoz, Jaeger, or Tempo)
+          • Neural task has executed at least 10 times
+          • Execution pattern consistency >= 85%
+
+        Examples:
+          aictl agent optimize my-agent                      # Analyze and propose optimizations
+          aictl agent optimize my-agent --dry-run            # Show what would be optimized
+          aictl agent optimize my-agent --status-only        # Show learning status only
+          aictl agent optimize my-agent --auto-accept        # Auto-accept high-confidence optimizations
+          aictl agent optimize my-agent --tasks task1,task2  # Optimize specific tasks only
+      DESC
+      option :cluster, type: :string, desc: 'Override current cluster context'
+      option :dry_run, type: :boolean, default: false, desc: 'Show what would be optimized without applying'
+      option :status_only, type: :boolean, default: false, desc: 'Show learning status without optimizing'
+      option :auto_accept, type: :boolean, default: false, desc: 'Auto-accept optimizations above min-confidence'
+      option :min_confidence, type: :numeric, default: 0.90, desc: 'Minimum consistency for auto-accept (0.0-1.0)'
+      option :tasks, type: :array, desc: 'Only optimize specific tasks'
+      option :since, type: :string, desc: 'Only analyze traces since (e.g., "2h", "1d", "7d")'
+      option :use_synthesis, type: :boolean, default: false, desc: 'Use LLM synthesis instead of pattern detection'
+      option :synthesis_model, type: :string, desc: 'Model to use for synthesis (default: cluster default)'
+      def optimize(name)
+        handle_command_error('optimize agent') do
+          require_relative '../../learning/trace_analyzer'
+          require_relative '../../learning/pattern_detector'
+          require_relative '../../learning/optimizer'
+          require_relative '../../learning/task_synthesizer'
+          require_relative '../../agent/safety/ast_validator'
+          require_relative '../formatters/optimization_formatter'
+
+          ctx = Helpers::ClusterContext.from_options(options)
+
+          # Get agent to verify it exists
+          get_resource_or_exit('LanguageAgent', name)
+
+          # Get agent code/definition
+          agent_definition = load_agent_definition(ctx, name)
+          unless agent_definition
+            Formatters::ProgressFormatter.error("Could not load agent definition for '#{name}'")
+            exit 1
+          end
+
+          # Check for OpenTelemetry configuration
+          unless ENV['OTEL_QUERY_ENDPOINT']
+            Formatters::ProgressFormatter.warn('OpenTelemetry endpoint not configured')
+            puts
+            puts 'Set OTEL_QUERY_ENDPOINT to enable learning:'
+            puts '  export OTEL_QUERY_ENDPOINT=https://your-signoz-instance.com'
+            puts '  export OTEL_QUERY_API_KEY=your-api-key   # For SigNoz authentication'
+            puts '  export OTEL_QUERY_BACKEND=signoz         # Optional: signoz, jaeger, or tempo'
+            puts
+            puts 'Auto-detection tries backends in order: SigNoz → Jaeger → Tempo'
+            puts 'Set OTEL_QUERY_BACKEND to skip auto-detection and use a specific backend.'
+            puts
+            exit 1
+          end
+
+          # Initialize learning components
+          trace_analyzer = Learning::TraceAnalyzer.new(
+            endpoint: ENV.fetch('OTEL_QUERY_ENDPOINT', nil),
+            api_key: ENV.fetch('OTEL_QUERY_API_KEY', nil),
+            backend: ENV.fetch('OTEL_QUERY_BACKEND', nil)
+          )
+
+          unless trace_analyzer.available?
+            Formatters::ProgressFormatter.error('OpenTelemetry backend not available')
+            puts
+            puts 'Check your OTEL_QUERY_ENDPOINT configuration and backend status.'
+            exit 1
+          end
+
+          validator = LanguageOperator::Agent::Safety::ASTValidator.new
+          pattern_detector = LanguageOperator::Learning::PatternDetector.new(
+            trace_analyzer: trace_analyzer,
+            validator: validator
+          )
+
+          # Create task synthesizer for fallback (or forced via --use-synthesis)
+          # Synthesis is used when pattern detection fails OR --use-synthesis is set
+          task_synthesizer = nil
+          llm_client = create_synthesis_llm_client(ctx, options[:synthesis_model])
+          if llm_client
+            task_synthesizer = LanguageOperator::Learning::TaskSynthesizer.new(
+              llm_client: llm_client,
+              validator: validator
+            )
+            Formatters::ProgressFormatter.info('LLM synthesis mode (forced)') if options[:use_synthesis]
+          elsif options[:use_synthesis]
+            Formatters::ProgressFormatter.warn('Could not create LLM client for synthesis')
+          end
+
+          optimizer = LanguageOperator::Learning::Optimizer.new(
+            agent_name: name,
+            agent_definition: agent_definition,
+            trace_analyzer: trace_analyzer,
+            pattern_detector: pattern_detector,
+            task_synthesizer: task_synthesizer
+          )
+
+          formatter = Formatters::OptimizationFormatter.new
+
+          # Parse --since option into time range
+          time_range = parse_since_option(options[:since])
+
+          # Analyze for opportunities
+          opportunities = optimizer.analyze(time_range: time_range)
+
+          # Display analysis only in status-only mode
+          if options[:status_only]
+            puts formatter.format_analysis(agent_name: name, opportunities: opportunities)
+            return
+          end
+
+          # Exit if no opportunities
+          return if opportunities.empty?
+
+          # Filter opportunities:
+          # - If synthesis available: try any task with enough executions
+          # - Otherwise: only tasks ready for pattern detection
+          candidates = if task_synthesizer
+                         # With synthesis, try any task that has min executions
+                         opportunities.select { |opp| opp[:execution_count] >= 10 }
+                       else
+                         opportunities.select { |opp| opp[:ready_for_learning] }
+                       end
+          return if candidates.empty?
+
+          # Process each opportunity
+          candidates.each do |opp|
+            task_name = opp[:task_name]
+
+            # Skip if not in requested tasks list
+            next if options[:tasks] && !options[:tasks].include?(task_name)
+
+            # Generate proposal
+            begin
+              proposal = optimizer.propose(task_name: task_name, use_synthesis: options[:use_synthesis])
+            rescue ArgumentError => e
+              Formatters::ProgressFormatter.warn("Cannot optimize '#{task_name}': #{e.message}")
+              next
+            end
+
+            # Display proposal
+            puts formatter.format_proposal(proposal: proposal)
+
+            # Get user confirmation or auto-accept
+            accepted = if options[:auto_accept] && proposal[:consistency_score] >= options[:min_confidence]
+                         consistency_pct = (proposal[:consistency_score] * 100).round(1)
+                         threshold_pct = (options[:min_confidence] * 100).round(1)
+                         puts pastel.green("✓ Auto-accepting (consistency: #{consistency_pct}% >= #{threshold_pct}%)")
+                         true
+                       elsif options[:dry_run]
+                         puts pastel.yellow('[DRY RUN] Would prompt for acceptance')
+                         false
+                       else
+                         prompt_for_optimization_acceptance(proposal)
+                       end
+
+            # Apply if accepted
+            if accepted && !options[:dry_run]
+              result = apply_optimization(ctx, name, proposal)
+              puts formatter.format_success(result: result)
+            elsif accepted
+              puts pastel.yellow('[DRY RUN] Would apply optimization')
+            else
+              puts pastel.yellow("Skipped optimization for '#{task_name}'")
+            end
+
+            puts
+          end
+        end
+      end
+
       desc 'workspace NAME', 'Browse agent workspace files'
       long_desc <<-DESC
         Browse and manage the workspace files for an agent.
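
For clarity, the auto-accept branch in the hunk above compares the proposal's consistency_score to --min-confidence as plain fractions and only converts them to percentages for display. A worked example with hypothetical numbers:

# Hypothetical values illustrating the auto-accept check above.
consistency_score = 0.93                        # from the proposal
min_confidence = 0.90                           # --min-confidence default
consistency_score >= min_confidence             # => true, so the proposal is auto-accepted
(consistency_score * 100).round(1)              # => 93.0, printed as "93.0% >= 90.0%"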
@@ -538,6 +720,183 @@ module LanguageOperator
 
       private
 
+      # Parse --since option into seconds (time range)
+      #
+      # @param since [String, nil] Duration string (e.g., "2h", "1d", "7d")
+      # @return [Integer, nil] Seconds or nil if not specified
+      def parse_since_option(since)
+        return nil unless since
+
+        match = since.match(/^(\d+)([hHdDwW])$/)
+        unless match
+          Formatters::ProgressFormatter.warn("Invalid --since format '#{since}', using default (24h)")
+          Formatters::ProgressFormatter.info('Valid formats: 2h (hours), 1d (days), 1w (weeks)')
+          return nil
+        end
+
+        value = match[1].to_i
+        unit = match[2].downcase
+
+        case unit
+        when 'h' then value * 3600
+        when 'd' then value * 86_400
+        when 'w' then value * 604_800
+        end
+      end
+
+      # Create LLM client for task synthesis using cluster model
+      #
+      # @param ctx [ClusterContext] Cluster context
+      # @param model_name [String, nil] Specific model to use (defaults to first available)
+      # @return [Object, nil] LLM client or nil if unavailable
+      def create_synthesis_llm_client(ctx, model_name = nil)
+        # Get model from cluster
+        selected_model = model_name || select_synthesis_model(ctx)
+        return nil unless selected_model
+
+        # Get model resource to extract model ID
+        # Always use port-forwarding to deployment (LiteLLM proxy for cost controls)
+        begin
+          model = ctx.client.get_resource('LanguageModel', selected_model, ctx.namespace)
+          model_id = model.dig('spec', 'modelName')
+          return nil unless model_id
+
+          ClusterLLMClient.new(
+            ctx: ctx,
+            model_name: selected_model,
+            model_id: model_id,
+            agent_command: self
+          )
+        rescue StandardError => e
+          @logger&.warn("Failed to create cluster LLM client: #{e.message}")
+          nil
+        end
+      end
+
+      # Select model for synthesis (first available if not specified)
+      #
+      # @param ctx [ClusterContext] Cluster context
+      # @return [String, nil] Model name or nil
+      def select_synthesis_model(ctx)
+        models = ctx.client.list_resources('LanguageModel', namespace: ctx.namespace)
+        return nil if models.empty?
+
+        models.first.dig('metadata', 'name')
+      rescue StandardError
+        nil
+      end
+
+      # LLM client that uses port-forwarding to cluster model deployments (LiteLLM proxy)
+      class ClusterLLMClient
+        def initialize(ctx:, model_name:, model_id:, agent_command:)
+          @ctx = ctx
+          @model_name = model_name
+          @model_id = model_id
+          @agent_command = agent_command
+        end
+
+        def chat(prompt)
+          require 'faraday'
+          require 'json'
+
+          pod = get_model_pod
+          pod_name = pod.dig('metadata', 'name')
+
+          local_port = find_available_port
+          port_forward_pid = nil
+
+          begin
+            port_forward_pid = start_port_forward(pod_name, local_port, 4000)
+            wait_for_port(local_port)
+
+            conn = Faraday.new(url: "http://localhost:#{local_port}") do |f|
+              f.request :json
+              f.response :json
+              f.adapter Faraday.default_adapter
+              f.options.timeout = 120
+              f.options.open_timeout = 10
+            end
+
+            payload = {
+              model: @model_id,
+              messages: [{ role: 'user', content: prompt }],
+              max_tokens: 4000,
+              temperature: 0.3
+            }
+
+            response = conn.post('/v1/chat/completions', payload)
+            result = response.body
+
+            raise "LLM error: #{result['error']['message'] || result['error']}" if result['error']
+
+            result.dig('choices', 0, 'message', 'content')
+          ensure
+            cleanup_port_forward(port_forward_pid) if port_forward_pid
+          end
+        end
+
+        private
+
+        def get_model_pod
+          # Get the deployment for the model
+          deployment = @ctx.client.get_resource('Deployment', @model_name, @ctx.namespace)
+          raise "Deployment '#{@model_name}' not found in namespace '#{@ctx.namespace}'" if deployment.nil?
+
+          labels = deployment.dig('spec', 'selector', 'matchLabels')
+          raise "Deployment '#{@model_name}' has no selector labels" if labels.nil?
+
+          # Convert to hash if needed (K8s API may return K8s::Resource)
+          labels_hash = labels.respond_to?(:to_h) ? labels.to_h : labels
+          raise "Deployment '#{@model_name}' has empty selector labels" if labels_hash.empty?
+
+          label_selector = labels_hash.map { |k, v| "#{k}=#{v}" }.join(',')
+
+          # Find a running pod
+          pods = @ctx.client.list_resources('Pod', namespace: @ctx.namespace, label_selector: label_selector)
+          raise "No pods found for model '#{@model_name}'" if pods.empty?
+
+          running_pods = pods.select { |p| p.dig('status', 'phase') == 'Running' }
+          raise "No running pods found for model '#{@model_name}'" if running_pods.empty?
+
+          running_pods.first
+        end
+
+        def find_available_port
+          server = TCPServer.new('127.0.0.1', 0)
+          port = server.addr[1]
+          server.close
+          port
+        end
+
+        def start_port_forward(pod_name, local_port, remote_port)
+          pid = spawn(
+            'kubectl', 'port-forward',
+            '-n', @ctx.namespace,
+            "pod/#{pod_name}",
+            "#{local_port}:#{remote_port}",
+            %i[out err] => '/dev/null'
+          )
+          Process.detach(pid)
+          pid
+        end
+
+        def wait_for_port(port, max_attempts: 30)
+          max_attempts.times do
+            TCPSocket.new('127.0.0.1', port).close
+            return true
+          rescue Errno::ECONNREFUSED
+            sleep 0.1
+          end
+          raise "Port #{port} not available after #{max_attempts} attempts"
+        end
+
+        def cleanup_port_forward(pid)
+          Process.kill('TERM', pid)
+        rescue Errno::ESRCH
+          # Process already gone
+        end
+      end
+
       def handle_agent_not_found(name, ctx)
         # Get available agents for fuzzy matching
         agents = ctx.client.list_resources('LanguageAgent', namespace: ctx.namespace)
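
The --since parser added above accepts only hour, day, and week suffixes; anything else prints a warning and falls back to the default 24-hour window. Expected conversions, assuming the regex and multipliers shown in the hunk:

# Illustrative return values for parse_since_option (defined above).
parse_since_option('2h')   # => 7_200    (2 * 3600)
parse_since_option('1d')   # => 86_400
parse_since_option('1w')   # => 604_800
parse_since_option('90m')  # => nil      ('m' is not matched; warning printed, default window used)
parse_since_option(nil)    # => nil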
@@ -1131,6 +1490,222 @@ module LanguageOperator
       def format_timestamp(time)
         Formatters::ValueFormatter.timestamp(time)
       end
+
+      # Load agent definition from ConfigMap
+      def load_agent_definition(ctx, agent_name)
+        # Try to get the agent code ConfigMap
+        configmap_name = "#{agent_name}-code"
+        begin
+          configmap = ctx.client.get_resource('ConfigMap', configmap_name, ctx.namespace)
+          code_content = configmap.dig('data', 'agent.rb')
+
+          return nil unless code_content
+
+          # Parse the code to extract agent definition
+          # For now, we'll create a mock definition with the task structure
+          # In a full implementation, this would eval the code safely
+          parse_agent_code(code_content)
+        rescue K8s::Error::NotFound
+          nil
+        rescue StandardError => e
+          @logger&.error("Failed to load agent definition: #{e.message}")
+          nil
+        end
+      end
+
+      # Parse agent code to extract definition
+      def parse_agent_code(code)
+        require_relative '../../dsl/agent_definition'
+
+        # Create a minimal agent definition structure
+        agent_def = Struct.new(:tasks, :name, :mcp_servers) do
+          def initialize
+            super({}, 'agent', {})
+          end
+        end
+
+        agent = agent_def.new
+
+        # Parse tasks from code - extract full task definitions
+        code.scan(/task\s+:(\w+),?\s*(.*?)(?=\n\s*(?:task\s+:|main\s+do|end\s*$))/m) do |match|
+          task_name = match[0].to_sym
+          task_block = match[1]
+
+          # Check if neural (has instructions but no do block) or symbolic
+          is_neural = task_block.include?('instructions:') && !task_block.match?(/\bdo\s*\|/)
+
+          # Extract instructions
+          instructions = extract_string_value(task_block, 'instructions')
+
+          # Extract inputs hash
+          inputs = extract_hash_value(task_block, 'inputs')
+
+          # Extract outputs hash
+          outputs = extract_hash_value(task_block, 'outputs')
+
+          task = Struct.new(:name, :neural?, :instructions, :inputs, :outputs).new(
+            task_name, is_neural, instructions, inputs, outputs
+          )
+
+          agent.tasks[task_name] = task
+        end
+
+        agent
+      end
+
+      # Extract a string value from DSL code (e.g., instructions: "...")
+      def extract_string_value(code, key)
+        # Match both single and double quoted strings, including multi-line
+        match = code.match(/#{key}:\s*(['"])(.*?)\1/m) ||
+                code.match(/#{key}:\s*(['"])(.+?)\1/m)
+        match ? match[2] : ''
+      end
+
+      # Extract a hash value from DSL code (e.g., inputs: { foo: 'bar' })
+      def extract_hash_value(code, key)
+        match = code.match(/#{key}:\s*\{([^}]*)\}/)
+        return {} unless match
+
+        hash_content = match[1].strip
+        return {} if hash_content.empty?
+
+        # Parse simple key: 'value' or key: "value" pairs
+        result = {}
+        hash_content.scan(/(\w+):\s*(['"])([^'"]*)\2/) do |k, _quote, v|
+          result[k.to_sym] = v
+        end
+        result
+      end
+
+      # Prompt user for optimization acceptance
+      def prompt_for_optimization_acceptance(proposal)
+        require 'tty-prompt'
+        prompt = TTY::Prompt.new
+
+        choices = [
+          { name: 'Yes - apply this optimization', value: :yes },
+          { name: 'No - skip this task', value: :no },
+          { name: 'View full code diff', value: :diff },
+          { name: 'Skip all remaining', value: :skip_all }
+        ]
+
+        loop do
+          choice = prompt.select(
+            "Accept optimization for '#{proposal[:task_name]}'?",
+            choices,
+            per_page: 10
+          )
+
+          case choice
+          when :yes
+            return true
+          when :no
+            return false
+          when :diff
+            show_code_diff(proposal)
+            # Loop to ask again
+          when :skip_all
+            throw :skip_all
+          end
+        end
+      end
+
+      # Show full code diff
+      def show_code_diff(proposal)
+        puts
+        puts pastel.bold('Full Generated Code:')
+        puts pastel.dim('=' * 70)
+        puts proposal[:full_generated_code]
+        puts pastel.dim('=' * 70)
+        puts
+      end
+
+      # Apply optimization by updating ConfigMap and restarting pod
+      def apply_optimization(ctx, agent_name, proposal)
+        configmap_name = "#{agent_name}-code"
+        task_name = proposal[:task_name]
+
+        # Get current ConfigMap
+        configmap = ctx.client.get_resource('ConfigMap', configmap_name, ctx.namespace)
+        current_code = configmap.dig('data', 'agent.rb')
+
+        raise "ConfigMap '#{configmap_name}' does not contain agent.rb" unless current_code
+
+        # Replace the neural task with the symbolic implementation
+        updated_code = replace_task_in_code(current_code, task_name, proposal[:proposed_code])
+
+        # Build updated ConfigMap resource
+        # Add annotation to prevent controller from overwriting optimized code
+        updated_configmap = {
+          'apiVersion' => 'v1',
+          'kind' => 'ConfigMap',
+          'metadata' => {
+            'name' => configmap_name,
+            'namespace' => ctx.namespace,
+            'resourceVersion' => configmap.metadata.resourceVersion,
+            'annotations' => {
+              'langop.io/optimized' => 'true',
+              'langop.io/optimized-at' => Time.now.iso8601,
+              'langop.io/optimized-task' => task_name
+            }
+          },
+          'data' => {
+            'agent.rb' => updated_code
+          }
+        }
+
+        # Update ConfigMap
+        ctx.client.update_resource('ConfigMap', configmap_name, ctx.namespace, updated_configmap, 'v1')
+
+        # Restart the agent pod to pick up changes
+        restart_agent_pod(ctx, agent_name)
+
+        {
+          success: true,
+          task_name: task_name,
+          updated_code: proposal[:proposed_code],
+          action: 'applied',
+          message: "Optimization for '#{task_name}' applied successfully"
+        }
+      rescue StandardError => e
+        {
+          success: false,
+          task_name: task_name,
+          error: e.message,
+          action: 'failed',
+          message: "Failed to apply optimization: #{e.message}"
+        }
+      end
+
+      # Replace a task definition in agent code
+      def replace_task_in_code(code, task_name, new_task_code)
+        # Match the task definition including any trailing do block
+        # Pattern matches: task :name, ... (neural) or task :name, ... do |inputs| ... end (symbolic)
+        task_pattern = /task\s+:#{Regexp.escape(task_name.to_s)},?\s*.*?(?=\n\s*(?:task\s+:|main\s+do|end\s*$))/m
+
+        raise "Could not find task ':#{task_name}' in agent code" unless code.match?(task_pattern)
+
+        # Ensure new_task_code has proper trailing newline
+        new_code = "#{new_task_code.strip}\n\n"
+
+        code.gsub(task_pattern, new_code.strip)
+      end
+
+      # Restart agent pod by deleting it (Deployment will recreate)
+      def restart_agent_pod(ctx, agent_name)
+        # Find pods for this agent
+        pods = ctx.client.list_resources('Pod', namespace: ctx.namespace, label_selector: "app=#{agent_name}")
+
+        pods.each do |pod|
+          pod_name = pod.dig('metadata', 'name')
+          begin
+            ctx.client.delete_resource('Pod', pod_name, ctx.namespace)
+            Formatters::ProgressFormatter.info("Restarting pod '#{pod_name}'")
+          rescue StandardError => e
+            Formatters::ProgressFormatter.warn("Could not delete pod '#{pod_name}': #{e.message}")
+          end
+        end
+      end
     end
   end
 end