language-operator 0.1.67 → 0.1.71
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile.lock +25 -24
- data/components/agent/Gemfile +1 -1
- data/docs/persona-driven-system-prompts.md +346 -0
- data/examples/basic_agent_with_default_chat.rb +99 -0
- data/examples/chat_endpoint_agent.rb +66 -0
- data/examples/hybrid_agent.rb +227 -0
- data/examples/identity_aware_chat_agent.rb +83 -0
- data/examples/pure_agent_test.rb +26 -0
- data/examples/ux_helpers_demo.rb +0 -0
- data/lib/language_operator/agent/metadata_collector.rb +222 -0
- data/lib/language_operator/agent/prompt_builder.rb +282 -0
- data/lib/language_operator/agent/web_server.rb +91 -25
- data/lib/language_operator/agent.rb +31 -2
- data/lib/language_operator/dsl/agent_definition.rb +5 -19
- data/lib/language_operator/dsl/chat_endpoint_definition.rb +112 -2
- data/lib/language_operator/templates/schema/agent_dsl_openapi.yaml +1 -1
- data/lib/language_operator/templates/schema/agent_dsl_schema.json +1 -1
- data/lib/language_operator/version.rb +1 -1
- data/synth/003/Makefile +6 -0
- metadata +23 -1
data/lib/language_operator/agent/prompt_builder.rb (new file)

@@ -0,0 +1,282 @@

# frozen_string_literal: true

require_relative '../loggable'
require_relative 'metadata_collector'

module LanguageOperator
  module Agent
    # Dynamic Prompt Builder
    #
    # Generates persona-driven system prompts by combining static persona configuration
    # with dynamic agent metadata and operational context.
    #
    # Supports multiple template styles and configurable levels of context injection.
    # Falls back to static prompts for backward compatibility.
    #
    # @example Basic usage
    #   builder = PromptBuilder.new(agent, chat_endpoint_config)
    #   prompt = builder.build_system_prompt
    #
    # @example With custom template
    #   builder = PromptBuilder.new(agent, config, template: :detailed)
    #   prompt = builder.build_system_prompt
    class PromptBuilder
      include LanguageOperator::Loggable

      attr_reader :agent, :chat_config, :metadata_collector

      # Template levels for different amounts of context injection
      TEMPLATE_LEVELS = {
        minimal: :build_minimal_template,
        standard: :build_standard_template,
        detailed: :build_detailed_template,
        comprehensive: :build_comprehensive_template
      }.freeze

      # Initialize prompt builder
      #
      # @param agent [LanguageOperator::Agent::Base] The agent instance
      # @param chat_config [LanguageOperator::Dsl::ChatEndpointDefinition] Chat endpoint configuration
      # @param options [Hash] Additional options
      # @option options [Symbol] :template Template level (:minimal, :standard, :detailed, :comprehensive)
      # @option options [Boolean] :enable_identity_awareness Enable identity context injection
      def initialize(agent, chat_config, **options)
        @agent = agent
        @chat_config = chat_config
        @options = options
        @metadata_collector = MetadataCollector.new(agent)

        # Configuration
        @template_level = options[:template] || chat_config&.prompt_template_level || :standard
        @identity_awareness_enabled = if options.key?(:enable_identity_awareness)
                                        options[:enable_identity_awareness]
                                      elsif chat_config&.identity_awareness_enabled.nil?
                                        true
                                      else
                                        chat_config.identity_awareness_enabled
                                      end
        @static_prompt = chat_config&.system_prompt
      end

      # Build complete system prompt with persona and context
      #
      # @return [String] Generated system prompt
      def build_system_prompt
        # Return static prompt if identity awareness is disabled
        unless @identity_awareness_enabled
          return @static_prompt || build_fallback_prompt
        end

        # Collect metadata for context injection
        metadata = @metadata_collector.summary_for_prompt

        # Build dynamic prompt based on template level
        if TEMPLATE_LEVELS.key?(@template_level)
          method_name = TEMPLATE_LEVELS[@template_level]
          send(method_name, metadata)
        else
          logger.warn("Unknown template level: #{@template_level}, falling back to standard")
          build_standard_template(metadata)
        end
      rescue StandardError => e
        logger.error('Failed to build dynamic system prompt, falling back to static',
                     error: e.message)
        @static_prompt || build_fallback_prompt
      end

      # Build prompt for conversation context (shorter version)
      #
      # @return [String] Conversation context prompt
      def build_conversation_context
        return nil unless @identity_awareness_enabled

        metadata = @metadata_collector.summary_for_prompt
        build_conversation_context_template(metadata)
      rescue StandardError => e
        logger.error('Failed to build conversation context', error: e.message)
        nil
      end

      private

      def logger_component
        'Agent::PromptBuilder'
      end

      # Minimal template - basic identity only
      def build_minimal_template(metadata)
        base_prompt = @static_prompt || "You are an AI assistant."

        <<~PROMPT.strip
          #{base_prompt}

          You are #{metadata[:agent_name]}, running in #{metadata[:cluster] || 'a Kubernetes cluster'}.
        PROMPT
      end

      # Standard template - identity + basic operational context
      def build_standard_template(metadata)
        base_prompt = @static_prompt || load_persona_prompt || "You are an AI assistant."

        identity_context = build_identity_context(metadata)
        operational_context = build_basic_operational_context(metadata)

        <<~PROMPT.strip
          #{base_prompt}

          #{identity_context}

          #{operational_context}

          You can discuss your role, capabilities, and current operational state. Respond as an intelligent agent with awareness of your function and environment.
        PROMPT
      end

      # Detailed template - full context with capabilities
      def build_detailed_template(metadata)
        base_prompt = @static_prompt || load_persona_prompt || "You are an AI assistant."

        identity_context = build_identity_context(metadata)
        operational_context = build_detailed_operational_context(metadata)
        capabilities_context = build_capabilities_context(metadata)

        <<~PROMPT.strip
          #{base_prompt}

          #{identity_context}

          #{operational_context}

          #{capabilities_context}

          You should:
          - Demonstrate awareness of your identity and purpose
          - Provide context about your operational environment when relevant
          - Discuss your capabilities and tools naturally in conversation
          - Respond as a professional, context-aware agent rather than a generic chatbot
        PROMPT
      end

      # Comprehensive template - all available context
      def build_comprehensive_template(metadata)
        base_prompt = @static_prompt || load_persona_prompt || "You are an AI assistant."

        sections = [
          base_prompt,
          build_identity_context(metadata),
          build_detailed_operational_context(metadata),
          build_capabilities_context(metadata),
          build_environment_context(metadata),
          build_behavioral_guidelines
        ].compact

        sections.join("\n\n")
      end

      # Short context for ongoing conversations
      def build_conversation_context_template(metadata)
        "Agent: #{metadata[:agent_name]} | Mode: #{metadata[:agent_mode]} | Uptime: #{metadata[:uptime]} | Status: #{metadata[:status]}"
      end

      # Build identity context section
      def build_identity_context(metadata)
        lines = []
        lines << "You are #{metadata[:agent_name]}, a language agent."
        lines << "Your primary function is: #{metadata[:agent_description]}" if metadata[:agent_description] != 'AI Agent'
        lines << "You are currently running in #{metadata[:agent_mode]} mode."
        lines.join(' ')
      end

      # Build basic operational context
      def build_basic_operational_context(metadata)
        context_parts = []

        if metadata[:cluster]
          context_parts << "running in the '#{metadata[:cluster]}' cluster"
        end

        if metadata[:uptime] != 'just started'
          context_parts << "active for #{metadata[:uptime]}"
        else
          context_parts << "recently started"
        end

        if metadata[:status] == 'ready'
          context_parts << "currently operational"
        else
          context_parts << "status: #{metadata[:status]}"
        end

        "You are #{context_parts.join(', ')}."
      end

      # Build detailed operational context
      def build_detailed_operational_context(metadata)
        lines = []
        lines << build_basic_operational_context(metadata)

        if metadata[:workspace_available]
          lines << "Your workspace is available and ready for file operations."
        else
          lines << "Your workspace is currently unavailable."
        end

        lines.join(' ')
      end

      # Build capabilities context
      def build_capabilities_context(metadata)
        return nil if metadata[:tool_count].to_i.zero?

        if metadata[:tool_count] == 1
          "You have access to 1 tool to help accomplish tasks."
        else
          "You have access to #{metadata[:tool_count]} tools to help accomplish tasks."
        end
      end

      # Build environment context
      def build_environment_context(metadata)
        context_parts = []

        if metadata[:namespace]
          context_parts << "Namespace: #{metadata[:namespace]}"
        end

        if metadata[:llm_model] && metadata[:llm_model] != 'unknown'
          context_parts << "Model: #{metadata[:llm_model]}"
        end

        return nil if context_parts.empty?

        "Environment details: #{context_parts.join(', ')}"
      end

      # Build behavioral guidelines
      def build_behavioral_guidelines
        <<~GUIDELINES.strip
          Behavioral Guidelines:
          - Maintain awareness of your identity and operational context
          - Provide helpful, accurate responses within your capabilities
          - Reference your environment and tools naturally when relevant
          - Act as a knowledgeable agent rather than a generic assistant
          - Be professional yet personable in your interactions
        GUIDELINES
      end

      # Load persona prompt if available
      def load_persona_prompt
        return nil unless @agent.config&.dig('agent', 'persona')

        # In a full implementation, this would load the persona from Kubernetes
        # For now, we'll rely on the static prompt from chat config
        nil
      end

      # Build fallback prompt when nothing else is available
      def build_fallback_prompt
        "You are an AI assistant running as a language operator agent. You can help with various tasks and questions."
      end
    end
  end
end
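For orientation, here is a minimal usage sketch of the new builder (not part of the diff). It assumes `agent` is an already-constructed LanguageOperator::Agent::Base, that the file is reachable on the load path as language_operator/agent/prompt_builder, and that passing nil for the chat configuration is acceptable (the code above then defaults to the :standard template with identity awareness enabled):

    require 'language_operator/agent/prompt_builder'   # assumed load-path name

    # Full prompt: base/persona prompt + identity + operational context + guidelines
    builder = LanguageOperator::Agent::PromptBuilder.new(agent, nil, template: :comprehensive)
    system_prompt = builder.build_system_prompt

    # Compact, pipe-delimited summary for ongoing conversations, roughly:
    #   "Agent: <name> | Mode: <mode> | Uptime: <uptime> | Status: <status>"
    context_line = builder.build_conversation_context

An unknown template symbol is logged and downgraded to :standard, and any error during metadata collection falls back to the static prompt, so callers do not need their own rescue around these calls.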
data/lib/language_operator/agent/web_server.rb (a few removed lines are truncated in the upstream diff view and are reproduced as-is)

@@ -4,6 +4,7 @@ require 'rack'
 require 'rackup'
 require 'mcp'
 require_relative 'executor'
+require_relative 'prompt_builder'

 module LanguageOperator
   module Agent

@@ -110,15 +111,21 @@ module LanguageOperator

      # Register chat completion endpoint
      #
-     # Sets up OpenAI-compatible chat completion endpoint.
-     #
+     # Sets up OpenAI-compatible chat completion endpoint for all agents.
+     # Every agent automatically gets identity-aware chat capabilities.
      #
-     # @param chat_endpoint_def [LanguageOperator::Dsl::ChatEndpointDefinition] Chat endpoint definition
      # @param agent [LanguageOperator::Agent::Base] The agent instance
      # @return [void]
-     def register_chat_endpoint(
-       @chat_endpoint = chat_endpoint_def
+     def register_chat_endpoint(agent)
        @chat_agent = agent
+
+       # Create simple chat configuration (identity awareness always enabled)
+       @chat_config = {
+         model_name: ENV.fetch('AGENT_NAME', agent.config&.dig('agent', 'name') || 'agent'),
+         system_prompt: build_default_system_prompt(agent),
+         temperature: 0.7,
+         max_tokens: 2000
+       }

        # Register OpenAI-compatible endpoint
        register_route('/v1/chat/completions', method: :post) do |context|

@@ -131,19 +138,19 @@ module LanguageOperator
            object: 'list',
            data: [
              {
-               id:
+               id: @chat_config[:model_name],
                object: 'model',
                created: Time.now.to_i,
                owned_by: 'language-operator',
                permission: [],
-               root:
+               root: @chat_config[:model_name],
                parent: nil
              }
            ]
          }
        end

-       puts "Registered chat
+       puts "Registered identity-aware chat endpoint as model: #{@chat_config[:model_name]}"
      end

      # Handle incoming HTTP request

@@ -193,6 +200,24 @@ module LanguageOperator

      private

+     # Build default system prompt for agent
+     #
+     # Creates a basic system prompt based on agent description
+     #
+     # @param agent [LanguageOperator::Agent::Base] The agent instance
+     # @return [String] Default system prompt
+     def build_default_system_prompt(agent)
+       description = agent.config&.dig('agent', 'instructions') ||
+                     agent.config&.dig('agent', 'description') ||
+                     "AI assistant"
+
+       if description.downcase.start_with?('you are')
+         description
+       else
+         "You are #{description.downcase}. Provide helpful assistance based on your capabilities."
+       end
+     end
+
      # Setup executor pool for connection reuse
      #
      # Creates a thread-safe queue pre-populated with executor instances

@@ -433,29 +458,32 @@ module LanguageOperator
        # Build prompt from messages
        prompt = build_prompt_from_messages(messages)

-       # Execute agent
-       result = @chat_agent.
+       # Execute agent using the correct method
+       result = @chat_agent.execute_goal(prompt)
+
+       # Extract content from result (handle both String and Message objects)
+       result_content = result.is_a?(String) ? result : result.content

        # Build OpenAI-compatible response
        {
          id: "chatcmpl-#{SecureRandom.hex(12)}",
          object: 'chat.completion',
          created: Time.now.to_i,
-         model: @
+         model: @chat_config[:model_name],
          choices: [
            {
              index: 0,
              message: {
                role: 'assistant',
-               content:
+               content: result_content
              },
              finish_reason: 'stop'
            }
          ],
          usage: {
            prompt_tokens: estimate_tokens(prompt),
-           completion_tokens: estimate_tokens(
-           total_tokens: estimate_tokens(prompt) + estimate_tokens(
+           completion_tokens: estimate_tokens(result_content),
+           total_tokens: estimate_tokens(prompt) + estimate_tokens(result_content)
          }
        }
      end

@@ -477,41 +505,79 @@ module LanguageOperator
          'Cache-Control' => 'no-cache',
          'Connection' => 'keep-alive'
        },
-       StreamingBody.new(@chat_agent, prompt, @
+       StreamingBody.new(@chat_agent, prompt, @chat_config[:model_name])
      ]
    end

-     # Build prompt from OpenAI message format
+     # Build prompt from OpenAI message format with identity awareness
      #
      # @param messages [Array<Hash>] Array of message objects
-     # @return [String] Combined prompt
+     # @return [String] Combined prompt with agent identity context
      def build_prompt_from_messages(messages)
-       # Combine all messages into a single prompt
-       # System messages become instructions
-       # User/assistant messages become conversation
        prompt_parts = []

-       #
-
+       # Build identity-aware system prompt (always enabled)
+       system_prompt = build_identity_aware_system_prompt
+       prompt_parts << "System: #{system_prompt}" if system_prompt
+
+       # Add conversation context (always enabled)
+       conversation_context = build_conversation_context
+       prompt_parts << conversation_context if conversation_context

-       # Add conversation history
+       # Add conversation history (skip system messages from original array since we handle them above)
        messages.each do |msg|
          role = msg['role']
          content = msg['content']

          case role
-         when 'system'
-           prompt_parts << "System: #{content}"
          when 'user'
            prompt_parts << "User: #{content}"
          when 'assistant'
            prompt_parts << "Assistant: #{content}"
+         # Skip system messages - we handle them via PromptBuilder
          end
        end

        prompt_parts.join("\n\n")
      end

+     # Build identity-aware system prompt using PromptBuilder
+     #
+     # @return [String] Dynamic system prompt with agent identity
+     def build_identity_aware_system_prompt
+       # Create prompt builder with identity awareness always enabled
+       builder = PromptBuilder.new(
+         @chat_agent,
+         nil, # No chat config needed
+         template: :standard, # Good default
+         enable_identity_awareness: true
+       )
+
+       builder.build_system_prompt
+     rescue StandardError => e
+       # Log error and fall back to static prompt
+       puts "Warning: Failed to build identity-aware system prompt: #{e.message}"
+       @chat_config[:system_prompt]
+     end
+
+     # Build conversation context for ongoing chats
+     #
+     # @return [String, nil] Conversation context
+     def build_conversation_context
+       builder = PromptBuilder.new(
+         @chat_agent,
+         nil, # No chat config needed
+         enable_identity_awareness: true
+       )
+
+       context = builder.build_conversation_context
+       context ? "Context: #{context}" : nil
+     rescue StandardError => e
+       # Log error and continue without context
+       puts "Warning: Failed to build conversation context: #{e.message}"
+       nil
+     end
+
      # Estimate token count (rough approximation)
      #
      # @param text [String] Text to estimate
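Because the chat endpoint is now registered for every agent, any OpenAI-compatible client can treat the agent as a model. A hedged request sketch in Ruby follows; the host, port, and 'my-agent' model id are illustrative assumptions (the diff does not show which address the web server binds to), while the path, payload shape, and response shape come from the code above:

    require 'net/http'
    require 'json'

    # Assumed address; adjust to wherever the agent's web server is reachable.
    uri = URI('http://localhost:8080/v1/chat/completions')

    body = {
      model: 'my-agent',   # matches AGENT_NAME / the agent's configured name
      messages: [
        { role: 'user', content: 'What tools do you have access to?' }
      ]
    }

    response = Net::HTTP.post(uri, body.to_json, 'Content-Type' => 'application/json')
    reply = JSON.parse(response.body)
    puts reply.dig('choices', 0, 'message', 'content')

The models listing above (object: 'list') exposes the agent under that same model id, so OpenAI-SDK-based tooling can discover and address the agent without special-casing.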
data/lib/language_operator/agent.rb

@@ -159,11 +159,40 @@

       case agent.mode
       when 'autonomous', 'interactive'
+        # Hybrid mode: All agents now run main work AND web server (chat endpoints always enabled)
+        logger.info('Starting hybrid agent (autonomous + web server)',
+                    agent_name: agent_def.name,
+                    chat_endpoint_enabled: true, # Always true now
+                    has_webhooks: agent_def.webhooks.any?,
+                    has_mcp_tools: !!(agent_def.mcp_server&.tools?))
+
+        # Start web server in background thread
+        web_server = LanguageOperator::Agent::WebServer.new(agent)
+        agent_def.webhooks.each { |webhook_def| webhook_def.register(web_server) }
+        web_server.register_mcp_tools(agent_def.mcp_server) if agent_def.mcp_server&.tools?
+        web_server.register_chat_endpoint(agent_def.chat_endpoint, agent) # Always register chat endpoint
+
+        web_thread = Thread.new do
+          web_server.start
+        rescue StandardError => e
+          logger.error('Web server error', error: e.message, backtrace: e.backtrace[0..5])
+          raise
+        end
+
+        # Set up signal handlers for graceful shutdown
+        %w[INT TERM].each do |signal|
+          Signal.trap(signal) do
+            logger.info('Received shutdown signal, stopping hybrid agent')
+            web_server.cleanup if web_server.respond_to?(:cleanup)
+            web_thread.kill if web_thread&.alive?
+            exit 0
+          end
+        end
+
+        # Run main work in foreground
         if uses_dsl_v1
-          # DSL v1: Execute main block with task executor in persistent mode
           execute_main_block_persistent(agent, agent_def)
         elsif uses_dsl_v0
-          # DSL v0: Execute workflow in autonomous mode
           executor = LanguageOperator::Agent::Executor.new(agent)
           executor.execute_workflow(agent_def)
         else
data/lib/language_operator/dsl/agent_definition.rb

@@ -4,7 +4,6 @@ require_relative 'main_definition'
 require_relative 'task_definition'
 require_relative 'webhook_definition'
 require_relative 'mcp_server_definition'
-require_relative 'chat_endpoint_definition'
 require_relative '../logger'
 require_relative '../loggable'

@@ -49,7 +48,7 @@ module LanguageOperator
     include LanguageOperator::Loggable

     attr_reader :name, :description, :persona, :schedule, :objectives, :main, :tasks,
-                :constraints, :output_config, :execution_mode, :webhooks, :mcp_server
+                :constraints, :output_config, :execution_mode, :webhooks, :mcp_server

     def initialize(name)
       @name = name

@@ -64,7 +63,6 @@ module LanguageOperator
       @execution_mode = :autonomous
       @webhooks = []
       @mcp_server = nil
-      @chat_endpoint = nil

       logger.debug('Agent definition initialized',
                    name: name,

@@ -314,19 +312,6 @@ module LanguageOperator
       @mcp_server
     end

-    # Define chat endpoint capabilities
-    #
-    # Allows this agent to respond to OpenAI-compatible chat completion requests.
-    # Other systems can treat this agent as a language model.
-    #
-    # @yield Chat endpoint configuration block
-    # @return [ChatEndpointDefinition] The chat endpoint definition
-    def as_chat_endpoint(&block)
-      @chat_endpoint ||= ChatEndpointDefinition.new(@name)
-      @chat_endpoint.instance_eval(&block) if block
-      @execution_mode = :reactive if @execution_mode == :autonomous
-      @chat_endpoint
-    end

     # Execute the agent
     #

@@ -353,6 +338,7 @@ module LanguageOperator

     private

+
     def logger_component
       "Agent:#{@name}"
     end

@@ -367,7 +353,7 @@ module LanguageOperator
       name: @name,
       webhooks: @webhooks.size,
       mcp_tools: @mcp_server&.tools&.size || 0,
-      chat_endpoint:
+      chat_endpoint: true)

     # Create an Agent::Base instance with this definition
     require_relative '../agent/base'

@@ -391,8 +377,8 @@ module LanguageOperator
       # Register MCP tools
       web_server.register_mcp_tools(@mcp_server) if @mcp_server&.tools?

-      # Register chat endpoint
-      web_server.register_chat_endpoint(
+      # Register chat endpoint - automatic for all agents
+      web_server.register_chat_endpoint(agent)

       # Start the server
       web_server.start