simple_a2a 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73) hide show
  1. checksums.yaml +7 -0
  2. data/.github/workflows/deploy-github-pages.yml +52 -0
  3. data/CHANGELOG.md +5 -0
  4. data/LICENSE.txt +21 -0
  5. data/README.md +192 -0
  6. data/Rakefile +13 -0
  7. data/docs/api/client/index.md +124 -0
  8. data/docs/api/index.md +27 -0
  9. data/docs/api/models/index.md +233 -0
  10. data/docs/api/server/index.md +162 -0
  11. data/docs/api/storage/index.md +84 -0
  12. data/docs/architecture/index.md +63 -0
  13. data/docs/architecture/protocol.md +112 -0
  14. data/docs/assets/css/custom.css +6 -0
  15. data/docs/examples/basic-usage.md +77 -0
  16. data/docs/examples/index.md +92 -0
  17. data/docs/examples/llm-research.md +92 -0
  18. data/docs/examples/streaming.md +81 -0
  19. data/docs/getting-started/installation.md +48 -0
  20. data/docs/getting-started/quick-start.md +100 -0
  21. data/docs/guides/custom-storage.md +69 -0
  22. data/docs/guides/push-notifications.md +104 -0
  23. data/docs/guides/streaming.md +75 -0
  24. data/docs/index.md +98 -0
  25. data/examples/01_basic_usage/client.rb +75 -0
  26. data/examples/01_basic_usage/server.rb +57 -0
  27. data/examples/02_streaming/client.rb +70 -0
  28. data/examples/02_streaming/server.rb +177 -0
  29. data/examples/03_llm_research/client.rb +138 -0
  30. data/examples/03_llm_research/run +82 -0
  31. data/examples/03_llm_research/server.rb +203 -0
  32. data/examples/03_llm_research/web_client.rb +501 -0
  33. data/examples/common_config.rb +4 -0
  34. data/examples/run +108 -0
  35. data/lib/simple_a2a/client/base.rb +101 -0
  36. data/lib/simple_a2a/client/sse.rb +58 -0
  37. data/lib/simple_a2a/errors.rb +15 -0
  38. data/lib/simple_a2a/json_rpc.rb +89 -0
  39. data/lib/simple_a2a/models/agent_capabilities.rb +11 -0
  40. data/lib/simple_a2a/models/agent_card.rb +23 -0
  41. data/lib/simple_a2a/models/agent_interface.rb +11 -0
  42. data/lib/simple_a2a/models/agent_provider.rb +11 -0
  43. data/lib/simple_a2a/models/agent_skill.rb +12 -0
  44. data/lib/simple_a2a/models/artifact.rb +23 -0
  45. data/lib/simple_a2a/models/authentication_info.rb +11 -0
  46. data/lib/simple_a2a/models/base.rb +111 -0
  47. data/lib/simple_a2a/models/message.rb +45 -0
  48. data/lib/simple_a2a/models/part.rb +45 -0
  49. data/lib/simple_a2a/models/push_notification_config.rb +17 -0
  50. data/lib/simple_a2a/models/security_scheme.rb +16 -0
  51. data/lib/simple_a2a/models/send_message_configuration.rb +12 -0
  52. data/lib/simple_a2a/models/stream_response.rb +32 -0
  53. data/lib/simple_a2a/models/task.rb +57 -0
  54. data/lib/simple_a2a/models/task_artifact_update_event.rb +21 -0
  55. data/lib/simple_a2a/models/task_status.rb +20 -0
  56. data/lib/simple_a2a/models/task_status_update_event.rb +19 -0
  57. data/lib/simple_a2a/models/types.rb +39 -0
  58. data/lib/simple_a2a/server/agent_executor.rb +16 -0
  59. data/lib/simple_a2a/server/app.rb +227 -0
  60. data/lib/simple_a2a/server/base.rb +43 -0
  61. data/lib/simple_a2a/server/context.rb +44 -0
  62. data/lib/simple_a2a/server/event_router.rb +50 -0
  63. data/lib/simple_a2a/server/falcon_runner.rb +31 -0
  64. data/lib/simple_a2a/server/multi_agent.rb +50 -0
  65. data/lib/simple_a2a/server/push_sender.rb +80 -0
  66. data/lib/simple_a2a/server/resume_context.rb +14 -0
  67. data/lib/simple_a2a/storage/base.rb +12 -0
  68. data/lib/simple_a2a/storage/memory.rb +41 -0
  69. data/lib/simple_a2a/version.rb +5 -0
  70. data/lib/simple_a2a.rb +49 -0
  71. data/mkdocs.yml +143 -0
  72. data/sig/simple_a2a.rbs +4 -0
  73. metadata +353 -0
@@ -0,0 +1,203 @@
1
#!/usr/bin/env ruby
# frozen_string_literal: true

# Multi-agent research server for the A2A demo.
#
# Usage: bundle exec ruby examples/03_llm_research/server.rb
#
# Requires:
#   ANTHROPIC_API_KEY — for the Anthropic research agent and the evaluator
#   OPENAI_API_KEY — for the OpenAI research agent
#
# Agents hosted at:
#   http://localhost:9292/anthropic — claude-sonnet-4-6 researcher
#   http://localhost:9292/openai — gpt-5.4 researcher
#   http://localhost:9292/evaluator — claude-sonnet-4-6 evaluator

require_relative "../common_config"
require "ruby_llm"
require "async/http/faraday"

# Make ruby_llm use the async-http Faraday adapter so LLM API calls are
# fiber-aware inside Falcon's reactor, enabling true SSE streaming.
RubyLLM::Connection.prepend(Module.new do
  private

  # Rebuilds ruby_llm's Faraday middleware stack with the :async_http
  # adapter swapped in so HTTP calls yield to the fiber scheduler instead
  # of blocking the reactor thread.
  # NOTE(review): mirrors the gem's stock stack (multipart/json request,
  # json response, :llm_errors) — re-verify against ruby_llm on upgrades.
  def setup_middleware(faraday)
    faraday.request :multipart
    faraday.request :json
    faraday.response :json
    faraday.adapter :async_http
    faraday.use :llm_errors, provider: @provider
  end
end)
32
+
33
# Fail fast when either provider credential is missing.
required_keys = %w[ANTHROPIC_API_KEY OPENAI_API_KEY]
required_keys.each do |key|
  abort "#{key} is not set" unless ENV[key]
end

# Hand both credentials to ruby_llm.
RubyLLM.configure do |config|
  config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"]
  config.openai_api_key = ENV["OPENAI_API_KEY"]
end
41
+
42
# ---------------------------------------------------------------------------
# Shared research prompt
# ---------------------------------------------------------------------------
# Format-string template; fill the trailing %s via `RESEARCH_PROMPT % topic`.
RESEARCH_PROMPT = <<~TEMPLATE
  Research the following topic thoroughly. Provide a comprehensive, well-structured
  response covering key concepts, history, current state, applications, and future
  directions. Topic: %s
TEMPLATE
50
+
51
# ---------------------------------------------------------------------------
# Executors
# ---------------------------------------------------------------------------
# Shared helper mixed into each executor: streams an LLM completion back to
# the A2A client as a sequence of artifact-update events.
module StreamingExecutor
  private

  # Runs the chat completion for +prompt+ on +model+ and forwards each chunk
  # through ctx.emit_artifact, bracketed by task start/complete status events.
  #
  # Chunks are emitted with a one-chunk lookahead: each chunk is buffered in
  # `prev` and only emitted once the NEXT chunk arrives, so that when the
  # stream ends the final buffered chunk can be emitted with last_chunk: true.
  # `first` tracks whether anything has been emitted yet; append is false for
  # the first emission (establishes the artifact) and true afterwards.
  #
  # NOTE(review): append/last_chunk are passed both on the Artifact and as
  # kwargs to emit_artifact — presumably the event wrapper and the artifact
  # payload each carry the flags; confirm against A2A::Server::Context.
  def stream_llm(ctx, model, prompt)
    ctx.task.start!
    ctx.emit_status

    first = true
    prev = nil

    RubyLLM.chat(model: model).ask(prompt) do |chunk|
      text = chunk.content.to_s
      next if text.empty?  # skip keep-alive / empty deltas

      if prev
        ctx.emit_artifact(
          A2A::Models::Artifact.new(
            index: 0, parts: [A2A::Models::Part.text(prev)],
            append: !first, last_chunk: false
          ),
          append: !first, last_chunk: false
        )
        first = false
      end
      prev = text
    end

    # Flush the final buffered chunk (if the LLM produced any text at all),
    # this time flagged as the last chunk of the artifact.
    if prev
      ctx.emit_artifact(
        A2A::Models::Artifact.new(
          index: 0, parts: [A2A::Models::Part.text(prev)],
          append: !first, last_chunk: true
        ),
        append: !first, last_chunk: true
      )
    end

    ctx.task.complete!
    ctx.emit_status(final: true)
  end
end
95
+
96
# Research agent backed by Anthropic's claude-sonnet-4-6 model.
class AnthropicResearchExecutor < A2A::Server::AgentExecutor
  include StreamingExecutor

  MODEL = "claude-sonnet-4-6"

  # Joins all text parts of the incoming message into the research topic and
  # streams the completion back. Raises A2A::InvalidParamsError when the
  # message carries no usable text.
  def call(ctx)
    text_parts = ctx.message.parts.filter_map(&:text)
    topic = text_parts.join(" ").strip
    raise A2A::InvalidParamsError, "topic is required" if topic.empty?

    stream_llm(ctx, MODEL, RESEARCH_PROMPT % topic)
  end
end
106
+
107
# Research agent backed by OpenAI's gpt-5.4 model.
class OpenAIResearchExecutor < A2A::Server::AgentExecutor
  include StreamingExecutor

  MODEL = "gpt-5.4"

  # Joins all text parts of the incoming message into the research topic and
  # streams the completion back. Raises A2A::InvalidParamsError when the
  # message carries no usable text.
  def call(ctx)
    text_parts = ctx.message.parts.filter_map(&:text)
    topic = text_parts.join(" ").strip
    raise A2A::InvalidParamsError, "topic is required" if topic.empty?

    stream_llm(ctx, MODEL, RESEARCH_PROMPT % topic)
  end
end
117
+
118
# Evaluator agent: receives a fully-formed evaluation prompt (built by the
# web client from the two research transcripts) and streams the verdict.
class EvaluatorExecutor < A2A::Server::AgentExecutor
  include StreamingExecutor

  MODEL = "claude-sonnet-4-6"

  # Unlike the researchers, message parts are joined with newlines and used
  # verbatim as the prompt — no template is applied.
  def call(ctx)
    prompt = ctx.message.parts.filter_map(&:text).join("\n").strip
    raise A2A::InvalidParamsError, "evaluation prompt is required" if prompt.empty?

    stream_llm(ctx, MODEL, prompt)
  end
end
128
+
129
# ---------------------------------------------------------------------------
# Agent cards
# ---------------------------------------------------------------------------
# Builds the A2A agent card advertising one researcher agent mounted at
# +path+ on the local multi-agent server.
def research_card(name:, model:, path:)
  skill = A2A::Models::AgentSkill.new(
    name: "research",
    description: "Deep research on any topic"
  )

  interface = A2A::Models::AgentInterface.new(
    type: "json-rpc",
    url: "http://localhost:9292#{path}",
    version: "1.0"
  )

  A2A::Models::AgentCard.new(
    name: name,
    version: "1.0",
    description: "Researches topics using #{model}",
    capabilities: A2A::Models::AgentCapabilities.new(streaming: true),
    skills: [skill],
    interfaces: [interface]
  )
end
153
+
154
# The two researcher cards share the same shape and come from the helper.
anthropic_card = research_card(
  name: "AnthropicResearchAgent",
  model: AnthropicResearchExecutor::MODEL,
  path: "/anthropic"
)

openai_card = research_card(
  name: "OpenAIResearchAgent",
  model: OpenAIResearchExecutor::MODEL,
  path: "/openai"
)

# The evaluator advertises a different skill, so its card is built by hand.
evaluator_skill = A2A::Models::AgentSkill.new(
  name: "evaluate",
  description: "Compare two research responses and determine which is more extensive"
)

evaluator_interface = A2A::Models::AgentInterface.new(
  type: "json-rpc",
  url: "http://localhost:9292/evaluator",
  version: "1.0"
)

evaluator_card = A2A::Models::AgentCard.new(
  name: "EvaluatorAgent",
  version: "1.0",
  description: "Evaluates and compares research responses from multiple agents",
  capabilities: A2A::Models::AgentCapabilities.new(streaming: true),
  skills: [evaluator_skill],
  interfaces: [evaluator_interface]
)
185
+
186
# ---------------------------------------------------------------------------
# Start multi-agent server
# ---------------------------------------------------------------------------
banner = [
  "Starting multi-agent research server on http://localhost:9292",
  " /anthropic → #{AnthropicResearchExecutor::MODEL}",
  " /openai → #{OpenAIResearchExecutor::MODEL}",
  " /evaluator → #{EvaluatorExecutor::MODEL} (evaluator)",
  "Press Ctrl-C to stop."
]
banner.each { |line| puts line }
puts

# One process hosts all three agents, each mounted under its own path prefix.
routes = {
  "/anthropic" => { agent_card: anthropic_card, executor: AnthropicResearchExecutor.new },
  "/openai" => { agent_card: openai_card, executor: OpenAIResearchExecutor.new },
  "/evaluator" => { agent_card: evaluator_card, executor: EvaluatorExecutor.new }
}

A2A.multi_server(agents: routes, port: 9292).run
@@ -0,0 +1,501 @@
1
#!/usr/bin/env ruby
# frozen_string_literal: true

# Sinatra web UI for the A2A multi-agent research demo.
#
# Usage (via lifecycle script):
#   ruby examples/run 03_llm_research
#
# Usage (manually):
#   bundle exec ruby examples/03_llm_research/server.rb &
#   bundle exec ruby examples/03_llm_research/web_client.rb
#   open http://localhost:4567

require_relative "../common_config"
require "sinatra/base"
require "async/queue"
require "json"

# Endpoints of the three agents hosted by server.rb (same port, one path
# prefix per agent).
A2A_BASE = "http://localhost:9292"
ANTHROPIC_URL = "#{A2A_BASE}/anthropic"
OPENAI_URL = "#{A2A_BASE}/openai"
EVALUATOR_URL = "#{A2A_BASE}/evaluator"
23
+
24
# ---------------------------------------------------------------------------
# SSE response body.
#
# Protocol::Rack (Falcon's Rack adapter) wraps the body in its own plain Ruby
# fiber and calls body.each from there — Async::Task.current is unavailable
# in that fiber regardless of how we define each.
#
# Bridge with IO.pipe instead:
#   Writer side — a Thread running its own Async reactor; it calls both A2A
#                 SSE clients in parallel, then the evaluator, and writes
#                 complete "data: …\n\n" strings to the write end.
#   Reader side — the each body calls gets("\n\n") on the read end.
#                 Inside Falcon's thread the Ruby fiber scheduler intercepts
#                 that blocking read and turns it into a non-blocking await,
#                 so Falcon can serve other requests while we wait.
# ---------------------------------------------------------------------------
class ResearchSSEBody
  # topic: the research topic string fanned out to both researcher agents.
  def initialize(topic:)
    @topic = topic
  end

  # Rack streaming-body entry point: yields complete SSE frames
  # ("data: {json}\n\n") one at a time until the producer closes the pipe.
  def each
    topic = @topic
    read_io, write_io = IO.pipe

    # Producer thread: runs its own Async reactor (see class comment for why
    # this cannot run in the Rack fiber) and feeds frames into write_io.
    producer = Thread.new do
      # Full transcripts accumulated per agent; fed to the evaluator later.
      anthropic_buf = +""
      openai_buf = +""

      begin
        Async do |task|
          # Events from both researcher subtasks funnel through one queue so
          # frames are written to the pipe from a single place, in order.
          queue = Async::Queue.new

          task_a = task.async do
            A2A.sse_client(url: ANTHROPIC_URL).send_subscribe(
              message: A2A::Models::Message.user(topic)
            ) do |event|
              # Only artifact updates carry response text; ignore status events.
              next unless event.is_a?(A2A::Models::TaskArtifactUpdateEvent)
              text = event.artifact.parts.filter_map(&:text).join
              anthropic_buf << text
              queue.enqueue(agent: "anthropic", text: text)
            end
          rescue => e
            # Surface the failure to the browser rather than dying silently.
            queue.enqueue(agent: "error", text: "Anthropic: #{e.message}")
          ensure
            # Always signal completion so the drain loop below terminates.
            queue.enqueue(:anthropic_done)
          end

          task_b = task.async do
            A2A.sse_client(url: OPENAI_URL).send_subscribe(
              message: A2A::Models::Message.user(topic)
            ) do |event|
              next unless event.is_a?(A2A::Models::TaskArtifactUpdateEvent)
              text = event.artifact.parts.filter_map(&:text).join
              openai_buf << text
              queue.enqueue(agent: "openai", text: text)
            end
          rescue => e
            queue.enqueue(agent: "error", text: "OpenAI: #{e.message}")
          ensure
            queue.enqueue(:openai_done)
          end

          # Drain the queue until both researcher subtasks have signalled done.
          done_count = 0
          while done_count < 2
            item = queue.dequeue
            case item
            when :anthropic_done, :openai_done
              done_count += 1
            else
              write_io.write("data: #{JSON.generate(item)}\n\n")
            end
          end

          # Join the subtasks; exceptions were already reported via the queue.
          task_a.wait rescue nil
          task_b.wait rescue nil

          write_io.write("data: #{JSON.generate(agent: 'status', text: 'Both agents complete. Evaluating…')}\n\n")

          # Sequential third phase: feed both transcripts to the evaluator.
          begin
            A2A.sse_client(url: EVALUATOR_URL).send_subscribe(
              message: A2A::Models::Message.user(eval_prompt(topic, anthropic_buf, openai_buf))
            ) do |event|
              next unless event.is_a?(A2A::Models::TaskArtifactUpdateEvent)
              write_io.write("data: #{JSON.generate(agent: 'evaluator', text: event.artifact.parts.filter_map(&:text).join)}\n\n")
            end
          rescue => e
            write_io.write("data: #{JSON.generate(agent: 'error', text: "Evaluator: #{e.message}")}\n\n")
          end

          # Tells the browser's EventSource handler to close the connection.
          write_io.write("data: #{JSON.generate(agent: 'done', text: '')}\n\n")
        end
      rescue => e
        # Reactor-level failure; best-effort report (pipe may already be gone).
        write_io.write("data: #{JSON.generate(agent: 'error', text: e.message)}\n\n") rescue nil
      ensure
        # Closing the write end makes the reader's gets return nil, ending each.
        write_io.close rescue nil
      end
    end

    # Consumer side: one complete SSE frame per gets("\n\n") call. Under
    # Falcon this blocking read is intercepted by the fiber scheduler.
    while (line = read_io.gets("\n\n"))
      yield line
    end
  ensure
    read_io.close rescue nil
    producer&.join
  end

  private

  # Builds the evaluator prompt embedding both full transcripts.
  def eval_prompt(topic, a_text, b_text)
    <<~PROMPT
      Two AI agents researched the same topic. Evaluate which response is more extensive and comprehensive.

      Topic: #{topic}

      == Response A: Claude (claude-sonnet-4-6) ==
      #{a_text}

      == Response B: OpenAI (gpt-5.4) ==
      #{b_text}

      Evaluate on: length and detail, breadth of subtopics, depth of analysis, concrete examples, overall information density.
      Give a clear verdict stating which response (A or B) is more extensive, and explain why.
    PROMPT
  end
end
150
+
151
# ---------------------------------------------------------------------------
# App
# ---------------------------------------------------------------------------
class ResearchApp < Sinatra::Base
  set :server, "falcon"
  set :port, 4567
  set :bind, "localhost"
  set :logging, false

  # Serves the embedded single-page UI.
  get "/" do
    content_type "text/html"
    HTML_PAGE
  end

  # SSE endpoint consumed by the browser's EventSource.
  get "/research" do
    topic = params[:topic].to_s.strip

    headers "Content-Type" => "text/event-stream",
            "Cache-Control" => "no-cache",
            "X-Accel-Buffering" => "no"

    # Guard clause: report the missing topic and terminate the stream.
    return body(missing_topic_frames) if topic.empty?

    body ResearchSSEBody.new(topic: topic)
  end

  private

  # Two-frame SSE body: an error event followed by the terminating done event.
  def missing_topic_frames
    ["data: #{JSON.generate(agent: 'error', text: 'Topic is required')}\n\n",
     "data: #{JSON.generate(agent: 'done', text: '')}\n\n"]
  end
end
180
+
181
# ---------------------------------------------------------------------------
# HTML (embedded — no views/ directory needed)
# ---------------------------------------------------------------------------
# Single-quoted heredoc: no interpolation, served verbatim by GET /. The JS
# opens an EventSource on /research and dispatches frames by their `agent`
# field (anthropic / openai / evaluator / status / done / error).
HTML_PAGE = <<~'HTML'
  <!DOCTYPE html>
  <html lang="en">
  <head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>A2A Multi-Agent Research</title>
  <style>
  *, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }

  :root {
  --bg: #0d1117;
  --surface: #161b22;
  --surface2: #1c2128;
  --border: #30363d;
  --text: #e6edf3;
  --muted: #8b949e;
  --accent: #58a6ff;
  --green: #3fb950;
  --amber: #d29922;
  --code: #c9d1d9;
  }

  body {
  background: var(--bg);
  color: var(--text);
  font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif;
  min-height: 100vh;
  padding: 1.5rem;
  display: flex;
  flex-direction: column;
  gap: 1.1rem;
  }

  header { text-align: center; }
  header h1 {
  font-size: 1.5rem;
  font-weight: 600;
  color: var(--accent);
  letter-spacing: -0.02em;
  }
  header p { color: var(--muted); font-size: 0.8rem; margin-top: 0.2rem; }

  .search-row { display: flex; gap: 0.6rem; }

  #topic {
  flex: 1;
  padding: 0.6rem 0.9rem;
  background: var(--surface);
  border: 1px solid var(--border);
  border-radius: 8px;
  color: var(--text);
  font-size: 0.9rem;
  }
  #topic:focus { outline: none; border-color: var(--accent); }

  #btn {
  padding: 0.6rem 1.3rem;
  background: #238636;
  border: 1px solid rgba(240,246,252,0.1);
  border-radius: 8px;
  color: #fff;
  font-size: 0.9rem;
  font-weight: 500;
  cursor: pointer;
  white-space: nowrap;
  }
  #btn:hover:not(:disabled) { background: #2ea043; }
  #btn:disabled { background: var(--surface2); color: var(--muted); cursor: not-allowed; }

  #status {
  font-size: 0.78rem;
  color: var(--muted);
  text-align: center;
  min-height: 1em;
  font-style: italic;
  border-radius: 6px;
  padding: 0.1rem 0;
  transition: all 0.15s ease;
  }
  #status.error {
  background: #b91c1c;
  color: #fff;
  font-size: 1.15rem;
  font-weight: 700;
  font-style: normal;
  padding: 0.6rem 1rem;
  letter-spacing: 0.01em;
  }

  .panels {
  display: grid;
  grid-template-columns: 1fr 1fr;
  gap: 1rem;
  }

  .panel, .eval-panel {
  background: var(--surface);
  border: 1px solid var(--border);
  border-radius: 10px;
  overflow: hidden;
  display: flex;
  flex-direction: column;
  }

  .panel-header, .eval-header {
  padding: 0.55rem 0.9rem;
  background: var(--surface2);
  border-bottom: 1px solid var(--border);
  display: flex;
  align-items: center;
  gap: 0.5rem;
  flex-shrink: 0;
  }

  .dot {
  width: 7px; height: 7px;
  border-radius: 50%;
  background: var(--border);
  flex-shrink: 0;
  }
  .dot.active {
  background: var(--green);
  animation: pulse 1.2s ease-in-out infinite;
  }
  .dot.eval.active { background: var(--amber); }
  @keyframes pulse {
  0%, 100% { opacity: 1; }
  50% { opacity: 0.35; }
  }

  .panel-title { font-size: 0.82rem; font-weight: 500; }
  .panel-model {
  font-size: 0.7rem;
  color: var(--accent);
  font-family: monospace;
  margin-left: auto;
  }
  .panel-model.eval-model { color: var(--amber); }
  .char-count {
  font-size: 0.68rem;
  color: var(--muted);
  font-variant-numeric: tabular-nums;
  margin-left: 0.4rem;
  }

  .panel-body {
  flex: 1;
  padding: 0.75rem 0.9rem;
  font-family: "SFMono-Regular", "Consolas", "Liberation Mono", monospace;
  font-size: 0.78rem;
  line-height: 1.65;
  white-space: pre-wrap;
  word-break: break-word;
  overflow-y: auto;
  color: var(--code);
  height: 380px;
  }
  .panel-body.placeholder, .eval-body.placeholder {
  color: var(--muted);
  font-family: sans-serif;
  font-size: 0.8rem;
  font-style: italic;
  }

  .eval-body {
  padding: 0.75rem 0.9rem;
  font-family: "SFMono-Regular", "Consolas", "Liberation Mono", monospace;
  font-size: 0.78rem;
  line-height: 1.65;
  white-space: pre-wrap;
  word-break: break-word;
  color: var(--code);
  min-height: 100px;
  }
  </style>
  </head>
  <body>
  <header>
  <h1>A2A Multi-Agent Research</h1>
  <p>Two LLMs research the same topic in parallel · a third evaluates the results</p>
  </header>

  <div class="search-row">
  <input id="topic" type="text"
  placeholder="Enter a research topic…"
  value="shortcomings and criticisms of the A2A protocol specification">
  <button id="btn" onclick="go()">Research</button>
  </div>

  <div id="status"></div>

  <div class="panels">
  <div class="panel">
  <div class="panel-header">
  <div class="dot" id="dot-a"></div>
  <span class="panel-title">Anthropic</span>
  <span class="panel-model">claude-sonnet-4-6</span>
  <span class="char-count" id="cnt-a"></span>
  </div>
  <div class="panel-body placeholder" id="out-a">Waiting for response…</div>
  </div>

  <div class="panel">
  <div class="panel-header">
  <div class="dot" id="dot-b"></div>
  <span class="panel-title">OpenAI</span>
  <span class="panel-model">gpt-5.4</span>
  <span class="char-count" id="cnt-b"></span>
  </div>
  <div class="panel-body placeholder" id="out-b">Waiting for response…</div>
  </div>
  </div>

  <div class="eval-panel">
  <div class="eval-header">
  <div class="dot eval" id="dot-e"></div>
  <span class="panel-title">Evaluation</span>
  <span class="panel-model eval-model">claude-sonnet-4-6</span>
  </div>
  <div class="eval-body placeholder" id="out-e">Evaluation will appear here after both agents complete.</div>
  </div>

  <script>
  let src = null;
  const lenA = { n: 0 }, lenB = { n: 0 };

  function status(msg, isError) {
  const el = document.getElementById('status');
  el.textContent = msg;
  el.classList.toggle('error', !!isError);
  }
  function dot(id, on) { document.getElementById(id).classList.toggle('active', on); }

  function append(elId, text, cntId, len) {
  const el = document.getElementById(elId);
  if (el.classList.contains('placeholder')) {
  el.classList.remove('placeholder');
  el.textContent = '';
  }
  el.textContent += text;
  el.scrollTop = el.scrollHeight;
  if (cntId) {
  len.n += text.length;
  document.getElementById(cntId).textContent = len.n.toLocaleString() + ' chars';
  }
  }

  function reset() {
  ['out-a','out-b','out-e'].forEach(id => {
  const el = document.getElementById(id);
  el.textContent = id === 'out-e'
  ? 'Evaluation will appear here after both agents complete.'
  : 'Waiting for response…';
  el.classList.add('placeholder');
  });
  ['dot-a','dot-b','dot-e'].forEach(id => dot(id, false));
  ['cnt-a','cnt-b'].forEach(id => document.getElementById(id).textContent = '');
  lenA.n = 0; lenB.n = 0;
  }

  function go() {
  const topic = document.getElementById('topic').value.trim();
  if (!topic) return;

  if (src) { src.close(); src = null; }
  reset();
  document.getElementById('btn').disabled = true;
  dot('dot-a', true); dot('dot-b', true);
  status('Querying both agents in parallel…', false);

  src = new EventSource('/research?topic=' + encodeURIComponent(topic));

  src.onmessage = e => {
  const { agent, text } = JSON.parse(e.data);
  switch (agent) {
  case 'anthropic': append('out-a', text, 'cnt-a', lenA); break;
  case 'openai': append('out-b', text, 'cnt-b', lenB); break;
  case 'evaluator': append('out-e', text); break;
  case 'status':
  status(text);
  if (text.includes('Evaluat')) {
  dot('dot-a', false); dot('dot-b', false); dot('dot-e', true);
  }
  break;
  case 'done':
  dot('dot-e', false);
  status('Research complete.');
  document.getElementById('btn').disabled = false;
  src.close(); src = null;
  break;
  case 'error':
  status('Error: ' + text, true);
  document.getElementById('btn').disabled = false;
  ['dot-a','dot-b','dot-e'].forEach(id => dot(id, false));
  src.close(); src = null;
  break;
  }
  };

  src.onerror = () => {
  if (!src || src.readyState === EventSource.CLOSED) return;
  status('Connection lost.', true);
  document.getElementById('btn').disabled = false;
  ['dot-a','dot-b','dot-e'].forEach(id => dot(id, false));
  src.close(); src = null;
  };
  }

  document.getElementById('topic').addEventListener('keydown', e => {
  if (e.key === 'Enter') go();
  });
  </script>
  </body>
  </html>
HTML

# Boot the Sinatra app (blocks until interrupted).
ResearchApp.run!
@@ -0,0 +1,4 @@
1
# simple_a2a/examples/common_config.rb

# Shared bootstrap for every example: put the gem's lib/ directory on the
# load path so the examples run from a source checkout without installing
# the gem first.
$LOAD_PATH.unshift File.expand_path("../lib", __dir__)
require "simple_a2a"