agentf 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/bin/agentf +8 -0
- data/lib/agentf/agent_policy.rb +54 -0
- data/lib/agentf/agents/architect.rb +67 -0
- data/lib/agentf/agents/base.rb +53 -0
- data/lib/agentf/agents/debugger.rb +75 -0
- data/lib/agentf/agents/designer.rb +69 -0
- data/lib/agentf/agents/documenter.rb +58 -0
- data/lib/agentf/agents/explorer.rb +65 -0
- data/lib/agentf/agents/reviewer.rb +64 -0
- data/lib/agentf/agents/security.rb +84 -0
- data/lib/agentf/agents/specialist.rb +68 -0
- data/lib/agentf/agents/tester.rb +79 -0
- data/lib/agentf/agents.rb +19 -0
- data/lib/agentf/cli/architecture.rb +83 -0
- data/lib/agentf/cli/arg_parser.rb +50 -0
- data/lib/agentf/cli/code.rb +165 -0
- data/lib/agentf/cli/install.rb +112 -0
- data/lib/agentf/cli/memory.rb +393 -0
- data/lib/agentf/cli/metrics.rb +103 -0
- data/lib/agentf/cli/router.rb +111 -0
- data/lib/agentf/cli/update.rb +204 -0
- data/lib/agentf/commands/architecture.rb +183 -0
- data/lib/agentf/commands/debugger.rb +238 -0
- data/lib/agentf/commands/designer.rb +179 -0
- data/lib/agentf/commands/explorer.rb +208 -0
- data/lib/agentf/commands/memory_reviewer.rb +186 -0
- data/lib/agentf/commands/metrics.rb +272 -0
- data/lib/agentf/commands/security_scanner.rb +98 -0
- data/lib/agentf/commands/tester.rb +232 -0
- data/lib/agentf/commands.rb +17 -0
- data/lib/agentf/context_builder.rb +35 -0
- data/lib/agentf/installer.rb +580 -0
- data/lib/agentf/mcp/server.rb +310 -0
- data/lib/agentf/memory.rb +530 -0
- data/lib/agentf/packs.rb +74 -0
- data/lib/agentf/service/providers.rb +158 -0
- data/lib/agentf/tools/component_spec.rb +28 -0
- data/lib/agentf/tools/error_analysis.rb +19 -0
- data/lib/agentf/tools/file_match.rb +21 -0
- data/lib/agentf/tools/test_template.rb +17 -0
- data/lib/agentf/tools.rb +12 -0
- data/lib/agentf/version.rb +5 -0
- data/lib/agentf/workflow_contract.rb +158 -0
- data/lib/agentf/workflow_engine.rb +424 -0
- data/lib/agentf.rb +87 -0
- metadata +164 -0
|
@@ -0,0 +1,530 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "redis"
|
|
4
|
+
require "json"
|
|
5
|
+
require "set"
|
|
6
|
+
require "securerandom"
|
|
7
|
+
require "time"
|
|
8
|
+
|
|
9
|
+
module Agentf
|
|
10
|
+
module Memory
|
|
11
|
+
# Redis-backed memory system for agent learning
|
|
12
|
+
class RedisMemory
|
|
13
|
+
attr_reader :project
|
|
14
|
+
|
|
15
|
+
# Opens the Redis connection and probes for optional modules.
#
# @param redis_url [String, nil] connection URL; defaults to Agentf.config.redis_url
# @param project [String, nil] logical namespace; defaults to Agentf.config.project_name
def initialize(redis_url: nil, project: nil)
  @redis_url = redis_url || Agentf.config.redis_url
  @project = project || Agentf.config.project_name
  @client = Redis.new(client_options)
  # Runtime capability detection selects between module-backed storage
  # (RedisJSON / RediSearch) and plain key/value fallbacks.
  @json_supported = detect_json_support
  @search_supported = detect_search_support
  ensure_indexes if @search_supported
end
|
|
23
|
+
|
|
24
|
+
# Persists a task record as a Redis hash under "semantic:<id>".
#
# @param content [String] task text
# @param embedding [Array<Numeric>] vector stored as a JSON string
# @return [String] generated task id ("task_<hex>")
def store_task(content:, embedding: [], language: nil, task_type: nil, success: true, agent: "ARCHITECT")
  id = "task_#{SecureRandom.hex(4)}"

  record = {
    "id" => id,
    "content" => content,
    "project" => @project,
    "language" => language || "",
    "task_type" => task_type || "",
    "success" => success,
    "created_at" => Time.now.to_i,
    "agent" => agent,
    # Embedding is serialized so the hash stays string-valued.
    "embedding" => JSON.generate(embedding)
  }

  @client.hset("semantic:#{id}", record)
  id
end
|
|
44
|
+
|
|
45
|
+
# Writes an episodic memory under "episodic:<id>", using RedisJSON when
# available and falling back to a plain string key otherwise.
#
# @return [String] generated episode id ("episode_<hex>")
# @raise [Redis::CommandError] when RedisJSON fails for any reason other
#   than the module being absent
def store_episode(type:, title:, description:, context: "", code_snippet: "", tags: [], agent: "SPECIALIST", related_task_id: nil, metadata: {})
  id = "episode_#{SecureRandom.hex(4)}"

  record = {
    "id" => id,
    "type" => type,
    "title" => title,
    "description" => description,
    "project" => @project,
    "context" => context,
    "code_snippet" => code_snippet,
    "tags" => tags,
    "created_at" => Time.now.to_i,
    "agent" => agent,
    "related_task_id" => related_task_id || "",
    "metadata" => metadata
  }

  key = "episodic:#{id}"
  payload = JSON.generate(record)

  if @json_supported
    begin
      @client.call("JSON.SET", key, ".", payload)
    rescue Redis::CommandError => e
      raise Redis::CommandError, "Failed to persist episode with RedisJSON: #{e.message}" unless missing_json_module?(e)

      # Module vanished (e.g. different server after reconnect): remember
      # that and degrade to a plain SET.
      @json_supported = false
      @client.set(key, payload)
    end
  else
    @client.set(key, payload)
  end

  id
end
|
|
84
|
+
|
|
85
|
+
# --- Convenience writers: each delegates to store_episode with a fixed
# --- "type" and a type-specific context/metadata encoding.

# Records a successful approach worth repeating.
def store_success(title:, description:, context: "", code_snippet: "", tags: [], agent: "SPECIALIST")
  store_episode(type: "success", title: title, description: description, context: context, code_snippet: code_snippet, tags: tags, agent: agent)
end

# Records a known trap to avoid.
def store_pitfall(title:, description:, context: "", code_snippet: "", tags: [], agent: "SPECIALIST")
  store_episode(type: "pitfall", title: title, description: description, context: context, code_snippet: code_snippet, tags: tags, agent: agent)
end

# Records a general lesson learned.
def store_lesson(title:, description:, context: "", code_snippet: "", tags: [], agent: "SPECIALIST")
  store_episode(type: "lesson", title: title, description: description, context: context, code_snippet: code_snippet, tags: tags, agent: agent)
end

# Records a business-level intent; constraints are flattened into the
# context string and preserved verbatim in metadata.
def store_business_intent(title:, description:, constraints: [], tags: [], agent: "WORKFLOW_ENGINE", priority: 1)
  store_episode(
    type: "business_intent",
    title: title,
    description: description,
    context: constraints.any? ? "Constraints: #{constraints.join('; ')}" : "",
    tags: tags,
    agent: agent,
    metadata: { "intent_kind" => "business", "constraints" => constraints, "priority" => priority }
  )
end

# Records a feature intent with acceptance criteria and explicit non-goals.
def store_feature_intent(title:, description:, acceptance_criteria: [], non_goals: [], tags: [], agent: "ARCHITECT", related_task_id: nil)
  fragments = []
  fragments << "Acceptance: #{acceptance_criteria.join('; ')}" if acceptance_criteria.any?
  fragments << "Non-goals: #{non_goals.join('; ')}" if non_goals.any?

  store_episode(
    type: "feature_intent",
    title: title,
    description: description,
    context: fragments.join(" | "),
    tags: tags,
    agent: agent,
    related_task_id: related_task_id,
    metadata: { "intent_kind" => "feature", "acceptance_criteria" => acceptance_criteria, "non_goals" => non_goals }
  )
end

# Records an incident; blank root-cause/resolution fragments (label with no
# text, ending in ": ") are dropped from the context string.
def store_incident(title:, description:, root_cause: "", resolution: "", tags: [], agent: "DEBUGGER", business_capability: nil)
  fragments = { "Root cause" => root_cause, "Resolution" => resolution }.map { |label, text| "#{label}: #{text}" }

  store_episode(
    type: "incident",
    title: title,
    description: description,
    context: fragments.reject { |fragment| fragment.end_with?(": ") }.join(" | "),
    tags: tags,
    agent: agent,
    metadata: {
      "root_cause" => root_cause,
      "resolution" => resolution,
      "business_capability" => business_capability,
      "confidence" => 0.8
    }
  )
end

# Records a reusable step-by-step playbook.
def store_playbook(title:, description:, steps: [], tags: [], agent: "ARCHITECT", feature_area: nil)
  store_episode(
    type: "playbook",
    title: title,
    description: description,
    context: steps.any? ? "Steps: #{steps.join('; ')}" : "",
    tags: tags,
    agent: agent,
    metadata: { "steps" => steps, "feature_area" => feature_area, "confidence" => 0.9 }
  )
end
|
|
192
|
+
|
|
193
|
+
# Brute-force similarity search over all "semantic:*" task hashes.
# Scans the keyspace, filters by project/language/task_type, scores each
# candidate by cosine similarity and returns the top matches.
#
# @param query_embedding [Array<Numeric>, nil] query vector; empty/nil => []
# @return [Array<Hash>] matching task hashes, each annotated with "score"
def find_similar_tasks(query_embedding:, limit: 5, language: nil, task_type: nil)
  return [] if query_embedding.nil? || query_embedding.empty?

  reference = query_embedding.map(&:to_f)
  matches = []
  cursor = "0"

  loop do
    cursor, keys = @client.scan(cursor, match: "semantic:*", count: 100)
    keys.each do |key|
      record = @client.hgetall(key)
      next if record.nil? || record.empty?
      next unless record["project"] == @project
      next if language && record["language"] != language
      next if task_type && record["task_type"] != task_type

      vector = parse_embedding(record["embedding"])
      next if vector.empty?

      similarity = cosine_similarity(reference, vector)
      next if similarity <= 0

      record["score"] = similarity
      matches << record
    end
    break if cursor == "0"
  end

  matches.sort_by { |record| -record["score"] }.first(limit)
end
|
|
223
|
+
|
|
224
|
+
# Fetches memories of a single episode type, via RediSearch when available
# or by over-fetching and filtering client-side otherwise.
def get_memories_by_type(type:, limit: 10)
  return search_episodic(query: "@type:#{type} @project:{#{@project}}", limit: limit) if @search_supported

  pool = fetch_memories_without_search(limit: [limit * 4, 100].min)
  pool.select { |memory| memory["type"] == type }.first(limit)
end

# Returns intents of the requested kind, or business intents topped up with
# feature intents (up to limit) when no kind is given.
def get_intents(kind: nil, limit: 10)
  case kind
  when "business" then return get_memories_by_type(type: "business_intent", limit: limit)
  when "feature" then return get_memories_by_type(type: "feature_intent", limit: limit)
  end

  business = get_memories_by_type(type: "business_intent", limit: limit)
  remaining = [limit - business.length, 0].max
  return business if remaining.zero?

  business + get_memories_by_type(type: "feature_intent", limit: remaining)
end
|
|
243
|
+
|
|
244
|
+
# Back-compat alias for get_agent_context.
def get_relevant_context(agent:, query_embedding: nil, task_type: nil, limit: 8)
  get_agent_context(agent: agent, query_embedding: query_embedding, task_type: task_type, limit: limit)
end

# Assembles the context bundle handed to an agent: its ranking profile,
# recent intents, the top-ranked memories and up to 3 similar tasks.
def get_agent_context(agent:, query_embedding: nil, task_type: nil, limit: 8)
  profile = context_profile(agent)
  # Over-fetch so ranking has a meaningful candidate pool to cut down.
  pool = get_recent_memories(limit: [limit * 8, 200].min)

  {
    "agent" => agent,
    "profile" => profile,
    "intent" => get_intents(limit: 4),
    "memories" => rank_memories(candidates: pool, agent: agent, profile: profile).first(limit),
    "similar_tasks" => find_similar_tasks(query_embedding: query_embedding, limit: 3, task_type: task_type)
  }
end
|
|
261
|
+
|
|
262
|
+
# Recent pitfall memories for this project.
def get_pitfalls(limit: 10)
  return search_episodic(query: "@type:pitfall @project:{#{@project}}", limit: limit) if @search_supported

  fetch_memories_without_search(limit: [limit * 4, 100].min).select { |memory| memory["type"] == "pitfall" }.first(limit)
end

# Most recent memories of any type for this project.
def get_recent_memories(limit: 10)
  return search_episodic(query: "@project:{#{@project}}", limit: limit) if @search_supported

  fetch_memories_without_search(limit: limit)
end

# Unique tags across the 100 most recent memories (non-array tag fields
# are ignored).
def get_all_tags
  get_recent_memories(limit: 100)
    .map { |memory| memory["tags"] }
    .select { |tags| tags.is_a?(Array) }
    .reduce(Set.new) { |acc, tags| acc.merge(tags) }
    .to_a
end

# Releases the underlying Redis connection.
def close
  @client.close
end
|
|
291
|
+
|
|
292
|
+
private
|
|
293
|
+
|
|
294
|
+
# Creates the episodic RediSearch index when search is available.
# An "index already exists" error is expected on reconnect and ignored;
# anything else is re-raised with a diagnostic hint.
def ensure_indexes
  return unless @search_supported

  begin
    create_episodic_index
  rescue Redis::CommandError => e
    return if index_already_exists?(e)

    raise Redis::CommandError, "Failed to create episodic index: #{e.message}. Ensure Redis Stack with RediSearch is available."
  end
end
|
|
301
|
+
|
|
302
|
+
# Issues FT.CREATE for the "episodic:logs" index over JSON documents with
# the "episodic:" key prefix. The schema is built from (path, alias, type)
# triples and flattened into the RediSearch argument form.
def create_episodic_index
  schema = [
    ["$.id", "id", "TEXT"],
    ["$.type", "type", "TEXT"],
    ["$.title", "title", "TEXT"],
    ["$.description", "description", "TEXT"],
    ["$.project", "project", "TAG"],
    ["$.context", "context", "TEXT"],
    ["$.code_snippet", "code_snippet", "TEXT"],
    ["$.tags", "tags", "TAG"],
    ["$.created_at", "created_at", "NUMERIC"],
    ["$.agent", "agent", "TEXT"],
    ["$.related_task_id", "related_task_id", "TEXT"],
    ["$.metadata.intent_kind", "intent_kind", "TAG"],
    ["$.metadata.priority", "priority", "NUMERIC"],
    ["$.metadata.confidence", "confidence", "NUMERIC"],
    ["$.metadata.business_capability", "business_capability", "TAG"],
    ["$.metadata.feature_area", "feature_area", "TAG"]
  ].flat_map { |path, field_alias, field_type| [path, "AS", field_alias, field_type] }

  @client.call("FT.CREATE", "episodic:logs", "ON", "JSON", "PREFIX", "1", "episodic:", "SCHEMA", *schema)
end
|
|
326
|
+
|
|
327
|
+
# Runs an FT.SEARCH over the episodic index, newest first, and decodes the
# JSON documents out of the raw reply.
#
# Reply layout (RediSearch): [total, key1, fields1, key2, fields2, ...]
# where each fields array holds ["$", "<json document>"].
def search_episodic(query:, limit:)
  reply = @client.call(
    "FT.SEARCH", "episodic:logs",
    query,
    "SORTBY", "created_at", "DESC",
    "LIMIT", "0", limit.to_s
  )

  return [] unless reply && reply[0] > 0

  decoded = []
  (2...reply.length).step(2) do |index|
    fields = reply[index]
    next unless fields.is_a?(Array)

    fields.each_with_index do |field, position|
      next unless field == "$" && position + 1 < fields.length

      begin
        decoded << JSON.parse(fields[position + 1])
      rescue JSON::ParserError
        # Malformed documents are skipped rather than failing the search.
      end
    end
  end
  decoded
end
|
|
355
|
+
|
|
356
|
+
# True when the error indicates the RediSearch index was already created.
def index_already_exists?(error)
  text = error.message
  !text.nil? && text.match?(/index\s+already\s+exists/i)
end
|
|
362
|
+
|
|
363
|
+
# Probes for the RedisJSON module by writing (and then deleting) a
# throwaway key. Returns false when the JSON.* commands are unknown;
# any other command error is re-raised with context.
def detect_json_support
  probe_key = "agentf:json_probe:#{SecureRandom.hex(4)}"
  wrote = false

  begin
    @client.call("JSON.SET", probe_key, ".", "{}")
    wrote = true
    true
  rescue Redis::CommandError => e
    return false if missing_json_module?(e)

    raise Redis::CommandError, "Failed to check RedisJSON availability: #{e.message}"
  ensure
    # Best-effort cleanup of the probe key; failures here are irrelevant.
    if wrote
      begin
        @client.call("JSON.DEL", probe_key)
      rescue Redis::CommandError
        # ignore cleanup errors
      end
    end
  end
end
|
|
385
|
+
|
|
386
|
+
# Probes for the RediSearch module via FT.INFO. A "missing index" error
# still proves the module is present; unknown FT.* commands mean it is not.
def detect_search_support
  @client.call("FT.INFO", "episodic:logs")
  true
rescue Redis::CommandError => e
  return true if index_missing_error?(e)
  return false if missing_search_module?(e)

  raise Redis::CommandError, "Failed to check RediSearch availability: #{e.message}"
end

# True when the error says the queried index does not exist (module
# present, index not yet created).
def index_missing_error?(error)
  text = error.message
  return false if text.nil?

  text.match?(/unknown\s+index\s+name/i) || text.match?(/no\s+such\s+index/i)
end
|
|
402
|
+
|
|
403
|
+
# True when the server rejected a JSON.* command as unknown, i.e. the
# RedisJSON module is not loaded.
def missing_json_module?(error)
  text = error.message
  !text.nil? && text.downcase.include?("unknown command 'json.")
end

# True when the server rejected an FT.* command as unknown, i.e. the
# RediSearch module is not loaded.
def missing_search_module?(error)
  text = error.message
  !text.nil? && text.downcase.include?("unknown command 'ft.")
end
|
|
416
|
+
|
|
417
|
+
# RediSearch-free fallback: SCANs every "episodic:*" key, loads each
# episode and returns the newest `limit` entries.
def fetch_memories_without_search(limit: 10)
  collected = []
  cursor = "0"

  loop do
    cursor, keys = @client.scan(cursor, match: "episodic:*", count: 100)
    # filter_map drops keys whose payload is missing or unparseable.
    collected.concat(keys.filter_map { |key| load_episode(key) })
    break if cursor == "0"
  end

  collected.sort_by { |episode| -(episode["created_at"] || 0) }.first(limit)
end
|
|
432
|
+
|
|
433
|
+
# Ranking profile per agent role: which episode types it prefers and how
# strongly pitfalls are discounted. Unknown agents get a balanced default.
def context_profile(agent)
  profiles = {
    "ARCHITECT" => { "preferred_types" => %w[business_intent feature_intent lesson playbook pitfall], "pitfall_penalty" => 0.1 },
    "SPECIALIST" => { "preferred_types" => %w[playbook success lesson pitfall], "pitfall_penalty" => 0.05 },
    "TESTER" => { "preferred_types" => %w[lesson pitfall incident success], "pitfall_penalty" => 0.0 },
    "DEBUGGER" => { "preferred_types" => %w[incident pitfall lesson], "pitfall_penalty" => 0.0 },
    "SECURITY" => { "preferred_types" => %w[pitfall lesson incident], "pitfall_penalty" => 0.0 }
  }
  fallback = { "preferred_types" => %w[lesson pitfall success business_intent feature_intent], "pitfall_penalty" => 0.05 }

  profiles.fetch(agent.to_s.upcase, fallback)
end
|
|
449
|
+
|
|
450
|
+
# Scores and sorts candidate memories for an agent. The weighted score
# combines type preference (0.45), agent affinity (0.3), recency (0.2) and
# stored confidence (0.05), minus the profile's pitfall penalty.
# NOTE: each kept memory is mutated in place with a "rank_score" key.
def rank_memories(candidates:, agent:, profile:)
  now = Time.now.to_i
  favored = Array(profile["preferred_types"])

  scored = candidates.select { |memory| memory["project"] == @project }
  scored.each do |memory|
    kind = memory["type"].to_s
    meta = memory["metadata"].is_a?(Hash) ? memory["metadata"] : {}
    confidence = meta.fetch("confidence", 0.6).to_f.clamp(0.0, 1.0)

    type_score = favored.include?(kind) ? 1.0 : 0.25
    agent_score = (memory["agent"] == agent || memory["agent"] == "WORKFLOW_ENGINE") ? 1.0 : 0.2
    # Recency decays hyperbolically with age measured in days.
    age_seconds = [now - memory.fetch("created_at", now).to_i, 0].max
    recency_score = 1.0 / (1.0 + (age_seconds / 86_400.0))
    penalty = kind == "pitfall" ? profile.fetch("pitfall_penalty", 0.0).to_f : 0.0

    memory["rank_score"] = ((0.45 * type_score) + (0.3 * agent_score) + (0.2 * recency_score) + (0.05 * confidence) - penalty).round(6)
  end

  scored.sort_by { |memory| -memory["rank_score"] }
end
|
|
474
|
+
|
|
475
|
+
# Reads one episode payload (RedisJSON when supported, plain GET
# otherwise) and parses it. Returns nil for missing keys or invalid JSON.
# If JSON.GET turns out to be unavailable, the flag is flipped and the
# plain-string path is used from then on.
def load_episode(key)
  raw =
    if @json_supported
      begin
        @client.call("JSON.GET", key, ".")
      rescue Redis::CommandError => e
        raise unless missing_json_module?(e)

        @json_supported = false
        @client.get(key)
      end
    else
      @client.get(key)
    end

  raw && JSON.parse(raw)
rescue JSON::ParserError
  nil
end
|
|
497
|
+
|
|
498
|
+
# Decodes a stored embedding (JSON string or already-decoded array) into
# an Array<Float>; any other shape, nil/empty input or parse failure
# yields [].
def parse_embedding(raw)
  return [] if raw.nil? || raw.empty?

  decoded = raw.is_a?(String) ? JSON.parse(raw) : raw
  decoded.is_a?(Array) ? decoded.map(&:to_f) : []
rescue JSON::ParserError
  []
end

# Cosine similarity of two equal-length vectors; 0.0 for mismatched
# lengths, empty input or zero-magnitude vectors.
def cosine_similarity(a, b)
  return 0.0 if a.empty? || b.empty? || a.length != b.length

  dot = a.zip(b).sum { |x, y| x * y }
  mag_a = Math.sqrt(a.sum { |v| v * v })
  mag_b = Math.sqrt(b.sum { |v| v * v })
  return 0.0 if mag_a.zero? || mag_b.zero?

  dot / (mag_a * mag_b)
end
|
|
519
|
+
|
|
520
|
+
# Options hash handed to Redis.new; kept as a method so subclasses or
# tests can override connection configuration in one place.
def client_options
  { url: @redis_url }
end
|
|
523
|
+
end
|
|
524
|
+
|
|
525
|
+
# Convenience factory: builds a RedisMemory bound to the given project
# (falls back to the configured default when project is nil).
def self.memory(project: nil)
  RedisMemory.new(project: project)
end
|
|
529
|
+
end
|
|
530
|
+
end
|
data/lib/agentf/packs.rb
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Agentf
  # Named workflow "packs": provider profiles carrying inference keywords
  # and per-task-type agent pipelines.
  module Packs
    PROFILES = {
      "generic" => {
        "name" => "Generic",
        "description" => "Default provider workflows without domain specialization.",
        "keywords" => [],
        "workflow_templates" => {}
      },
      "rails_standard" => {
        "name" => "Rails Standard",
        "description" => "Thin models/controllers with services, queries, presenters, and policy reviews.",
        "keywords" => %w[rails activerecord rspec pundit viewcomponent hotwire turbo stimulus],
        "workflow_templates" => {
          "feature" => %w[ARCHITECT EXPLORER SPECIALIST TESTER SECURITY REVIEWER DOCUMENTER],
          "bugfix" => %w[ARCHITECT DEBUGGER SPECIALIST TESTER SECURITY REVIEWER],
          "refactor" => %w[ARCHITECT EXPLORER SPECIALIST TESTER REVIEWER],
          "quick_fix" => %w[SPECIALIST TESTER REVIEWER],
          "exploration" => %w[EXPLORER]
        }
      },
      "rails_37signals" => {
        "name" => "Rails 37signals",
        "description" => "Resource-centric workflows favoring concerns, CRUD and model-rich patterns.",
        "keywords" => %w[rails concern crud closure model minitest hotwire],
        "workflow_templates" => {
          "feature" => %w[ARCHITECT EXPLORER SPECIALIST TESTER REVIEWER DOCUMENTER],
          "bugfix" => %w[ARCHITECT DEBUGGER SPECIALIST TESTER REVIEWER],
          "refactor" => %w[ARCHITECT SPECIALIST TESTER REVIEWER],
          "quick_fix" => %w[SPECIALIST REVIEWER],
          "exploration" => %w[EXPLORER]
        }
      },
      "rails_feature_spec" => {
        "name" => "Rails Feature Spec",
        "description" => "Feature-spec-first orchestration with planning and review emphasis.",
        "keywords" => %w[rails feature specification acceptance criteria],
        "workflow_templates" => {
          "feature" => %w[ARCHITECT EXPLORER DESIGNER SPECIALIST TESTER REVIEWER DOCUMENTER],
          "bugfix" => %w[ARCHITECT DEBUGGER SPECIALIST TESTER REVIEWER],
          "refactor" => %w[ARCHITECT EXPLORER SPECIALIST TESTER REVIEWER],
          "quick_fix" => %w[SPECIALIST REVIEWER],
          "exploration" => %w[EXPLORER]
        }
      }
    }.freeze

    module_function

    # Every known profile keyed by pack id.
    def all
      PROFILES
    end

    # Case-insensitive profile lookup; unknown (or nil) names fall back to
    # the "generic" profile.
    def fetch(name)
      PROFILES[name.to_s.downcase] || PROFILES["generic"]
    end

    # Infers a pack id from free-text context fields.
    #
    # Fix: only string keys were read before, so callers passing the usual
    # symbol-keyed hashes always got "generic". Both key styles are now
    # accepted; string-keyed behavior is unchanged.
    def infer(context = {})
      text = %w[task design_spec stack framework]
             .flat_map { |field| [context[field], context[field.to_sym]] }
             .compact
             .join(" ")
             .downcase
      return "generic" if text.empty?

      return "rails_standard" if includes_any_keyword?(text, PROFILES["rails_standard"]["keywords"])

      "generic"
    end

    # True when any keyword occurs as a substring of text.
    def includes_any_keyword?(text, keywords)
      keywords.any? { |keyword| text.include?(keyword) }
    end
  end
end
|