llm-agent-rails 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA256:
+   metadata.gz: 9d1e3f9099b547fc6fa0e3373f0975cb6b3f8204d3684b9adac7d4c66610c265
+   data.tar.gz: a323962c8263652345699fd1ecdf7355e307328cbb196c2918206d0747a41fda
+ SHA512:
+   metadata.gz: 6c623acb99a898b831280cb7fc02eaaa91382e752516e2763b6acf0ffad5c7dfc4dc5e7ac492798c13f21c830662ffb4cf6ab02350cf20ada2cfc237cdacdaad
+   data.tar.gz: 33acd6c6f4b1c1e717b24fe8fb768844679145991c6fcbde5f7543cd0f52c94f8963de99943591b1b4665647d5c2f35dcd3cd930f2227c83cca19fe83d9faf72
data/LICENSE ADDED
@@ -0,0 +1,3 @@
+ MIT License
+
+ Copyright (c) ...
data/README.md ADDED
@@ -0,0 +1,111 @@
+ # llm-agent-rails
+
+ Rails engine for **LLM-powered slot filling and tool orchestration**.
+ Mount an endpoint, register tools (JSON Schema + handler), and let an LLM collect missing fields and call your Ruby code safely.
+
+ ## Install
+
+ Add to your Gemfile:
+ ```ruby
+ gem "llm-agent-rails", github: "yourname/llm-agent-rails" # until published
+ ```
+
+ Bundle:
+ ```bash
+ bundle install
+ ```
+
+ Run the installer:
+ ```bash
+ rails g llm:agent:install
+ ```
+ This will:
+ - Create `config/initializers/llm_agent.rb`
+ - Mount the engine at `/llm/agent` (the exact line it adds is shown below)
+
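+ For reference, the generator's `mount_routes` step adds this single line to your application's `config/routes.rb`:
+ ```ruby
+ mount Llm::Agent::Rails::Engine => "/llm/agent"
+ ```
+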
+ ## Quick test (cURL)
+
+ ```bash
+ curl -X POST http://localhost:3000/llm/agent/step -H "Content-Type: application/json" -d '{
+   "thread_id":"demo-thread",
+   "messages":[{"role":"user","content":"Open a ticket: Apple Pay checkout keeps failing on mobile."}]
+ }'
+ ```
+
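+ On success, the controller renders the orchestrator outcome as JSON, so the body takes one of two shapes (field names come from the orchestrator; the values below are illustrative, not real output):
+ ```ruby
+ # No tool ran; the assistant replied, usually asking for a missing field:
+ { type: "assistant", text: "..." }
+ # A tool call was validated and executed; result is whatever the handler returned:
+ { type: "tool_ran", tool_name: "create_ticket", result: { id: 123, title: "...", priority: "high", key: "chat-<thread>-<hex>" } }
+ ```
+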
+ ## Register a tool (example)
+
+ Create `app/llm_tools/tickets.rb`:
+ ```ruby
+ module LlmTools
+   CREATE_TICKET_V1 = {
+     type: "object", additionalProperties: false,
+     properties: {
+       title: { type: "string" },
+       description: { type: "string" },
+       priority: { type: "string", enum: %w[low medium high] },
+       assignee_id: { type: "string" }
+     },
+     required: %w[title description priority]
+   }
+
+   def self.register!(registry)
+     registry.register!(
+       name: "create_ticket", version: "v1",
+       schema: CREATE_TICKET_V1,
+       description: "Create a support ticket.",
+       handler: ->(args, ctx) {
+         key = Llm::Agent::Rails::Idempotency.generate(thread_id: ctx[:thread_id])
+         ticket = Ticket.create!(
+           org_id: ctx[:tenant_id],
+           user_id: ctx[:actor_id],
+           idempotency_key: key,
+           title: args["title"],
+           description: args["description"],
+           priority: args["priority"],
+           assignee_id: args["assignee_id"]
+         )
+         { id: ticket.id, title: ticket.title, priority: ticket.priority, key: key }
+       }
+     )
+   end
+ end
+ ```
+
+ Register it in `config/initializers/llm_agent.rb`:
+ ```ruby
+ # After the Llm::Agent::Rails.configure block
+ require Rails.root.join("app/llm_tools/tickets")
+ LlmTools.register!(Llm::Agent::Rails.config[:registry])
+ ```
+
+ ## How it works
+
+ - **Registry**: Define tools (name/version/schema/description/handler).
+ - **Validators**: JSON Schema validation (`json_schemer`) before calling handlers.
+ - **Idempotency**: Generate a per-thread key to prevent duplicate creates.
+ - **Orchestrator**: Coordinates the conversation, asks for missing fields, executes tools.
+ - **Adapter**: OpenAI adapter (supports `openai ~> 0.21`, `chat.completions.create`).
+ - **Store**: Memory store for tool-result messages (swap for Redis in prod; see the sketch below).
+
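+ A store only needs to respond to `fetch_tool_messages(thread_id)` and `push_tool_message(thread_id, msg)`. As a rough sketch of the Redis swap (not shipped with this gem; the class name, key prefix, and TTL are placeholders, and it assumes the `redis` gem):
+ ```ruby
+ require "redis"
+ require "json"
+
+ # Hypothetical Redis-backed replacement for Llm::Agent::Rails::Store::Memory.
+ class RedisToolStore
+   def initialize(redis: Redis.new, ttl: 3600)
+     @redis = redis
+     @ttl = ttl
+   end
+
+   # Read back the thread's tool-result messages in insertion order.
+   def fetch_tool_messages(thread_id)
+     @redis.lrange(key(thread_id), 0, -1).map { |raw| JSON.parse(raw) }
+   end
+
+   # Append one tool-result message and refresh the thread's expiry.
+   def push_tool_message(thread_id, msg)
+     @redis.rpush(key(thread_id), JSON.dump(msg))
+     @redis.expire(key(thread_id), @ttl)
+   end
+
+   private
+
+   def key(thread_id) = "llm_agent:tool_msgs:#{thread_id}"
+ end
+
+ # config/initializers/llm_agent.rb
+ # Llm::Agent::Rails.configure { |c| c[:store] = RedisToolStore.new }
+ ```
+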
+ ## Routes
+
+ The engine exposes:
+ ```
+ POST /llm/agent/step
+ ```
+ (If you change the mount point, this path changes accordingly.)
+
+ ## Config
+
+ Edit `config/initializers/llm_agent.rb`:
+ ```ruby
+ Llm::Agent::Rails.configure do |c|
+   c[:model] = "gpt-4o-mini"
+   c[:temperature] = 0
+   c[:store] = Llm::Agent::Rails::Store::Memory.new
+   c[:registry] = Llm::Agent::Rails::Registry.new
+ end
+ ```
+
+ ## License
+ MIT
data/Rakefile ADDED
@@ -0,0 +1,2 @@
+ # frozen_string_literal: true
+ require "bundler/gem_tasks"
data/app/controllers/llm/agent/chat_controller.rb ADDED
@@ -0,0 +1,65 @@
+ # frozen_string_literal: true
+ module Llm
+   module Agent
+     class ChatController < ActionController::Base
+       protect_from_forgery with: :null_session
+
+       def step
+         raw = params[:messages]
+         if raw.nil?
+           return render json: { error: "BadRequest", message: "messages is required (array of {role, content})" }, status: :bad_request
+         end
+
+         # Coerce into an array of message hashes Rails/OpenAI will accept.
+         messages =
+           Array.wrap(raw).map do |m|
+             # If the client sent a JSON string, parse it.
+             m = JSON.parse(m) rescue m
+
+             # If it's ActionController::Parameters, unfurl to a plain hash.
+             m = m.to_unsafe_h if m.respond_to?(:to_unsafe_h)
+             m = m.to_h if m.respond_to?(:to_h) && !m.is_a?(Hash)
+
+             # Keep only role/content, as OpenAI expects
+             {
+               "role" => m["role"] || m[:role],
+               "content" => m["content"] || m[:content]
+             }
+           end
+
+         thread_id = params[:thread_id] || "http-#{request.request_id}"
+         ctx = {
+           tenant_id: params[:tenant_id] || "demo-org",
+           actor_id: params[:actor_id] || "demo-user",
+           thread_id: thread_id
+         }
+
+         adapter = ::Llm::Agent::Rails::Adapters::OpenAIAdapter.new(
+           api_key: ENV.fetch("OPENAI_API_KEY"),
+           model: ::Llm::Agent::Rails.config[:model],
+           temperature: ::Llm::Agent::Rails.config[:temperature]
+         )
+
+         orch = ::Llm::Agent::Rails::Orchestrator.new(
+           adapter: adapter,
+           registry: ::Llm::Agent::Rails.config[:registry],
+           store: ::Llm::Agent::Rails.config[:store]
+         )
+
+         outcome = orch.step(
+           thread_id: ctx[:thread_id],
+           tenant_id: ctx[:tenant_id],
+           actor_id: ctx[:actor_id],
+           messages: messages
+         )
+
+         render json: outcome
+       rescue ActionController::ParameterMissing => e
+         render json: { error: e.message }, status: :bad_request
+       rescue => e
+         render json: { error: e.class.name, message: e.message }, status: :internal_server_error
+       end
+     end
+   end
+ end
data/config/routes.rb ADDED
@@ -0,0 +1,4 @@
+ # frozen_string_literal: true
+ Llm::Agent::Rails::Engine.routes.draw do
+   post "step", to: "chat#step"
+ end
data/lib/generators/llm/agent/install_generator.rb ADDED
@@ -0,0 +1,18 @@
+ # frozen_string_literal: true
+ require "rails/generators"
+
+ module Llm
+   module Agent
+     class InstallGenerator < ::Rails::Generators::Base
+       source_root File.expand_path("templates", __dir__)
+
+       def create_initializer
+         template "llm_agent.rb", "config/initializers/llm_agent.rb"
+       end
+
+       def mount_routes
+         route %(mount Llm::Agent::Rails::Engine => "/llm/agent")
+       end
+     end
+   end
+ end
data/lib/generators/llm/agent/templates/llm_agent.rb ADDED
@@ -0,0 +1,12 @@
+ # frozen_string_literal: true
+ # Configure the Llm agent runtime.
+ Llm::Agent::Rails.configure do |c|
+   c[:model] = "gpt-4o-mini"
+   c[:temperature] = 0
+   c[:store] = Llm::Agent::Rails::Store::Memory.new
+   c[:registry] = Llm::Agent::Rails::Registry.new
+ end
+
+ # Example: register tools here or in separate files.
+ # require Rails.root.join("app/llm_tools/tickets")
+ # LlmTools.register!(Llm::Agent::Rails.config[:registry])
data/lib/llm/agent/rails/adapters/openai_adapter.rb ADDED
@@ -0,0 +1,39 @@
+ # frozen_string_literal: true
+ module Llm
+   module Agent
+     module Rails
+       module Adapters
+         class OpenAIAdapter
+           def initialize(api_key:, model:, temperature: 0)
+             @client = ::OpenAI::Client.new(api_key: api_key)
+             @model = model
+             @temperature = temperature
+           end
+
+           def step(system_prompt:, messages:, tools:, tool_results: [])
+             response = @client.chat.completions.create(
+               model: @model,
+               temperature: @temperature,
+               messages: [{ role: "system", content: system_prompt }] + messages + tool_results,
+               tools: tools,
+               tool_choice: "auto"
+             )
+
+             choice = response.choices.first
+             msg = choice.message
+
+             tool_calls = msg.respond_to?(:tool_calls) ? msg.tool_calls : nil
+             function_call = msg.respond_to?(:function_call) ? msg.function_call : nil
+             content = msg.respond_to?(:content) ? msg.content : nil
+
+             { tool_calls: tool_calls, function_call: function_call, content: content }
+           end
+
+           def tool_result_message(tool_call_id:, name:, content:)
+             { role: "tool", tool_call_id: tool_call_id, name: name, content: content.to_json }
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/llm/agent/rails/engine.rb ADDED
@@ -0,0 +1,10 @@
+ # frozen_string_literal: true
+ module Llm
+   module Agent
+     module Rails
+       class Engine < ::Rails::Engine
+         isolate_namespace ::Llm::Agent
+       end
+     end
+   end
+ end
data/lib/llm/agent/rails/idempotency.rb ADDED
@@ -0,0 +1,13 @@
+ # frozen_string_literal: true
+ require "securerandom"
+ module Llm
+   module Agent
+     module Rails
+       module Idempotency
+         def self.generate(thread_id:)
+           "chat-#{thread_id}-#{SecureRandom.hex(6)}"
+         end
+       end
+     end
+   end
+ end
data/lib/llm/agent/rails/orchestrator.rb ADDED
@@ -0,0 +1,83 @@
+ # frozen_string_literal: true
+ require "json"
+
+ module Llm
+   module Agent
+     module Rails
+       class Orchestrator
+         POLICY = <<~SYS
+           You are a task-oriented assistant that turns chat into structured tool calls.
+
+           Rules:
+           - First, extract as many required fields as possible from the user's latest message and prior context.
+           - Only ask for fields that are truly missing or ambiguous (one concise question at a time).
+           - Derive a short, descriptive title if not supplied (e.g., "Apple Pay checkout failure on mobile").
+           - Prefer sensible defaults when the schema allows (e.g., priority=medium).
+           - When you have all required fields, call exactly one function.
+
+           Reply briefly and clearly.
+         SYS
+
+         def initialize(adapter:, registry:, store:)
+           @adapter, @registry, @store = adapter, registry, store
+         end
+
+         def step(thread_id:, tenant_id:, actor_id:, messages:)
+           prior_tool_msgs = @store.fetch_tool_messages(thread_id)
+
+           res = @adapter.step(
+             system_prompt: POLICY,
+             messages: messages,
+             tools: @registry.tools_for_llm,
+             tool_results: prior_tool_msgs
+           )
+
+           if (calls = res[:tool_calls]).is_a?(Array) && calls.any?
+             call = calls.first
+             fn = call.respond_to?(:function) ? call.function : nil
+             name = fn&.respond_to?(:name) ? fn.name : nil
+             args_json = fn&.respond_to?(:arguments) ? fn.arguments.to_s : "{}"
+             args = args_json.empty? ? {} : JSON.parse(args_json)
+
+             # Split "create_ticket_v1" into the registry name and its version suffix.
+             tool_name, version = (name || "").split(/_(v\d+)\z/i)
+             version ||= "v1"
+
+             tool = @registry.tool(tool_name, version: version)
+             Validators.validate!(tool.schema, args)
+
+             ctx = { tenant_id: tenant_id, actor_id: actor_id, thread_id: thread_id }
+             result = tool.handler.call(args, ctx)
+
+             tool_msg = @adapter.tool_result_message(
+               tool_call_id: call.respond_to?(:id) ? call.id : nil,
+               name: "#{tool_name}_#{version}",
+               content: result
+             )
+             @store.push_tool_message(thread_id, tool_msg)
+
+             return { type: :tool_ran, tool_name: tool_name, result: result }
+           end
+
+           if (fc = res[:function_call])
+             name_with_version = fc.respond_to?(:name) ? fc.name : fc["name"]
+             args_json = (fc.respond_to?(:arguments) ? fc.arguments : fc["arguments"]).to_s
+             args = args_json.empty? ? {} : JSON.parse(args_json)
+
+             tool_name, version = name_with_version.to_s.split(/_(v\d+)\z/i)
+             version ||= "v1"
+
+             tool = @registry.tool(tool_name, version: version)
+             Validators.validate!(tool.schema, args)
+
+             ctx = { tenant_id: tenant_id, actor_id: actor_id, thread_id: thread_id }
+             result = tool.handler.call(args, ctx)
+
+             return { type: :tool_ran, tool_name: tool_name, result: result }
+           end
+
+           { type: :assistant, text: res[:content].to_s }
+         end
+       end
+     end
+   end
+ end
data/lib/llm/agent/rails/registry.rb ADDED
@@ -0,0 +1,39 @@
+ # frozen_string_literal: true
+ module Llm
+   module Agent
+     module Rails
+       Tool = Struct.new(:name, :version, :schema, :description, :handler, keyword_init: true)
+
+       class Registry
+         def initialize
+           @tools = {}
+         end
+
+         def register!(name:, version:, schema:, description:, handler:)
+           @tools[key_for(name, version)] = Tool.new(name:, version:, schema:, description:, handler:)
+         end
+
+         def tool(name, version: "v1")
+           @tools.fetch(key_for(name, version))
+         end
+
+         def tools_for_llm
+           @tools.values.map do |t|
+             {
+               type: "function",
+               function: {
+                 name: "#{t.name}_#{t.version}",
+                 description: t.description,
+                 parameters: t.schema
+               }
+             }
+           end
+         end
+
+         private
+
+         def key_for(name, version) = "#{name}:#{version}"
+       end
+     end
+   end
+ end
data/lib/llm/agent/rails/store/memory.rb ADDED
@@ -0,0 +1,16 @@
+ # frozen_string_literal: true
+ module Llm
+   module Agent
+     module Rails
+       module Store
+         class Memory
+           def initialize
+             @tool_msgs_by_thread = Hash.new { |h, k| h[k] = [] }
+           end
+
+           def fetch_tool_messages(thread_id) = @tool_msgs_by_thread[thread_id]
+           def push_tool_message(thread_id, msg) = @tool_msgs_by_thread[thread_id] << msg
+         end
+       end
+     end
+   end
+ end
data/lib/llm/agent/rails/validators.rb ADDED
@@ -0,0 +1,14 @@
+ # frozen_string_literal: true
+ module Llm
+   module Agent
+     module Rails
+       class Validators
+         def self.validate!(schema, args)
+           schemer = JSONSchemer.schema(schema)
+           errors = schemer.validate(args).to_a
+           raise ArgumentError, "Schema validation failed: #{errors}" if errors.any?
+         end
+       end
+     end
+   end
+ end
data/lib/llm/agent/rails/version.rb ADDED
@@ -0,0 +1,8 @@
+ # frozen_string_literal: true
+ module Llm
+   module Agent
+     module Rails
+       VERSION = "0.1.0"
+     end
+   end
+ end
data/lib/llm/agent/rails.rb ADDED
@@ -0,0 +1,36 @@
+ # frozen_string_literal: true
+ require "rails"
+ require "json"
+ require "json_schemer"
+ require "openai"
+
+ require_relative "rails/version"
+ require_relative "rails/engine"
+
+ require_relative "rails/registry"
+ require_relative "rails/validators"
+ require_relative "rails/idempotency"
+ require_relative "rails/orchestrator"
+ require_relative "rails/adapters/openai_adapter"
+ require_relative "rails/store/memory"
+
+ module Llm
+   module Agent
+     module Rails
+       class << self
+         def config
+           @config ||= {
+             model: "gpt-4o-mini",
+             temperature: 0,
+             store: Llm::Agent::Rails::Store::Memory.new,
+             registry: Llm::Agent::Rails::Registry.new
+           }
+         end
+
+         def configure
+           yield config
+         end
+       end
+     end
+   end
+ end
metadata ADDED
@@ -0,0 +1,104 @@
+ --- !ruby/object:Gem::Specification
+ name: llm-agent-rails
+ version: !ruby/object:Gem::Version
+   version: 0.1.0
+ platform: ruby
+ authors:
+ - Phia Vang
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2025-09-03 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: rails
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '7.0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '7.0'
+ - !ruby/object:Gem::Dependency
+   name: json_schemer
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.3'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.3'
+ - !ruby/object:Gem::Dependency
+   name: openai
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.21'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.21'
+ description: Drop-in Rails engine to register JSON-schema tools, let an Llm fill missing
+   fields, validate input, and execute handlers safely.
+ email:
+ - pnvang@gmail.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - LICENSE
+ - README.md
+ - Rakefile
+ - app/controllers/llm/agent/chat_controller.rb
+ - config/routes.rb
+ - lib/generators/llm/agent/install_generator.rb
+ - lib/generators/llm/agent/templates/llm_agent.rb
+ - lib/llm/agent/rails.rb
+ - lib/llm/agent/rails/adapters/openai_adapter.rb
+ - lib/llm/agent/rails/engine.rb
+ - lib/llm/agent/rails/idempotency.rb
+ - lib/llm/agent/rails/orchestrator.rb
+ - lib/llm/agent/rails/registry.rb
+ - lib/llm/agent/rails/store/memory.rb
+ - lib/llm/agent/rails/validators.rb
+ - lib/llm/agent/rails/version.rb
+ homepage: https://github.com/pnvang/llm-agent-rails
+ licenses:
+ - MIT
+ metadata:
+   allowed_push_host: https://rubygems.org
+   source_code_uri: https://github.com/yourname/llm-agent-rails
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '3.1'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubygems_version: 3.5.3
+ signing_key:
+ specification_version: 4
+ summary: Rails engine for Llm-powered slot filling and tool orchestration.
+ test_files: []