prompt_objects 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. checksums.yaml +7 -0
  2. data/CLAUDE.md +108 -0
  3. data/Gemfile +10 -0
  4. data/Gemfile.lock +231 -0
  5. data/IMPLEMENTATION_PLAN.md +1073 -0
  6. data/LICENSE +21 -0
  7. data/README.md +73 -0
  8. data/Rakefile +27 -0
  9. data/design-doc-v2.md +1232 -0
  10. data/exe/prompt_objects +572 -0
  11. data/exe/prompt_objects_mcp +34 -0
  12. data/frontend/.gitignore +3 -0
  13. data/frontend/index.html +13 -0
  14. data/frontend/package-lock.json +4417 -0
  15. data/frontend/package.json +32 -0
  16. data/frontend/postcss.config.js +6 -0
  17. data/frontend/src/App.tsx +95 -0
  18. data/frontend/src/components/CapabilitiesPanel.tsx +44 -0
  19. data/frontend/src/components/ChatPanel.tsx +251 -0
  20. data/frontend/src/components/Dashboard.tsx +83 -0
  21. data/frontend/src/components/Header.tsx +141 -0
  22. data/frontend/src/components/MarkdownMessage.tsx +153 -0
  23. data/frontend/src/components/MessageBus.tsx +55 -0
  24. data/frontend/src/components/ModelSelector.tsx +112 -0
  25. data/frontend/src/components/NotificationPanel.tsx +134 -0
  26. data/frontend/src/components/POCard.tsx +56 -0
  27. data/frontend/src/components/PODetail.tsx +117 -0
  28. data/frontend/src/components/PromptPanel.tsx +51 -0
  29. data/frontend/src/components/SessionsPanel.tsx +174 -0
  30. data/frontend/src/components/ThreadsSidebar.tsx +119 -0
  31. data/frontend/src/components/index.ts +11 -0
  32. data/frontend/src/hooks/useWebSocket.ts +363 -0
  33. data/frontend/src/index.css +37 -0
  34. data/frontend/src/main.tsx +10 -0
  35. data/frontend/src/store/index.ts +246 -0
  36. data/frontend/src/types/index.ts +146 -0
  37. data/frontend/tailwind.config.js +25 -0
  38. data/frontend/tsconfig.json +30 -0
  39. data/frontend/vite.config.ts +29 -0
  40. data/lib/prompt_objects/capability.rb +46 -0
  41. data/lib/prompt_objects/cli.rb +431 -0
  42. data/lib/prompt_objects/connectors/base.rb +73 -0
  43. data/lib/prompt_objects/connectors/mcp.rb +524 -0
  44. data/lib/prompt_objects/environment/exporter.rb +83 -0
  45. data/lib/prompt_objects/environment/git.rb +118 -0
  46. data/lib/prompt_objects/environment/importer.rb +159 -0
  47. data/lib/prompt_objects/environment/manager.rb +401 -0
  48. data/lib/prompt_objects/environment/manifest.rb +218 -0
  49. data/lib/prompt_objects/environment.rb +283 -0
  50. data/lib/prompt_objects/human_queue.rb +144 -0
  51. data/lib/prompt_objects/llm/anthropic_adapter.rb +137 -0
  52. data/lib/prompt_objects/llm/factory.rb +84 -0
  53. data/lib/prompt_objects/llm/gemini_adapter.rb +209 -0
  54. data/lib/prompt_objects/llm/openai_adapter.rb +104 -0
  55. data/lib/prompt_objects/llm/response.rb +61 -0
  56. data/lib/prompt_objects/loader.rb +32 -0
  57. data/lib/prompt_objects/mcp/server.rb +167 -0
  58. data/lib/prompt_objects/mcp/tools/get_conversation.rb +60 -0
  59. data/lib/prompt_objects/mcp/tools/get_pending_requests.rb +54 -0
  60. data/lib/prompt_objects/mcp/tools/inspect_po.rb +73 -0
  61. data/lib/prompt_objects/mcp/tools/list_prompt_objects.rb +37 -0
  62. data/lib/prompt_objects/mcp/tools/respond_to_request.rb +68 -0
  63. data/lib/prompt_objects/mcp/tools/send_message.rb +71 -0
  64. data/lib/prompt_objects/message_bus.rb +97 -0
  65. data/lib/prompt_objects/primitive.rb +13 -0
  66. data/lib/prompt_objects/primitives/http_get.rb +72 -0
  67. data/lib/prompt_objects/primitives/list_files.rb +95 -0
  68. data/lib/prompt_objects/primitives/read_file.rb +81 -0
  69. data/lib/prompt_objects/primitives/write_file.rb +73 -0
  70. data/lib/prompt_objects/prompt_object.rb +415 -0
  71. data/lib/prompt_objects/registry.rb +88 -0
  72. data/lib/prompt_objects/server/api/routes.rb +297 -0
  73. data/lib/prompt_objects/server/app.rb +174 -0
  74. data/lib/prompt_objects/server/file_watcher.rb +113 -0
  75. data/lib/prompt_objects/server/public/assets/index-2acS2FYZ.js +77 -0
  76. data/lib/prompt_objects/server/public/assets/index-DXU5uRXQ.css +1 -0
  77. data/lib/prompt_objects/server/public/index.html +14 -0
  78. data/lib/prompt_objects/server/websocket_handler.rb +619 -0
  79. data/lib/prompt_objects/server.rb +166 -0
  80. data/lib/prompt_objects/session/store.rb +826 -0
  81. data/lib/prompt_objects/universal/add_capability.rb +74 -0
  82. data/lib/prompt_objects/universal/add_primitive.rb +113 -0
  83. data/lib/prompt_objects/universal/ask_human.rb +109 -0
  84. data/lib/prompt_objects/universal/create_capability.rb +219 -0
  85. data/lib/prompt_objects/universal/create_primitive.rb +170 -0
  86. data/lib/prompt_objects/universal/list_capabilities.rb +55 -0
  87. data/lib/prompt_objects/universal/list_primitives.rb +145 -0
  88. data/lib/prompt_objects/universal/modify_primitive.rb +180 -0
  89. data/lib/prompt_objects/universal/request_primitive.rb +287 -0
  90. data/lib/prompt_objects/universal/think.rb +41 -0
  91. data/lib/prompt_objects/universal/verify_primitive.rb +173 -0
  92. data/lib/prompt_objects.rb +62 -0
  93. data/objects/coordinator.md +48 -0
  94. data/objects/greeter.md +30 -0
  95. data/objects/reader.md +33 -0
  96. data/prompt_objects.gemspec +50 -0
  97. data/templates/basic/.gitignore +2 -0
  98. data/templates/basic/manifest.yml +7 -0
  99. data/templates/basic/objects/basic.md +32 -0
  100. data/templates/developer/.gitignore +5 -0
  101. data/templates/developer/manifest.yml +17 -0
  102. data/templates/developer/objects/code_reviewer.md +33 -0
  103. data/templates/developer/objects/coordinator.md +39 -0
  104. data/templates/developer/objects/debugger.md +35 -0
  105. data/templates/empty/.gitignore +5 -0
  106. data/templates/empty/manifest.yml +14 -0
  107. data/templates/empty/objects/.gitkeep +0 -0
  108. data/templates/empty/objects/assistant.md +41 -0
  109. data/templates/minimal/.gitignore +5 -0
  110. data/templates/minimal/manifest.yml +7 -0
  111. data/templates/minimal/objects/assistant.md +41 -0
  112. data/templates/writer/.gitignore +5 -0
  113. data/templates/writer/manifest.yml +17 -0
  114. data/templates/writer/objects/coordinator.md +33 -0
  115. data/templates/writer/objects/editor.md +33 -0
  116. data/templates/writer/objects/researcher.md +34 -0
  117. metadata +343 -0
@@ -0,0 +1,137 @@
1
# frozen_string_literal: true

module PromptObjects
  module LLM
    # Anthropic API adapter for LLM calls.
    #
    # Translates the project's provider-neutral message history
    # (:user / :assistant / :tool roles) into the Anthropic Messages API
    # shape and normalizes the SDK response into Response/ToolCall objects.
    class AnthropicAdapter
      DEFAULT_MODEL = "claude-haiku-4-5"
      DEFAULT_MAX_TOKENS = 4096

      # @param api_key [String, nil] Overrides the ANTHROPIC_API_KEY env var
      # @param model [String, nil] Model name (defaults to DEFAULT_MODEL)
      # @param max_tokens [Integer, nil] Response token cap (defaults to DEFAULT_MAX_TOKENS)
      # @raise [Error] when no key is supplied and ANTHROPIC_API_KEY is unset
      def initialize(api_key: nil, model: nil, max_tokens: nil)
        @api_key = api_key || ENV.fetch("ANTHROPIC_API_KEY") do
          raise Error, "ANTHROPIC_API_KEY environment variable not set"
        end
        @model = model || DEFAULT_MODEL
        @max_tokens = max_tokens || DEFAULT_MAX_TOKENS
        @client = Anthropic::Client.new(api_key: @api_key)
      end

      # Make a chat completion request.
      # @param system [String] System prompt
      # @param messages [Array<Hash>] Conversation history
      # @param tools [Array<Hash>] Tool descriptors (optional)
      # @return [Response] Normalized response
      def chat(system:, messages:, tools: [])
        params = {
          model: @model,
          max_tokens: @max_tokens,
          system: system,
          messages: build_messages(messages)
        }

        # Only include tools if we have any
        if tools.any?
          params[:tools] = convert_tools(tools)
        end

        raw_response = @client.messages.create(**params)
        parse_response(raw_response)
      end

      private

      # Convert provider-neutral history entries into Anthropic message hashes.
      # Entries with roles other than :user/:assistant/:tool are silently skipped.
      def build_messages(messages)
        result = []

        messages.each do |msg|
          case msg[:role]
          when :user
            result << { role: "user", content: msg[:content] }
          when :assistant
            content_blocks = []

            # Add text content if present
            if msg[:content] && !msg[:content].empty?
              content_blocks << { type: "text", text: msg[:content] }
            end

            # Add tool_use blocks if present
            if msg[:tool_calls]
              msg[:tool_calls].each do |tc|
                # Handle both ToolCall objects and Hashes (from database)
                tc_id = tc.respond_to?(:id) ? tc.id : (tc[:id] || tc["id"])
                tc_name = tc.respond_to?(:name) ? tc.name : (tc[:name] || tc["name"])
                tc_args = tc.respond_to?(:arguments) ? tc.arguments : (tc[:arguments] || tc["arguments"] || {})

                content_blocks << {
                  type: "tool_use",
                  id: tc_id,
                  name: tc_name,
                  input: tc_args
                }
              end
            end

            # An assistant turn with neither text nor tool calls is dropped entirely.
            result << { role: "assistant", content: content_blocks } if content_blocks.any?
          when :tool
            # Tool results in Anthropic are sent as user messages with tool_result content blocks
            tool_result_blocks = msg[:results].map do |tool_result|
              {
                type: "tool_result",
                tool_use_id: tool_result[:tool_call_id],
                content: tool_result[:content].to_s
              }
            end
            result << { role: "user", content: tool_result_blocks }
          end
        end

        result
      end

      # Convert OpenAI-style tool definitions to Anthropic format
      # (Anthropic uses a flat hash with :input_schema instead of a
      # {type: "function", function: {...}} wrapper).
      def convert_tools(tools)
        tools.map do |tool|
          if tool[:type] == "function"
            # OpenAI format with function wrapper
            func = tool[:function]
            {
              name: func[:name],
              description: func[:description],
              input_schema: func[:parameters] || { type: "object", properties: {} }
            }
          else
            # Already in Anthropic format or simple format
            {
              name: tool[:name],
              description: tool[:description],
              input_schema: tool[:input_schema] || tool[:parameters] || { type: "object", properties: {} }
            }
          end
        end
      end

      # Normalize the SDK message into a Response: concatenate text blocks
      # and collect tool_use blocks as ToolCall objects.
      def parse_response(raw)
        content = ""
        tool_calls = []

        # Raw response is an Anthropic::Message object with content array
        # Note: SDK returns type as Symbol (:text, :tool_use), not String
        raw.content.each do |block|
          case block.type.to_sym
          when :text
            content += block.text
          when :tool_use
            tool_calls << ToolCall.new(
              id: block.id,
              name: block.name,
              arguments: block.input.is_a?(Hash) ? block.input : block.input.to_h
            )
          end
        end

        Response.new(content: content, tool_calls: tool_calls, raw: raw)
      end
    end
  end
end
@@ -0,0 +1,84 @@
1
# frozen_string_literal: true

module PromptObjects
  module LLM
    # Factory for creating LLM adapters based on provider name.
    # Provides a unified interface for switching between OpenAI, Anthropic, and Gemini.
    class Factory
      PROVIDERS = {
        "openai" => {
          adapter: "OpenAIAdapter",
          env_key: "OPENAI_API_KEY",
          default_model: "gpt-5.2",
          models: %w[gpt-5.2 gpt-4.1 gpt-4.1-mini gpt-4.5-preview o3-mini o1]
        },
        "anthropic" => {
          adapter: "AnthropicAdapter",
          env_key: "ANTHROPIC_API_KEY",
          default_model: "claude-haiku-4-5",
          models: %w[claude-haiku-4-5 claude-sonnet-4-5 claude-opus-4]
        },
        "gemini" => {
          adapter: "GeminiAdapter",
          env_key: "GEMINI_API_KEY",
          default_model: "gemini-3-flash-preview",
          models: %w[gemini-3-flash-preview gemini-2.5-pro gemini-2.5-flash]
        }
      }.freeze

      DEFAULT_PROVIDER = "anthropic"

      # Create an adapter for the given provider.
      # @param provider [String] Provider name (openai, anthropic, gemini)
      # @param model [String, nil] Optional model override
      # @param api_key [String, nil] Optional API key override
      # @return [OpenAIAdapter, AnthropicAdapter, GeminiAdapter]
      # @raise [Error] when the provider name is not recognized
      def self.create(provider: nil, model: nil, api_key: nil)
        name = (provider || DEFAULT_PROVIDER).to_s.downcase
        config = PROVIDERS[name]
        raise Error, "Unknown LLM provider: #{name}" unless config

        # Resolve the adapter class lazily so provider classes can be
        # defined after this file is loaded.
        LLM.const_get(config[:adapter]).new(api_key: api_key, model: model)
      end

      # List available providers.
      # @return [Array<String>]
      def self.providers
        PROVIDERS.keys
      end

      # Get info about a provider.
      # @param provider [String] Provider name
      # @return [Hash, nil] nil when the provider is unknown
      def self.provider_info(provider)
        PROVIDERS[provider.to_s.downcase]
      end

      # Check which providers have API keys configured.
      # @return [Hash<String, Boolean>] provider name => key present in ENV
      def self.available_providers
        PROVIDERS.transform_values { |config| ENV.key?(config[:env_key]) }
      end

      # Get the default model for a provider.
      # @param provider [String] Provider name
      # @return [String, nil]
      def self.default_model(provider)
        PROVIDERS.dig(provider.to_s.downcase, :default_model)
      end

      # Get available models for a provider.
      # @param provider [String] Provider name
      # @return [Array<String>] empty when the provider is unknown
      def self.models_for(provider)
        PROVIDERS.dig(provider.to_s.downcase, :models) || []
      end
    end
  end
end
@@ -0,0 +1,209 @@
1
# frozen_string_literal: true

require "net/http"
require "uri"
require "json"
require "securerandom"

module PromptObjects
  module LLM
    # Google Gemini API adapter for LLM calls.
    # Uses direct HTTP calls to the Gemini REST API.
    #
    # Translates the provider-neutral history into Gemini "contents"
    # (functionCall / functionResponse parts) and normalizes the JSON
    # response into Response/ToolCall objects.
    class GeminiAdapter
      DEFAULT_MODEL = "gemini-3-flash-preview"
      API_BASE_URL = "https://generativelanguage.googleapis.com/v1beta"

      # @param api_key [String, nil] Overrides the GEMINI_API_KEY env var
      # @param model [String, nil] Model name (defaults to DEFAULT_MODEL)
      # @raise [Error] when no key is supplied and GEMINI_API_KEY is unset
      def initialize(api_key: nil, model: nil)
        @api_key = api_key || ENV.fetch("GEMINI_API_KEY") do
          raise Error, "GEMINI_API_KEY environment variable not set"
        end
        @model = model || DEFAULT_MODEL
      end

      # Make a chat completion request.
      # @param system [String] System prompt
      # @param messages [Array<Hash>] Conversation history
      # @param tools [Array<Hash>] Tool descriptors (optional)
      # @return [Response] Normalized response
      def chat(system:, messages:, tools: [])
        body = {
          system_instruction: build_system_instruction(system),
          contents: build_contents(messages)
        }

        # Only include tools if we have any
        if tools.any?
          body[:tools] = build_tools(tools)
          body[:tool_config] = { function_calling_config: { mode: "AUTO" } }
        end

        raw_response = make_request(body)
        parse_response(raw_response)
      end

      private

      # Gemini takes the system prompt as a separate system_instruction
      # object rather than a message in the contents array.
      def build_system_instruction(system)
        {
          parts: [{ text: system }]
        }
      end

      # Convert provider-neutral history entries into Gemini "contents".
      # Assistant turns map to role "model"; tool results map back to role
      # "user" with functionResponse parts.
      def build_contents(messages)
        result = []
        # Track tool calls from the last assistant message for name lookup
        last_tool_calls = {}

        messages.each do |msg|
          case msg[:role]
          when :user
            result << {
              role: "user",
              parts: [{ text: msg[:content] }]
            }
          when :assistant
            parts = []
            parts << { text: msg[:content] } if msg[:content] && !msg[:content].empty?
            if msg[:tool_calls]
              # Store tool calls for potential name lookup in tool results
              last_tool_calls = {}
              msg[:tool_calls].each do |tc|
                # Handle both ToolCall objects and Hashes (from database)
                tc_id = tc.respond_to?(:id) ? tc.id : (tc[:id] || tc["id"])
                tc_name = tc.respond_to?(:name) ? tc.name : (tc[:name] || tc["name"])
                tc_args = tc.respond_to?(:arguments) ? tc.arguments : (tc[:arguments] || tc["arguments"] || {})

                last_tool_calls[tc_id] = tc_name
                parts << {
                  functionCall: {
                    name: tc_name,
                    args: tc_args
                  }
                }
              end
            end
            result << { role: "model", parts: parts } if parts.any?
          when :tool
            # Tool results go back as a user message with functionResponse parts
            parts = msg[:results].map do |tool_result|
              # Get name from result, or look it up from the previous assistant's tool_calls
              # (Gemini matches responses by function name, not call id).
              name = tool_result[:name] || last_tool_calls[tool_result[:tool_call_id]] || "unknown"
              {
                functionResponse: {
                  name: name,
                  response: parse_tool_response_content(tool_result[:content])
                }
              }
            end
            result << { role: "user", parts: parts }
          end
        end

        result
      end

      # Gemini expects functionResponse.response to be a JSON object.
      def parse_tool_response_content(content)
        # Try to parse as JSON, otherwise wrap in a result object
        if content.is_a?(String)
          begin
            JSON.parse(content)
          rescue JSON::ParserError
            { result: content }
          end
        else
          content
        end
      end

      # Convert OpenAI-style tool definitions to Gemini's
      # tools => [{ functionDeclarations: [...] }] shape.
      # NOTE(review): assumes every tool hash has a :function/"function"
      # wrapper — a bare Anthropic-style tool would raise here; confirm callers.
      def build_tools(tools)
        # Convert OpenAI-style tool format to Gemini function declarations
        function_declarations = tools.map do |tool|
          func = tool[:function] || tool["function"]
          {
            name: func[:name] || func["name"],
            description: func[:description] || func["description"],
            parameters: convert_parameters(func[:parameters] || func["parameters"])
          }
        end

        [{ functionDeclarations: function_declarations }]
      end

      # Normalize an OpenAPI-style parameter schema, accepting symbol or
      # string keys and defaulting the type to "object".
      def convert_parameters(params)
        return {} unless params

        # Gemini uses OpenAPI-style parameters, similar to OpenAI
        # but we need to ensure proper structure
        result = {
          type: params[:type] || params["type"] || "object"
        }

        if params[:properties] || params["properties"]
          result[:properties] = params[:properties] || params["properties"]
        end

        if params[:required] || params["required"]
          result[:required] = params[:required] || params["required"]
        end

        result
      end

      # POST the request body to the generateContent endpoint.
      # @raise [Error] on any non-2xx HTTP response
      def make_request(body)
        uri = URI("#{API_BASE_URL}/models/#{@model}:generateContent?key=#{@api_key}")

        http = Net::HTTP.new(uri.host, uri.port)
        http.use_ssl = true
        http.read_timeout = 120
        http.open_timeout = 30

        request = Net::HTTP::Post.new(uri)
        request["Content-Type"] = "application/json"
        request.body = body.to_json

        response = http.request(request)

        unless response.is_a?(Net::HTTPSuccess)
          error_body = begin
            JSON.parse(response.body)
          rescue StandardError
            response.body
          end
          raise Error, "Gemini API error (#{response.code}): #{error_body}"
        end

        JSON.parse(response.body)
      end

      # Normalize the decoded JSON into a Response: concatenate text parts
      # of the first candidate and collect functionCall parts as ToolCalls.
      def parse_response(raw)
        candidate = raw.dig("candidates", 0)
        content_obj = candidate&.dig("content")

        # No candidate content (e.g. safety block) yields an empty Response.
        return Response.new(content: "", raw: raw) unless content_obj

        text_content = ""
        tool_calls = []

        content_obj["parts"]&.each do |part|
          if part["text"]
            text_content += part["text"]
          elsif part["functionCall"]
            tool_calls << parse_function_call(part["functionCall"])
          end
        end

        Response.new(content: text_content, tool_calls: tool_calls, raw: raw)
      end

      def parse_function_call(fc)
        # Gemini doesn't use tool_call_id like OpenAI, so we generate one
        # based on the function name and a random suffix
        ToolCall.new(
          id: "call_#{fc['name']}_#{SecureRandom.hex(8)}",
          name: fc["name"],
          arguments: fc["args"] || {}
        )
      end
    end
  end
end
@@ -0,0 +1,104 @@
1
# frozen_string_literal: true

module PromptObjects
  module LLM
    # OpenAI API adapter for LLM calls.
    #
    # Converts the provider-neutral history into OpenAI chat messages and
    # normalizes the raw completion into Response/ToolCall objects.
    class OpenAIAdapter
      DEFAULT_MODEL = "gpt-5.2"

      # @param api_key [String, nil] Overrides the OPENAI_API_KEY env var
      # @param model [String, nil] Model name (defaults to DEFAULT_MODEL)
      # @raise [Error] when no key is supplied and OPENAI_API_KEY is unset
      def initialize(api_key: nil, model: nil)
        @api_key = api_key || ENV.fetch("OPENAI_API_KEY") do
          raise Error, "OPENAI_API_KEY environment variable not set"
        end
        @model = model || DEFAULT_MODEL
        @client = OpenAI::Client.new(access_token: @api_key)
      end

      # Make a chat completion request.
      # @param system [String] System prompt
      # @param messages [Array<Hash>] Conversation history
      # @param tools [Array<Hash>] Tool descriptors (optional)
      # @return [Response] Normalized response
      def chat(system:, messages:, tools: [])
        request = {
          model: @model,
          messages: build_messages(system, messages)
        }

        # Tools are passed through untouched; only attach them when present.
        if tools.any?
          request[:tools] = tools
          request[:tool_choice] = "auto"
        end

        parse_response(@client.chat(parameters: request))
      end

      private

      # Build the OpenAI messages array: system prompt first, then the
      # converted history. Unrecognized roles are skipped.
      def build_messages(system, messages)
        messages.each_with_object([{ role: "system", content: system }]) do |msg, out|
          case msg[:role]
          when :user
            out << { role: "user", content: msg[:content] }
          when :assistant
            out << assistant_payload(msg)
          when :tool
            # Each tool result becomes its own role="tool" message.
            msg[:results].each do |res|
              out << {
                role: "tool",
                tool_call_id: res[:tool_call_id],
                content: res[:content].to_s
              }
            end
          end
        end
      end

      # Convert one assistant history entry into an OpenAI message hash.
      def assistant_payload(msg)
        payload = { role: "assistant" }
        payload[:content] = msg[:content] if msg[:content]
        calls = msg[:tool_calls]
        payload[:tool_calls] = calls.map { |call| tool_call_payload(call) } if calls
        payload
      end

      # Convert a ToolCall object or Hash (from database) into OpenAI's
      # {id, type, function} wire format. Arguments are JSON-encoded.
      def tool_call_payload(call)
        id, name, args =
          if call.respond_to?(:id)
            [call.id, call.name, call.arguments]
          else
            [
              call[:id] || call["id"],
              call[:name] || call["name"],
              call[:arguments] || call["arguments"] || {}
            ]
          end

        { id: id, type: "function", function: { name: name, arguments: args.to_json } }
      end

      # Normalize the raw completion hash into a Response. A missing
      # message (malformed response) yields an empty Response.
      def parse_response(raw)
        message = raw.dig("choices", 0, "message")
        return Response.new(content: "", raw: raw) unless message

        Response.new(
          content: message["content"] || "",
          tool_calls: parse_tool_calls(message["tool_calls"]),
          raw: raw
        )
      end

      # Convert raw tool_call hashes into ToolCall objects; nil input => [].
      def parse_tool_calls(raw_tool_calls)
        Array(raw_tool_calls).map do |call|
          ToolCall.new(
            id: call["id"],
            name: call.dig("function", "name"),
            arguments: JSON.parse(call.dig("function", "arguments") || "{}")
          )
        end
      end
    end
  end
end
@@ -0,0 +1,61 @@
1
# frozen_string_literal: true

module PromptObjects
  module LLM
    # Normalized response from an LLM API call.
    # Wraps provider-specific responses into a common interface.
    class Response
      attr_reader :content, :tool_calls, :raw

      # @param content [String] Text content of the response
      # @param tool_calls [Array<ToolCall>] Requested tool invocations
      # @param raw [Object, nil] The untouched provider response
      def initialize(content:, tool_calls: [], raw: nil)
        @content = content
        @tool_calls = tool_calls
        @raw = raw
      end

      # Check if the response includes tool calls.
      # @return [Boolean]
      def tool_calls?
        !@tool_calls.empty?
      end
    end

    # Represents a single tool call from the LLM.
    # Supports both method access (.id) and hash access ([:id]) for compatibility
    # with code that may receive either ToolCall objects or Hashes from the DB.
    class ToolCall
      attr_reader :id, :name, :arguments

      def initialize(id:, name:, arguments:)
        @id = id
        @name = name
        @arguments = arguments
      end

      # Allow hash-style access for compatibility with code expecting Hashes.
      # Unknown keys return nil, mirroring Hash behavior.
      def [](key)
        to_h[key.to_sym]
      end

      # Convert to a plain Hash (for serialization)
      def to_h
        { id: @id, name: @name, arguments: @arguments }
      end

      # Create a ToolCall from a Hash (for deserialization).
      # Passing an existing ToolCall returns it unchanged.
      def self.from_hash(hash)
        return hash if hash.is_a?(ToolCall)

        # Accept either symbol or string keys (DB rows may use strings).
        read = ->(key) { hash[key] || hash[key.to_s] }
        new(
          id: read.call(:id),
          name: read.call(:name),
          arguments: read.call(:arguments) || {}
        )
      end
    end
  end
end
@@ -0,0 +1,32 @@
1
# frozen_string_literal: true

module PromptObjects
  # Loads and parses prompt object markdown files.
  # Extracts YAML frontmatter (config) and markdown body (soul).
  class Loader
    # Load a prompt object from a markdown file.
    # @param path [String] Path to the .md file
    # @return [Hash] Parsed data with :config, :body, and :path
    # @raise [Error] if the file does not exist
    def self.load(path)
      raise Error, "File not found: #{path}" unless File.exist?(path)

      content = File.read(path, encoding: "UTF-8")
      parsed = FrontMatterParser::Parser.new(:md).call(content)

      {
        # front_matter is nil when the file has no frontmatter block;
        # normalize to an empty config hash.
        config: parsed.front_matter || {},
        body: parsed.content.strip,
        path: path
      }
    end

    # Load all prompt objects from a directory.
    #
    # Paths are sorted before loading: Dir.glob's order is
    # filesystem-dependent, so sorting keeps load order deterministic
    # across platforms.
    #
    # @param dir [String] Directory path
    # @return [Array<Hash>] Array of parsed prompt objects
    # @raise [Error] if the directory does not exist
    def self.load_all(dir)
      raise Error, "Directory not found: #{dir}" unless Dir.exist?(dir)

      Dir.glob(File.join(dir, "*.md")).sort.map { |path| load(path) }
    end
  end
end