smart_prompt 0.4.0 → 0.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 67ba5c17240bbbfcf74c5aaabf100f01c1c40c360f77e84b15169ee83033018e
-   data.tar.gz: bc43cc30cabdc5e4ba62e1b07b2851482a3dc472e50efc10a56206d73b842a55
+   metadata.gz: 396c6097973289a34143e86b65f428d55919d0e755916992a5c714e289ebf5a2
+   data.tar.gz: 5d2c2d81b486e1fb05b53f116047ab1f90be77c9536fd6440a96b573bdad00c3
  SHA512:
-   metadata.gz: ebd50d0f9f83dbc8c3315b5edb22bf6fb7920c7bd8ae82d3c8bb90ee78b10deb2ff9945a125d248f453b92894e23b5d61f442b8191621b8a044d95468252e796
-   data.tar.gz: 03f784ba90fcdc629694c88448360faf08c9eede708df7c3ebdfe736917e3acb5dc12ff24c55b89a14b2ce2c9721b70d986e35672ce25c001d69ad883bf8858b
+   metadata.gz: c6880395149678a195ea6efc46a81623d61c14c56534936041a625616a9e6b716597c078ccee0ac0d47c3b2a8e67d272742ee36940fa64321426799db0b26e4d
+   data.tar.gz: 63f0b8ae0f6f62363443731cae6fed3eafe746c463d259c9b88eee8563d6ef0cdcd84e684d6daeedd7329c3dcab84748ab62358fa2935b9d0278ec347df45ffb
data/CHANGELOG.md CHANGED
@@ -5,6 +5,14 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+ ## [0.4.1] - 2026-04-22
+ ### Fixed
+ - Re-release package with `lib/smart_prompt/anthropic_adapter.rb`, which is required by the gem entrypoint.
+
+ ## [0.4.0] - 2026-04-22
+ ### Added
+ - Anthropic adapter support.
+
  ## [0.3.6] - 2026-04-08
  ### Changed
  - Bumped `ruby-openai` dependency from `8.1.0` to `8.3.0`
data/lib/smart_prompt/anthropic_adapter.rb ADDED
@@ -0,0 +1,298 @@
+ require "net/http"
+ require "json"
+ require "uri"
+
+ module SmartPrompt
+   class AnthropicAdapter < LLMAdapter
+     DEFAULT_URL = "https://api.anthropic.com"
+     DEFAULT_VERSION = "2023-06-01"
+     DEFAULT_MAX_TOKENS = 4096
+
+     def initialize(config)
+       super
+       @api_key = resolve_api_key(@config["api_key"]) || ENV["ANTHROPIC_API_KEY"]
+       @url = (@config["url"] || DEFAULT_URL).chomp("/")
+       @anthropic_version = @config["anthropic_version"] || DEFAULT_VERSION
+       @request_timeout = @config["request_timeout"] || 240
+
+       raise LLMAPIError, "Invalid Anthropic configuration: missing api_key" if @api_key.nil? || @api_key.empty?
+
+       @messages_uri = URI("#{@url}/v1/messages")
+       SmartPrompt.logger.info "Successful creation an Anthropic client."
+     rescue URI::InvalidURIError => e
+       SmartPrompt.logger.error "Failed to initialize Anthropic client: #{e.message}"
+       raise LLMAPIError, "Invalid Anthropic configuration: #{e.message}"
+     rescue LLMAPIError
+       raise
+     rescue => e
+       SmartPrompt.logger.error "Failed to initialize Anthropic client: #{e.message}"
+       raise Error, "Unexpected error initializing Anthropic client: #{e.message}"
+     end
+
+     def send_request(messages, model = nil, temperature = 0.7, tools = nil, proc = nil)
+       SmartPrompt.logger.info "AnthropicAdapter: Sending request to Anthropic"
+       temperature = 0.7 if temperature.nil?
+       model_name = model || @config["model"]
+       SmartPrompt.logger.info "AnthropicAdapter: Using model #{model_name}"
+
+       parameters = build_parameters(messages, model_name, temperature, tools, !proc.nil?)
+       SmartPrompt.logger.info "Send parameters is: #{parameters}"
+
+       response = post_messages(parameters, proc)
+       SmartPrompt.logger.info "AnthropicAdapter: Received response from Anthropic"
+
+       return if proc
+
+       @last_response = response
+       extract_content(response)
+     rescue JSON::ParserError
+       SmartPrompt.logger.error "Failed to parse Anthropic API response"
+       raise LLMAPIError, "Failed to parse Anthropic API response"
+     rescue LLMAPIError
+       raise
+     rescue => e
+       SmartPrompt.logger.error "Unexpected error during Anthropic request: #{e.message}"
+       raise Error, "Unexpected error during Anthropic request: #{e.message}"
+     ensure
+       SmartPrompt.logger.info "Successful send a message"
+     end
+
+     private
+
+     def resolve_api_key(api_key)
+       return api_key unless api_key.is_a?(String)
+
+       match = api_key.match(/\AENV\[(["']?)([A-Za-z_][A-Za-z0-9_]*)\1\]\z/)
+       return ENV[match[2]] if match
+
+       api_key
+     end
+
+     def build_parameters(messages, model_name, temperature, tools, stream)
+       anthropic_messages, system = normalize_messages(messages)
+       parameters = {
+         model: model_name,
+         messages: anthropic_messages,
+         max_tokens: @config["max_tokens"] || @config["max_completion_tokens"] || DEFAULT_MAX_TOKENS,
+         temperature: @config["temperature"] || temperature,
+       }
+       parameters[:system] = system unless system.empty?
+       parameters[:tools] = normalize_tools(tools) if tools
+       parameters[:stream] = true if stream
+       parameters
+     end
+
+     def normalize_messages(messages)
+       system_messages = []
+       anthropic_messages = []
+
+       messages.each do |message|
+         role = message["role"] || message[:role]
+         content = message["content"] || message[:content]
+
+         case role.to_s
+         when "system"
+           system_messages << content.to_s
+         when "user", "assistant"
+           anthropic_messages << {
+             role: role.to_s,
+             content: normalize_content(content),
+           }
+         when "tool"
+           anthropic_messages << {
+             role: "user",
+             content: normalize_tool_result(message),
+           }
+         else
+           anthropic_messages << {
+             role: "user",
+             content: normalize_content(content),
+           }
+         end
+       end
+
+       [anthropic_messages, system_messages.join("\n\n")]
+     end
+
+     def normalize_content(content)
+       return content if content.is_a?(Array)
+
+       content.to_s
+     end
+
+     def normalize_tool_result(message)
+       tool_use_id = message["tool_call_id"] || message[:tool_call_id]
+       content = message["content"] || message[:content]
+
+       [{
+         type: "tool_result",
+         tool_use_id: tool_use_id.to_s,
+         content: content.to_s,
+       }]
+     end
+
+     def normalize_tools(tools)
+       tools.map do |tool|
+         function = tool["function"] || tool[:function] || tool
+         {
+           name: function["name"] || function[:name],
+           description: function["description"] || function[:description],
+           input_schema: function["parameters"] || function[:parameters] || {},
+         }
+       end
+     end
+
+     def post_messages(parameters, stream_proc)
+       http = Net::HTTP.new(@messages_uri.host, @messages_uri.port)
+       http.use_ssl = @messages_uri.scheme == "https"
+       http.read_timeout = @request_timeout
+       http.open_timeout = @request_timeout
+
+       request = Net::HTTP::Post.new(@messages_uri)
+       request["Content-Type"] = "application/json"
+       request["x-api-key"] = @api_key
+       request["anthropic-version"] = @anthropic_version
+       request.body = JSON.generate(parameters)
+
+       if stream_proc
+         handle_streaming_response(http, request, stream_proc)
+       else
+         handle_response(http.request(request))
+       end
+     rescue SocketError => e
+       SmartPrompt.logger.error "Failed to connect to Anthropic API: #{e.message}"
+       raise LLMAPIError, "Network error: Unable to connect to Anthropic API"
+     rescue Net::OpenTimeout, Net::ReadTimeout
+       SmartPrompt.logger.error "Request to Anthropic API timed out"
+       raise LLMAPIError, "Request to Anthropic API timed out"
+     end
+
+     def handle_response(response)
+       body = JSON.parse(response.body)
+       return body if response.is_a?(Net::HTTPSuccess)
+
+       message = body.dig("error", "message") || response.message
+       SmartPrompt.logger.error "Anthropic API error: #{message}"
+       raise LLMAPIError, "Anthropic API error: #{message}"
+     end
+
+     def handle_streaming_response(http, request, stream_proc)
+       accumulated_response = nil
+
+       http.request(request) do |response|
+         unless response.is_a?(Net::HTTPSuccess)
+           body = response.body.to_s.empty? ? {} : JSON.parse(response.body)
+           message = body.dig("error", "message") || response.message
+           SmartPrompt.logger.error "Anthropic API error: #{message}"
+           raise LLMAPIError, "Anthropic API error: #{message}"
+         end
+
+         response.read_body do |chunk|
+           chunk.each_line do |line|
+             next unless line.start_with?("data:")
+
+             data = line.delete_prefix("data:").strip
+             next if data.empty?
+
+             event = JSON.parse(data)
+             accumulated_response = event if event["type"] == "message_start"
+             stream_proc.call(openai_stream_chunk(event), chunk.bytesize)
+           end
+         end
+       end
+
+       accumulated_response
+     end
+
+     def openai_stream_chunk(event)
+       case event["type"]
+       when "message_start"
+         message = event["message"] || {}
+         {
+           "id" => message["id"],
+           "object" => "chat.completion.chunk",
+           "created" => Time.now.to_i,
+           "model" => message["model"],
+           "choices" => [{
+             "index" => 0,
+             "delta" => {},
+           }],
+           "usage" => message["usage"],
+         }
+       when "content_block_delta"
+         {
+           "choices" => [{
+             "index" => 0,
+             "delta" => {
+               "content" => event.dig("delta", "text").to_s,
+             },
+           }],
+         }
+       else
+         {
+           "choices" => [{
+             "index" => 0,
+             "delta" => {},
+           }],
+         }
+       end
+     end
+
+     def extract_content(response)
+       text_parts = []
+       tool_calls = []
+
+       response.fetch("content", []).each do |block|
+         case block["type"]
+         when "text"
+           text_parts << block["text"].to_s
+         when "tool_use"
+           tool_calls << openai_tool_call(block)
+         else
+           text_parts << block.to_s
+         end
+       end
+
+       content = text_parts.join
+       return content if tool_calls.empty?
+
+       openai_response(response, content, tool_calls)
+     end
+
+     def openai_response(response, content, tool_calls)
+       {
+         "id" => response["id"],
+         "object" => "chat.completion",
+         "created" => Time.now.to_i,
+         "model" => response["model"],
+         "choices" => [{
+           "index" => 0,
+           "message" => {
+             "role" => "assistant",
+             "content" => content,
+             "tool_calls" => tool_calls,
+           },
+           "finish_reason" => openai_finish_reason(response["stop_reason"]),
+         }],
+         "usage" => response["usage"],
+       }
+     end
+
+     def openai_tool_call(block)
+       {
+         "id" => block["id"],
+         "type" => "function",
+         "function" => {
+           "name" => block["name"],
+           "arguments" => JSON.generate(block["input"] || {}),
+         },
+       }
+     end
+
+     def openai_finish_reason(stop_reason)
+       return "tool_calls" if stop_reason == "tool_use"
+
+       stop_reason
+     end
+   end
+ end
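
A minimal sketch of exercising the new adapter directly, based only on the configuration keys (`api_key`, `url`, `anthropic_version`, `model`, `max_tokens`) and methods visible in the added file. It assumes that `LLMAdapter#initialize` stores its argument in `@config` and that the adapter can be constructed outside SmartPrompt's normal configuration flow; the model id shown is a placeholder, not a value taken from this gem.

    # Hypothetical direct use; in normal use SmartPrompt builds adapters from
    # its own configuration. The model id below is only an example.
    require "smart_prompt"

    config = {
      "api_key" => 'ENV["ANTHROPIC_API_KEY"]', # resolved by resolve_api_key
      "url" => "https://api.anthropic.com",
      "anthropic_version" => "2023-06-01",
      "model" => "claude-example-model",       # placeholder model id
      "max_tokens" => 1024,
    }
    adapter = SmartPrompt::AnthropicAdapter.new(config)

    messages = [
      { "role" => "system", "content" => "Answer in one short sentence." },
      { "role" => "user", "content" => "What does this gem do?" },
    ]

    # Non-streaming call: returns the joined text blocks, or an OpenAI-style
    # hash (including "tool_calls") when the model stops for tool use.
    puts adapter.send_request(messages)

    # Streaming call: each Anthropic SSE event is reshaped into an OpenAI-style
    # chat.completion.chunk before being handed to the proc.
    adapter.send_request(messages, nil, 0.7, nil, proc { |chunk, _bytes|
      print chunk.dig("choices", 0, "delta", "content")
    })

The `openai_*` helpers keep both return shapes aligned with OpenAI-style chat completions, which appears intended to let callers written against the gem's existing OpenAI-based adapters consume Anthropic responses unchanged.
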
data/lib/smart_prompt/version.rb CHANGED
@@ -1,3 +1,3 @@
  module SmartPrompt
-   VERSION = "0.4.0"
+   VERSION = "0.4.2"
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: smart_prompt
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.4.0
4
+ version: 0.4.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - zhuang biaowei
@@ -107,6 +107,7 @@ files:
107
107
  - README.md
108
108
  - Rakefile
109
109
  - lib/smart_prompt.rb
110
+ - lib/smart_prompt/anthropic_adapter.rb
110
111
  - lib/smart_prompt/api_handler.rb
111
112
  - lib/smart_prompt/conversation.rb
112
113
  - lib/smart_prompt/db_adapter.rb