ai-chat 0.2.2 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +7 -7
- data/ai-chat.gemspec +1 -1
- data/lib/ai/amazing_print.rb +71 -0
- data/lib/ai/chat.rb +46 -8
- data/lib/ai-chat.rb +7 -0
- metadata +9 -5
- data/lib/ai/response.rb +0 -17
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c2397a7d43d950bf70abd84c83d0e57b2cadb70fc127985fae4da4a9ecf142b8
+  data.tar.gz: d44a9d1999ce48cd0af92f367f5f0e2f586b8c19d1fc9e95ce8c7aa7e3fffa84
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: fdb13f4e405f46a21591f71ae818544bc60f7bc083e4b3eb9a8a6d57b9eba03995fe8b75828f3972e564dfa3ed3ed09aed4a8b7ae2260b3602cbede9d80afd8e
+  data.tar.gz: 427b5db79e006d0f6f9b20b83472a78c3234118807e4d43d472304426ca2d80d5b189d029924cd548d7004206914a4e63fd52ca93b1944e55e81276309a501d9
data/README.md
CHANGED
@@ -88,7 +88,7 @@ a.generate! # => "Matz is nice and so we are nice" (or similar)
 pp a.messages
 # => [
 #   {:role=>"user", :content=>"If the Ruby community had an official motto, what might it be?"},
-#   {:role=>"assistant", :content=>"Matz is nice and so we are nice", :response =>
+#   {:role=>"assistant", :content=>"Matz is nice and so we are nice", :response => { id=resp_abc... model=gpt-4.1-nano tokens=12 } }
 # ]

 # Continue the conversation
@@ -108,7 +108,7 @@ That's it! You're building something like this:
 [
   {:role => "system", :content => "You are a helpful assistant"},
   {:role => "user", :content => "Hello!"},
-  {:role => "assistant", :content => "Hi there! How can I help you today?", :response =>
+  {:role => "assistant", :content => "Hi there! How can I help you today?", :response => { id=resp_abc... model=gpt-4.1-nano tokens=12 } }
 ]
 ```

@@ -577,14 +577,14 @@ pp t.messages.last
 # => {
 #   :role => "assistant",
 #   :content => "Hello! How can I help you today?",
-#   :response =>
+#   :response => { id=resp_abc... model=gpt-4.1-nano tokens=12 }
 # }

 # Access detailed information
 response = t.last[:response]
-response
-response
-response
+response[:id]    # => "resp_abc123..."
+response[:model] # => "gpt-4.1-nano"
+response[:usage] # => {:prompt_tokens=>5, :completion_tokens=>7, :total_tokens=>12}
 ```

 This information is useful for:
@@ -599,7 +599,7 @@ You can also, if you know a response ID, continue an old conversation by setting
 t = AI::Chat.new
 t.user("Hello!")
 t.generate!
-old_id = t.last[:response]
+old_id = t.last[:response][:id] # => "resp_abc123..."

 # Some time in the future...

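Read together, the README changes above show the new shape: assistant messages now carry a plain Hash under :response instead of a Response object. Below is a minimal usage sketch for 0.2.3; the ID and prompt values are illustrative, and the `previous_response_id=` writer is the attribute assigned in the chat.rb diff further down.

```ruby
require "ai-chat"

t = AI::Chat.new
t.user("Hello!")
t.generate!

# The assistant message now carries a Hash under :response
old_id = t.last[:response][:id] # e.g. "resp_abc123..." (illustrative)

# Later: resume the conversation from that response ID
u = AI::Chat.new
u.previous_response_id = old_id
u.user("Could you expand on that?")
u.generate!
```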
data/ai-chat.gemspec
CHANGED

data/lib/ai/amazing_print.rb
ADDED
@@ -0,0 +1,71 @@
+require "amazing_print"
+
+module AmazingPrint
+  module AI
+    def self.included(base)
+      base.send :alias_method, :cast_without_ai, :cast
+      base.send :alias_method, :cast, :cast_with_ai
+    end
+
+    def cast_with_ai(object, type)
+      case object
+      when ::AI::Chat
+        :ai_object
+      else
+        cast_without_ai(object, type)
+      end
+    end
+
+    private
+
+    def awesome_ai_object(object)
+      case object
+      when ::AI::Chat
+        format_ai_chat(object)
+      else
+        awesome_object(object)
+      end
+    end
+
+    def format_ai_chat(chat)
+      vars = []
+
+      # Format messages with truncation
+      if chat.instance_variable_defined?(:@messages)
+        messages = chat.instance_variable_get(:@messages).map do |msg|
+          truncated_msg = msg.dup
+          if msg[:content].is_a?(String) && msg[:content].length > 80
+            truncated_msg[:content] = msg[:content][0..77] + "..."
+          end
+          truncated_msg
+        end
+        vars << ["@messages", messages]
+      end
+
+      # Add other variables (except sensitive ones)
+      skip_vars = [:@api_key, :@client, :@messages]
+      chat.instance_variables.sort.each do |var|
+        next if skip_vars.include?(var)
+        value = chat.instance_variable_get(var)
+        vars << [var.to_s, value] unless value.nil?
+      end
+
+      format_object(chat, vars)
+    end
+
+    def format_object(object, vars)
+      data = vars.map do |(name, value)|
+        name = colorize(name, :variable) unless @options[:plain]
+        "#{name}: #{inspector.awesome(value)}"
+      end
+
+      if @options[:multiline]
+        "#<#{object.class}\n#{data.map { |line| "  #{line}" }.join("\n")}\n>"
+      else
+        "#<#{object.class} #{data.join(', ')}>"
+      end
+    end
+  end
+end
+
+AmazingPrint::Formatter.send(:include, AmazingPrint::AI)
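The added file teaches amazing_print how to render AI::Chat objects: `cast_with_ai` routes chats to `format_ai_chat`, which truncates long message contents and skips @api_key and @client. A hedged usage sketch, assuming the (not shown) lib/ai-chat.rb change loads this file:

```ruby
require "ai-chat"        # assumed to also require lib/ai/amazing_print.rb in 0.2.3
require "amazing_print"

chat = AI::Chat.new
chat.user("If the Ruby community had an official motto, what might it be?")

# ap now dispatches AI::Chat through format_ai_chat: message contents longer
# than 80 characters are truncated, and @api_key/@client are never printed.
ap chat
```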
data/lib/ai/chat.rb
CHANGED
@@ -8,8 +8,6 @@ require "pathname"
 require "stringio"
 require "fileutils"

-require_relative "response"
-
 module AI
   # :reek:MissingSafeMethod { exclude: [ generate! ] }
   # :reek:TooManyMethods
@@ -101,13 +99,18 @@
     def generate!
       response = create_response

-      chat_response = Response.new(response)
-
       text_response = extract_text_from_response(response)

       image_filenames = extract_and_save_images(response)
-
-
+      response_usage = response.usage.to_h.slice(:input_tokens, :output_tokens, :total_tokens)
+
+      chat_response = {
+        id: response.id,
+        model: response.model,
+        usage: response_usage,
+        total_tokens: response_usage[:total_tokens],
+        images: image_filenames
+      }

       message = if schema
         if text_response.nil? || text_response.empty?
@@ -131,7 +134,7 @@
        )
      end

-      self.previous_response_id =
+      self.previous_response_id = chat_response[:id]

       message
     end
@@ -173,6 +176,38 @@
       "#<#{self.class.name} @messages=#{messages.inspect} @model=#{@model.inspect} @schema=#{@schema.inspect} @reasoning_effort=#{@reasoning_effort.inspect}>"
     end

+    # Support for Ruby's pp (pretty print)
+    def pretty_print(q)
+      q.group(1, "#<#{self.class}", '>') do
+        q.breakable
+
+        # Show messages with truncation
+        q.text "@messages="
+        truncated_messages = @messages.map do |msg|
+          truncated_msg = msg.dup
+          if msg[:content].is_a?(String) && msg[:content].length > 80
+            truncated_msg[:content] = msg[:content][0..77] + "..."
+          end
+          truncated_msg
+        end
+        q.pp truncated_messages
+
+        # Show other instance variables (except sensitive ones)
+        skip_vars = [:@messages, :@api_key, :@client]
+        instance_variables.sort.each do |var|
+          next if skip_vars.include?(var)
+          value = instance_variable_get(var)
+          unless value.nil?
+            q.text ","
+            q.breakable
+            q.text "#{var}="
+            q.pp value
+          end
+        end
+      end
+    end
+
+
     private

     class InputClassificationError < StandardError; end
@@ -210,7 +245,7 @@
     def prepare_messages_for_api
       return messages unless previous_response_id

-      previous_response_index = messages.find_index { |message| message
+      previous_response_index = messages.find_index { |message| message.dig(:response, :id) == previous_response_id }

       if previous_response_index
         messages[(previous_response_index + 1)..] || []
@@ -357,6 +392,8 @@
       tools_list
     end

+    # :reek:UtilityFunction
+    # :reek:ManualDispatch
     def extract_text_from_response(response)
       response.output.flat_map { |output|
         output.respond_to?(:content) ? output.content : []
@@ -366,6 +403,7 @@
     end

     # :reek:FeatureEnvy
+    # :reek:UtilityFunction
     def wrap_schema_if_needed(schema)
       if schema.key?(:format) || schema.key?("format")
         schema
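In 0.2.3, generate! stores the response metadata as a plain Hash on the assistant message, and prepare_messages_for_api matches previous_response_id via Hash#dig. A sketch of the resulting data shape, with illustrative values; the usage keys follow the slice call in the diff above:

```ruby
# Illustrative shape of the assistant message stored by generate! in 0.2.3
message = {
  role: "assistant",
  content: "Hi there! How can I help you today?",
  response: {
    id: "resp_abc123",
    model: "gpt-4.1-nano",
    usage: {input_tokens: 5, output_tokens: 7, total_tokens: 12},
    total_tokens: 12,
    images: []
  }
}

# prepare_messages_for_api finds the message already covered by
# previous_response_id and sends only what follows it:
message.dig(:response, :id) # => "resp_abc123"
```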
data/lib/ai-chat.rb
CHANGED
metadata
CHANGED
@@ -1,13 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ai-chat
 version: !ruby/object:Gem::Version
-  version: 0.2.2
+  version: 0.2.3
 platform: ruby
 authors:
 - Raghu Betina
+autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2025-08-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: openai
@@ -93,20 +94,21 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '11.1'
+description:
 email:
 - raghu@firstdraft.com
 executables: []
 extensions: []
 extra_rdoc_files:
-- LICENSE
 - README.md
+- LICENSE
 files:
 - LICENSE
 - README.md
 - ai-chat.gemspec
 - lib/ai-chat.rb
+- lib/ai/amazing_print.rb
 - lib/ai/chat.rb
-- lib/ai/response.rb
 homepage: https://github.com/firstdraft/ai-chat
 licenses:
 - MIT
@@ -117,6 +119,7 @@ metadata:
   label: AI Chat
   rubygems_mfa_required: 'true'
   source_code_uri: https://github.com/firstdraft/ai-chat
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -131,7 +134,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.4.6
+signing_key:
 specification_version: 4
 summary: A beginner-friendly Ruby interface for OpenAI's API
 test_files: []
data/lib/ai/response.rb
DELETED
@@ -1,17 +0,0 @@
-module AI
-  # :reek:IrresponsibleModule
-  # :reek:TooManyInstanceVariables
-  class Response
-    attr_reader :id, :model, :usage, :total_tokens
-    # :reek:Attribute
-    attr_accessor :images
-
-    def initialize(response)
-      @id = response.id
-      @model = response.model
-      @usage = response.usage.to_h.slice(:input_tokens, :output_tokens, :total_tokens)
-      @total_tokens = @usage[:total_tokens]
-      @images = []
-    end
-  end
-end
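With AI::Response removed, the same metadata now lives in the Hash built by generate!. A hedged before/after sketch of the access pattern, where t is assumed to be a chat that has already called generate!:

```ruby
response = t.last[:response]

# 0.2.2: response was an AI::Response object
#   response.id, response.model, response.usage, response.total_tokens, response.images

# 0.2.3: response is a plain Hash carrying the same fields
response[:id]
response[:model]
response[:usage]        # => {:input_tokens=>..., :output_tokens=>..., :total_tokens=>...}
response[:total_tokens]
response[:images]
```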