instruct 0.1.0a1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +202 -0
- data/README.md +387 -0
- data/SCRATCHPAD.md +489 -0
- data/lib/instruct/compile_erb.rb +39 -0
- data/lib/instruct/env.rb +27 -0
- data/lib/instruct/error.rb +4 -0
- data/lib/instruct/gen/completion_request.rb +63 -0
- data/lib/instruct/gen/completion_response.rb +66 -0
- data/lib/instruct/gen/gen.rb +70 -0
- data/lib/instruct/gen/generate_completion.rb +61 -0
- data/lib/instruct/helpers/erb_helper.rb +29 -0
- data/lib/instruct/helpers/gen_helper.rb +22 -0
- data/lib/instruct/helpers/helpers.rb +9 -0
- data/lib/instruct/helpers/model_helper.rb +13 -0
- data/lib/instruct/helpers/refinements.rb +54 -0
- data/lib/instruct/llms/anthropic/completion_model.rb +107 -0
- data/lib/instruct/llms/anthropic/messages_completion_response.rb +35 -0
- data/lib/instruct/llms/anthropic/middleware.rb +91 -0
- data/lib/instruct/llms/openai/chat_completion_response.rb +21 -0
- data/lib/instruct/llms/openai/completion_model.rb +129 -0
- data/lib/instruct/llms/openai/completion_response.rb +20 -0
- data/lib/instruct/llms/openai/middleware.rb +52 -0
- data/lib/instruct/middleware/chat_completion_middleware.rb +90 -0
- data/lib/instruct/middleware/chomp_middleware.rb +56 -0
- data/lib/instruct/model.rb +21 -0
- data/lib/instruct/prompt.rb +217 -0
- data/lib/instruct/rails/active_job_object_serializer.rb +23 -0
- data/lib/instruct/rails/active_record_coders.rb +36 -0
- data/lib/instruct/railtie.rb +15 -0
- data/lib/instruct/utils/middleware_chain.rb +48 -0
- data/lib/instruct/utils/serializable_with_version.rb +73 -0
- data/lib/instruct/utils/serializer.rb +70 -0
- data/lib/instruct/utils/symbolize_keys.rb +22 -0
- data/lib/instruct/utils/variables.rb +37 -0
- data/lib/instruct/version.rb +3 -0
- data/lib/instruct.rb +74 -0
- metadata +122 -0
data/lib/instruct/llms/openai/middleware.rb
@@ -0,0 +1,52 @@
class Instruct::OpenAI
  class Middleware
    include Instruct::Serializable
    set_instruct_class_id 101
    CLIENT_PARAMS = %i[api_type api_version access_token organization_id uri_base request_timeout extra_headers].freeze
    REQUEST_PARAMS = %i[store metadata frequency_penalty logit_bias logprobs top_logprobs max_completion_tokens n prediction presence_penalty response_format seed service_tier stop stream_options temperature top_p user].freeze

    def call(req, _next:)
      raise Instruct::Todo, "Non text modalities not supported yet, consider opening a pull request" if req.env[:modalities] && req.env[:modalities] != [:text] && req.env[:modalities] != ["text"]
      raise Instruct::Todo, "Tools are not supported yet, consider opening a pull request" if req.env[:tools] || req.env[:tool_choice] || req.env[:parallel_tool_calls] || req.env[:function_call] || req.env[:functions]

      # Extract client options
      client_options = filter_env_keys(req, CLIENT_PARAMS)
      req.env[:openai_client_opts] = client_options

      # Handle stop_chars conversion
      if req.env[:stop_chars].is_a?(String)
        req.env[:stop] = req.env[:stop_chars].split('')
      end

      # Extract request options
      request_options = filter_env_keys(req, REQUEST_PARAMS)
      req.env[:openai_args] = request_options

      # Handle deprecated arguments
      deprecated_args = [:max_tokens, :function_call, :functions]
      req.env[:openai_deprecated_args] = filter_env_keys(req, deprecated_args)

      req.add_prompt_transform do |attr_str|
        transform(attr_str)
      end

      _next.call(req)
    end

    def transform(prompt_obj)
      if prompt_obj.is_a?(Hash) && prompt_obj[:messages].is_a?(Array)
        prompt_obj[:messages].map! do |message|
          { role: message.keys.first, content: message.values.first.to_s }
        end
      end
      prompt_obj
    end

    private

    def filter_env_keys(req, keys)
      req.env.select { |k, _| keys.include?(k) }
    end
  end
end
data/lib/instruct/middleware/chat_completion_middleware.rb
@@ -0,0 +1,90 @@
module Instruct
  # Converts prompt plain text entries into role-based conversation entries
  # See {file:docs/prompt-completion-middleware.md#label-Chat+Completion+Middleware}
  class ChatCompletionMiddleware
    include Instruct::Serializable
    set_instruct_class_id 4

    def initialize(roles: [:system, :user, :assistant])
      @roles = roles
    end

    def call(req, _next:)
      role_changes = []

      control_str = req.prompt.filter do |attrs|
        attrs[:safe] == true
      end

      # scan the control string for role changes, defined
      # as newlines followed by whitespace, then a role name,
      # then a colon, then an optional single space which is chomped.
      role_change_re = /(?:^|\n)\s*(\w+):\s?/
      control_str.scan(role_change_re) do |match|
        range_of_full_match = Regexp.last_match.offset(0)

        ranges = control_str.original_ranges_for(range_of_full_match[0]..range_of_full_match[1] - 1)
        start = ranges.first.first
        finish = ranges.last.last

        role = match[0].to_sym
        if @roles.include?(role)
          role_changes << { role: role, control_start: start, control_finish: finish }
        end
      end

      start_pos = 0
      role = @roles.first
      # TODO: we want to make it so that if no role changes are defined we fall back to the
      # default user role, and use the system: to define the system arg.
      role_changes.each do |change|
        if change[:control_start] > start_pos
          if role == :system
            req.env[:system_from_prompt] = req.prompt[start_pos...change[:control_start]]
          end
          req.prompt.add_attrs(start_pos...change[:control_start], role: role)
        end
        start_pos = change[:control_finish] + 1
        role = change[:role]
      end
      if role == :system
        req.env[:system_from_prompt] = req.prompt[start_pos...req.prompt.length]
      end
      req.prompt.add_attrs(start_pos...req.prompt.length, role: role)

      req.add_prompt_transform do |attr_str|
        transform(attr_str)
      end

      if req.prompt.attrs_at(req.prompt.length - 1)[:role] != :assistant && @roles.include?(:assistant)
        req.prompt.safe_concat(Prompt.new("\nassistant: ", source: :chat_completion_middleware))
      end

      # need to work out pos of each entry in text prompt
      _next.call(req)
    end

    def transform(prompt_str)
      # TODO: once there is an attributed string presenter
      # we can replace this
      messages = []
      message_range = 0...0
      role = nil
      prompt_str.each_char.with_index do |_char, idx|
        prompt_attrs = prompt_str.attrs_at(idx)
        if prompt_attrs[:role] != role
          messages << { role => prompt_str[message_range].remove_attrs(:role) } unless message_range.size.zero? || role.nil?
          role = prompt_attrs[:role]
          message_range = idx...idx
        end
        message_range = message_range.first..idx
      end
      messages << { role => prompt_str[message_range].remove_attrs(:role) } unless message_range.size.zero? || role.nil?
      { messages: messages }
    end
  end
end
data/lib/instruct/middleware/chomp_middleware.rb
@@ -0,0 +1,56 @@
module Instruct
  # This middleware hides the whitespace at the end of the current prompt by
  # adding a chomp_hide attribute to the whitespace with the id of the current
  # request. The prompt_transform then removes the whitespace from the prompt:
  # the prompt object removes characters marked with chomp_hide for the same
  # request. If the response does not have the same whitespace at the
  # beginning, the chomp_hide attribute is removed from the prompt. If the
  # response does have the same whitespace, it is chomped from the response.
  class ChompMiddleware
    include Instruct::Serializable
    set_instruct_class_id 5

    def call(req, _next:)
      whitespace = ''
      # TODO: this should only be for non-hidden whitespace
      req.prompt.to_s.match(/(\s+)$/) do |match|
        whitespace = match[0]
      end
      range = req.prompt.length - whitespace.length...req.prompt.length

      req.prompt.add_attrs(range, chomped: req.id)
      req.prompt.hide_range_from_prompt(range, by: self.class)

      # TODO: maybe we work out a way to pause the stream from
      # hitting upstream handlers until it feels good about it
      trimming_whitespace = true
      unhidden = false
      req.add_stream_handler do |completion, chunk|
        if range.size.positive? && !unhidden
          req.prompt.unhide_range_from_prompt(range, by: self.class)
          unhidden = true
        end
        next completion if !trimming_whitespace
        next false if completion.length < whitespace.length && whitespace.start_with?(completion.to_s)
        # this will stop all upstream handlers, generally not a great idea, but
        # for this middleware it is fine
        if completion.length >= whitespace.length
          trimming_whitespace = false
          if completion.start_with?(whitespace)
            completion[...whitespace.length] = ''
            next false if completion.empty?
          end
        end
        completion
      end

      _next.call(req)
    end
  end
end
data/lib/instruct/model.rb
@@ -0,0 +1,21 @@
module Instruct
  module Model
    def self.from_string_or_model(model)
      if model.is_a?(String)
        from_string(model)
      elsif model.respond_to?(:call)
        model
      else
        raise ArgumentError, "Model must be a model name string or respond to call."
      end
    end

    def self.from_string(string, **kwargs)
      if string.include?("claude") || string.include?("anthropic")
        Instruct::Anthropic.new(string, **kwargs)
      else
        Instruct::OpenAI.new(string, **kwargs)
      end
    end
  end
end
data/lib/instruct/prompt.rb
@@ -0,0 +1,217 @@
module Instruct
  class Prompt < AttributedString
    include Instruct::Serializable
    set_instruct_class_id 1

    def call(**kwargs, &streaming_block)
      raise ArgumentError, "cannot add prompt to call on prompt" if kwargs[:prompt]

      prompt = Prompt.new

      gens, substrings = split_substrings_and_gen_attachments
      results = []

      gens.each_with_index do |gen, i|
        prompt += substrings[i]
        gen.prompt = prompt
        completion = gen.call(**kwargs, &streaming_block)
        prompt += completion
        results << completion
      end

      results.length > 1 ? results : results.first
    end

    def +(other, *args, apply_completions: true)
      self.dup.concat(other, *args, apply_completions:)
    end

    # Unlike normal strings, << is not the same as concat. << first performs the concat, then runs
    # call on the new prompt, before adding the result to the prompt.
    def <<(other, *args, **kwargs)
      concat(other, *args, perform_call: true, apply_completions: true, **kwargs)
    end

    def safe_concat(string)
      string = Prompt.new(string, safe: true) if string.is_a?(String)
      concat(string)
    end

    def concat(other, *args, perform_call: false, apply_completions: false)
      if other.is_a?(Array) && other.all? { |obj| obj.is_a?(Prompt::Completion) } && !other.empty?
        return concat(*(other + args), perform_call:, apply_completions:)
      end
      if other.is_a?(Prompt::Completion) && apply_completions
        other.apply_to_prompt(self)
      else
        super(other)
      end
      if args.size.positive?
        return self.concat(*args, perform_call: perform_call, apply_completions: apply_completions)
      end
      if perform_call && (result = self.call)
        self.concat(result, perform_call: false, apply_completions: true)
      else
        self
      end
    end

    def split_substrings_and_gen_attachments
      gens = []
      substrings = []
      deferred_gens = self.attachments_with_positions.filter { |obj| obj[:attachment].is_a?(Instruct::Gen) }
      next_substring_index = 0
      deferred_gens.each do |obj|
        position = obj[:position]
        substrings << self[next_substring_index..position]
        gens << obj[:attachment]
        next_substring_index = position + 1
      end
      substrings << self[next_substring_index..self.length - 1] if next_substring_index <= self.length - 1
      [gens, substrings]
    end

    def hide_range_from_prompt(range, by:)
      add_attrs(range, "hidden_#{by}": true)
    end

    def unhide_range_from_prompt(range, by:)
      remove_attrs(range, "hidden_#{by}".to_sym)
    end

    def prompt_object
      prompt_object = self.dup
      hidden_chars = prompt_object.filter do |attrs|
        len_hidden_attrs(attrs).positive?
      end
      return prompt_object if hidden_chars.empty?
      ranges = hidden_chars.original_ranges_for(0..hidden_chars.length - 1)
      ranges.each do |range|
        prompt_object[range] = ''
      end
      prompt_object
    end

    def to_s(gen: :emoji)
      string = super()
      deferred_gens = self.attachments_with_positions.filter { |obj| obj[:attachment].is_a?(Instruct::Gen) }
      deferred_gens.each do |obj|
        position = obj[:position]
        case gen
        when :no_change
        when :hide
          string[position] = ''
        when :expand
          string[position] = obj[:attachment].to_s
        when :emoji
          string[position] = "💬"
        end
      end
      string
    end

    def captured(key)
      return nil unless @captured
      @captured[key]
    end

    def capture(key, **kwargs)
      # TODO: attributed string should support -1
      last_attachment = self.attachment_at(self.length - 1)
      if last_attachment.is_a?(Instruct::Gen)
        last_attachment.capture(key, **kwargs)
      else
        raise ArgumentError, "Cannot capture on a prompt that does not end with a Gen"
      end
      self
    end

    private

    def add_captured(value, key, list_key)
      @captured ||= {}
      if key
        @captured[key] = value
      end
      if list_key
        @captured[list_key] ||= [@captured[list_key]].compact
        @captured[list_key] << value
      end
    end

    def len_hidden_attrs(attrs)
      attrs.keys.filter { |key| key.to_s.start_with?('hidden_') }.length
    end

    # When a generated result is added or concatted to a prompt, the prompt
    # replaces its contents with the modified prompt if the original prompt is
    # the same as the prompt the completion was generated from. This enables
    # middleware to make modifications to the prompt that persist only when
    # the result is added to the prompt. In all other cases, the prompt is not
    # modified and the result is the normal result.
    class Completion < AttributedString
      include Instruct::Serializable
      set_instruct_class_id 3
      attr_reader :prompt

      def apply_to_prompt(prompt_for_update)
        deferred_gens = prompt_for_update.attachments_with_positions.filter { |obj| obj[:attachment].is_a?(Instruct::Gen) }
        first_gen = deferred_gens.first
        if first_gen.nil?
          return prompt_for_update.replace(prompt_for_update.+(self, apply_completions: false))
        end
        # if the prompt_for_update matches this completion's prompt, we replace it
        # with the updated prompt plus the completion; otherwise we just replace
        # the gen's placeholder with the completion. In both cases the gen
        # attachment is removed.
        if (first_gen && prompt_for_update[..first_gen[:position]] == prompt) || (first_gen.nil? && prompt_for_update == prompt)
          prompt_for_update.send(:add_captured, self, @key, @list_key)
          prompt_for_update[..first_gen[:position]] = @updated_prompt.+(self, apply_completions: false)
        else
          prompt_for_update[first_gen[:position]] = self
        end
        prompt_for_update
      end

      def +(other)
        return super unless other.is_a?(Prompt)
        Prompt.new + self + other
      end

      # Returns the latest chunk in the completion unless a chunk argument is provided
      def get_chunk(chunk = self.attrs_at(self.length - 1).fetch(:stream_chunk, nil))
        filtered = self.filter { |attrs| attrs[:stream_chunk] == chunk }
        ranges = filtered.original_ranges_for(0..(filtered.length - 1))
        ranges.map { |range| self[range] }.join
      end

      def _prepare_for_return(prompt:, updated_prompt:, captured_key:, captured_list_key:)
        @prompt = prompt
        @updated_prompt = updated_prompt
        @key = captured_key
        @list_key = captured_list_key
      end

      private

      def first_gen(prompt)
        return nil if deferred_gens.empty?
        deferred_gens
      end

      def captured=(key, list_key)
        @key, @list_key = key, list_key
      end
    end
  end
end
data/lib/instruct/rails/active_job_object_serializer.rb
@@ -0,0 +1,23 @@
require "active_job"

module Instruct::Rails
  class ActiveJobObjectSerializer < ActiveJob::Serializers::ObjectSerializer
    def self.serialize(prompt)
      super({ "value" => Instruct::Serializer.dump(prompt) })
    end

    def self.deserialize(hash)
      Instruct::Serializer.load(hash["value"])
    end

    def self.serialize?(object)
      # Allow prompts and completions to be serialized
      return true if object.is_a?(Instruct::Prompt) || object.is_a?(Instruct::Prompt::Completion)

      # Allow models to be serialized
      return true if Instruct.openai_loaded && object.is_a?(OpenAI)
      return true if Instruct.anthropic_loaded && object.is_a?(Anthropic)
      false
    end
  end
end
data/lib/instruct/rails/active_record_coders.rb
@@ -0,0 +1,36 @@
module Instruct::Rails
  module SerializableModel
    def self.included(base)
      base.extend ClassMethods
    end

    module ClassMethods
      def dump(value)
        Instruct::Serializer.dump(value)
      end

      def load(data)
        Instruct::Serializer.load(data)
      end
    end
  end
end

class Instruct::Prompt
  include Instruct::Rails::SerializableModel
end

class Instruct::Prompt::Completion
  include Instruct::Rails::SerializableModel
end

if defined? Instruct::OpenAI
  class Instruct::OpenAI
    include Instruct::Rails::SerializableModel
  end
end

if defined? Instruct::Anthropic
  class Instruct::Anthropic
    include Instruct::Rails::SerializableModel
  end
end
data/lib/instruct/railtie.rb
@@ -0,0 +1,15 @@
require "rails"

module Instruct::Rails
  class Railtie < Rails::Railtie
    initializer "instruct.active_record_serializers" do
      require_relative "rails/active_record_coders"
    end
    initializer "instruct.active_job.custom_serializers" do
      require_relative "rails/active_job_object_serializer"
      config.after_initialize do
        ActiveJob::Serializers.add_serializers(Instruct::Rails::ActiveJobObjectSerializer)
      end
    end
  end
end
data/lib/instruct/utils/middleware_chain.rb
@@ -0,0 +1,48 @@
module Instruct
  # Handles executing the middleware chain. We use this class to coordinate as
  # it allows us to make modifications in between middleware.
  class MiddlewareChain
    include Instruct::Serializable

    # @param middlewares [Array<#call(req, _next:), MiddlewareChain>] An array of middleware objects. This can be a mix of classes, instances, procs, or other middleware chains.
    def initialize(middlewares:)
      raise ArgumentError, "Middlewares must be an array, not #{middlewares.inspect}" unless middlewares.is_a?(Array)
      @middlewares = middlewares
    end

    # Duplicates the middleware chain.
    def dup
      self.class.new(middlewares: @middlewares.dup)
    end

    # Executes the middleware chain with the given request object.
    def execute(req)
      raise RuntimeError, "Cannot call execute_* recursively or concurrently" if @stack_ptr
      @stack_ptr = -1
      resp = call(req)
      @stack_ptr = nil
      resp
    end

    # @api private
    # Don't use directly. This is used internally to call the next middleware in the chain.
    def call(req)
      raise RuntimeError, "Cannot use .call directly, use .execute" if @stack_ptr.nil?

      @stack_ptr += 1

      if @stack_ptr >= @middlewares.size
        raise RuntimeError, "Middleware chain exhausted, last object should not have called _next:"
      end

      middleware = @middlewares[@stack_ptr]

      if middleware.is_a?(Class) && !middleware.respond_to?(:call)
        middleware = middleware.new
      end

      middleware.is_a?(Instruct::MiddlewareChain) ? middleware.execute(req) : middleware.call(req, _next: self)
    end
  end
end
data/lib/instruct/utils/serializable_with_version.rb
@@ -0,0 +1,73 @@
require 'psych'

module Instruct
  module Serializable
    def self.included(base)
      base.extend(ClassMethods)
      base.instance_variable_set(:@_instruct_serializer_version, 1) unless base.instance_variable_defined?(:@_instruct_serializer_version)
      base.instance_variable_set(:@_instruct_serializer_class_id, base.name.hash) unless base.instance_variable_defined?(:@_instruct_serializer_class_id)
      Serializer::ClassRegistry.register(base.instruct_class_id, base)
    end

    module ClassMethods
      def instruct_class_id
        @_instruct_serializer_class_id
      end

      def instruct_class_version
        @_instruct_serializer_version
      end

      def set_instruct_class_id(id)
        @_instruct_serializer_class_id = id
        Serializer::ClassRegistry.register(@_instruct_serializer_class_id, self)
      end

      def set_instruct_class_id_from_original_name(name)
        set_instruct_class_id(name.hash)
      end

      def set_instruct_class_version(v)
        @_instruct_serializer_version = v
      end

      def migrate_data!(data, from:, to:)
        # no-op by default
      end
    end

    def encode_with(coder)
      coder.tag = "!ruby/instruct:#{self.class.instruct_class_id}@#{self.class.instruct_class_version}"
      coder["data"] = instance_vars_to_hash
      # TODO: consider calling the old encode_with method if defined
      # and then this special case can be moved to prompt
      coder["str"] = self.to_s(gen: :no_change) if self.is_a? Prompt
      coder["str"] = self.to_s if coder["str"].nil? && self.is_a?(String)
    end

    def init_with(coder)
      version = coder["version"]
      if self.is_a? String
        replace(coder["str"])
      end
      data = coder["data"] || {}

      if version < self.class.instruct_class_version
        self.class.migrate_data!(data, from: version, to: self.class.instruct_class_version)
      end
      hash_to_instance_vars(data)
    end

    private

    def instance_vars_to_hash
      Hash[instance_variables.map { |ivar| [ivar.to_s.sub('@', ''), instance_variable_get(ivar)] }]
    end

    def hash_to_instance_vars(data)
      data.each { |k, v| instance_variable_set("@#{k}", v) }
    end
  end
end