foobara-llm-backed-command 0.0.3 → 0.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/src/llm_backed_command.rb +2 -169
- data/src/llm_backed_execute_method.rb +176 -0
- metadata +2 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 53a125a1cf6ed542e1f7a45b271caa6e6651458afc54c817a21be114f9a29003
|
4
|
+
data.tar.gz: 45bce6b28ca7a1a68629056e62be036cbe11890a851e5da8e193747d4382b212
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 8b2c99f1f022275d2ee480d0492b81f6e18d346d7d809143fb548915dd719e0f7080b8d79714f89c61b0f5f6a928dd482e92f58df3d6e262e368d216db9741bd
|
7
|
+
data.tar.gz: 716b310f5628dd2c3c7fe97463d98248107c3c597572a62b0cbd4c32abf0a99f523d3ce52285388f43c966d0958c1e4c957204f8a57cbe631cb2e22baed85d36
|
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,8 @@
|
|
1
|
+
## [0.0.5] - 2025-03-06
|
2
|
+
|
3
|
+
- Defer to downstream default model if none specified
|
4
|
+
- Split LlmBackedCommand into a Command and a LlmBackedExecuteMethod mixin
|
5
|
+
|
1
6
|
## [0.0.3] - 2025-03-05
|
2
7
|
|
3
8
|
- Support answers with an explanation followed by the proper answer in fence posts
|
data/src/llm_backed_command.rb
CHANGED
@@ -1,172 +1,5 @@
|
|
1
|
-
# NOTE: You can add the following inputs if you'd like, or, create methods with these names
|
2
|
-
# on the class.
|
3
|
-
#
|
4
|
-
# inputs do
|
5
|
-
# association_depth :symbol, one_of: JsonSchemaGenerator::AssociationDepth, default: AssociationDepth::ATOM
|
6
|
-
# llm_model :symbol, one_of: Foobara::Ai::AnswerBot::Types::ModelEnum
|
7
|
-
# end
|
8
1
|
module Foobara
|
9
|
-
|
10
|
-
include
|
11
|
-
|
12
|
-
on_include do
|
13
|
-
depends_on Ai::AnswerBot::Ask
|
14
|
-
end
|
15
|
-
|
16
|
-
def execute
|
17
|
-
determine_serializer
|
18
|
-
construct_input_json
|
19
|
-
generate_answer
|
20
|
-
parse_answer
|
21
|
-
|
22
|
-
parsed_answer
|
23
|
-
end
|
24
|
-
|
25
|
-
attr_accessor :serializer, :input_json, :answer, :parsed_answer
|
26
|
-
|
27
|
-
def determine_serializer
|
28
|
-
depth = if respond_to?(:association_depth)
|
29
|
-
association_depth
|
30
|
-
else
|
31
|
-
Foobara::JsonSchemaGenerator::AssociationDepth::AGGREGATE
|
32
|
-
end
|
33
|
-
|
34
|
-
serializer = case depth
|
35
|
-
when Foobara::JsonSchemaGenerator::AssociationDepth::ATOM
|
36
|
-
Foobara::CommandConnectors::Serializers::AtomicSerializer
|
37
|
-
when Foobara::JsonSchemaGenerator::AssociationDepth::AGGREGATE
|
38
|
-
Foobara::CommandConnectors::Serializers::AggregateSerializer
|
39
|
-
when Foobara::JsonSchemaGenerator::AssociationDepth::PRIMARY_KEY_ONLY
|
40
|
-
# :nocov:
|
41
|
-
raise "PRIMARY_KEY_ONLY depth not yet implemented"
|
42
|
-
# :nocov:
|
43
|
-
else
|
44
|
-
# :nocov:
|
45
|
-
raise "Unknown depth: #{depth}"
|
46
|
-
# :nocov:
|
47
|
-
end
|
48
|
-
|
49
|
-
# cache this?
|
50
|
-
self.serializer = serializer.new
|
51
|
-
end
|
52
|
-
|
53
|
-
def construct_input_json
|
54
|
-
inputs_without_llm_integration_inputs = inputs.except(:llm_model, :association_depth)
|
55
|
-
input_json = serializer.serialize(inputs_without_llm_integration_inputs)
|
56
|
-
|
57
|
-
self.input_json = JSON.fast_generate(input_json)
|
58
|
-
end
|
59
|
-
|
60
|
-
def generate_answer
|
61
|
-
self.answer = run_subcommand!(
|
62
|
-
Ai::AnswerBot::Ask,
|
63
|
-
model: llm_model,
|
64
|
-
instructions: llm_instructions,
|
65
|
-
question: input_json
|
66
|
-
)
|
67
|
-
end
|
68
|
-
|
69
|
-
def llm_instructions
|
70
|
-
self.class.llm_instructions
|
71
|
-
end
|
72
|
-
|
73
|
-
def parse_answer
|
74
|
-
stripped_answer = answer.gsub(/<THINK>.*?<\/THINK>/mi, "")
|
75
|
-
fencepostless_answer = stripped_answer.gsub(/^\s*```\w*\n(.*)```\s*\z/m, "\\1")
|
76
|
-
# TODO: should we verify against json-schema or no?
|
77
|
-
self.parsed_answer = begin
|
78
|
-
JSON.parse(fencepostless_answer)
|
79
|
-
rescue => e
|
80
|
-
# see if we can extract the last fence-posts content just in case
|
81
|
-
last_fence_post_regex = /```\w*\s*\n((?:(?!```).)+)\n```(?:(?!```).)*\z/m
|
82
|
-
begin
|
83
|
-
match = last_fence_post_regex.match(stripped_answer)
|
84
|
-
if match
|
85
|
-
JSON.parse(match[1])
|
86
|
-
else
|
87
|
-
# :nocov:
|
88
|
-
raise e
|
89
|
-
# :nocov:
|
90
|
-
end
|
91
|
-
rescue
|
92
|
-
# :nocov:
|
93
|
-
raise e
|
94
|
-
# :nocov:
|
95
|
-
end
|
96
|
-
end
|
97
|
-
end
|
98
|
-
|
99
|
-
module ClassMethods
|
100
|
-
def inputs_json_schema
|
101
|
-
@inputs_json_schema ||= JsonSchemaGenerator.to_json_schema(inputs_type_without_llm_integration_inputs)
|
102
|
-
end
|
103
|
-
|
104
|
-
def inputs_type_without_llm_integration_inputs
|
105
|
-
return @inputs_type_without_llm_integration_inputs if @inputs_type_without_llm_integration_inputs
|
106
|
-
|
107
|
-
type_declaration = Util.deep_dup(inputs_type.declaration_data)
|
108
|
-
|
109
|
-
element_type_declarations = type_declaration[:element_type_declarations]
|
110
|
-
|
111
|
-
changed = false
|
112
|
-
|
113
|
-
if element_type_declarations.key?(:llm_model)
|
114
|
-
changed = true
|
115
|
-
element_type_declarations.delete(:llm_model)
|
116
|
-
end
|
117
|
-
|
118
|
-
if element_type_declarations.key?(:association_depth)
|
119
|
-
changed = true
|
120
|
-
element_type_declarations.delete(:association_depth)
|
121
|
-
end
|
122
|
-
|
123
|
-
if type_declaration.key?(:defaults)
|
124
|
-
if type_declaration[:defaults].key?(:llm_model)
|
125
|
-
changed = true
|
126
|
-
type_declaration[:defaults].delete(:llm_model)
|
127
|
-
end
|
128
|
-
|
129
|
-
if type_declaration[:defaults].key?(:association_depth)
|
130
|
-
changed = true
|
131
|
-
type_declaration[:defaults].delete(:association_depth)
|
132
|
-
end
|
133
|
-
if type_declaration[:defaults].empty?
|
134
|
-
type_declaration.delete(:defaults)
|
135
|
-
end
|
136
|
-
end
|
137
|
-
|
138
|
-
@inputs_type_without_llm_integration_inputs = if changed
|
139
|
-
domain.foobara_type_from_declaration(type_declaration)
|
140
|
-
else
|
141
|
-
inputs_type
|
142
|
-
end
|
143
|
-
end
|
144
|
-
|
145
|
-
def result_json_schema
|
146
|
-
@result_json_schema ||= JsonSchemaGenerator.to_json_schema(result_type)
|
147
|
-
end
|
148
|
-
|
149
|
-
def llm_instructions
|
150
|
-
@llm_instructions ||= <<~INSTRUCTIONS
|
151
|
-
You are implementing an API for a command named #{scoped_full_name} which has the following description:
|
152
|
-
|
153
|
-
#{description}#{" "}
|
154
|
-
|
155
|
-
Here is the inputs JSON schema for the data you will receive:
|
156
|
-
|
157
|
-
#{inputs_json_schema}
|
158
|
-
|
159
|
-
Here is the result JSON schema:
|
160
|
-
|
161
|
-
#{result_json_schema}
|
162
|
-
|
163
|
-
You will receive 1 message containing only JSON data according to the inputs JSON schema above
|
164
|
-
and you will generate a JSON response that is a valid response according to the result JSON schema above.
|
165
|
-
|
166
|
-
You will reply with nothing more than the JSON you've generated so that the calling code
|
167
|
-
can successfully parse your answer.
|
168
|
-
INSTRUCTIONS
|
169
|
-
end
|
170
|
-
end
|
2
|
+
class LlmBackedCommand < Foobara::Command
|
3
|
+
include LlmBackedExecuteMethod
|
171
4
|
end
|
172
5
|
end
|
data/src/llm_backed_execute_method.rb
ADDED
@@ -0,0 +1,176 @@
|
|
1
|
+
# NOTE: You can add the following inputs if you'd like, or, create methods with these names
|
2
|
+
# on the class.
|
3
|
+
#
|
4
|
+
# inputs do
|
5
|
+
# association_depth :symbol, one_of: JsonSchemaGenerator::AssociationDepth, default: AssociationDepth::ATOM
|
6
|
+
# llm_model :symbol, one_of: Foobara::Ai::AnswerBot::Types::ModelEnum
|
7
|
+
# end
|
8
|
+
module Foobara
|
9
|
+
module LlmBackedExecuteMethod
|
10
|
+
include Concern
|
11
|
+
|
12
|
+
on_include do
|
13
|
+
depends_on Ai::AnswerBot::Ask
|
14
|
+
end
|
15
|
+
|
16
|
+
def execute
|
17
|
+
determine_serializer
|
18
|
+
construct_input_json
|
19
|
+
generate_answer
|
20
|
+
parse_answer
|
21
|
+
|
22
|
+
parsed_answer
|
23
|
+
end
|
24
|
+
|
25
|
+
attr_accessor :serializer, :input_json, :answer, :parsed_answer
|
26
|
+
|
27
|
+
def determine_serializer
|
28
|
+
depth = if respond_to?(:association_depth)
|
29
|
+
association_depth
|
30
|
+
else
|
31
|
+
Foobara::JsonSchemaGenerator::AssociationDepth::AGGREGATE
|
32
|
+
end
|
33
|
+
|
34
|
+
serializer = case depth
|
35
|
+
when Foobara::JsonSchemaGenerator::AssociationDepth::ATOM
|
36
|
+
Foobara::CommandConnectors::Serializers::AtomicSerializer
|
37
|
+
when Foobara::JsonSchemaGenerator::AssociationDepth::AGGREGATE
|
38
|
+
Foobara::CommandConnectors::Serializers::AggregateSerializer
|
39
|
+
when Foobara::JsonSchemaGenerator::AssociationDepth::PRIMARY_KEY_ONLY
|
40
|
+
# :nocov:
|
41
|
+
raise "PRIMARY_KEY_ONLY depth not yet implemented"
|
42
|
+
# :nocov:
|
43
|
+
else
|
44
|
+
# :nocov:
|
45
|
+
raise "Unknown depth: #{depth}"
|
46
|
+
# :nocov:
|
47
|
+
end
|
48
|
+
|
49
|
+
# cache this?
|
50
|
+
self.serializer = serializer.new
|
51
|
+
end
|
52
|
+
|
53
|
+
def construct_input_json
|
54
|
+
inputs_without_llm_integration_inputs = inputs.except(:llm_model, :association_depth)
|
55
|
+
input_json = serializer.serialize(inputs_without_llm_integration_inputs)
|
56
|
+
|
57
|
+
self.input_json = JSON.fast_generate(input_json)
|
58
|
+
end
|
59
|
+
|
60
|
+
def generate_answer
|
61
|
+
ask_inputs = {
|
62
|
+
instructions: llm_instructions,
|
63
|
+
question: input_json
|
64
|
+
}
|
65
|
+
|
66
|
+
if respond_to?(:llm_model)
|
67
|
+
ask_inputs[:model] = llm_model
|
68
|
+
end
|
69
|
+
|
70
|
+
self.answer = run_subcommand!(Ai::AnswerBot::Ask, ask_inputs)
|
71
|
+
end
|
72
|
+
|
73
|
+
def llm_instructions
|
74
|
+
self.class.llm_instructions
|
75
|
+
end
|
76
|
+
|
77
|
+
def parse_answer
|
78
|
+
stripped_answer = answer.gsub(/<THINK>.*?<\/THINK>/mi, "")
|
79
|
+
fencepostless_answer = stripped_answer.gsub(/^\s*```\w*\n(.*)```\s*\z/m, "\\1")
|
80
|
+
# TODO: should we verify against json-schema or no?
|
81
|
+
self.parsed_answer = begin
|
82
|
+
JSON.parse(fencepostless_answer)
|
83
|
+
rescue => e
|
84
|
+
# see if we can extract the last fence-posts content just in case
|
85
|
+
last_fence_post_regex = /```\w*\s*\n((?:(?!```).)+)\n```(?:(?!```).)*\z/m
|
86
|
+
begin
|
87
|
+
match = last_fence_post_regex.match(stripped_answer)
|
88
|
+
if match
|
89
|
+
JSON.parse(match[1])
|
90
|
+
else
|
91
|
+
# :nocov:
|
92
|
+
raise e
|
93
|
+
# :nocov:
|
94
|
+
end
|
95
|
+
rescue
|
96
|
+
# :nocov:
|
97
|
+
raise e
|
98
|
+
# :nocov:
|
99
|
+
end
|
100
|
+
end
|
101
|
+
end
|
102
|
+
|
103
|
+
module ClassMethods
|
104
|
+
def inputs_json_schema
|
105
|
+
@inputs_json_schema ||= JsonSchemaGenerator.to_json_schema(inputs_type_without_llm_integration_inputs)
|
106
|
+
end
|
107
|
+
|
108
|
+
def inputs_type_without_llm_integration_inputs
|
109
|
+
return @inputs_type_without_llm_integration_inputs if @inputs_type_without_llm_integration_inputs
|
110
|
+
|
111
|
+
type_declaration = Util.deep_dup(inputs_type.declaration_data)
|
112
|
+
|
113
|
+
element_type_declarations = type_declaration[:element_type_declarations]
|
114
|
+
|
115
|
+
changed = false
|
116
|
+
|
117
|
+
if element_type_declarations.key?(:llm_model)
|
118
|
+
changed = true
|
119
|
+
element_type_declarations.delete(:llm_model)
|
120
|
+
end
|
121
|
+
|
122
|
+
if element_type_declarations.key?(:association_depth)
|
123
|
+
changed = true
|
124
|
+
element_type_declarations.delete(:association_depth)
|
125
|
+
end
|
126
|
+
|
127
|
+
if type_declaration.key?(:defaults)
|
128
|
+
if type_declaration[:defaults].key?(:llm_model)
|
129
|
+
changed = true
|
130
|
+
type_declaration[:defaults].delete(:llm_model)
|
131
|
+
end
|
132
|
+
|
133
|
+
if type_declaration[:defaults].key?(:association_depth)
|
134
|
+
changed = true
|
135
|
+
type_declaration[:defaults].delete(:association_depth)
|
136
|
+
end
|
137
|
+
if type_declaration[:defaults].empty?
|
138
|
+
type_declaration.delete(:defaults)
|
139
|
+
end
|
140
|
+
end
|
141
|
+
|
142
|
+
@inputs_type_without_llm_integration_inputs = if changed
|
143
|
+
domain.foobara_type_from_declaration(type_declaration)
|
144
|
+
else
|
145
|
+
inputs_type
|
146
|
+
end
|
147
|
+
end
|
148
|
+
|
149
|
+
def result_json_schema
|
150
|
+
@result_json_schema ||= JsonSchemaGenerator.to_json_schema(result_type)
|
151
|
+
end
|
152
|
+
|
153
|
+
def llm_instructions
|
154
|
+
@llm_instructions ||= <<~INSTRUCTIONS
|
155
|
+
You are implementing an API for a command named #{scoped_full_name} which has the following description:
|
156
|
+
|
157
|
+
#{description}#{" "}
|
158
|
+
|
159
|
+
Here is the inputs JSON schema for the data you will receive:
|
160
|
+
|
161
|
+
#{inputs_json_schema}
|
162
|
+
|
163
|
+
Here is the result JSON schema:
|
164
|
+
|
165
|
+
#{result_json_schema}
|
166
|
+
|
167
|
+
You will receive 1 message containing only JSON data according to the inputs JSON schema above
|
168
|
+
and you will generate a JSON response that is a valid response according to the result JSON schema above.
|
169
|
+
|
170
|
+
You will reply with nothing more than the JSON you've generated so that the calling code
|
171
|
+
can successfully parse your answer.
|
172
|
+
INSTRUCTIONS
|
173
|
+
end
|
174
|
+
end
|
175
|
+
end
|
176
|
+
end
|
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: foobara-llm-backed-command
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.0.3
|
4
|
+
version: 0.0.5
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Miles Georgi
|
@@ -49,6 +49,7 @@ files:
|
|
49
49
|
- README.md
|
50
50
|
- lib/foobara/llm_backed_command.rb
|
51
51
|
- src/llm_backed_command.rb
|
52
|
+
- src/llm_backed_execute_method.rb
|
52
53
|
homepage: https://github.com/foobara/llm-backed-command
|
53
54
|
licenses:
|
54
55
|
- MPL-2.0
|