foobara-llm-backed-command 0.0.2 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 9d775d26884980d2bf6ab266dbc16fa0b18fedaa4690f3145e1793fec55984f9
4
- data.tar.gz: 12de9bf957015e76e4736a3145383064a7f324ce0beea88748067c47cc64205e
3
+ metadata.gz: 932878d2d01d4d804a4aca75cf53ccbbff901cf2e54067d52a781298d3ebea2a
4
+ data.tar.gz: 96c9c8a664b7fa493d740a222b5429d2259af317131c2ef4ebe23be6a730a68f
5
5
  SHA512:
6
- metadata.gz: 32ee0f1ae9c357072be04e252f275c7daf0597964d438dae2f74f9fa345c5ad1e181a14146a277d9dc1956ae61046bbe8b61608add7e8c4910364bc8f1db7251
7
- data.tar.gz: 6404bf3f85d207fdd9d637fe6c072b04120c749416a920a472db086882274e5979ac7e0fff3f4bc18ae72fc49ab3dc771a94465a715d406c405fc5459e73f358
6
+ metadata.gz: 947393bec6c00defe7198ea96f69c34a90f27d2beb9c35531e39b09c436c172b243a93e1be49b38cb64ecaf73f8a5ee572868a3cf7de89f343b39041ab47d650
7
+ data.tar.gz: f51fb84a77878d535c28699879c8ee7bc4dd643eaffbb3269aa679f994170ec0fc56c99bcb55c4c7f8570f95152bc8a12440860fcc552216d10d435ce8cad861
data/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
1
+ ## [0.0.4] - 2025-03-06
2
+
3
+ - Defer to downstream default model if none specified
4
+
5
+ ## [0.0.3] - 2025-03-05
6
+
7
+ - Support answers with an explanation followed by the proper answer in fence posts
8
+
9
+ ## [0.0.2] - 2025-03-05
10
+
11
+ - Strip <think> blocks out of answers
12
+
1
13
  ## [0.0.1] - 2025-03-05
2
14
 
3
15
  - Release as a gem
@@ -58,12 +58,16 @@ module Foobara
58
58
  end
59
59
 
60
60
  def generate_answer
61
- self.answer = run_subcommand!(
62
- Ai::AnswerBot::Ask,
63
- model: llm_model,
61
+ ask_inputs = {
64
62
  instructions: llm_instructions,
65
63
  question: input_json
66
- )
64
+ }
65
+
66
+ if respond_to?(:llm_model)
67
+ ask_inputs[:model] = llm_model
68
+ end
69
+
70
+ self.answer = run_subcommand!(Ai::AnswerBot::Ask, ask_inputs)
67
71
  end
68
72
 
69
73
  def llm_instructions
@@ -72,9 +76,28 @@ module Foobara
72
76
 
73
77
  def parse_answer
74
78
  stripped_answer = answer.gsub(/<THINK>.*?<\/THINK>/mi, "")
75
- stripped_answer = stripped_answer.gsub(/^\s*```\w*\n(.*)```\s*\z/m, "\\1")
79
+ fencepostless_answer = stripped_answer.gsub(/^\s*```\w*\n(.*)```\s*\z/m, "\\1")
76
80
  # TODO: should we verify against json-schema or no?
77
- self.parsed_answer = JSON.parse(stripped_answer)
81
+ self.parsed_answer = begin
82
+ JSON.parse(fencepostless_answer)
83
+ rescue => e
84
+ # see if we can extract the last fence-posts content just in case
85
+ last_fence_post_regex = /```\w*\s*\n((?:(?!```).)+)\n```(?:(?!```).)*\z/m
86
+ begin
87
+ match = last_fence_post_regex.match(stripped_answer)
88
+ if match
89
+ JSON.parse(match[1])
90
+ else
91
+ # :nocov:
92
+ raise e
93
+ # :nocov:
94
+ end
95
+ rescue
96
+ # :nocov:
97
+ raise e
98
+ # :nocov:
99
+ end
100
+ end
78
101
  end
79
102
 
80
103
  module ClassMethods
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: foobara-llm-backed-command
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.2
4
+ version: 0.0.4
5
5
  platform: ruby
6
6
  authors:
7
7
  - Miles Georgi