llm-shell 0.9.2 → 0.10.0

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (258)
  1. checksums.yaml +4 -4
  2. data/README.md +61 -66
  3. data/lib/llm/shell/command.rb +40 -40
  4. data/lib/llm/shell/commands/clear_screen.rb +4 -18
  5. data/lib/llm/shell/commands/debug_mode.rb +12 -0
  6. data/lib/llm/shell/commands/dir_import.rb +4 -20
  7. data/lib/llm/shell/commands/disable_tool.rb +33 -0
  8. data/lib/llm/shell/commands/enable_tool.rb +33 -0
  9. data/lib/llm/shell/commands/file_import.rb +4 -20
  10. data/lib/llm/shell/commands/help.rb +23 -36
  11. data/lib/llm/shell/commands/show_chat.rb +4 -19
  12. data/lib/llm/shell/commands/show_version.rb +4 -20
  13. data/lib/llm/shell/commands/system_prompt.rb +4 -18
  14. data/lib/llm/shell/completion.rb +5 -5
  15. data/lib/llm/shell/config.rb +4 -5
  16. data/lib/llm/shell/formatter.rb +1 -2
  17. data/lib/llm/shell/internal/coderay/lib/coderay/duo.rb +81 -0
  18. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/_map.rb +17 -0
  19. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/comment_filter.rb +25 -0
  20. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/count.rb +39 -0
  21. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug.rb +49 -0
  22. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug_lint.rb +63 -0
  23. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/div.rb +23 -0
  24. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/encoder.rb +190 -0
  25. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/filter.rb +58 -0
  26. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/css.rb +65 -0
  27. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/numbering.rb +108 -0
  28. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/output.rb +164 -0
  29. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html.rb +333 -0
  30. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/json.rb +83 -0
  31. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lines_of_code.rb +45 -0
  32. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lint.rb +59 -0
  33. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/null.rb +18 -0
  34. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/page.rb +24 -0
  35. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/span.rb +23 -0
  36. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/statistic.rb +95 -0
  37. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/terminal.rb +195 -0
  38. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/text.rb +46 -0
  39. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/token_kind_filter.rb +111 -0
  40. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/xml.rb +72 -0
  41. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/yaml.rb +50 -0
  42. data/lib/llm/shell/internal/coderay/lib/coderay/encoders.rb +18 -0
  43. data/lib/llm/shell/internal/coderay/lib/coderay/for_redcloth.rb +95 -0
  44. data/lib/llm/shell/internal/coderay/lib/coderay/helpers/file_type.rb +151 -0
  45. data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin.rb +55 -0
  46. data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin_host.rb +221 -0
  47. data/lib/llm/shell/internal/coderay/lib/coderay/helpers/word_list.rb +72 -0
  48. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/_map.rb +24 -0
  49. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/c.rb +189 -0
  50. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/clojure.rb +217 -0
  51. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/cpp.rb +217 -0
  52. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/css.rb +196 -0
  53. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/debug.rb +75 -0
  54. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/delphi.rb +144 -0
  55. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/diff.rb +221 -0
  56. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/erb.rb +81 -0
  57. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/go.rb +208 -0
  58. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/groovy.rb +268 -0
  59. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/haml.rb +168 -0
  60. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/html.rb +275 -0
  61. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java/builtin_types.rb +421 -0
  62. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java.rb +174 -0
  63. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java_script.rb +236 -0
  64. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/json.rb +98 -0
  65. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/lua.rb +280 -0
  66. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/php.rb +527 -0
  67. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/python.rb +287 -0
  68. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/raydebug.rb +75 -0
  69. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/patterns.rb +178 -0
  70. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/string_state.rb +79 -0
  71. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby.rb +477 -0
  72. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sass.rb +232 -0
  73. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/scanner.rb +337 -0
  74. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sql.rb +169 -0
  75. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/taskpaper.rb +36 -0
  76. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/text.rb +26 -0
  77. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/xml.rb +17 -0
  78. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/yaml.rb +140 -0
  79. data/lib/llm/shell/internal/coderay/lib/coderay/scanners.rb +27 -0
  80. data/lib/llm/shell/internal/coderay/lib/coderay/styles/_map.rb +7 -0
  81. data/lib/llm/shell/internal/coderay/lib/coderay/styles/alpha.rb +153 -0
  82. data/lib/llm/shell/internal/coderay/lib/coderay/styles/style.rb +18 -0
  83. data/lib/llm/shell/internal/coderay/lib/coderay/styles.rb +15 -0
  84. data/lib/llm/shell/internal/coderay/lib/coderay/token_kinds.rb +85 -0
  85. data/lib/llm/shell/internal/coderay/lib/coderay/tokens.rb +164 -0
  86. data/lib/llm/shell/internal/coderay/lib/coderay/tokens_proxy.rb +55 -0
  87. data/lib/llm/shell/internal/coderay/lib/coderay/version.rb +3 -0
  88. data/lib/llm/shell/internal/coderay/lib/coderay.rb +284 -0
  89. data/lib/llm/shell/internal/io-line/lib/io/line/multiple.rb +19 -0
  90. data/lib/{io → llm/shell/internal/io-line/lib/io}/line.rb +2 -0
  91. data/lib/llm/shell/internal/llm.rb/lib/llm/bot/builder.rb +31 -0
  92. data/lib/llm/shell/internal/llm.rb/lib/llm/bot/conversable.rb +37 -0
  93. data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/completion.rb +49 -0
  94. data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/respond.rb +49 -0
  95. data/lib/llm/shell/internal/llm.rb/lib/llm/bot.rb +150 -0
  96. data/lib/llm/shell/internal/llm.rb/lib/llm/buffer.rb +162 -0
  97. data/lib/llm/shell/internal/llm.rb/lib/llm/client.rb +36 -0
  98. data/lib/llm/shell/internal/llm.rb/lib/llm/error.rb +49 -0
  99. data/lib/llm/shell/internal/llm.rb/lib/llm/eventhandler.rb +44 -0
  100. data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/event.rb +69 -0
  101. data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/parser.rb +88 -0
  102. data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream.rb +8 -0
  103. data/lib/llm/shell/internal/llm.rb/lib/llm/file.rb +91 -0
  104. data/lib/llm/shell/internal/llm.rb/lib/llm/function.rb +177 -0
  105. data/lib/llm/shell/internal/llm.rb/lib/llm/message.rb +178 -0
  106. data/lib/llm/shell/internal/llm.rb/lib/llm/mime.rb +140 -0
  107. data/lib/llm/shell/internal/llm.rb/lib/llm/multipart.rb +101 -0
  108. data/lib/llm/shell/internal/llm.rb/lib/llm/object/builder.rb +38 -0
  109. data/lib/llm/shell/internal/llm.rb/lib/llm/object/kernel.rb +53 -0
  110. data/lib/llm/shell/internal/llm.rb/lib/llm/object.rb +89 -0
  111. data/lib/llm/shell/internal/llm.rb/lib/llm/provider.rb +352 -0
  112. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/error_handler.rb +36 -0
  113. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/files.rb +155 -0
  114. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format/completion_format.rb +88 -0
  115. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format.rb +29 -0
  116. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/models.rb +54 -0
  117. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/completion.rb +39 -0
  118. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/enumerable.rb +11 -0
  119. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/file.rb +23 -0
  120. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/web_search.rb +21 -0
  121. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/stream_parser.rb +66 -0
  122. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic.rb +138 -0
  123. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
  124. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format.rb +27 -0
  125. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek.rb +75 -0
  126. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/audio.rb +73 -0
  127. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/error_handler.rb +47 -0
  128. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/files.rb +146 -0
  129. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format/completion_format.rb +69 -0
  130. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format.rb +39 -0
  131. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/images.rb +133 -0
  132. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/models.rb +60 -0
  133. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/completion.rb +35 -0
  134. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/embedding.rb +8 -0
  135. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/file.rb +11 -0
  136. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/files.rb +15 -0
  137. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/image.rb +31 -0
  138. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/models.rb +15 -0
  139. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/web_search.rb +22 -0
  140. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/stream_parser.rb +86 -0
  141. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini.rb +173 -0
  142. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/llamacpp.rb +74 -0
  143. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/error_handler.rb +36 -0
  144. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format/completion_format.rb +77 -0
  145. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format.rb +29 -0
  146. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/models.rb +56 -0
  147. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/completion.rb +28 -0
  148. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/embedding.rb +9 -0
  149. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/stream_parser.rb +44 -0
  150. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama.rb +116 -0
  151. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/audio.rb +91 -0
  152. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/error_handler.rb +46 -0
  153. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/files.rb +134 -0
  154. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/completion_format.rb +90 -0
  155. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/moderation_format.rb +35 -0
  156. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/respond_format.rb +72 -0
  157. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format.rb +54 -0
  158. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/images.rb +109 -0
  159. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/models.rb +55 -0
  160. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/moderations.rb +65 -0
  161. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/audio.rb +7 -0
  162. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/completion.rb +40 -0
  163. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/embedding.rb +9 -0
  164. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/enumerable.rb +23 -0
  165. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/file.rb +7 -0
  166. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/image.rb +16 -0
  167. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/moderations.rb +34 -0
  168. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb +48 -0
  169. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/web_search.rb +21 -0
  170. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses/stream_parser.rb +76 -0
  171. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses.rb +99 -0
  172. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/stream_parser.rb +86 -0
  173. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/vector_stores.rb +228 -0
  174. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai.rb +206 -0
  175. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai/images.rb +58 -0
  176. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai.rb +72 -0
  177. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/zai.rb +74 -0
  178. data/lib/llm/shell/internal/llm.rb/lib/llm/response.rb +67 -0
  179. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/array.rb +26 -0
  180. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/boolean.rb +13 -0
  181. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/integer.rb +43 -0
  182. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/leaf.rb +78 -0
  183. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/null.rb +13 -0
  184. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/number.rb +43 -0
  185. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/object.rb +41 -0
  186. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/string.rb +34 -0
  187. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/version.rb +8 -0
  188. data/lib/llm/shell/internal/llm.rb/lib/llm/schema.rb +81 -0
  189. data/lib/llm/shell/internal/llm.rb/lib/llm/server_tool.rb +32 -0
  190. data/lib/llm/shell/internal/llm.rb/lib/llm/tool/param.rb +75 -0
  191. data/lib/llm/shell/internal/llm.rb/lib/llm/tool.rb +78 -0
  192. data/lib/llm/shell/internal/llm.rb/lib/llm/utils.rb +19 -0
  193. data/lib/llm/shell/internal/llm.rb/lib/llm/version.rb +5 -0
  194. data/lib/llm/shell/internal/llm.rb/lib/llm.rb +121 -0
  195. data/lib/llm/shell/internal/optparse/lib/optionparser.rb +2 -0
  196. data/lib/llm/shell/internal/optparse/lib/optparse/ac.rb +70 -0
  197. data/lib/llm/shell/internal/optparse/lib/optparse/date.rb +18 -0
  198. data/lib/llm/shell/internal/optparse/lib/optparse/kwargs.rb +27 -0
  199. data/lib/llm/shell/internal/optparse/lib/optparse/shellwords.rb +7 -0
  200. data/lib/llm/shell/internal/optparse/lib/optparse/time.rb +11 -0
  201. data/lib/llm/shell/internal/optparse/lib/optparse/uri.rb +7 -0
  202. data/lib/llm/shell/internal/optparse/lib/optparse/version.rb +80 -0
  203. data/lib/llm/shell/internal/optparse/lib/optparse.rb +2469 -0
  204. data/lib/llm/shell/internal/paint/lib/paint/constants.rb +104 -0
  205. data/lib/llm/shell/internal/paint/lib/paint/pa.rb +13 -0
  206. data/lib/llm/shell/internal/paint/lib/paint/rgb_colors.rb +14 -0
  207. data/lib/llm/shell/internal/paint/lib/paint/shortcuts.rb +100 -0
  208. data/lib/llm/shell/internal/paint/lib/paint/shortcuts_version.rb +5 -0
  209. data/lib/llm/shell/internal/paint/lib/paint/util.rb +16 -0
  210. data/lib/llm/shell/internal/paint/lib/paint/version.rb +5 -0
  211. data/lib/llm/shell/internal/paint/lib/paint.rb +261 -0
  212. data/lib/llm/shell/internal/reline/lib/reline/config.rb +378 -0
  213. data/lib/llm/shell/internal/reline/lib/reline/face.rb +199 -0
  214. data/lib/llm/shell/internal/reline/lib/reline/history.rb +76 -0
  215. data/lib/llm/shell/internal/reline/lib/reline/io/ansi.rb +322 -0
  216. data/lib/llm/shell/internal/reline/lib/reline/io/dumb.rb +120 -0
  217. data/lib/llm/shell/internal/reline/lib/reline/io/windows.rb +530 -0
  218. data/lib/llm/shell/internal/reline/lib/reline/io.rb +55 -0
  219. data/lib/llm/shell/internal/reline/lib/reline/key_actor/base.rb +37 -0
  220. data/lib/llm/shell/internal/reline/lib/reline/key_actor/composite.rb +17 -0
  221. data/lib/llm/shell/internal/reline/lib/reline/key_actor/emacs.rb +517 -0
  222. data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_command.rb +518 -0
  223. data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_insert.rb +517 -0
  224. data/lib/llm/shell/internal/reline/lib/reline/key_actor.rb +8 -0
  225. data/lib/llm/shell/internal/reline/lib/reline/key_stroke.rb +119 -0
  226. data/lib/llm/shell/internal/reline/lib/reline/kill_ring.rb +125 -0
  227. data/lib/llm/shell/internal/reline/lib/reline/line_editor.rb +2356 -0
  228. data/lib/llm/shell/internal/reline/lib/reline/unicode/east_asian_width.rb +1292 -0
  229. data/lib/llm/shell/internal/reline/lib/reline/unicode.rb +421 -0
  230. data/lib/llm/shell/internal/reline/lib/reline/version.rb +3 -0
  231. data/lib/llm/shell/internal/reline/lib/reline.rb +527 -0
  232. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/generated_parser.rb +712 -0
  233. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/handler.rb +268 -0
  234. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date.rb +35 -0
  235. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date_time.rb +42 -0
  236. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_time.rb +40 -0
  237. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/parser.rb +21 -0
  238. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/scanner.rb +92 -0
  239. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/string_utils.rb +40 -0
  240. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/version.rb +5 -0
  241. data/lib/llm/shell/internal/tomlrb/lib/tomlrb.rb +49 -0
  242. data/lib/llm/shell/options.rb +1 -1
  243. data/lib/llm/shell/renderer.rb +2 -3
  244. data/lib/llm/shell/repl.rb +21 -16
  245. data/lib/llm/shell/tool.rb +42 -0
  246. data/lib/llm/shell/tools/read_file.rb +15 -0
  247. data/lib/llm/shell/tools/system.rb +17 -0
  248. data/lib/llm/shell/tools/write_file.rb +16 -0
  249. data/lib/llm/shell/version.rb +1 -1
  250. data/lib/llm/shell.rb +83 -39
  251. data/libexec/llm-shell/shell +4 -6
  252. data/llm-shell.gemspec +0 -4
  253. metadata +233 -63
  254. data/lib/llm/function.rb +0 -17
  255. data/lib/llm/shell/command/extension.rb +0 -42
  256. data/lib/llm/shell/commands/utils.rb +0 -21
  257. data/lib/llm/shell/functions/read_file.rb +0 -22
  258. data/lib/llm/shell/functions/write_file.rb +0 -22
@@ -0,0 +1,73 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # The {LLM::Gemini::Audio LLM::Gemini::Audio} class provides an audio
+  # object for interacting with [Gemini's audio API](https://ai.google.dev/gemini-api/docs/audio).
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(key: ENV["KEY"])
+  #   res = llm.audio.create_transcription(input: "/audio/rocket.mp3")
+  #   res.text # => "A dog on a rocket to the moon"
+  class Audio
+    ##
+    # Returns a new Audio object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Gemini::Responses]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    #  This method is not implemented by Gemini
+    def create_speech
+      raise NotImplementedError
+    end
+
+    ##
+    # Create an audio transcription
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
+    #   res.text # => "A dog on a rocket to the moon"
+    # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
+    # @param [String, LLM::File, LLM::Response] file The input audio
+    # @param [String] model The model to use
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create_transcription(file:, model: "gemini-1.5-flash", **params)
+      res = @provider.complete [
+        "Your task is to transcribe the contents of an audio file",
+        "Your response should include the transcription, and nothing else",
+        LLM.File(file)
+      ], params.merge(role: :user, model:)
+      res.tap { _1.define_singleton_method(:text) { choices[0].content } }
+    end
+
+    ##
+    # Create an audio translation (in English)
+    # @example
+    #   # Arabic => English
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.audio.create_translation(file: "/audio/bismillah.mp3")
+    #   res.text # => "In the name of Allah, the Beneficent, the Merciful."
+    # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
+    # @param [String, LLM::File, LLM::Response] file The input audio
+    # @param [String] model The model to use
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create_translation(file:, model: "gemini-1.5-flash", **params)
+      res = @provider.complete [
+        "Your task is to translate the contents of an audio file into English",
+        "Your response should include the translation, and nothing else",
+        LLM.File(file)
+      ], params.merge(role: :user, model:)
+      res.tap { _1.define_singleton_method(:text) { choices[0].content } }
+    end
+  end
+end
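
For orientation (not part of the diff): a minimal sketch of how the Audio class above might be driven end to end, assuming the API key variable and audio path used in its doc comments.

    llm = LLM.gemini(key: ENV["KEY"])
    transcription = llm.audio.create_transcription(file: "/audio/rocket.mp3")
    translation   = llm.audio.create_translation(file: "/audio/rocket.mp3")
    print "transcription: ", transcription.text, "\n"
    print "translation:   ", translation.text, "\n"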
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # @private
+  class ErrorHandler
+    ##
+    # @return [Net::HTTPResponse]
+    #  Non-2XX response from the server
+    attr_reader :res
+
+    ##
+    # @param [Net::HTTPResponse] res
+    #  The response from the server
+    # @return [LLM::Gemini::ErrorHandler]
+    def initialize(res)
+      @res = res
+    end
+
+    ##
+    # @raise [LLM::Error]
+    #  Raises a subclass of {LLM::Error LLM::Error}
+    def raise_error!
+      case res
+      when Net::HTTPServerError
+        raise LLM::ServerError.new { _1.response = res }, "Server error"
+      when Net::HTTPBadRequest
+        reason = body.dig("error", "details", 0, "reason")
+        if reason == "API_KEY_INVALID"
+          raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
+        else
+          raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
+        end
+      when Net::HTTPTooManyRequests
+        raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
+      else
+        raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
+      end
+    end
+
+    private
+
+    def body
+      @body ||= JSON.parse(res.body)
+    end
+  end
+end
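
A hedged sketch (not part of the diff) of how calling code could rescue the error subclasses raised by ErrorHandler#raise_error!; LLM::RateLimitError, LLM::UnauthorizedError and LLM::ResponseError are the classes referenced in the case statement above, and llm.files.all is shown later in this diff.

    llm = LLM.gemini(key: ENV["KEY"])
    begin
      llm.files.all.each { |file| print "name: ", file.name, "\n" }
    rescue LLM::RateLimitError
      sleep(30)   # back off before retrying
      retry
    rescue LLM::UnauthorizedError
      abort "the API key was rejected (API_KEY_INVALID)"
    rescue LLM::ResponseError => ex
      warn "unexpected response: #{ex.class}"
    end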
@@ -0,0 +1,146 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # The {LLM::Gemini::Files LLM::Gemini::Files} class provides a files
+  # object for interacting with [Gemini's Files API](https://ai.google.dev/gemini-api/docs/files).
+  # The files API allows a client to reference media files in prompts
+  # where they can be referenced by their URL.
+  #
+  # The files API is intended to preserve bandwidth and latency,
+  # especially for large files but it can be helpful for smaller files
+  # as well because it does not require the client to include a file
+  # in the prompt over and over again (which could be the case in a
+  # multi-turn conversation).
+  #
+  # @example example #1
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(key: ENV["KEY"])
+  #   bot = LLM::Bot.new(llm)
+  #   file = llm.files.create(file: "/audio/haiku.mp3")
+  #   bot.chat ["Tell me about this file", file]
+  #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+  class Files
+    require_relative "response/file"
+    require_relative "response/files"
+
+    ##
+    # Returns a new Files object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Gemini::Files]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all files
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.files.all
+    #   res.each do |file|
+    #     print "name: ", file.name, "\n"
+    #   end
+    # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def all(**params)
+      query = URI.encode_www_form(params.merge!(key: key))
+      req = Net::HTTP::Get.new("/v1beta/files?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::Gemini::Response::Files)
+    end
+
+    ##
+    # Create a file
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.files.create(file: "/audio/haiku.mp3")
+    # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
+    # @param [String, LLM::File] file The file
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create(file:, **params)
+      file = LLM.File(file)
+      req = Net::HTTP::Post.new(request_upload_url(file:), {})
+      req["content-length"] = file.bytesize
+      req["X-Goog-Upload-Offset"] = 0
+      req["X-Goog-Upload-Command"] = "upload, finalize"
+      file.with_io do |io|
+        set_body_stream(req, io)
+        res = execute(request: req)
+        LLM::Response.new(res).extend(LLM::Gemini::Response::File)
+      end
+    end
+
+    ##
+    # Get a file
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.files.get(file: "files/1234567890")
+    #   print "name: ", res.name, "\n"
+    # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
+    # @param [#name, String] file The file to get
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def get(file:, **params)
+      file_id = file.respond_to?(:name) ? file.name : file.to_s
+      query = URI.encode_www_form(params.merge!(key: key))
+      req = Net::HTTP::Get.new("/v1beta/#{file_id}?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::Gemini::Response::File)
+    end
+
+    ##
+    # Delete a file
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.files.delete(file: "files/1234567890")
+    # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
+    # @param [#name, String] file The file to delete
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def delete(file:, **params)
+      file_id = file.respond_to?(:name) ? file.name : file.to_s
+      query = URI.encode_www_form(params.merge!(key: key))
+      req = Net::HTTP::Delete.new("/v1beta/#{file_id}?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res)
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    #  This method is not implemented by Gemini
+    def download
+      raise NotImplementedError
+    end
+
+    private
+
+    include LLM::Utils
+
+    def request_upload_url(file:)
+      req = Net::HTTP::Post.new("/upload/v1beta/files?key=#{key}", headers)
+      req["X-Goog-Upload-Protocol"] = "resumable"
+      req["X-Goog-Upload-Command"] = "start"
+      req["X-Goog-Upload-Header-Content-Length"] = file.bytesize
+      req["X-Goog-Upload-Header-Content-Type"] = file.mime_type
+      req.body = JSON.dump(file: {display_name: File.basename(file.path)})
+      res = execute(request: req)
+      res["x-goog-upload-url"]
+    end
+
+    def key
+      @provider.instance_variable_get(:@key)
+    end
+
+    [:headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end
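
The class comment above already shows upload-and-chat; the following sketch (assumed usage, not taken from the diff) walks the rest of the lifecycle using the create, get and delete methods defined above. The mime_type accessor comes from the Response::File module shown later in this diff.

    llm  = LLM.gemini(key: ENV["KEY"])
    file = llm.files.create(file: "/audio/haiku.mp3")
    info = llm.files.get(file: file)   # accepts anything responding to #name, or a "files/..." id
    print "name: ", info.name, "\n"
    print "mime: ", info.mime_type, "\n"
    llm.files.delete(file: file)       # remove the upload once it is no longer referenced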
@@ -0,0 +1,69 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Format
+  ##
+  # @private
+  class CompletionFormat
+    ##
+    # @param [LLM::Message, Hash] message
+    #  The message to format
+    def initialize(message)
+      @message = message
+    end
+
+    ##
+    # Formats the message for the Gemini chat completions API
+    # @return [Hash]
+    def format
+      catch(:abort) do
+        if Hash === message
+          {role: message[:role], parts: format_content(message[:content])}
+        elsif message.tool_call?
+          {role: message.role, parts: message.extra[:original_tool_calls].map { {"functionCall" => _1} }}
+        else
+          {role: message.role, parts: format_content(message.content)}
+        end
+      end
+    end
+
+    def format_content(content)
+      case content
+      when Array
+        content.empty? ? throw(:abort, nil) : content.flat_map { format_content(_1) }
+      when LLM::Response
+        format_response(content)
+      when File
+        content.close unless content.closed?
+        format_content(LLM.File(content.path))
+      when LLM::File
+        file = content
+        [{inline_data: {mime_type: file.mime_type, data: file.to_b64}}]
+      when String
+        [{text: content}]
+      when LLM::Message
+        format_content(content.content)
+      when LLM::Function::Return
+        [{functionResponse: {name: content.name, response: content.value}}]
+      else
+        prompt_error!(content)
+      end
+    end
+
+    def format_response(response)
+      if response.file?
+        file = response
+        [{file_data: {mime_type: file.mime_type, file_uri: file.uri}}]
+      else
+        prompt_error!(content)
+      end
+    end
+
+    def prompt_error!(object)
+      raise LLM::PromptError, "The given object (an instance of #{object.class}) " \
+                              "is not supported by the Gemini API"
+    end
+
+    def message = @message
+    def content = message.content
+  end
+end
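
Following the branches above, a plain hash message is mapped onto Gemini's role/parts shape. A small illustrative call (not from the diff) with a hypothetical message:

    message = {role: :user, content: "Hello"}
    LLM::Gemini::Format::CompletionFormat.new(message).format
    # => {role: :user, parts: [{text: "Hello"}]}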
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # @private
+  module Format
+    require_relative "format/completion_format"
+
+    ##
+    # @param [Array<LLM::Message>] messages
+    #  The messages to format
+    # @return [Array<Hash>]
+    def format(messages)
+      messages.filter_map do |message|
+        CompletionFormat.new(message).format
+      end
+    end
+
+    private
+
+    ##
+    # @param [Hash] params
+    # @return [Hash]
+    def format_schema(params)
+      return {} unless params and params[:schema]
+      schema = params.delete(:schema)
+      {generationConfig: {response_mime_type: "application/json", response_schema: schema}}
+    end
+
+    ##
+    # @param [Hash] params
+    # @return [Hash]
+    def format_tools(tools)
+      return {} unless tools&.any?
+      platform, functions = [tools.grep(LLM::ServerTool), tools.grep(LLM::Function)]
+      {tools: [*platform, {functionDeclarations: functions.map { _1.format(self) }}]}
+    end
+  end
+end
@@ -0,0 +1,133 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # The {LLM::Gemini::Images LLM::Gemini::Images} class provides an images
+  # object for interacting with [Gemini's images API](https://ai.google.dev/gemini-api/docs/image-generation).
+  # Please note that unlike OpenAI, which can return either URLs or base64-encoded strings,
+  # Gemini's images API will always return an image as a base64 encoded string that
+  # can be decoded into binary.
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(key: ENV["KEY"])
+  #   res = llm.images.create prompt: "A dog on a rocket to the moon"
+  #   IO.copy_stream res.images[0], "rocket.png"
+  class Images
+    require_relative "response/image"
+    include Format
+
+    ##
+    # Returns a new Images object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Gemini::Responses]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # Create an image
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.images.create prompt: "A dog on a rocket to the moon"
+    #   IO.copy_stream res.images[0], "rocket.png"
+    # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
+    # @param [String] prompt The prompt
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @raise [LLM::NoImageError] when no images are returned
+    # @note
+    #  The prompt should make it clear you want to generate an image, or you
+    #  might unexpectedly receive a purely textual response. This is due to how
+    #  Gemini implements image generation under the hood.
+    # @return [LLM::Response]
+    def create(prompt:, model: "gemini-2.5-flash-image-preview", **params)
+      req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
+      body = JSON.dump({
+        contents: [{parts: [{text: create_prompt}, {text: prompt}]}],
+        generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
+      }.merge!(params))
+      req.body = body
+      res = execute(request: req)
+      validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+    end
+
+    ##
+    # Edit an image
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.images.edit image: "cat.png", prompt: "Add a hat to the cat"
+    #   IO.copy_stream res.images[0], "hatoncat.png"
+    # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
+    # @param [String, LLM::File] image The image to edit
+    # @param [String] prompt The prompt
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @raise [LLM::NoImageError] when no images are returned
+    # @note (see LLM::Gemini::Images#create)
+    # @return [LLM::Response]
+    def edit(image:, prompt:, model: "gemini-2.5-flash-image-preview", **params)
+      req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
+      image = LLM.File(image)
+      body = JSON.dump({
+        contents: [{parts: [{text: edit_prompt}, {text: prompt}, format.format_content(image)]}],
+        generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
+      }.merge!(params)).b
+      set_body_stream(req, StringIO.new(body))
+      res = execute(request: req)
+      validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    #  This method is not implemented by Gemini
+    def create_variation
+      raise NotImplementedError
+    end
+
+    private
+
+    def format
+      @format ||= CompletionFormat.new(nil)
+    end
+
+    def key
+      @provider.instance_variable_get(:@key)
+    end
+
+    def create_prompt
+      <<~PROMPT
+        ## Context
+        Your task is to generate one or more image(s) based on the user's instructions.
+        The user will provide you with text only.
+
+        ## Instructions
+        1. The model *MUST* generate image(s) based on the user text alone.
+        2. The model *MUST NOT* generate anything else.
+      PROMPT
+    end
+
+    def edit_prompt
+      <<~PROMPT
+        ## Context
+        Your task is to edit the provided image based on the user's instructions.
+        The user will provide you with both text and an image.
+
+        ## Instructions
+        1. The model *MUST* edit the provided image based on the user's instructions
+        2. The model *MUST NOT* generate a new image.
+        3. The model *MUST NOT* generate anything else.
+      PROMPT
+    end
+
+    def validate(res)
+      return res unless res.images.empty?
+      raise LLM::NoImageError.new { _1.response = res.res }, "no images found in response"
+    end
+
+    [:headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end
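
A short usage sketch (file names and key are assumptions, mirroring the doc comments above) that chains create and edit and writes out the decoded base64 payloads exposed through the images array:

    llm = LLM.gemini(key: ENV["KEY"])
    res = llm.images.create(prompt: "A dog on a rocket to the moon")
    IO.copy_stream res.images[0], "rocket.png"
    res = llm.images.edit(image: "rocket.png", prompt: "Add a helmet to the dog")
    IO.copy_stream res.images[0], "rocket-helmet.png"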
@@ -0,0 +1,60 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # The {LLM::Gemini::Models LLM::Gemini::Models} class provides a model
+  # object for interacting with [Gemini's models API](https://ai.google.dev/api/models?hl=en#method:-models.list).
+  # The models API allows a client to query Gemini for a list of models
+  # that are available for use with the Gemini API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(key: ENV["KEY"])
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    require_relative "response/models"
+    include LLM::Utils
+
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Gemini::Models]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://ai.google.dev/api/models?hl=en#method:-models.list Gemini docs
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def all(**params)
+      query = URI.encode_www_form(params.merge!(key: key))
+      req = Net::HTTP::Get.new("/v1beta/models?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::Gemini::Response::Models)
+    end
+
+    private
+
+    def key
+      @provider.instance_variable_get(:@key)
+    end
+
+    [:headers, :execute].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module Completion
+    def model = body.modelVersion
+    def prompt_tokens = body.usageMetadata.promptTokenCount
+    def completion_tokens = body.usageMetadata.candidatesTokenCount
+    def total_tokens = body.usageMetadata.totalTokenCount
+    def choices = format_choices
+
+    private
+
+    def format_choices
+      candidates.map.with_index do |choice, index|
+        choice = LLM::Object.from_hash(choice)
+        content = choice.content || LLM::Object.new
+        role = content.role || "model"
+        parts = content.parts || [{"text" => choice.finishReason}]
+        text = parts.filter_map { _1["text"] }.join
+        tools = parts.filter_map { _1["functionCall"] }
+        extra = {index:, response: self, tool_calls: format_tool_calls(tools), original_tool_calls: tools}
+        LLM::Message.new(role, text, extra)
+      end
+    end
+
+    def format_tool_calls(tools)
+      (tools || []).map do |tool|
+        function = {name: tool.name, arguments: tool.args}
+        LLM::Object.new(function)
+      end
+    end
+
+    def candidates = body.candidates || []
+  end
+end
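
A hedged sketch of reading a completion through this module's accessors; the complete call below assumes LLM::Provider#complete accepts a prompt plus a params hash, as it is called from the Audio class earlier in this diff.

    llm = LLM.gemini(key: ENV["KEY"])
    res = llm.complete("Hello, world", {role: :user})
    msg = res.choices[0]                # an LLM::Message built by format_choices
    print "[", msg.role, "] ", msg.content, "\n"
    print "total tokens: ", res.total_tokens, "\n"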
@@ -0,0 +1,8 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module Embedding
+    def model = "text-embedding-004"
+    def embeddings = body.dig("embedding", "values")
+  end
+end
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module File
+    def name = respond_to?(:file) ? file.name : body.name
+    def display_name = respond_to?(:file) ? file.displayName : body.displayName
+    def mime_type = respond_to?(:file) ? file.mimeType : body.mimeType
+    def uri = respond_to?(:file) ? file.uri : body.uri
+    def file? = true
+  end
+end
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module Files
+    include ::Enumerable
+    def each(&)
+      return enum_for(:each) unless block_given?
+      files.each { yield(_1) }
+    end
+
+    def files
+      body.files || []
+    end
+  end
+end
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module Image
+    ##
+    # @return [Array<StringIO>]
+    def images
+      candidates.flat_map do |candidate|
+        parts = candidate&.dig(:content, :parts) || []
+        parts.filter_map do
+          data = _1.dig(:inlineData, :data)
+          next unless data
+          StringIO.new(data.unpack1("m0"))
+        end
+      end
+    end
+
+    ##
+    # Returns one or more image URLs, or an empty array
+    # @note
+    #  Gemini's image generation API does not return URLs, so this method
+    #  will always return an empty array.
+    # @return [Array<String>]
+    def urls = []
+
+    ##
+    # Returns one or more candidates, or an empty array
+    # @return [Array<Hash>]
+    def candidates = body.candidates || []
+  end
+end
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module Models
+    include ::Enumerable
+    def each(&)
+      return enum_for(:each) unless block_given?
+      models.each { yield(_1) }
+    end
+
+    def models
+      body.models || []
+    end
+  end
+end