llm-shell 0.9.2 → 0.10.0

This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (258)
  1. checksums.yaml +4 -4
  2. data/README.md +61 -66
  3. data/lib/llm/shell/command.rb +40 -40
  4. data/lib/llm/shell/commands/clear_screen.rb +4 -18
  5. data/lib/llm/shell/commands/debug_mode.rb +12 -0
  6. data/lib/llm/shell/commands/dir_import.rb +4 -20
  7. data/lib/llm/shell/commands/disable_tool.rb +33 -0
  8. data/lib/llm/shell/commands/enable_tool.rb +33 -0
  9. data/lib/llm/shell/commands/file_import.rb +4 -20
  10. data/lib/llm/shell/commands/help.rb +23 -36
  11. data/lib/llm/shell/commands/show_chat.rb +4 -19
  12. data/lib/llm/shell/commands/show_version.rb +4 -20
  13. data/lib/llm/shell/commands/system_prompt.rb +4 -18
  14. data/lib/llm/shell/completion.rb +5 -5
  15. data/lib/llm/shell/config.rb +4 -5
  16. data/lib/llm/shell/formatter.rb +1 -2
  17. data/lib/llm/shell/internal/coderay/lib/coderay/duo.rb +81 -0
  18. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/_map.rb +17 -0
  19. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/comment_filter.rb +25 -0
  20. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/count.rb +39 -0
  21. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug.rb +49 -0
  22. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug_lint.rb +63 -0
  23. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/div.rb +23 -0
  24. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/encoder.rb +190 -0
  25. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/filter.rb +58 -0
  26. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/css.rb +65 -0
  27. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/numbering.rb +108 -0
  28. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/output.rb +164 -0
  29. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html.rb +333 -0
  30. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/json.rb +83 -0
  31. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lines_of_code.rb +45 -0
  32. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lint.rb +59 -0
  33. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/null.rb +18 -0
  34. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/page.rb +24 -0
  35. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/span.rb +23 -0
  36. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/statistic.rb +95 -0
  37. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/terminal.rb +195 -0
  38. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/text.rb +46 -0
  39. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/token_kind_filter.rb +111 -0
  40. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/xml.rb +72 -0
  41. data/lib/llm/shell/internal/coderay/lib/coderay/encoders/yaml.rb +50 -0
  42. data/lib/llm/shell/internal/coderay/lib/coderay/encoders.rb +18 -0
  43. data/lib/llm/shell/internal/coderay/lib/coderay/for_redcloth.rb +95 -0
  44. data/lib/llm/shell/internal/coderay/lib/coderay/helpers/file_type.rb +151 -0
  45. data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin.rb +55 -0
  46. data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin_host.rb +221 -0
  47. data/lib/llm/shell/internal/coderay/lib/coderay/helpers/word_list.rb +72 -0
  48. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/_map.rb +24 -0
  49. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/c.rb +189 -0
  50. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/clojure.rb +217 -0
  51. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/cpp.rb +217 -0
  52. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/css.rb +196 -0
  53. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/debug.rb +75 -0
  54. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/delphi.rb +144 -0
  55. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/diff.rb +221 -0
  56. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/erb.rb +81 -0
  57. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/go.rb +208 -0
  58. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/groovy.rb +268 -0
  59. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/haml.rb +168 -0
  60. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/html.rb +275 -0
  61. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java/builtin_types.rb +421 -0
  62. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java.rb +174 -0
  63. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java_script.rb +236 -0
  64. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/json.rb +98 -0
  65. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/lua.rb +280 -0
  66. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/php.rb +527 -0
  67. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/python.rb +287 -0
  68. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/raydebug.rb +75 -0
  69. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/patterns.rb +178 -0
  70. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/string_state.rb +79 -0
  71. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby.rb +477 -0
  72. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sass.rb +232 -0
  73. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/scanner.rb +337 -0
  74. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sql.rb +169 -0
  75. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/taskpaper.rb +36 -0
  76. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/text.rb +26 -0
  77. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/xml.rb +17 -0
  78. data/lib/llm/shell/internal/coderay/lib/coderay/scanners/yaml.rb +140 -0
  79. data/lib/llm/shell/internal/coderay/lib/coderay/scanners.rb +27 -0
  80. data/lib/llm/shell/internal/coderay/lib/coderay/styles/_map.rb +7 -0
  81. data/lib/llm/shell/internal/coderay/lib/coderay/styles/alpha.rb +153 -0
  82. data/lib/llm/shell/internal/coderay/lib/coderay/styles/style.rb +18 -0
  83. data/lib/llm/shell/internal/coderay/lib/coderay/styles.rb +15 -0
  84. data/lib/llm/shell/internal/coderay/lib/coderay/token_kinds.rb +85 -0
  85. data/lib/llm/shell/internal/coderay/lib/coderay/tokens.rb +164 -0
  86. data/lib/llm/shell/internal/coderay/lib/coderay/tokens_proxy.rb +55 -0
  87. data/lib/llm/shell/internal/coderay/lib/coderay/version.rb +3 -0
  88. data/lib/llm/shell/internal/coderay/lib/coderay.rb +284 -0
  89. data/lib/llm/shell/internal/io-line/lib/io/line/multiple.rb +19 -0
  90. data/lib/{io → llm/shell/internal/io-line/lib/io}/line.rb +2 -0
  91. data/lib/llm/shell/internal/llm.rb/lib/llm/bot/builder.rb +31 -0
  92. data/lib/llm/shell/internal/llm.rb/lib/llm/bot/conversable.rb +37 -0
  93. data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/completion.rb +49 -0
  94. data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/respond.rb +49 -0
  95. data/lib/llm/shell/internal/llm.rb/lib/llm/bot.rb +150 -0
  96. data/lib/llm/shell/internal/llm.rb/lib/llm/buffer.rb +162 -0
  97. data/lib/llm/shell/internal/llm.rb/lib/llm/client.rb +36 -0
  98. data/lib/llm/shell/internal/llm.rb/lib/llm/error.rb +49 -0
  99. data/lib/llm/shell/internal/llm.rb/lib/llm/eventhandler.rb +44 -0
  100. data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/event.rb +69 -0
  101. data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/parser.rb +88 -0
  102. data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream.rb +8 -0
  103. data/lib/llm/shell/internal/llm.rb/lib/llm/file.rb +91 -0
  104. data/lib/llm/shell/internal/llm.rb/lib/llm/function.rb +177 -0
  105. data/lib/llm/shell/internal/llm.rb/lib/llm/message.rb +178 -0
  106. data/lib/llm/shell/internal/llm.rb/lib/llm/mime.rb +140 -0
  107. data/lib/llm/shell/internal/llm.rb/lib/llm/multipart.rb +101 -0
  108. data/lib/llm/shell/internal/llm.rb/lib/llm/object/builder.rb +38 -0
  109. data/lib/llm/shell/internal/llm.rb/lib/llm/object/kernel.rb +53 -0
  110. data/lib/llm/shell/internal/llm.rb/lib/llm/object.rb +89 -0
  111. data/lib/llm/shell/internal/llm.rb/lib/llm/provider.rb +352 -0
  112. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/error_handler.rb +36 -0
  113. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/files.rb +155 -0
  114. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format/completion_format.rb +88 -0
  115. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format.rb +29 -0
  116. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/models.rb +54 -0
  117. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/completion.rb +39 -0
  118. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/enumerable.rb +11 -0
  119. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/file.rb +23 -0
  120. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/web_search.rb +21 -0
  121. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/stream_parser.rb +66 -0
  122. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic.rb +138 -0
  123. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
  124. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format.rb +27 -0
  125. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek.rb +75 -0
  126. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/audio.rb +73 -0
  127. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/error_handler.rb +47 -0
  128. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/files.rb +146 -0
  129. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format/completion_format.rb +69 -0
  130. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format.rb +39 -0
  131. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/images.rb +133 -0
  132. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/models.rb +60 -0
  133. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/completion.rb +35 -0
  134. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/embedding.rb +8 -0
  135. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/file.rb +11 -0
  136. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/files.rb +15 -0
  137. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/image.rb +31 -0
  138. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/models.rb +15 -0
  139. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/web_search.rb +22 -0
  140. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/stream_parser.rb +86 -0
  141. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini.rb +173 -0
  142. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/llamacpp.rb +74 -0
  143. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/error_handler.rb +36 -0
  144. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format/completion_format.rb +77 -0
  145. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format.rb +29 -0
  146. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/models.rb +56 -0
  147. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/completion.rb +28 -0
  148. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/embedding.rb +9 -0
  149. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/stream_parser.rb +44 -0
  150. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama.rb +116 -0
  151. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/audio.rb +91 -0
  152. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/error_handler.rb +46 -0
  153. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/files.rb +134 -0
  154. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/completion_format.rb +90 -0
  155. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/moderation_format.rb +35 -0
  156. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/respond_format.rb +72 -0
  157. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format.rb +54 -0
  158. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/images.rb +109 -0
  159. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/models.rb +55 -0
  160. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/moderations.rb +65 -0
  161. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/audio.rb +7 -0
  162. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/completion.rb +40 -0
  163. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/embedding.rb +9 -0
  164. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/enumerable.rb +23 -0
  165. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/file.rb +7 -0
  166. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/image.rb +16 -0
  167. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/moderations.rb +34 -0
  168. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb +48 -0
  169. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/web_search.rb +21 -0
  170. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses/stream_parser.rb +76 -0
  171. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses.rb +99 -0
  172. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/stream_parser.rb +86 -0
  173. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/vector_stores.rb +228 -0
  174. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai.rb +206 -0
  175. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai/images.rb +58 -0
  176. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai.rb +72 -0
  177. data/lib/llm/shell/internal/llm.rb/lib/llm/providers/zai.rb +74 -0
  178. data/lib/llm/shell/internal/llm.rb/lib/llm/response.rb +67 -0
  179. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/array.rb +26 -0
  180. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/boolean.rb +13 -0
  181. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/integer.rb +43 -0
  182. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/leaf.rb +78 -0
  183. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/null.rb +13 -0
  184. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/number.rb +43 -0
  185. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/object.rb +41 -0
  186. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/string.rb +34 -0
  187. data/lib/llm/shell/internal/llm.rb/lib/llm/schema/version.rb +8 -0
  188. data/lib/llm/shell/internal/llm.rb/lib/llm/schema.rb +81 -0
  189. data/lib/llm/shell/internal/llm.rb/lib/llm/server_tool.rb +32 -0
  190. data/lib/llm/shell/internal/llm.rb/lib/llm/tool/param.rb +75 -0
  191. data/lib/llm/shell/internal/llm.rb/lib/llm/tool.rb +78 -0
  192. data/lib/llm/shell/internal/llm.rb/lib/llm/utils.rb +19 -0
  193. data/lib/llm/shell/internal/llm.rb/lib/llm/version.rb +5 -0
  194. data/lib/llm/shell/internal/llm.rb/lib/llm.rb +121 -0
  195. data/lib/llm/shell/internal/optparse/lib/optionparser.rb +2 -0
  196. data/lib/llm/shell/internal/optparse/lib/optparse/ac.rb +70 -0
  197. data/lib/llm/shell/internal/optparse/lib/optparse/date.rb +18 -0
  198. data/lib/llm/shell/internal/optparse/lib/optparse/kwargs.rb +27 -0
  199. data/lib/llm/shell/internal/optparse/lib/optparse/shellwords.rb +7 -0
  200. data/lib/llm/shell/internal/optparse/lib/optparse/time.rb +11 -0
  201. data/lib/llm/shell/internal/optparse/lib/optparse/uri.rb +7 -0
  202. data/lib/llm/shell/internal/optparse/lib/optparse/version.rb +80 -0
  203. data/lib/llm/shell/internal/optparse/lib/optparse.rb +2469 -0
  204. data/lib/llm/shell/internal/paint/lib/paint/constants.rb +104 -0
  205. data/lib/llm/shell/internal/paint/lib/paint/pa.rb +13 -0
  206. data/lib/llm/shell/internal/paint/lib/paint/rgb_colors.rb +14 -0
  207. data/lib/llm/shell/internal/paint/lib/paint/shortcuts.rb +100 -0
  208. data/lib/llm/shell/internal/paint/lib/paint/shortcuts_version.rb +5 -0
  209. data/lib/llm/shell/internal/paint/lib/paint/util.rb +16 -0
  210. data/lib/llm/shell/internal/paint/lib/paint/version.rb +5 -0
  211. data/lib/llm/shell/internal/paint/lib/paint.rb +261 -0
  212. data/lib/llm/shell/internal/reline/lib/reline/config.rb +378 -0
  213. data/lib/llm/shell/internal/reline/lib/reline/face.rb +199 -0
  214. data/lib/llm/shell/internal/reline/lib/reline/history.rb +76 -0
  215. data/lib/llm/shell/internal/reline/lib/reline/io/ansi.rb +322 -0
  216. data/lib/llm/shell/internal/reline/lib/reline/io/dumb.rb +120 -0
  217. data/lib/llm/shell/internal/reline/lib/reline/io/windows.rb +530 -0
  218. data/lib/llm/shell/internal/reline/lib/reline/io.rb +55 -0
  219. data/lib/llm/shell/internal/reline/lib/reline/key_actor/base.rb +37 -0
  220. data/lib/llm/shell/internal/reline/lib/reline/key_actor/composite.rb +17 -0
  221. data/lib/llm/shell/internal/reline/lib/reline/key_actor/emacs.rb +517 -0
  222. data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_command.rb +518 -0
  223. data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_insert.rb +517 -0
  224. data/lib/llm/shell/internal/reline/lib/reline/key_actor.rb +8 -0
  225. data/lib/llm/shell/internal/reline/lib/reline/key_stroke.rb +119 -0
  226. data/lib/llm/shell/internal/reline/lib/reline/kill_ring.rb +125 -0
  227. data/lib/llm/shell/internal/reline/lib/reline/line_editor.rb +2356 -0
  228. data/lib/llm/shell/internal/reline/lib/reline/unicode/east_asian_width.rb +1292 -0
  229. data/lib/llm/shell/internal/reline/lib/reline/unicode.rb +421 -0
  230. data/lib/llm/shell/internal/reline/lib/reline/version.rb +3 -0
  231. data/lib/llm/shell/internal/reline/lib/reline.rb +527 -0
  232. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/generated_parser.rb +712 -0
  233. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/handler.rb +268 -0
  234. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date.rb +35 -0
  235. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date_time.rb +42 -0
  236. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_time.rb +40 -0
  237. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/parser.rb +21 -0
  238. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/scanner.rb +92 -0
  239. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/string_utils.rb +40 -0
  240. data/lib/llm/shell/internal/tomlrb/lib/tomlrb/version.rb +5 -0
  241. data/lib/llm/shell/internal/tomlrb/lib/tomlrb.rb +49 -0
  242. data/lib/llm/shell/options.rb +1 -1
  243. data/lib/llm/shell/renderer.rb +2 -3
  244. data/lib/llm/shell/repl.rb +21 -16
  245. data/lib/llm/shell/tool.rb +42 -0
  246. data/lib/llm/shell/tools/read_file.rb +15 -0
  247. data/lib/llm/shell/tools/system.rb +17 -0
  248. data/lib/llm/shell/tools/write_file.rb +16 -0
  249. data/lib/llm/shell/version.rb +1 -1
  250. data/lib/llm/shell.rb +83 -39
  251. data/libexec/llm-shell/shell +4 -6
  252. data/llm-shell.gemspec +0 -4
  253. metadata +233 -63
  254. data/lib/llm/function.rb +0 -17
  255. data/lib/llm/shell/command/extension.rb +0 -42
  256. data/lib/llm/shell/commands/utils.rb +0 -21
  257. data/lib/llm/shell/functions/read_file.rb +0 -22
  258. data/lib/llm/shell/functions/write_file.rb +0 -22
@@ -0,0 +1,86 @@
+ # frozen_string_literal: true
+
+ class LLM::OpenAI
+   ##
+   # @private
+   class StreamParser
+     ##
+     # Returns the fully constructed response body
+     # @return [LLM::Object]
+     attr_reader :body
+
+     ##
+     # @return [LLM::OpenAI::Chunk]
+     def initialize(io)
+       @body = LLM::Object.new
+       @io = io
+     end
+
+     ##
+     # @param [Hash] chunk
+     # @return [LLM::OpenAI::Chunk]
+     def parse!(chunk)
+       tap { merge!(chunk) }
+     end
+
+     private
+
+     def merge!(chunk)
+       chunk.each do |key, value|
+         if key == "choices"
+           @body["choices"] ||= []
+           merge_choices!(value)
+         else
+           @body[key] = value
+         end
+       end
+     end
+
+     def merge_choices!(choices)
+       choices.each do |choice|
+         if @body.choices[choice["index"]]
+           target_message = @body["choices"][choice["index"]]["message"]
+           delta = choice["delta"]
+           delta.each do |key, value|
+             if key == "content"
+               target_message[key] ||= +""
+               target_message[key] << value
+               @io << value if @io.respond_to?(:<<)
+             elsif key == "tool_calls"
+               merge_tools!(target_message, value)
+             else
+               target_message[key] = value
+             end
+           end
+         else
+           message_hash = {"role" => "assistant"}
+           @body["choices"][choice["index"]] = {"message" => message_hash}
+           choice["delta"].each do |key, value|
+             if key == "content"
+               @io << value if @io.respond_to?(:<<)
+               message_hash[key] = value
+             else
+               message_hash[key] = value
+             end
+           end
+         end
+       end
+     end
+
+     def merge_tools!(target, tools)
+       target["tool_calls"] ||= []
+       tools.each.with_index do |toola, index|
+         toolb = target["tool_calls"][index]
+         if toolb && toola["function"] && toolb["function"]
+           # Append to existing function arguments
+           toola["function"].each do |func_key, func_value|
+             toolb["function"][func_key] ||= +""
+             toolb["function"][func_key] << func_value
+           end
+         else
+           target["tool_calls"][index] = toola
+         end
+       end
+     end
+   end
+ end
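
For context, a rough sketch of how the parser above accumulates streamed chunks; the chunk hashes are illustrative, and LLM::Object is assumed to behave like an OpenStruct-style hash:

    # Feed two illustrative "choices" deltas and read back the merged body
    parser = LLM::OpenAI::StreamParser.new($stdout)
    parser.parse!({"choices" => [{"index" => 0, "delta" => {"role" => "assistant", "content" => "Hel"}}]})
    parser.parse!({"choices" => [{"index" => 0, "delta" => {"content" => "lo"}}]})
    parser.body["choices"][0]["message"]["content"] #=> "Hello"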
@@ -0,0 +1,228 @@
+ # frozen_string_literal: true
+
+ class LLM::OpenAI
+   ##
+   # The {LLM::OpenAI::VectorStores LLM::OpenAI::VectorStores} class provides
+   # an interface for [OpenAI's vector stores API](https://platform.openai.com/docs/api-reference/vector_stores/create).
+   #
+   # @example
+   #   llm = LLM.openai(key: ENV["OPENAI_SECRET"])
+   #   files = %w(foo.pdf bar.pdf).map { llm.files.create(file: _1) }
+   #   store = llm.vector_stores.create_and_poll(name: "PDF Store", file_ids: files.map(&:id))
+   #   print "[-] store is ready", "\n"
+   #   chunks = llm.vector_stores.search(vector: store, query: "What is Ruby?")
+   #   chunks.each { |chunk| puts chunk }
+   class VectorStores
+     require_relative "response/enumerable"
+     PollError = Class.new(LLM::Error)
+
+     ##
+     # @param [LLM::Provider] provider
+     #  An OpenAI provider
+     def initialize(provider)
+       @provider = provider
+     end
+
+     ##
+     # List all vector stores
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @return [LLM::Response]
+     def all(**params)
+       query = URI.encode_www_form(params)
+       req = Net::HTTP::Get.new("/v1/vector_stores?#{query}", headers)
+       res = execute(request: req)
+       LLM::Response.new(res).extend(LLM::OpenAI::Response::Enumerable)
+     end
+
+     ##
+     # Create a vector store
+     # @param [String] name The name of the vector store
+     # @param [Array<String>] file_ids The IDs of the files to include in the vector store
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/create OpenAI docs
+     def create(name:, file_ids: [], **params)
+       req = Net::HTTP::Post.new("/v1/vector_stores", headers)
+       req.body = JSON.dump(params.merge({name:, file_ids:}).compact)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Create a vector store and poll until its status is "completed"
+     # @param (see LLM::OpenAI::VectorStores#create)
+     # @return (see LLM::OpenAI::VectorStores#poll)
+     def create_and_poll(...)
+       poll(vector: create(...))
+     end
+
+     ##
+     # Get a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/retrieve OpenAI docs
+     def get(vector:)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}", headers)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Modify an existing vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @param [String] name The new name of the vector store
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/modify OpenAI docs
+     def modify(vector:, name: nil, **params)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}", headers)
+       req.body = JSON.dump(params.merge({name:}).compact)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Delete a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/delete OpenAI docs
+     def delete(vector:)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       req = Net::HTTP::Delete.new("/v1/vector_stores/#{vector_id}", headers)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Search a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @param query [String] The query to search for
+     # @param params [Hash] Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/search OpenAI docs
+     def search(vector:, query:, **params)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/search", headers)
+       req.body = JSON.dump(params.merge({query:}).compact)
+       res = execute(request: req)
+       LLM::Response.new(res).extend(LLM::OpenAI::Response::Enumerable)
+     end
+
+     ##
+     # List all files in a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores_files/listFiles OpenAI docs
+     def all_files(vector:, **params)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       query = URI.encode_www_form(params)
+       req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}/files?#{query}", headers)
+       res = execute(request: req)
+       LLM::Response.new(res).extend(LLM::OpenAI::Response::Enumerable)
+     end
+
+     ##
+     # Add a file to a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @param [String, #id] file The ID of the file to add
+     # @param [Hash] attributes Attributes to associate with the file (optional)
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores_files/createFile OpenAI docs
+     def add_file(vector:, file:, attributes: nil, **params)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       file_id = file.respond_to?(:id) ? file.id : file
+       req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/files", headers)
+       req.body = JSON.dump(params.merge({file_id:, attributes:}).compact)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+     alias_method :create_file, :add_file
+
+     ##
+     # Update a file in a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @param [String, #id] file The ID of the file to update
+     # @param [Hash] attributes Attributes to associate with the file
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores_files/updateAttributes OpenAI docs
+     def update_file(vector:, file:, attributes:, **params)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       file_id = file.respond_to?(:id) ? file.id : file
+       req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/files/#{file_id}", headers)
+       req.body = JSON.dump(params.merge({attributes:}).compact)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Get a file from a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @param [String, #id] file The ID of the file to retrieve
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores_files/getFile OpenAI docs
+     def get_file(vector:, file:, **params)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       file_id = file.respond_to?(:id) ? file.id : file
+       query = URI.encode_www_form(params)
+       req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}/files/#{file_id}?#{query}", headers)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Delete a file from a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @param [String, #id] file The ID of the file to delete
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores_files/deleteFile OpenAI docs
+     def delete_file(vector:, file:)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       file_id = file.respond_to?(:id) ? file.id : file
+       req = Net::HTTP::Delete.new("/v1/vector_stores/#{vector_id}/files/#{file_id}", headers)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Poll a vector store until its status is "completed"
+     # @param [String, #id] vector The ID of the vector store
+     # @param [Integer] attempts The current number of attempts (default: 0)
+     # @param [Integer] max The maximum number of iterations (default: 50)
+     # @raise [LLM::PollError] When the maximum number of iterations is reached
+     # @return [LLM::Response]
+     def poll(vector:, attempts: 0, max: 50)
+       if attempts == max
+         raise LLM::PollError, "vector store '#{vector.id}' has status '#{vector.status}' after #{max} attempts"
+       elsif vector.status == "expired"
+         raise LLM::PollError, "vector store '#{vector.id}' has expired"
+       elsif vector.status != "completed"
+         vector = get(vector:)
+         sleep(0.1 * (2**attempts))
+         poll(vector:, attempts: attempts + 1, max:)
+       else
+         vector
+       end
+     end
+
+     private
+
+     [:headers, :execute, :set_body_stream].each do |m|
+       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+     end
+   end
+ end
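
A short usage sketch of the new vector stores interface, adapted from the class-level example above (the file names and environment variable are illustrative):

    # Upload files, build a store, then search it once it reports "completed"
    llm    = LLM.openai(key: ENV["OPENAI_SECRET"])
    files  = %w(foo.pdf bar.pdf).map { llm.files.create(file: _1) }
    store  = llm.vector_stores.create_and_poll(name: "PDF Store", file_ids: files.map(&:id))
    chunks = llm.vector_stores.search(vector: store, query: "What is Ruby?")
    chunks.each { |chunk| puts chunk }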
@@ -0,0 +1,206 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # The OpenAI class implements a provider for
+   # [OpenAI](https://platform.openai.com/).
+   #
+   # @example
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #
+   #   llm = LLM.openai(key: ENV["KEY"])
+   #   bot = LLM::Bot.new(llm)
+   #   bot.chat ["Tell me about this photo", File.open("/images/capybara.jpg", "rb")]
+   #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+   class OpenAI < Provider
+     require_relative "openai/response/embedding"
+     require_relative "openai/response/completion"
+     require_relative "openai/response/web_search"
+     require_relative "openai/error_handler"
+     require_relative "openai/format"
+     require_relative "openai/stream_parser"
+     require_relative "openai/models"
+     require_relative "openai/responses"
+     require_relative "openai/images"
+     require_relative "openai/audio"
+     require_relative "openai/files"
+     require_relative "openai/moderations"
+     require_relative "openai/vector_stores"
+
+     include Format
+
+     HOST = "api.openai.com"
+
+     ##
+     # @param key (see LLM::Provider#initialize)
+     def initialize(**)
+       super(host: HOST, **)
+     end
+
+     ##
+     # Provides an embedding
+     # @see https://platform.openai.com/docs/api-reference/embeddings/create OpenAI docs
+     # @param input (see LLM::Provider#embed)
+     # @param model (see LLM::Provider#embed)
+     # @param params (see LLM::Provider#embed)
+     # @raise (see LLM::Provider#request)
+     # @return (see LLM::Provider#embed)
+     def embed(input, model: "text-embedding-3-small", **params)
+       req = Net::HTTP::Post.new("/v1/embeddings", headers)
+       req.body = JSON.dump({input:, model:}.merge!(params))
+       res = execute(request: req)
+       LLM::Response.new(res).extend(LLM::OpenAI::Response::Embedding)
+     end
+
+     ##
+     # Provides an interface to the chat completions API
+     # @see https://platform.openai.com/docs/api-reference/chat/create OpenAI docs
+     # @param prompt (see LLM::Provider#complete)
+     # @param params (see LLM::Provider#complete)
+     # @example (see LLM::Provider#complete)
+     # @raise (see LLM::Provider#request)
+     # @raise [LLM::PromptError]
+     #  When given an object a provider does not understand
+     # @return (see LLM::Provider#complete)
+     def complete(prompt, params = {})
+       params = {role: :user, model: default_model}.merge!(params)
+       tools = resolve_tools(params.delete(:tools))
+       params = [params, format_schema(params), format_tools(tools)].inject({}, &:merge!).compact
+       role, stream = params.delete(:role), params.delete(:stream)
+       params[:stream] = true if stream.respond_to?(:<<) || stream == true
+       params[:stream_options] = {include_usage: true}.merge!(params[:stream_options] || {}) if params[:stream]
+       req = Net::HTTP::Post.new(completions_path, headers)
+       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
+       body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
+       set_body_stream(req, StringIO.new(body))
+       res = execute(request: req, stream:)
+       LLM::Response.new(res)
+         .extend(LLM::OpenAI::Response::Completion)
+         .extend(Module.new { define_method(:__tools__) { tools } })
+     end
+
+     ##
+     # Provides an interface to OpenAI's response API
+     # @see https://platform.openai.com/docs/api-reference/responses/create OpenAI docs
+     # @return [LLM::OpenAI::Responses]
+     def responses
+       LLM::OpenAI::Responses.new(self)
+     end
+
+     ##
+     # Provides an interface to OpenAI's image generation API
+     # @see https://platform.openai.com/docs/api-reference/images/create OpenAI docs
+     # @return [LLM::OpenAI::Images]
+     def images
+       LLM::OpenAI::Images.new(self)
+     end
+
+     ##
+     # Provides an interface to OpenAI's audio generation API
+     # @see https://platform.openai.com/docs/api-reference/audio/createSpeech OpenAI docs
+     # @return [LLM::OpenAI::Audio]
+     def audio
+       LLM::OpenAI::Audio.new(self)
+     end
+
+     ##
+     # Provides an interface to OpenAI's files API
+     # @see https://platform.openai.com/docs/api-reference/files/create OpenAI docs
+     # @return [LLM::OpenAI::Files]
+     def files
+       LLM::OpenAI::Files.new(self)
+     end
+
+     ##
+     # Provides an interface to OpenAI's models API
+     # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
+     # @return [LLM::OpenAI::Models]
+     def models
+       LLM::OpenAI::Models.new(self)
+     end
+
+     ##
+     # Provides an interface to OpenAI's moderation API
+     # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
+     # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
+     # @return [LLM::OpenAI::Moderations]
+     def moderations
+       LLM::OpenAI::Moderations.new(self)
+     end
+
+     ##
+     # Provides an interface to OpenAI's vector store API
+     # @see https://platform.openai.com/docs/api-reference/vector-stores/create OpenAI docs
+     # @return [LLM::OpenAI::VectorStore]
+     def vector_stores
+       LLM::OpenAI::VectorStores.new(self)
+     end
+
+     ##
+     # @return (see LLM::Provider#assistant_role)
+     def assistant_role
+       "assistant"
+     end
+
+     ##
+     # Returns the default model for chat completions
+     # @see https://platform.openai.com/docs/models/gpt-4.1 gpt-4.1
+     # @return [String]
+     def default_model
+       "gpt-4.1"
+     end
+
+     ##
+     # @note
+     #  This method includes certain tools that require configuration
+     #  through a set of options that are easier to set through the
+     #  {LLM::Provider#server_tool LLM::Provider#server_tool} method.
+     # @return (see LLM::Provider#server_tools)
+     def server_tools
+       {
+         web_search: server_tool(:web_search),
+         file_search: server_tool(:file_search),
+         image_generation: server_tool(:image_generation),
+         code_interpreter: server_tool(:code_interpreter),
+         computer_use: server_tool(:computer_use)
+       }
+     end
+
+     ##
+     # A convenience method for performing a web search using the
+     # OpenAI web search tool.
+     # @example
+     #   llm = LLM.openai(key: ENV["KEY"])
+     #   res = llm.web_search(query: "summarize today's news")
+     #   res.search_results.each { |item| print item.title, ": ", item.url, "\n" }
+     # @param query [String] The search query.
+     # @return [LLM::Response] The response from the LLM provider.
+     def web_search(query:)
+       responses
+         .create(query, store: false, tools: [server_tools[:web_search]])
+         .extend(LLM::OpenAI::Response::WebSearch)
+     end
+
+     private
+
+     def completions_path
+       "/v1/chat/completions"
+     end
+
+     def headers
+       (@headers || {}).merge(
+         "Content-Type" => "application/json",
+         "Authorization" => "Bearer #{@key}"
+       )
+     end
+
+     def stream_parser
+       LLM::OpenAI::StreamParser
+     end
+
+     def error_handler
+       LLM::OpenAI::ErrorHandler
+     end
+   end
+ end
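
Taken together, the provider above wires the chat completions endpoint to the sub-interfaces it requires; a brief sketch drawn from the in-code examples (the environment variable is illustrative):

    llm = LLM.openai(key: ENV["OPENAI_SECRET"])
    res = llm.complete("Hello!")   # uses the default "gpt-4.1" model
    # Convenience web search via the web_search server tool
    res = llm.web_search(query: "summarize today's news")
    res.search_results.each { |item| print item.title, ": ", item.url, "\n" }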
@@ -0,0 +1,58 @@
+ # frozen_string_literal: true
+
+ class LLM::XAI
+   ##
+   # The {LLM::XAI::Images LLM::XAI::Images} class provides an interface
+   # for [xAI's images API](https://docs.x.ai/docs/guides/image-generations).
+   # xAI supports multiple response formats: temporary URLs, or binary strings
+   # encoded in base64. The default is to return temporary URLs.
+   #
+   # @example Temporary URLs
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #   require "open-uri"
+   #   require "fileutils"
+   #
+   #   llm = LLM.xai(key: ENV["KEY"])
+   #   res = llm.images.create prompt: "A dog on a rocket to the moon"
+   #   FileUtils.mv OpenURI.open_uri(res.urls[0]).path,
+   #                "rocket.png"
+   #
+   # @example Binary strings
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #
+   #   llm = LLM.xai(key: ENV["KEY"])
+   #   res = llm.images.create prompt: "A dog on a rocket to the moon",
+   #                           response_format: "b64_json"
+   #   IO.copy_stream res.images[0], "rocket.png"
+   class Images < LLM::OpenAI::Images
+     ##
+     # Create an image
+     # @example
+     #   llm = LLM.xai(key: ENV["KEY"])
+     #   res = llm.images.create prompt: "A dog on a rocket to the moon"
+     #   res.urls.each { print _1, "\n"}
+     # @see https://docs.x.ai/docs/guides/image-generations xAI docs
+     # @param [String] prompt The prompt
+     # @param [String] model The model to use
+     # @param [Hash] params Other parameters (see xAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     def create(prompt:, model: "grok-2-image-1212", **params)
+       super
+     end
+
+     ##
+     # @raise [NotImplementedError]
+     def edit(model: "grok-2-image-1212", **)
+       raise NotImplementedError
+     end
+
+     ##
+     # @raise [NotImplementedError]
+     def create_variation(model: "grok-2-image-1212", **)
+       raise NotImplementedError
+     end
+   end
+ end
@@ -0,0 +1,72 @@
+ # frozen_string_literal: true
+
+ require_relative "openai" unless defined?(LLM::OpenAI)
+
+ module LLM
+   ##
+   # The XAI class implements a provider for [xAI](https://docs.x.ai).
+   #
+   # @example
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #
+   #   llm = LLM.xai(key: ENV["KEY"])
+   #   bot = LLM::Bot.new(llm)
+   #   bot.chat ["Tell me about this photo", File.open("/images/crow.jpg", "rb")]
+   #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+   class XAI < OpenAI
+     require_relative "xai/images"
+
+     ##
+     # @param [String] host A regional host or the default ("api.x.ai")
+     # @param key (see LLM::Provider#initialize)
+     # @see https://docs.x.ai/docs/key-information/regions Regional endpoints
+     def initialize(host: "api.x.ai", **)
+       super
+     end
+
+     ##
+     # @raise [NotImplementedError]
+     def files
+       raise NotImplementedError
+     end
+
+     ##
+     # @return [LLM::XAI::Images]
+     def images
+       LLM::XAI::Images.new(self)
+     end
+
+     ##
+     # @raise [NotImplementedError]
+     def audio
+       raise NotImplementedError
+     end
+
+     ##
+     # @raise [NotImplementedError]
+     def moderations
+       raise NotImplementedError
+     end
+
+     ##
+     # @raise [NotImplementedError]
+     def responses
+       raise NotImplementedError
+     end
+
+     ##
+     # @raise [NotImplementedError]
+     def vector_stores
+       raise NotImplementedError
+     end
+
+     ##
+     # Returns the default model for chat completions
+     # #see https://docs.x.ai/docs/models grok-4-0709
+     # @return [String]
+     def default_model
+       "grok-4-0709"
+     end
+   end
+ end