llm-shell 0.9.2 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +61 -66
- data/lib/llm/shell/command.rb +40 -40
- data/lib/llm/shell/commands/clear_screen.rb +4 -18
- data/lib/llm/shell/commands/debug_mode.rb +12 -0
- data/lib/llm/shell/commands/dir_import.rb +4 -20
- data/lib/llm/shell/commands/disable_tool.rb +33 -0
- data/lib/llm/shell/commands/enable_tool.rb +33 -0
- data/lib/llm/shell/commands/file_import.rb +4 -20
- data/lib/llm/shell/commands/help.rb +23 -36
- data/lib/llm/shell/commands/show_chat.rb +4 -19
- data/lib/llm/shell/commands/show_version.rb +4 -20
- data/lib/llm/shell/commands/system_prompt.rb +4 -18
- data/lib/llm/shell/completion.rb +5 -5
- data/lib/llm/shell/config.rb +4 -5
- data/lib/llm/shell/formatter.rb +1 -2
- data/lib/llm/shell/internal/coderay/lib/coderay/duo.rb +81 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/_map.rb +17 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/comment_filter.rb +25 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/count.rb +39 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug.rb +49 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug_lint.rb +63 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/div.rb +23 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/encoder.rb +190 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/filter.rb +58 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/css.rb +65 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/numbering.rb +108 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/output.rb +164 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html.rb +333 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/json.rb +83 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lines_of_code.rb +45 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lint.rb +59 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/null.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/page.rb +24 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/span.rb +23 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/statistic.rb +95 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/terminal.rb +195 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/text.rb +46 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/token_kind_filter.rb +111 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/xml.rb +72 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/yaml.rb +50 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/for_redcloth.rb +95 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/file_type.rb +151 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin.rb +55 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin_host.rb +221 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/word_list.rb +72 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/_map.rb +24 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/c.rb +189 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/clojure.rb +217 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/cpp.rb +217 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/css.rb +196 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/debug.rb +75 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/delphi.rb +144 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/diff.rb +221 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/erb.rb +81 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/go.rb +208 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/groovy.rb +268 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/haml.rb +168 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/html.rb +275 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java/builtin_types.rb +421 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java.rb +174 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java_script.rb +236 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/json.rb +98 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/lua.rb +280 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/php.rb +527 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/python.rb +287 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/raydebug.rb +75 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/patterns.rb +178 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/string_state.rb +79 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby.rb +477 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sass.rb +232 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/scanner.rb +337 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sql.rb +169 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/taskpaper.rb +36 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/text.rb +26 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/xml.rb +17 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/yaml.rb +140 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners.rb +27 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/_map.rb +7 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/alpha.rb +153 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/style.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles.rb +15 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/token_kinds.rb +85 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/tokens.rb +164 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/tokens_proxy.rb +55 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/version.rb +3 -0
- data/lib/llm/shell/internal/coderay/lib/coderay.rb +284 -0
- data/lib/llm/shell/internal/io-line/lib/io/line/multiple.rb +19 -0
- data/lib/{io → llm/shell/internal/io-line/lib/io}/line.rb +2 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/builder.rb +31 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/conversable.rb +37 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/completion.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/respond.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot.rb +150 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/buffer.rb +162 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/client.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/error.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventhandler.rb +44 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/event.rb +69 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/parser.rb +88 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/file.rb +91 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/function.rb +177 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/message.rb +178 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/mime.rb +140 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/multipart.rb +101 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object/builder.rb +38 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object/kernel.rb +53 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object.rb +89 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/provider.rb +352 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/error_handler.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/files.rb +155 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format/completion_format.rb +88 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format.rb +29 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/models.rb +54 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/completion.rb +39 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/enumerable.rb +11 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/file.rb +23 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/web_search.rb +21 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/stream_parser.rb +66 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic.rb +138 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format.rb +27 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek.rb +75 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/audio.rb +73 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/error_handler.rb +47 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/files.rb +146 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format/completion_format.rb +69 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format.rb +39 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/images.rb +133 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/models.rb +60 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/completion.rb +35 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/embedding.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/file.rb +11 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/files.rb +15 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/image.rb +31 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/models.rb +15 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/web_search.rb +22 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/stream_parser.rb +86 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini.rb +173 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/llamacpp.rb +74 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/error_handler.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format/completion_format.rb +77 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format.rb +29 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/models.rb +56 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/completion.rb +28 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/embedding.rb +9 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/stream_parser.rb +44 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama.rb +116 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/audio.rb +91 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/error_handler.rb +46 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/files.rb +134 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/completion_format.rb +90 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/moderation_format.rb +35 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/respond_format.rb +72 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format.rb +54 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/images.rb +109 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/models.rb +55 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/moderations.rb +65 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/audio.rb +7 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/completion.rb +40 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/embedding.rb +9 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/enumerable.rb +23 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/file.rb +7 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/image.rb +16 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/moderations.rb +34 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb +48 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/web_search.rb +21 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses/stream_parser.rb +76 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses.rb +99 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/stream_parser.rb +86 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/vector_stores.rb +228 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai.rb +206 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai/images.rb +58 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai.rb +72 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/zai.rb +74 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/response.rb +67 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/array.rb +26 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/boolean.rb +13 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/integer.rb +43 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/leaf.rb +78 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/null.rb +13 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/number.rb +43 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/object.rb +41 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/string.rb +34 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/version.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema.rb +81 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/server_tool.rb +32 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/tool/param.rb +75 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/tool.rb +78 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/utils.rb +19 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/version.rb +5 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm.rb +121 -0
- data/lib/llm/shell/internal/optparse/lib/optionparser.rb +2 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/ac.rb +70 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/date.rb +18 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/kwargs.rb +27 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/shellwords.rb +7 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/time.rb +11 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/uri.rb +7 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/version.rb +80 -0
- data/lib/llm/shell/internal/optparse/lib/optparse.rb +2469 -0
- data/lib/llm/shell/internal/paint/lib/paint/constants.rb +104 -0
- data/lib/llm/shell/internal/paint/lib/paint/pa.rb +13 -0
- data/lib/llm/shell/internal/paint/lib/paint/rgb_colors.rb +14 -0
- data/lib/llm/shell/internal/paint/lib/paint/shortcuts.rb +100 -0
- data/lib/llm/shell/internal/paint/lib/paint/shortcuts_version.rb +5 -0
- data/lib/llm/shell/internal/paint/lib/paint/util.rb +16 -0
- data/lib/llm/shell/internal/paint/lib/paint/version.rb +5 -0
- data/lib/llm/shell/internal/paint/lib/paint.rb +261 -0
- data/lib/llm/shell/internal/reline/lib/reline/config.rb +378 -0
- data/lib/llm/shell/internal/reline/lib/reline/face.rb +199 -0
- data/lib/llm/shell/internal/reline/lib/reline/history.rb +76 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/ansi.rb +322 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/dumb.rb +120 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/windows.rb +530 -0
- data/lib/llm/shell/internal/reline/lib/reline/io.rb +55 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/base.rb +37 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/composite.rb +17 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/emacs.rb +517 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_command.rb +518 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_insert.rb +517 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor.rb +8 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_stroke.rb +119 -0
- data/lib/llm/shell/internal/reline/lib/reline/kill_ring.rb +125 -0
- data/lib/llm/shell/internal/reline/lib/reline/line_editor.rb +2356 -0
- data/lib/llm/shell/internal/reline/lib/reline/unicode/east_asian_width.rb +1292 -0
- data/lib/llm/shell/internal/reline/lib/reline/unicode.rb +421 -0
- data/lib/llm/shell/internal/reline/lib/reline/version.rb +3 -0
- data/lib/llm/shell/internal/reline/lib/reline.rb +527 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/generated_parser.rb +712 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/handler.rb +268 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date.rb +35 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date_time.rb +42 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_time.rb +40 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/parser.rb +21 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/scanner.rb +92 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/string_utils.rb +40 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/version.rb +5 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb.rb +49 -0
- data/lib/llm/shell/options.rb +1 -1
- data/lib/llm/shell/renderer.rb +2 -3
- data/lib/llm/shell/repl.rb +21 -16
- data/lib/llm/shell/tool.rb +42 -0
- data/lib/llm/shell/tools/read_file.rb +15 -0
- data/lib/llm/shell/tools/system.rb +17 -0
- data/lib/llm/shell/tools/write_file.rb +16 -0
- data/lib/llm/shell/version.rb +1 -1
- data/lib/llm/shell.rb +83 -39
- data/libexec/llm-shell/shell +4 -6
- data/llm-shell.gemspec +0 -4
- metadata +233 -63
- data/lib/llm/function.rb +0 -17
- data/lib/llm/shell/command/extension.rb +0 -42
- data/lib/llm/shell/commands/utils.rb +0 -21
- data/lib/llm/shell/functions/read_file.rb +0 -22
- data/lib/llm/shell/functions/write_file.rb +0 -22
--- /dev/null
+++ b/data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama.rb
@@ -0,0 +1,116 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # The Ollama class implements a provider for [Ollama](https://ollama.ai/),
+  # and the provider supports a wide range of models. It is straightforward
+  # to run on your own hardware, and there are a number of multi-modal models
+  # that can process both images and text.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.ollama(key: nil)
+  #   bot = LLM::Bot.new(llm, model: "llava")
+  #   bot.chat ["Tell me about this image", File.open("/images/parrot.png", "rb")]
+  #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+  class Ollama < Provider
+    require_relative "ollama/response/embedding"
+    require_relative "ollama/response/completion"
+    require_relative "ollama/error_handler"
+    require_relative "ollama/format"
+    require_relative "ollama/stream_parser"
+    require_relative "ollama/models"
+
+    include Format
+
+    HOST = "localhost"
+
+    ##
+    # @param key (see LLM::Provider#initialize)
+    def initialize(**)
+      super(host: HOST, port: 11434, ssl: false, **)
+    end
+
+    ##
+    # Provides an embedding
+    # @param input (see LLM::Provider#embed)
+    # @param model (see LLM::Provider#embed)
+    # @param params (see LLM::Provider#embed)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def embed(input, model: default_model, **params)
+      params = {model:}.merge!(params)
+      req = Net::HTTP::Post.new("/v1/embeddings", headers)
+      req.body = JSON.dump({input:}.merge!(params))
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::Ollama::Response::Embedding)
+    end
+
+    ##
+    # Provides an interface to the chat completions API
+    # @see https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion Ollama docs
+    # @param prompt (see LLM::Provider#complete)
+    # @param params (see LLM::Provider#complete)
+    # @example (see LLM::Provider#complete)
+    # @raise (see LLM::Provider#request)
+    # @raise [LLM::PromptError]
+    #  When given an object a provider does not understand
+    # @return [LLM::Response]
+    def complete(prompt, params = {})
+      params = {role: :user, model: default_model, stream: true}.merge!(params)
+      tools = resolve_tools(params.delete(:tools))
+      params = [params, {format: params[:schema]}, format_tools(tools)].inject({}, &:merge!).compact
+      role, stream = params.delete(:role), params.delete(:stream)
+      params[:stream] = true if stream.respond_to?(:<<) || stream == true
+      req = Net::HTTP::Post.new("/api/chat", headers)
+      messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
+      body = JSON.dump({messages: [format(messages)].flatten}.merge!(params))
+      set_body_stream(req, StringIO.new(body))
+      res = execute(request: req, stream:)
+      LLM::Response.new(res)
+        .extend(LLM::Ollama::Response::Completion)
+        .extend(Module.new { define_method(:__tools__) { tools } })
+    end
+
+    ##
+    # Provides an interface to Ollama's models API
+    # @see https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models Ollama docs
+    # @return [LLM::Ollama::Models]
+    def models
+      LLM::Ollama::Models.new(self)
+    end
+
+    ##
+    # @return (see LLM::Provider#assistant_role)
+    def assistant_role
+      "assistant"
+    end
+
+    ##
+    # Returns the default model for chat completions
+    # @see https://ollama.com/library/qwen3 qwen3
+    # @return [String]
+    def default_model
+      "qwen3:latest"
+    end
+
+    private
+
+    def headers
+      (@headers || {}).merge(
+        "Content-Type" => "application/json",
+        "Authorization" => "Bearer #{@key}"
+      )
+    end
+
+    def stream_parser
+      LLM::Ollama::StreamParser
+    end
+
+    def error_handler
+      LLM::Ollama::ErrorHandler
+    end
+  end
+end
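
For quick orientation, here is a minimal usage sketch of the provider added above, adapted from its own doc comments. It assumes llm-shell 0.10.0's vendored llm.rb is on the load path and a local Ollama server is running on the default port (11434) with the default model pulled:

```ruby
#!/usr/bin/env ruby
require "llm"

# Ollama runs locally, so no API key is required (key: nil)
llm = LLM.ollama(key: nil)
bot = LLM::Bot.new(llm, model: llm.default_model) # => "qwen3:latest"
bot.chat "Hello! What can you do?"
bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
```
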
--- /dev/null
+++ b/data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/audio.rb
@@ -0,0 +1,91 @@
+# frozen_string_literal: true
+
+class LLM::OpenAI
+  ##
+  # The {LLM::OpenAI::Audio LLM::OpenAI::Audio} class provides an audio
+  # object for interacting with [OpenAI's audio API](https://platform.openai.com/docs/api-reference/audio/createSpeech).
+  # @example
+  #   llm = LLM.openai(key: ENV["KEY"])
+  #   res = llm.audio.create_speech(input: "A dog on a rocket to the moon")
+  #   IO.copy_stream res.audio, "rocket.mp3"
+  class Audio
+    ##
+    # Returns a new Audio object
+    # @param provider [LLM::Provider]
+    # @return [LLM::OpenAI::Audio]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # Create an audio track
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   res = llm.audio.create_speech(input: "A dog on a rocket to the moon")
+    #   File.binwrite("rocket.mp3", res.audio.string)
+    # @see https://platform.openai.com/docs/api-reference/audio/createSpeech OpenAI docs
+    # @param [String] input The text input
+    # @param [String] voice The voice to use
+    # @param [String] model The model to use
+    # @param [String] response_format The response format
+    # @param [Hash] params Other parameters (see OpenAI docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create_speech(input:, voice: "alloy", model: "gpt-4o-mini-tts", response_format: "mp3", **params)
+      req = Net::HTTP::Post.new("/v1/audio/speech", headers)
+      req.body = JSON.dump({input:, voice:, model:, response_format:}.merge!(params))
+      io = StringIO.new("".b)
+      res = execute(request: req) { _1.read_body { |chunk| io << chunk } }
+      LLM::Response.new(res).tap { _1.define_singleton_method(:audio) { io } }
+    end
+
+    ##
+    # Create an audio transcription
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
+    #   res.text # => "A dog on a rocket to the moon"
+    # @see https://platform.openai.com/docs/api-reference/audio/createTranscription OpenAI docs
+    # @param [String, LLM::File] file The input audio
+    # @param [String] model The model to use
+    # @param [Hash] params Other parameters (see OpenAI docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create_transcription(file:, model: "whisper-1", **params)
+      multi = LLM::Multipart.new(params.merge!(file: LLM.File(file), model:))
+      req = Net::HTTP::Post.new("/v1/audio/transcriptions", headers)
+      req["content-type"] = multi.content_type
+      set_body_stream(req, multi.body)
+      res = execute(request: req)
+      LLM::Response.new(res)
+    end
+
+    ##
+    # Create an audio translation (in English)
+    # @example
+    #   # Arabic => English
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   res = llm.audio.create_translation(file: "/audio/bismillah.mp3")
+    #   res.text # => "In the name of Allah, the Beneficent, the Merciful."
+    # @see https://platform.openai.com/docs/api-reference/audio/createTranslation OpenAI docs
+    # @param [LLM::File] file The input audio
+    # @param [String] model The model to use
+    # @param [Hash] params Other parameters (see OpenAI docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create_translation(file:, model: "whisper-1", **params)
+      multi = LLM::Multipart.new(params.merge!(file: LLM.File(file), model:))
+      req = Net::HTTP::Post.new("/v1/audio/translations", headers)
+      req["content-type"] = multi.content_type
+      set_body_stream(req, multi.body)
+      res = execute(request: req)
+      LLM::Response.new(res)
+    end
+
+    private
+
+    [:headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end
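
A round-trip sketch of the speech and transcription endpoints above, stitched together from the doc-comment examples; the voice, model, and response format fall back to the keyword defaults in the method signatures:

```ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])

# Text -> speech: res.audio is the StringIO that create_speech fills chunk by chunk
res = llm.audio.create_speech(input: "A dog on a rocket to the moon")
File.binwrite("rocket.mp3", res.audio.string)

# Speech -> text: per the doc example, the transcript is exposed as res.text
res = llm.audio.create_transcription(file: "rocket.mp3")
print res.text, "\n"
```
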
--- /dev/null
+++ b/data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/error_handler.rb
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+class LLM::OpenAI
+  ##
+  # @private
+  class ErrorHandler
+    ##
+    # @return [Net::HTTPResponse]
+    #  Non-2XX response from the server
+    attr_reader :res
+
+    ##
+    # @param [Net::HTTPResponse] res
+    #  The response from the server
+    # @return [LLM::OpenAI::ErrorHandler]
+    def initialize(res)
+      @res = res
+    end
+
+    ##
+    # @raise [LLM::Error]
+    #  Raises a subclass of {LLM::Error LLM::Error}
+    def raise_error!
+      case res
+      when Net::HTTPServerError
+        raise LLM::ServerError.new { _1.response = res }, "Server error"
+      when Net::HTTPUnauthorized
+        raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
+      when Net::HTTPTooManyRequests
+        raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
+      else
+        error = body["error"] || {}
+        case error["type"]
+        when "server_error" then raise LLM::ServerError.new { _1.response = res }, error["message"]
+        else raise LLM::ResponseError.new { _1.response = res }, error["message"] || "Unexpected response"
+        end
+      end
+    end
+
+    private
+
+    def body
+      @body ||= JSON.parse(res.body)
+    end
+  end
+end
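
A sketch of how a caller might rescue the error classes that raise_error! raises above; each error carries the raw Net::HTTPResponse through the response attribute it assigns. The retry policy here is illustrative, not part of the library:

```ruby
llm = LLM.openai(key: ENV["KEY"])
attempts = 0
begin
  res = llm.complete("Hello")
rescue LLM::RateLimitError
  # HTTP 429: back off exponentially, then give up
  (attempts += 1) < 3 ? (sleep 2**attempts; retry) : raise
rescue LLM::UnauthorizedError => e
  abort "check your API key (HTTP #{e.response.code})"
rescue LLM::ResponseError => e
  warn e.message # "Unexpected response" when the body carries no error message
end
```
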
--- /dev/null
+++ b/data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/files.rb
@@ -0,0 +1,134 @@
+# frozen_string_literal: true
+
+class LLM::OpenAI
+  ##
+  # The {LLM::OpenAI::Files LLM::OpenAI::Files} class provides a files
+  # object for interacting with [OpenAI's Files API](https://platform.openai.com/docs/api-reference/files/create).
+  # The files API allows a client to upload files for use with OpenAI's models
+  # and API endpoints. OpenAI supports multiple file formats, including text
+  # files, CSV files, JSON files, and more.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.openai(key: ENV["KEY"])
+  #   bot = LLM::Bot.new(llm)
+  #   file = llm.files.create file: "/books/goodread.pdf"
+  #   bot.chat ["Tell me about this PDF", file]
+  #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+  class Files
+    require_relative "response/enumerable"
+    require_relative "response/file"
+
+    ##
+    # Returns a new Files object
+    # @param provider [LLM::Provider]
+    # @return [LLM::OpenAI::Files]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all files
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   res = llm.files.all
+    #   res.each do |file|
+    #     print "id: ", file.id, "\n"
+    #   end
+    # @see https://platform.openai.com/docs/api-reference/files/list OpenAI docs
+    # @param [Hash] params Other parameters (see OpenAI docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def all(**params)
+      query = URI.encode_www_form(params)
+      req = Net::HTTP::Get.new("/v1/files?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::OpenAI::Response::Enumerable)
+    end
+
+    ##
+    # Create a file
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   res = llm.files.create file: "/documents/haiku.txt"
+    # @see https://platform.openai.com/docs/api-reference/files/create OpenAI docs
+    # @param [File, LLM::File, String] file The file
+    # @param [String] purpose The purpose of the file (see OpenAI docs)
+    # @param [Hash] params Other parameters (see OpenAI docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create(file:, purpose: "assistants", **params)
+      multi = LLM::Multipart.new(params.merge!(file: LLM.File(file), purpose:))
+      req = Net::HTTP::Post.new("/v1/files", headers)
+      req["content-type"] = multi.content_type
+      set_body_stream(req, multi.body)
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::OpenAI::Response::File)
+    end
+
+    ##
+    # Get a file
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   res = llm.files.get(file: "file-1234567890")
+    #   print "id: ", res.id, "\n"
+    # @see https://platform.openai.com/docs/api-reference/files/get OpenAI docs
+    # @param [#id, #to_s] file The file ID
+    # @param [Hash] params Other parameters (see OpenAI docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def get(file:, **params)
+      file_id = file.respond_to?(:id) ? file.id : file
+      query = URI.encode_www_form(params)
+      req = Net::HTTP::Get.new("/v1/files/#{file_id}?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::OpenAI::Response::File)
+    end
+
+    ##
+    # Download the content of a file
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   res = llm.files.download(file: "file-1234567890")
+    #   File.binwrite "haiku1.txt", res.file.read
+    #   print res.file.read, "\n"
+    # @see https://platform.openai.com/docs/api-reference/files/content OpenAI docs
+    # @param [#id, #to_s] file The file ID
+    # @param [Hash] params Other parameters (see OpenAI docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def download(file:, **params)
+      query = URI.encode_www_form(params)
+      file_id = file.respond_to?(:id) ? file.id : file
+      req = Net::HTTP::Get.new("/v1/files/#{file_id}/content?#{query}", headers)
+      io = StringIO.new("".b)
+      res = execute(request: req) { |res| res.read_body { |chunk| io << chunk } }
+      LLM::Response.new(res).tap { _1.define_singleton_method(:file) { io } }
+    end
+
+    ##
+    # Delete a file
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   res = llm.files.delete(file: "file-1234567890")
+    #   print res.deleted, "\n"
+    # @see https://platform.openai.com/docs/api-reference/files/delete OpenAI docs
+    # @param [#id, #to_s] file The file ID
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def delete(file:)
+      file_id = file.respond_to?(:id) ? file.id : file
+      req = Net::HTTP::Delete.new("/v1/files/#{file_id}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res)
+    end
+
+    private
+
+    [:headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end
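
The lifecycle implied by the five methods above, condensed into one sketch from the doc-comment examples; note that get, download, and delete accept either an object that responds to #id or a raw file ID string:

```ruby
llm = LLM.openai(key: ENV["KEY"])

file = llm.files.create(file: "/documents/haiku.txt")  # upload
info = llm.files.get(file:)                            # fetch metadata by object or ID
data = llm.files.download(file:)                       # #file is the StringIO filled above
File.binwrite("haiku-copy.txt", data.file.string)
llm.files.delete(file:)
```
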
--- /dev/null
+++ b/data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/completion_format.rb
@@ -0,0 +1,90 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Format
+  ##
+  # @private
+  class CompletionFormat
+    ##
+    # @param [LLM::Message, Hash] message
+    #  The message to format
+    def initialize(message)
+      @message = message
+    end
+
+    ##
+    # Formats the message for the OpenAI chat completions API
+    # @return [Hash]
+    def format
+      catch(:abort) do
+        if Hash === message
+          {role: message[:role], content: format_content(message[:content])}
+        elsif message.tool_call?
+          {role: message.role, content: nil, tool_calls: message.extra[:original_tool_calls]}
+        else
+          format_message
+        end
+      end
+    end
+
+    private
+
+    def format_content(content)
+      case content
+      when URI
+        [{type: :image_url, image_url: {url: content.to_s}}]
+      when File
+        content.close unless content.closed?
+        format_content(LLM.File(content.path))
+      when LLM::File
+        format_file(content)
+      when LLM::Response
+        content.file? ? [{type: :file, file: {file_id: content.id}}] : prompt_error!(content)
+      when String
+        [{type: :text, text: content.to_s}]
+      when LLM::Message
+        format_content(content.content)
+      when LLM::Function::Return
+        throw(:abort, {role: "tool", tool_call_id: content.id, content: JSON.dump(content.value)})
+      else
+        prompt_error!(content)
+      end
+    end
+
+    def format_file(content)
+      file = content
+      if file.image?
+        [{type: :image_url, image_url: {url: file.to_data_uri}}]
+      else
+        [{type: :file, file: {filename: file.basename, file_data: file.to_data_uri}}]
+      end
+    end
+
+    def format_message
+      case content
+      when Array
+        format_array
+      else
+        {role: message.role, content: format_content(content)}
+      end
+    end
+
+    def format_array
+      if content.empty?
+        nil
+      elsif returns.any?
+        returns.map { {role: "tool", tool_call_id: _1.id, content: JSON.dump(_1.value)} }
+      else
+        {role: message.role, content: content.flat_map { format_content(_1) }}
+      end
+    end
+
+    def prompt_error!(content)
+      raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
+                              "is not supported by the OpenAI chat completions API"
+    end
+
+    def message = @message
+    def content = message.content
+    def returns = content.grep(LLM::Function::Return)
+  end
+end
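
CompletionFormat is marked @private, but the mapping it performs is easy to illustrate: a plain hash message with String content becomes a single text part, per the format and format_content branches above:

```ruby
fmt = LLM::OpenAI::Format::CompletionFormat.new({role: :user, content: "Hello"})
fmt.format
# => {role: :user, content: [{type: :text, text: "Hello"}]}
```
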
--- /dev/null
+++ b/data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/moderation_format.rb
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Format
+  ##
+  # @private
+  class ModerationFormat
+    ##
+    # @param [String, URI, Array<String, URI>] inputs
+    #  The inputs to format
+    # @return [LLM::OpenAI::Format::ModerationFormat]
+    def initialize(inputs)
+      @inputs = inputs
+    end
+
+    ##
+    # Formats the inputs for the OpenAI moderations API
+    # @return [Array<Hash>]
+    def format
+      [*inputs].flat_map do |input|
+        if String === input
+          {type: :text, text: input}
+        elsif URI === input
+          {type: :image_url, url: input.to_s}
+        else
+          raise LLM::FormatError, "The given object (an instance of #{input.class}) " \
+                                  "is not supported by OpenAI moderations API"
+        end
+      end
+    end
+
+    private
+
+    attr_reader :inputs
+  end
+end
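
The formatter above accepts a single input or an array, and maps Strings and URIs to the two moderation input types; anything else raises LLM::FormatError. A sketch of both branches:

```ruby
inputs = ["some text", URI("https://example.com/photo.png")]
LLM::OpenAI::Format::ModerationFormat.new(inputs).format
# => [{type: :text, text: "some text"},
#     {type: :image_url, url: "https://example.com/photo.png"}]
```
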
--- /dev/null
+++ b/data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/respond_format.rb
@@ -0,0 +1,72 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Format
+  ##
+  # @private
+  class RespondFormat
+    def initialize(message)
+      @message = message
+    end
+
+    def format
+      catch(:abort) do
+        if Hash === message
+          {role: message[:role], content: format_content(message[:content])}
+        else
+          format_message
+        end
+      end
+    end
+
+    private
+
+    def format_content(content)
+      case content
+      when LLM::Response
+        content.file? ? format_file(content) : prompt_error!(content)
+      when String
+        [{type: :input_text, text: content.to_s}]
+      when LLM::Message
+        format_content(content.content)
+      else
+        prompt_error!(content)
+      end
+    end
+
+    def format_message
+      case content
+      when Array
+        format_array
+      else
+        {role: message.role, content: format_content(content)}
+      end
+    end
+
+    def format_array
+      if content.empty?
+        nil
+      elsif returns.any?
+        returns.map { {type: "function_call_output", call_id: _1.id, output: JSON.dump(_1.value)} }
+      else
+        {role: message.role, content: content.flat_map { format_content(_1) }}
+      end
+    end
+
+    def format_file(content)
+      file = LLM::File(content.filename)
+      if file.image?
+        [{type: :input_image, file_id: content.id}]
+      else
+        [{type: :input_file, file_id: content.id}]
+      end
+    end
+
+    def prompt_error!(content)
+      raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
+                              "is not supported by the OpenAI responses API"
+    end
+    def message = @message
+    def content = message.content
+    def returns = content.grep(LLM::Function::Return)
+  end
+end
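
RespondFormat mirrors CompletionFormat but targets the responses API: String content becomes input_text parts, uploaded files become input_image/input_file references, and tool returns become function_call_output entries. The same hash message from the earlier sketch therefore formats as:

```ruby
fmt = LLM::OpenAI::Format::RespondFormat.new({role: :user, content: "Hello"})
fmt.format
# => {role: :user, content: [{type: :input_text, text: "Hello"}]}
```
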
--- /dev/null
+++ b/data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format.rb
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+class LLM::OpenAI
+  ##
+  # @private
+  module Format
+    require_relative "format/completion_format"
+    require_relative "format/respond_format"
+    require_relative "format/moderation_format"
+
+    ##
+    # @param [Array<LLM::Message>] messages
+    #  The messages to format
+    # @param [Symbol] mode
+    #  The mode to format the messages for
+    # @return [Array<Hash>]
+    def format(messages, mode)
+      messages.filter_map do |message|
+        if mode == :complete
+          CompletionFormat.new(message).format
+        else
+          RespondFormat.new(message).format
+        end
+      end
+    end
+
+    private
+
+    ##
+    # @param [Hash] params
+    # @return [Hash]
+    def format_schema(params)
+      return {} unless params and params[:schema]
+      schema = params.delete(:schema)
+      {
+        response_format: {
+          type: "json_schema",
+          json_schema: {name: "JSONSchema", schema:}
+        }
+      }
+    end
+
+    ##
+    # @param [Array] tools
+    # @return [Hash]
+    def format_tools(tools)
+      if tools.nil? || tools.empty?
+        {}
+      else
+        {tools: tools.map { _1.respond_to?(:format) ? _1.format(self) : _1 }}
+      end
+    end
+  end
+end
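
The Format module is the seam between the two formatters: format dispatches on mode, while the private helpers translate schema and tool params into request fields. A sketch of the values they produce, assuming the module is mixed into a provider the way LLM::OpenAI does:

```ruby
# mode :complete routes through CompletionFormat; anything else through RespondFormat
format([LLM::Message.new(:user, "Hello")], :complete)
# => [{role: :user, content: [{type: :text, text: "Hello"}]}]

# format_schema consumes params[:schema] and wraps it for structured output
format_schema({schema: {type: "object", properties: {answer: {type: "string"}}}})
# => {response_format: {type: "json_schema",
#      json_schema: {name: "JSONSchema", schema: {type: "object", ...}}}}
```
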