llm-shell 0.9.2 → 0.10.0
This diff shows the changes between publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +61 -66
- data/lib/llm/shell/command.rb +40 -40
- data/lib/llm/shell/commands/clear_screen.rb +4 -18
- data/lib/llm/shell/commands/debug_mode.rb +12 -0
- data/lib/llm/shell/commands/dir_import.rb +4 -20
- data/lib/llm/shell/commands/disable_tool.rb +33 -0
- data/lib/llm/shell/commands/enable_tool.rb +33 -0
- data/lib/llm/shell/commands/file_import.rb +4 -20
- data/lib/llm/shell/commands/help.rb +23 -36
- data/lib/llm/shell/commands/show_chat.rb +4 -19
- data/lib/llm/shell/commands/show_version.rb +4 -20
- data/lib/llm/shell/commands/system_prompt.rb +4 -18
- data/lib/llm/shell/completion.rb +5 -5
- data/lib/llm/shell/config.rb +4 -5
- data/lib/llm/shell/formatter.rb +1 -2
- data/lib/llm/shell/internal/coderay/lib/coderay/duo.rb +81 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/_map.rb +17 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/comment_filter.rb +25 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/count.rb +39 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug.rb +49 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug_lint.rb +63 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/div.rb +23 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/encoder.rb +190 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/filter.rb +58 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/css.rb +65 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/numbering.rb +108 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/output.rb +164 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html.rb +333 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/json.rb +83 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lines_of_code.rb +45 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lint.rb +59 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/null.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/page.rb +24 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/span.rb +23 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/statistic.rb +95 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/terminal.rb +195 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/text.rb +46 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/token_kind_filter.rb +111 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/xml.rb +72 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/yaml.rb +50 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/for_redcloth.rb +95 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/file_type.rb +151 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin.rb +55 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin_host.rb +221 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/word_list.rb +72 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/_map.rb +24 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/c.rb +189 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/clojure.rb +217 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/cpp.rb +217 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/css.rb +196 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/debug.rb +75 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/delphi.rb +144 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/diff.rb +221 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/erb.rb +81 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/go.rb +208 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/groovy.rb +268 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/haml.rb +168 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/html.rb +275 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java/builtin_types.rb +421 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java.rb +174 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java_script.rb +236 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/json.rb +98 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/lua.rb +280 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/php.rb +527 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/python.rb +287 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/raydebug.rb +75 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/patterns.rb +178 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/string_state.rb +79 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby.rb +477 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sass.rb +232 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/scanner.rb +337 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sql.rb +169 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/taskpaper.rb +36 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/text.rb +26 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/xml.rb +17 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/yaml.rb +140 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners.rb +27 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/_map.rb +7 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/alpha.rb +153 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/style.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles.rb +15 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/token_kinds.rb +85 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/tokens.rb +164 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/tokens_proxy.rb +55 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/version.rb +3 -0
- data/lib/llm/shell/internal/coderay/lib/coderay.rb +284 -0
- data/lib/llm/shell/internal/io-line/lib/io/line/multiple.rb +19 -0
- data/lib/{io → llm/shell/internal/io-line/lib/io}/line.rb +2 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/builder.rb +31 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/conversable.rb +37 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/completion.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/respond.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot.rb +150 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/buffer.rb +162 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/client.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/error.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventhandler.rb +44 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/event.rb +69 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/parser.rb +88 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/file.rb +91 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/function.rb +177 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/message.rb +178 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/mime.rb +140 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/multipart.rb +101 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object/builder.rb +38 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object/kernel.rb +53 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object.rb +89 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/provider.rb +352 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/error_handler.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/files.rb +155 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format/completion_format.rb +88 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format.rb +29 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/models.rb +54 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/completion.rb +39 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/enumerable.rb +11 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/file.rb +23 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/web_search.rb +21 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/stream_parser.rb +66 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic.rb +138 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format.rb +27 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek.rb +75 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/audio.rb +73 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/error_handler.rb +47 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/files.rb +146 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format/completion_format.rb +69 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format.rb +39 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/images.rb +133 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/models.rb +60 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/completion.rb +35 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/embedding.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/file.rb +11 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/files.rb +15 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/image.rb +31 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/models.rb +15 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/web_search.rb +22 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/stream_parser.rb +86 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini.rb +173 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/llamacpp.rb +74 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/error_handler.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format/completion_format.rb +77 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format.rb +29 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/models.rb +56 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/completion.rb +28 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/embedding.rb +9 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/stream_parser.rb +44 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama.rb +116 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/audio.rb +91 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/error_handler.rb +46 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/files.rb +134 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/completion_format.rb +90 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/moderation_format.rb +35 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/respond_format.rb +72 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format.rb +54 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/images.rb +109 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/models.rb +55 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/moderations.rb +65 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/audio.rb +7 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/completion.rb +40 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/embedding.rb +9 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/enumerable.rb +23 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/file.rb +7 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/image.rb +16 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/moderations.rb +34 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb +48 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/web_search.rb +21 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses/stream_parser.rb +76 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses.rb +99 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/stream_parser.rb +86 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/vector_stores.rb +228 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai.rb +206 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai/images.rb +58 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai.rb +72 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/zai.rb +74 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/response.rb +67 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/array.rb +26 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/boolean.rb +13 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/integer.rb +43 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/leaf.rb +78 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/null.rb +13 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/number.rb +43 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/object.rb +41 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/string.rb +34 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/version.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema.rb +81 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/server_tool.rb +32 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/tool/param.rb +75 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/tool.rb +78 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/utils.rb +19 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/version.rb +5 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm.rb +121 -0
- data/lib/llm/shell/internal/optparse/lib/optionparser.rb +2 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/ac.rb +70 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/date.rb +18 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/kwargs.rb +27 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/shellwords.rb +7 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/time.rb +11 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/uri.rb +7 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/version.rb +80 -0
- data/lib/llm/shell/internal/optparse/lib/optparse.rb +2469 -0
- data/lib/llm/shell/internal/paint/lib/paint/constants.rb +104 -0
- data/lib/llm/shell/internal/paint/lib/paint/pa.rb +13 -0
- data/lib/llm/shell/internal/paint/lib/paint/rgb_colors.rb +14 -0
- data/lib/llm/shell/internal/paint/lib/paint/shortcuts.rb +100 -0
- data/lib/llm/shell/internal/paint/lib/paint/shortcuts_version.rb +5 -0
- data/lib/llm/shell/internal/paint/lib/paint/util.rb +16 -0
- data/lib/llm/shell/internal/paint/lib/paint/version.rb +5 -0
- data/lib/llm/shell/internal/paint/lib/paint.rb +261 -0
- data/lib/llm/shell/internal/reline/lib/reline/config.rb +378 -0
- data/lib/llm/shell/internal/reline/lib/reline/face.rb +199 -0
- data/lib/llm/shell/internal/reline/lib/reline/history.rb +76 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/ansi.rb +322 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/dumb.rb +120 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/windows.rb +530 -0
- data/lib/llm/shell/internal/reline/lib/reline/io.rb +55 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/base.rb +37 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/composite.rb +17 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/emacs.rb +517 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_command.rb +518 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_insert.rb +517 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor.rb +8 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_stroke.rb +119 -0
- data/lib/llm/shell/internal/reline/lib/reline/kill_ring.rb +125 -0
- data/lib/llm/shell/internal/reline/lib/reline/line_editor.rb +2356 -0
- data/lib/llm/shell/internal/reline/lib/reline/unicode/east_asian_width.rb +1292 -0
- data/lib/llm/shell/internal/reline/lib/reline/unicode.rb +421 -0
- data/lib/llm/shell/internal/reline/lib/reline/version.rb +3 -0
- data/lib/llm/shell/internal/reline/lib/reline.rb +527 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/generated_parser.rb +712 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/handler.rb +268 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date.rb +35 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date_time.rb +42 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_time.rb +40 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/parser.rb +21 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/scanner.rb +92 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/string_utils.rb +40 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/version.rb +5 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb.rb +49 -0
- data/lib/llm/shell/options.rb +1 -1
- data/lib/llm/shell/renderer.rb +2 -3
- data/lib/llm/shell/repl.rb +21 -16
- data/lib/llm/shell/tool.rb +42 -0
- data/lib/llm/shell/tools/read_file.rb +15 -0
- data/lib/llm/shell/tools/system.rb +17 -0
- data/lib/llm/shell/tools/write_file.rb +16 -0
- data/lib/llm/shell/version.rb +1 -1
- data/lib/llm/shell.rb +83 -39
- data/libexec/llm-shell/shell +4 -6
- data/llm-shell.gemspec +0 -4
- metadata +233 -63
- data/lib/llm/function.rb +0 -17
- data/lib/llm/shell/command/extension.rb +0 -42
- data/lib/llm/shell/commands/utils.rb +0 -21
- data/lib/llm/shell/functions/read_file.rb +0 -22
- data/lib/llm/shell/functions/write_file.rb +0 -22

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/web_search.rb
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  ##
+  # The {LLM::Gemini::Response::WebSearch LLM::Gemini::Response::WebSearch}
+  # module provides methods for accessing web search results from a web search
+  # tool call made via the {LLM::Provider#web_search LLM::Provider#web_search}
+  # method.
+  module WebSearch
+    ##
+    # Returns one or more search results
+    # @return [Array<LLM::Object>]
+    def search_results
+      LLM::Object.from_hash(
+        candidates[0]
+          .groundingMetadata
+          .groundingChunks
+          .map { {"url" => _1.web.uri, "title" => _1.web.title} }
+      )
+    end
+  end
+end
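
This module is mixed into responses returned by the provider's `web_search` method (defined in `gemini.rb`, later in this diff). A minimal usage sketch, assuming the API key lives in a `GEMINI_KEY` environment variable (the variable name and query are illustrative):

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(key: ENV["GEMINI_KEY"])
res = llm.web_search(query: "latest Ruby release")
# Each result is built from a grounding chunk's {url, title} pair
res.search_results.each { print _1.title, " => ", _1.url, "\n" }
```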

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/stream_parser.rb
@@ -0,0 +1,86 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # @private
+  class StreamParser
+    ##
+    # Returns the fully constructed response body
+    # @return [LLM::Object]
+    attr_reader :body
+
+    ##
+    # @param [#<<] io An IO-like object
+    # @return [LLM::Gemini::StreamParser]
+    def initialize(io)
+      @body = LLM::Object.from_hash({candidates: []})
+      @io = io
+    end
+
+    ##
+    # @param [Hash] chunk
+    # @return [LLM::Gemini::StreamParser]
+    def parse!(chunk)
+      tap { merge_chunk!(LLM::Object.from_hash(chunk)) }
+    end
+
+    private
+
+    def merge_chunk!(chunk)
+      chunk.each do |key, value|
+        if key.to_s == "candidates"
+          merge_candidates!(value)
+        elsif key.to_s == "usageMetadata" &&
+              @body.usageMetadata.is_a?(LLM::Object) &&
+              value.is_a?(LLM::Object)
+          @body.usageMetadata = LLM::Object.from_hash(@body.usageMetadata.to_h.merge(value.to_h))
+        else
+          @body[key] = value
+        end
+      end
+    end
+
+    def merge_candidates!(new_candidates_list)
+      new_candidates_list.each do |new_candidate_delta|
+        index = new_candidate_delta.index
+        @body.candidates[index] ||= LLM::Object.from_hash({content: {parts: []}})
+        existing_candidate = @body.candidates[index]
+        new_candidate_delta.each do |key, value|
+          if key.to_s == "content"
+            merge_candidate_content!(existing_candidate.content, value) if value
+          else
+            existing_candidate[key] = value # Overwrite other fields
+          end
+        end
+      end
+    end
+
+    def merge_candidate_content!(existing_content, new_content_delta)
+      new_content_delta.each do |key, value|
+        if key.to_s == "parts"
+          existing_content.parts ||= []
+          merge_content_parts!(existing_content.parts, value) if value
+        else
+          existing_content[key] = value
+        end
+      end
+    end
+
+    def merge_content_parts!(existing_parts, new_parts_delta)
+      new_parts_delta.each do |new_part_delta|
+        if new_part_delta.text
+          last_existing_part = existing_parts.last
+          if last_existing_part&.text
+            last_existing_part.text << new_part_delta.text
+            @io << new_part_delta.text if @io.respond_to?(:<<)
+          else
+            existing_parts << new_part_delta
+            @io << new_part_delta.text if @io.respond_to?(:<<)
+          end
+        elsif new_part_delta.functionCall
+          existing_parts << new_part_delta
+        end
+      end
+    end
+  end
+end
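
The parser is marked `@private` and is normally driven by the provider while it consumes an SSE stream, but its merge behavior can be sketched directly. The chunk shapes below are hand-written to mimic Gemini's streaming deltas, not taken from this diff:

```ruby
parser = LLM::Gemini::StreamParser.new($stdout)
parser.parse!({"candidates" => [{"index" => 0, "content" => {"parts" => [{"text" => "Hel"}]}}]})
parser.parse!({"candidates" => [{"index" => 0, "content" => {"parts" => [{"text" => "lo"}]}}]})
# Text deltas for the same candidate index are appended to the last part,
# and each delta is echoed to the IO object, so $stdout prints "Hello".
```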

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini.rb
@@ -0,0 +1,173 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # The Gemini class implements a provider for
+  # [Gemini](https://ai.google.dev/). The Gemini provider
+  # can accept multiple inputs (text, images, audio, and video).
+  # The inputs can be provided inline via the prompt for files
+  # under 20MB or via the Gemini Files API for files
+  # that are over 20MB.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(key: ENV["KEY"])
+  #   bot = LLM::Bot.new(llm)
+  #   bot.chat ["Tell me about this photo", File.open("/images/horse.jpg", "rb")]
+  #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+  class Gemini < Provider
+    require_relative "gemini/response/embedding"
+    require_relative "gemini/response/completion"
+    require_relative "gemini/response/web_search"
+    require_relative "gemini/error_handler"
+    require_relative "gemini/format"
+    require_relative "gemini/stream_parser"
+    require_relative "gemini/models"
+    require_relative "gemini/images"
+    require_relative "gemini/files"
+    require_relative "gemini/audio"
+
+    include Format
+
+    HOST = "generativelanguage.googleapis.com"
+
+    ##
+    # @param key (see LLM::Provider#initialize)
+    def initialize(**)
+      super(host: HOST, **)
+    end
+
+    ##
+    # Provides an embedding
+    # @param input (see LLM::Provider#embed)
+    # @param model (see LLM::Provider#embed)
+    # @param params (see LLM::Provider#embed)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def embed(input, model: "text-embedding-004", **params)
+      model = model.respond_to?(:id) ? model.id : model
+      path = ["/v1beta/models/#{model}", "embedContent?key=#{@key}"].join(":")
+      req = Net::HTTP::Post.new(path, headers)
+      req.body = JSON.dump({content: {parts: [{text: input}]}})
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::Gemini::Response::Embedding)
+    end
+
+    ##
+    # Provides an interface to the chat completions API
+    # @see https://ai.google.dev/api/generate-content#v1beta.models.generateContent Gemini docs
+    # @param prompt (see LLM::Provider#complete)
+    # @param params (see LLM::Provider#complete)
+    # @example (see LLM::Provider#complete)
+    # @raise (see LLM::Provider#request)
+    # @raise [LLM::PromptError]
+    #  When given an object a provider does not understand
+    # @return [LLM::Response]
+    def complete(prompt, params = {})
+      params = {role: :user, model: default_model}.merge!(params)
+      tools = resolve_tools(params.delete(:tools))
+      params = [params, format_schema(params), format_tools(tools)].inject({}, &:merge!).compact
+      role, model, stream = [:role, :model, :stream].map { params.delete(_1) }
+      action = stream ? "streamGenerateContent?key=#{@key}&alt=sse" : "generateContent?key=#{@key}"
+      model.respond_to?(:id) ? model.id : model
+      path = ["/v1beta/models/#{model}", action].join(":")
+      req = Net::HTTP::Post.new(path, headers)
+      messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
+      body = JSON.dump({contents: format(messages)}.merge!(params))
+      set_body_stream(req, StringIO.new(body))
+      res = execute(request: req, stream:)
+      LLM::Response.new(res)
+        .extend(LLM::Gemini::Response::Completion)
+        .extend(Module.new { define_method(:__tools__) { tools } })
+    end
+
+    ##
+    # Provides an interface to Gemini's audio API
+    # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
+    # @return [LLM::Gemini::Audio]
+    def audio
+      LLM::Gemini::Audio.new(self)
+    end
+
+    ##
+    # Provides an interface to Gemini's image generation API
+    # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
+    # @return [see LLM::Gemini::Images]
+    def images
+      LLM::Gemini::Images.new(self)
+    end
+
+    ##
+    # Provides an interface to Gemini's file management API
+    # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
+    # @return [LLM::Gemini::Files]
+    def files
+      LLM::Gemini::Files.new(self)
+    end
+
+    ##
+    # Provides an interface to Gemini's models API
+    # @see https://ai.google.dev/gemini-api/docs/models Gemini docs
+    # @return [LLM::Gemini::Models]
+    def models
+      LLM::Gemini::Models.new(self)
+    end
+
+    ##
+    # @return (see LLM::Provider#assistant_role)
+    def assistant_role
+      "model"
+    end
+
+    ##
+    # Returns the default model for chat completions
+    # @see https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash gemini-2.5-flash
+    # @return [String]
+    def default_model
+      "gemini-2.5-flash"
+    end
+
+    ##
+    # @note
+    #  This method includes certain tools that require configuration
+    #  through a set of options that are easier to set through the
+    #  {LLM::Provider#server_tool LLM::Provider#server_tool} method.
+    # @see https://ai.google.dev/gemini-api/docs/google-search Gemini docs
+    # @return (see LLM::Provider#server_tools)
+    def server_tools
+      {
+        google_search: server_tool(:google_search),
+        code_execution: server_tool(:code_execution),
+        url_context: server_tool(:url_context)
+      }
+    end
+
+    ##
+    # A convenience method for performing a web search using the
+    # Google Search tool.
+    # @param query [String] The search query.
+    # @return [LLM::Response] The response from the LLM provider.
+    def web_search(query:)
+      complete(query, tools: [server_tools[:google_search]])
+        .extend(LLM::Gemini::Response::WebSearch)
+    end
+
+    private
+
+    def headers
+      (@headers || {}).merge(
+        "Content-Type" => "application/json"
+      )
+    end
+
+    def stream_parser
+      LLM::Gemini::StreamParser
+    end
+
+    def error_handler
+      LLM::Gemini::ErrorHandler
+    end
+  end
+end
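
Besides the class-level example, `complete` and `server_tools` compose: passing a server tool via `tools:` enables Gemini's hosted tooling for that request. A sketch, assuming (as with the Ollama module later in this diff) that the completion response exposes `choices`:

```ruby
llm = LLM.gemini(key: ENV["GEMINI_KEY"]) # env var name is illustrative
res = llm.complete(
  "Compute the sum of the first 50 primes.",
  tools: [llm.server_tools[:code_execution]]
)
print res.choices[0].content, "\n"
```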

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/llamacpp.rb
@@ -0,0 +1,74 @@
+# frozen_string_literal: true
+
+require_relative "openai" unless defined?(LLM::OpenAI)
+
+module LLM
+  ##
+  # The LlamaCpp class implements a provider for
+  # [llama.cpp](https://github.com/ggml-org/llama.cpp)
+  # through the OpenAI-compatible API provided by the
+  # llama-server binary. Similar to the ollama provider,
+  # this provider supports a wide range of models and
+  # is straightforward to run on your own hardware.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.llamacpp(key: nil)
+  #   bot = LLM::Bot.new(llm)
+  #   bot.chat ["Tell me about this photo", File.open("/images/frog.jpg", "rb")]
+  #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+  class LlamaCpp < OpenAI
+    ##
+    # @param (see LLM::Provider#initialize)
+    # @return [LLM::LlamaCpp]
+    def initialize(host: "localhost", port: 8080, ssl: false, **)
+      super
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def files
+      raise NotImplementedError
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def images
+      raise NotImplementedError
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def audio
+      raise NotImplementedError
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def moderations
+      raise NotImplementedError
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def responses
+      raise NotImplementedError
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    def vector_stores
+      raise NotImplementedError
+    end
+
+    ##
+    # Returns the default model for chat completions
+    # @see https://ollama.com/library/qwen3 qwen3
+    # @return [String]
+    def default_model
+      "qwen3"
+    end
+  end
+end
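
A short sketch of pointing the provider at a local llama-server instance; the connection defaults come from `initialize` above, and anything beyond the OpenAI-compatible chat surface raises `NotImplementedError`:

```ruby
llm = LLM.llamacpp(key: nil) # defaults: host "localhost", port 8080, ssl false
res = llm.complete("Hello!") # chat completions inherited from LLM::OpenAI
llm.images                   # raises NotImplementedError
```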

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/error_handler.rb
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+class LLM::Ollama
+  ##
+  # @private
+  class ErrorHandler
+    ##
+    # @return [Net::HTTPResponse]
+    #  Non-2XX response from the server
+    attr_reader :res
+
+    ##
+    # @param [Net::HTTPResponse] res
+    #  The response from the server
+    # @return [LLM::OpenAI::ErrorHandler]
+    def initialize(res)
+      @res = res
+    end
+
+    ##
+    # @raise [LLM::Error]
+    #  Raises a subclass of {LLM::Error LLM::Error}
+    def raise_error!
+      case res
+      when Net::HTTPServerError
+        raise LLM::ServerError.new { _1.response = res }, "Server error"
+      when Net::HTTPUnauthorized
+        raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
+      when Net::HTTPTooManyRequests
+        raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
+      else
+        raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
+      end
+    end
+  end
+end
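
Because every branch attaches the raw `Net::HTTPResponse` to the error before raising, callers can rescue a specific subclass and inspect the response. A hedged sketch; the backoff policy is hypothetical, not part of the gem:

```ruby
begin
  res = llm.complete("Hello")
rescue LLM::RateLimitError => e
  # e.response is the Net::HTTPResponse assigned via `_1.response = res`
  warn "rate limited (HTTP #{e.response.code}); backing off"
  sleep 1
  retry
end
```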

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format/completion_format.rb
@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+module LLM::Ollama::Format
+  ##
+  # @private
+  class CompletionFormat
+    ##
+    # @param [LLM::Message] message
+    #  The message to format
+    def initialize(message)
+      @message = message
+    end
+
+    ##
+    # Returns the message for the Ollama chat completions API
+    # @return [Hash]
+    def format
+      catch(:abort) do
+        if Hash === message
+          {role: message[:role]}.merge(format_content(message[:content]))
+        else
+          format_message
+        end
+      end
+    end
+
+    private
+
+    def format_content(content)
+      case content
+      when File
+        content.close unless content.closed?
+        format_content(LLM.File(content.path))
+      when LLM::File
+        if content.image?
+          {content: "This message has an image associated with it", images: [content.to_b64]}
+        else
+          raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
+                                  "is not an image, and therefore not supported by the " \
+                                  "Ollama API"
+        end
+      when String
+        {content:}
+      when LLM::Message
+        format_content(content.content)
+      when LLM::Function::Return
+        throw(:abort, {role: "tool", tool_call_id: content.id, content: JSON.dump(content.value)})
+      else
+        raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
+                                "is not supported by the Ollama API"
+      end
+    end
+
+    def format_message
+      case content
+      when Array
+        format_array
+      else
+        {role: message.role}.merge(format_content(content))
+      end
+    end
+
+    def format_array
+      if content.empty?
+        nil
+      elsif returns.any?
+        returns.map { {role: "tool", tool_call_id: _1.id, content: JSON.dump(_1.value)} }
+      else
+        content.flat_map { {role: message.role}.merge(format_content(_1)) }
+      end
+    end
+
+    def message = @message
+    def content = message.content
+    def returns = content.grep(LLM::Function::Return)
+  end
+end
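
To make the dispatch above concrete, here is a sketch of what the formatter yields for a message that mixes text with an image (the file name is hypothetical; the return value is written out by hand, following the `format_array` and `format_content` branches):

```ruby
formatter = LLM::Ollama::Format::CompletionFormat.new(
  LLM::Message.new(:user, ["Describe this image", LLM.File("cat.png")])
)
formatter.format
# => [{role: :user, content: "Describe this image"},
#     {role: :user, content: "This message has an image associated with it",
#      images: ["<base64-encoded cat.png>"]}]
```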

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format.rb
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+class LLM::Ollama
+  ##
+  # @private
+  module Format
+    require_relative "format/completion_format"
+
+    ##
+    # @param [Array<LLM::Message>] messages
+    #  The messages to format
+    # @return [Array<Hash>]
+    def format(messages)
+      messages.filter_map do |message|
+        CompletionFormat.new(message).format
+      end
+    end
+
+    private
+
+    ##
+    # @param [Hash] params
+    # @return [Hash]
+    def format_tools(tools)
+      return {} unless tools&.any?
+      {tools: tools.map { _1.format(self) }}
+    end
+  end
+end
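
The module is presumably mixed into the Ollama provider (mirroring `include Format` in the Gemini class above) so that `format` can turn a message history into the request payload. A minimal sketch:

```ruby
include LLM::Ollama::Format
format([LLM::Message.new(:user, "Hi"), LLM::Message.new(:assistant, "Hello!")])
# => [{role: :user, content: "Hi"}, {role: :assistant, content: "Hello!"}]
```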

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/models.rb
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+class LLM::Ollama
+  ##
+  # The {LLM::Ollama::Models LLM::Ollama::Models} class provides a model
+  # object for interacting with [Ollama's models API](https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models).
+  # The models API allows a client to query Ollama for a list of models
+  # that are available for use with the Ollama API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.ollama(nil)
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    include LLM::Utils
+
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Ollama::Models]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.ollama(nil)
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models Ollama docs
+    # @see https://ollama.com/library Ollama library
+    # @param [Hash] params Other parameters (see Ollama docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def all(**params)
+      query = URI.encode_www_form(params)
+      req = Net::HTTP::Get.new("/api/tags?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res)
+    end
+
+    private
+
+    [:headers, :execute].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/completion.rb
@@ -0,0 +1,28 @@
+# frozen_string_literal: true
+
+module LLM::Ollama::Response
+  module Completion
+    def model = body.model
+    def prompt_tokens = body.prompt_eval_count || 0
+    def completion_tokens = body.eval_count || 0
+    def total_tokens = prompt_tokens + completion_tokens
+    def message = body.message
+    def choices = [format_choices]
+
+    private
+
+    def format_choices
+      role, content, calls = message.to_h.values_at("role", "content", "tool_calls")
+      extra = {response: self, tool_calls: format_tool_calls(calls)}
+      LLM::Message.new(role, content, extra)
+    end
+
+    def format_tool_calls(tools)
+      return [] unless tools
+      tools.filter_map do |tool|
+        next unless tool["function"]
+        LLM::Object.new(tool["function"])
+      end
+    end
+  end
+end
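
A brief sketch of reading token usage and the formatted message through these helpers, assuming a response the Ollama provider has extended with this module:

```ruby
res.total_tokens     # prompt_eval_count + eval_count from the response body
msg = res.choices[0] # an LLM::Message built by format_choices
msg.content          # the assistant's text
```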

data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/stream_parser.rb
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+class LLM::Ollama
+  ##
+  # @private
+  class StreamParser
+    ##
+    # Returns the fully constructed response body
+    # @return [LLM::Object]
+    attr_reader :body
+
+    ##
+    # @return [LLM::OpenAI::Chunk]
+    def initialize(io)
+      @body = LLM::Object.new
+      @io = io
+    end
+
+    ##
+    # @param [Hash] chunk
+    # @return [LLM::OpenAI::Chunk]
+    def parse!(chunk)
+      tap { merge!(chunk) }
+    end
+
+    private
+
+    def merge!(chunk)
+      chunk.each do |key, value|
+        if key == "message"
+          if @body[key]
+            @body[key]["content"] << value["content"]
+            @io << value["content"] if @io.respond_to?(:<<)
+          else
+            @body[key] = value
+            @io << value["content"] if @io.respond_to?(:<<)
+          end
+        else
+          @body[key] = value
+        end
+      end
+    end
+  end
+end
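
As with the Gemini parser above, chunks merge in place, but Ollama's format is simpler: only `message.content` accumulates, and every other key is overwritten by the latest chunk. A hand-written sketch (chunk shapes mimic Ollama's streaming responses, not this diff):

```ruby
parser = LLM::Ollama::StreamParser.new($stdout)
parser.parse!({"message" => {"role" => "assistant", "content" => "Hel"}})
parser.parse!({"message" => {"role" => "assistant", "content" => "lo"}, "done" => true})
parser.body["message"]["content"] # => "Hello" ($stdout also received each delta)
```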