llm-shell 0.9.2 → 0.10.0
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +61 -66
- data/lib/llm/shell/command.rb +40 -40
- data/lib/llm/shell/commands/clear_screen.rb +4 -18
- data/lib/llm/shell/commands/debug_mode.rb +12 -0
- data/lib/llm/shell/commands/dir_import.rb +4 -20
- data/lib/llm/shell/commands/disable_tool.rb +33 -0
- data/lib/llm/shell/commands/enable_tool.rb +33 -0
- data/lib/llm/shell/commands/file_import.rb +4 -20
- data/lib/llm/shell/commands/help.rb +23 -36
- data/lib/llm/shell/commands/show_chat.rb +4 -19
- data/lib/llm/shell/commands/show_version.rb +4 -20
- data/lib/llm/shell/commands/system_prompt.rb +4 -18
- data/lib/llm/shell/completion.rb +5 -5
- data/lib/llm/shell/config.rb +4 -5
- data/lib/llm/shell/formatter.rb +1 -2
- data/lib/llm/shell/internal/coderay/lib/coderay/duo.rb +81 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/_map.rb +17 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/comment_filter.rb +25 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/count.rb +39 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug.rb +49 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug_lint.rb +63 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/div.rb +23 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/encoder.rb +190 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/filter.rb +58 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/css.rb +65 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/numbering.rb +108 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/output.rb +164 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html.rb +333 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/json.rb +83 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lines_of_code.rb +45 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lint.rb +59 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/null.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/page.rb +24 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/span.rb +23 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/statistic.rb +95 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/terminal.rb +195 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/text.rb +46 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/token_kind_filter.rb +111 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/xml.rb +72 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/yaml.rb +50 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/for_redcloth.rb +95 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/file_type.rb +151 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin.rb +55 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin_host.rb +221 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/word_list.rb +72 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/_map.rb +24 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/c.rb +189 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/clojure.rb +217 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/cpp.rb +217 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/css.rb +196 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/debug.rb +75 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/delphi.rb +144 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/diff.rb +221 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/erb.rb +81 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/go.rb +208 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/groovy.rb +268 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/haml.rb +168 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/html.rb +275 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java/builtin_types.rb +421 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java.rb +174 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java_script.rb +236 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/json.rb +98 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/lua.rb +280 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/php.rb +527 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/python.rb +287 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/raydebug.rb +75 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/patterns.rb +178 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/string_state.rb +79 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby.rb +477 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sass.rb +232 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/scanner.rb +337 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sql.rb +169 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/taskpaper.rb +36 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/text.rb +26 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/xml.rb +17 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/yaml.rb +140 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners.rb +27 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/_map.rb +7 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/alpha.rb +153 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/style.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles.rb +15 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/token_kinds.rb +85 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/tokens.rb +164 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/tokens_proxy.rb +55 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/version.rb +3 -0
- data/lib/llm/shell/internal/coderay/lib/coderay.rb +284 -0
- data/lib/llm/shell/internal/io-line/lib/io/line/multiple.rb +19 -0
- data/lib/{io → llm/shell/internal/io-line/lib/io}/line.rb +2 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/builder.rb +31 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/conversable.rb +37 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/completion.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/respond.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot.rb +150 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/buffer.rb +162 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/client.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/error.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventhandler.rb +44 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/event.rb +69 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/parser.rb +88 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/file.rb +91 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/function.rb +177 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/message.rb +178 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/mime.rb +140 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/multipart.rb +101 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object/builder.rb +38 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object/kernel.rb +53 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object.rb +89 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/provider.rb +352 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/error_handler.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/files.rb +155 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format/completion_format.rb +88 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format.rb +29 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/models.rb +54 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/completion.rb +39 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/enumerable.rb +11 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/file.rb +23 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/web_search.rb +21 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/stream_parser.rb +66 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic.rb +138 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format.rb +27 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek.rb +75 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/audio.rb +73 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/error_handler.rb +47 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/files.rb +146 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format/completion_format.rb +69 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format.rb +39 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/images.rb +133 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/models.rb +60 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/completion.rb +35 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/embedding.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/file.rb +11 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/files.rb +15 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/image.rb +31 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/models.rb +15 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/web_search.rb +22 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/stream_parser.rb +86 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini.rb +173 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/llamacpp.rb +74 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/error_handler.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format/completion_format.rb +77 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format.rb +29 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/models.rb +56 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/completion.rb +28 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/embedding.rb +9 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/stream_parser.rb +44 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama.rb +116 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/audio.rb +91 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/error_handler.rb +46 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/files.rb +134 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/completion_format.rb +90 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/moderation_format.rb +35 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/respond_format.rb +72 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format.rb +54 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/images.rb +109 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/models.rb +55 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/moderations.rb +65 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/audio.rb +7 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/completion.rb +40 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/embedding.rb +9 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/enumerable.rb +23 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/file.rb +7 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/image.rb +16 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/moderations.rb +34 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb +48 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/web_search.rb +21 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses/stream_parser.rb +76 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses.rb +99 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/stream_parser.rb +86 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/vector_stores.rb +228 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai.rb +206 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai/images.rb +58 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai.rb +72 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/zai.rb +74 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/response.rb +67 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/array.rb +26 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/boolean.rb +13 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/integer.rb +43 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/leaf.rb +78 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/null.rb +13 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/number.rb +43 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/object.rb +41 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/string.rb +34 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/version.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema.rb +81 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/server_tool.rb +32 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/tool/param.rb +75 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/tool.rb +78 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/utils.rb +19 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/version.rb +5 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm.rb +121 -0
- data/lib/llm/shell/internal/optparse/lib/optionparser.rb +2 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/ac.rb +70 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/date.rb +18 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/kwargs.rb +27 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/shellwords.rb +7 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/time.rb +11 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/uri.rb +7 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/version.rb +80 -0
- data/lib/llm/shell/internal/optparse/lib/optparse.rb +2469 -0
- data/lib/llm/shell/internal/paint/lib/paint/constants.rb +104 -0
- data/lib/llm/shell/internal/paint/lib/paint/pa.rb +13 -0
- data/lib/llm/shell/internal/paint/lib/paint/rgb_colors.rb +14 -0
- data/lib/llm/shell/internal/paint/lib/paint/shortcuts.rb +100 -0
- data/lib/llm/shell/internal/paint/lib/paint/shortcuts_version.rb +5 -0
- data/lib/llm/shell/internal/paint/lib/paint/util.rb +16 -0
- data/lib/llm/shell/internal/paint/lib/paint/version.rb +5 -0
- data/lib/llm/shell/internal/paint/lib/paint.rb +261 -0
- data/lib/llm/shell/internal/reline/lib/reline/config.rb +378 -0
- data/lib/llm/shell/internal/reline/lib/reline/face.rb +199 -0
- data/lib/llm/shell/internal/reline/lib/reline/history.rb +76 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/ansi.rb +322 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/dumb.rb +120 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/windows.rb +530 -0
- data/lib/llm/shell/internal/reline/lib/reline/io.rb +55 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/base.rb +37 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/composite.rb +17 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/emacs.rb +517 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_command.rb +518 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_insert.rb +517 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor.rb +8 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_stroke.rb +119 -0
- data/lib/llm/shell/internal/reline/lib/reline/kill_ring.rb +125 -0
- data/lib/llm/shell/internal/reline/lib/reline/line_editor.rb +2356 -0
- data/lib/llm/shell/internal/reline/lib/reline/unicode/east_asian_width.rb +1292 -0
- data/lib/llm/shell/internal/reline/lib/reline/unicode.rb +421 -0
- data/lib/llm/shell/internal/reline/lib/reline/version.rb +3 -0
- data/lib/llm/shell/internal/reline/lib/reline.rb +527 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/generated_parser.rb +712 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/handler.rb +268 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date.rb +35 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date_time.rb +42 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_time.rb +40 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/parser.rb +21 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/scanner.rb +92 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/string_utils.rb +40 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/version.rb +5 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb.rb +49 -0
- data/lib/llm/shell/options.rb +1 -1
- data/lib/llm/shell/renderer.rb +2 -3
- data/lib/llm/shell/repl.rb +21 -16
- data/lib/llm/shell/tool.rb +42 -0
- data/lib/llm/shell/tools/read_file.rb +15 -0
- data/lib/llm/shell/tools/system.rb +17 -0
- data/lib/llm/shell/tools/write_file.rb +16 -0
- data/lib/llm/shell/version.rb +1 -1
- data/lib/llm/shell.rb +83 -39
- data/libexec/llm-shell/shell +4 -6
- data/llm-shell.gemspec +0 -4
- metadata +233 -63
- data/lib/llm/function.rb +0 -17
- data/lib/llm/shell/command/extension.rb +0 -42
- data/lib/llm/shell/commands/utils.rb +0 -21
- data/lib/llm/shell/functions/read_file.rb +0 -22
- data/lib/llm/shell/functions/write_file.rb +0 -22
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/audio.rb:

@@ -0,0 +1,73 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # The {LLM::Gemini::Audio LLM::Gemini::Audio} class provides an audio
+  # object for interacting with [Gemini's audio API](https://ai.google.dev/gemini-api/docs/audio).
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(key: ENV["KEY"])
+  #   res = llm.audio.create_transcription(input: "/audio/rocket.mp3")
+  #   res.text # => "A dog on a rocket to the moon"
+  class Audio
+    ##
+    # Returns a new Audio object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Gemini::Responses]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    # This method is not implemented by Gemini
+    def create_speech
+      raise NotImplementedError
+    end
+
+    ##
+    # Create an audio transcription
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
+    #   res.text # => "A dog on a rocket to the moon"
+    # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
+    # @param [String, LLM::File, LLM::Response] file The input audio
+    # @param [String] model The model to use
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create_transcription(file:, model: "gemini-1.5-flash", **params)
+      res = @provider.complete [
+        "Your task is to transcribe the contents of an audio file",
+        "Your response should include the transcription, and nothing else",
+        LLM.File(file)
+      ], params.merge(role: :user, model:)
+      res.tap { _1.define_singleton_method(:text) { choices[0].content } }
+    end
+
+    ##
+    # Create an audio translation (in English)
+    # @example
+    #   # Arabic => English
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.audio.create_translation(file: "/audio/bismillah.mp3")
+    #   res.text # => "In the name of Allah, the Beneficent, the Merciful."
+    # @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
+    # @param [String, LLM::File, LLM::Response] file The input audio
+    # @param [String] model The model to use
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create_translation(file:, model: "gemini-1.5-flash", **params)
+      res = @provider.complete [
+        "Your task is to translate the contents of an audio file into English",
+        "Your response should include the translation, and nothing else",
+        LLM.File(file)
+      ], params.merge(role: :user, model:)
+      res.tap { _1.define_singleton_method(:text) { choices[0].content } }
+    end
+  end
+end
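Both helpers wrap an ordinary chat completion and define a `text` singleton method on the response. A minimal usage sketch based on the doc comments above (the ENV variable name and audio paths are placeholders):

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(key: ENV["KEY"])

# Transcribe an audio file; #text is the singleton defined by create_transcription
res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
puts res.text # => "A dog on a rocket to the moon"

# Translate an audio file into English
res = llm.audio.create_translation(file: "/audio/bismillah.mp3")
puts res.text
```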
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/error_handler.rb:

@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # @private
+  class ErrorHandler
+    ##
+    # @return [Net::HTTPResponse]
+    # Non-2XX response from the server
+    attr_reader :res
+
+    ##
+    # @param [Net::HTTPResponse] res
+    # The response from the server
+    # @return [LLM::Gemini::ErrorHandler]
+    def initialize(res)
+      @res = res
+    end
+
+    ##
+    # @raise [LLM::Error]
+    # Raises a subclass of {LLM::Error LLM::Error}
+    def raise_error!
+      case res
+      when Net::HTTPServerError
+        raise LLM::ServerError.new { _1.response = res }, "Server error"
+      when Net::HTTPBadRequest
+        reason = body.dig("error", "details", 0, "reason")
+        if reason == "API_KEY_INVALID"
+          raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
+        else
+          raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
+        end
+      when Net::HTTPTooManyRequests
+        raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
+      else
+        raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
+      end
+    end
+
+    private
+
+    def body
+      @body ||= JSON.parse(res.body)
+    end
+  end
+end
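Because `raise_error!` maps HTTP statuses onto the gem's `LLM::Error` hierarchy, and each error object carries the offending `Net::HTTPResponse`, callers can rescue by failure class rather than inspecting status codes. A sketch of the pattern this enables; the retry policy here is illustrative, not part of the gem:

```ruby
begin
  res = llm.complete("Hello, world", role: :user)
rescue LLM::RateLimitError
  sleep(1) # 429: back off, then try again (caller-defined policy)
  retry
rescue LLM::UnauthorizedError => e
  # 400 whose error reason is API_KEY_INVALID
  warn "invalid API key (HTTP #{e.response.code})"
rescue LLM::ResponseError => e
  warn "unexpected response (HTTP #{e.response.code})"
end
```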
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/files.rb:

@@ -0,0 +1,146 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # The {LLM::Gemini::Files LLM::Gemini::Files} class provides a files
+  # object for interacting with [Gemini's Files API](https://ai.google.dev/gemini-api/docs/files).
+  # The files API allows a client to reference media files in prompts
+  # where they can be referenced by their URL.
+  #
+  # The files API is intended to preserve bandwidth and latency,
+  # especially for large files but it can be helpful for smaller files
+  # as well because it does not require the client to include a file
+  # in the prompt over and over again (which could be the case in a
+  # multi-turn conversation).
+  #
+  # @example example #1
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(key: ENV["KEY"])
+  #   bot = LLM::Bot.new(llm)
+  #   file = llm.files.create(file: "/audio/haiku.mp3")
+  #   bot.chat ["Tell me about this file", file]
+  #   bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+  class Files
+    require_relative "response/file"
+    require_relative "response/files"
+
+    ##
+    # Returns a new Files object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Gemini::Files]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all files
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.files.all
+    #   res.each do |file|
+    #     print "name: ", file.name, "\n"
+    #   end
+    # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def all(**params)
+      query = URI.encode_www_form(params.merge!(key: key))
+      req = Net::HTTP::Get.new("/v1beta/files?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::Gemini::Response::Files)
+    end
+
+    ##
+    # Create a file
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.files.create(file: "/audio/haiku.mp3")
+    # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
+    # @param [String, LLM::File] file The file
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def create(file:, **params)
+      file = LLM.File(file)
+      req = Net::HTTP::Post.new(request_upload_url(file:), {})
+      req["content-length"] = file.bytesize
+      req["X-Goog-Upload-Offset"] = 0
+      req["X-Goog-Upload-Command"] = "upload, finalize"
+      file.with_io do |io|
+        set_body_stream(req, io)
+        res = execute(request: req)
+        LLM::Response.new(res).extend(LLM::Gemini::Response::File)
+      end
+    end
+
+    ##
+    # Get a file
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.files.get(file: "files/1234567890")
+    #   print "name: ", res.name, "\n"
+    # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
+    # @param [#name, String] file The file to get
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def get(file:, **params)
+      file_id = file.respond_to?(:name) ? file.name : file.to_s
+      query = URI.encode_www_form(params.merge!(key: key))
+      req = Net::HTTP::Get.new("/v1beta/#{file_id}?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::Gemini::Response::File)
+    end
+
+    ##
+    # Delete a file
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.files.delete(file: "files/1234567890")
+    # @see https://ai.google.dev/gemini-api/docs/files Gemini docs
+    # @param [#name, String] file The file to delete
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def delete(file:, **params)
+      file_id = file.respond_to?(:name) ? file.name : file.to_s
+      query = URI.encode_www_form(params.merge!(key: key))
+      req = Net::HTTP::Delete.new("/v1beta/#{file_id}?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res)
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    # This method is not implemented by Gemini
+    def download
+      raise NotImplementedError
+    end
+
+    private
+
+    include LLM::Utils
+
+    def request_upload_url(file:)
+      req = Net::HTTP::Post.new("/upload/v1beta/files?key=#{key}", headers)
+      req["X-Goog-Upload-Protocol"] = "resumable"
+      req["X-Goog-Upload-Command"] = "start"
+      req["X-Goog-Upload-Header-Content-Length"] = file.bytesize
+      req["X-Goog-Upload-Header-Content-Type"] = file.mime_type
+      req.body = JSON.dump(file: {display_name: File.basename(file.path)})
+      res = execute(request: req)
+      res["x-goog-upload-url"]
+    end
+
+    def key
+      @provider.instance_variable_get(:@key)
+    end
+
+    [:headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end
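The upload is a two-step resumable flow: `request_upload_url` starts the session, then `create` streams the bytes and finalizes in one request. Day-to-day usage, assembled from the @example blocks above (key and path are placeholders):

```ruby
llm = LLM.gemini(key: ENV["KEY"])

file = llm.files.create(file: "/audio/haiku.mp3")        # upload
llm.files.all.each { |f| print "name: ", f.name, "\n" }  # list
llm.files.get(file: file)    # accepts a #name-responding object or a "files/..." id
llm.files.delete(file: file) # cleanup
```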
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format/completion_format.rb:

@@ -0,0 +1,69 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Format
+  ##
+  # @private
+  class CompletionFormat
+    ##
+    # @param [LLM::Message, Hash] message
+    # The message to format
+    def initialize(message)
+      @message = message
+    end
+
+    ##
+    # Formats the message for the Gemini chat completions API
+    # @return [Hash]
+    def format
+      catch(:abort) do
+        if Hash === message
+          {role: message[:role], parts: format_content(message[:content])}
+        elsif message.tool_call?
+          {role: message.role, parts: message.extra[:original_tool_calls].map { {"functionCall" => _1} }}
+        else
+          {role: message.role, parts: format_content(message.content)}
+        end
+      end
+    end
+
+    def format_content(content)
+      case content
+      when Array
+        content.empty? ? throw(:abort, nil) : content.flat_map { format_content(_1) }
+      when LLM::Response
+        format_response(content)
+      when File
+        content.close unless content.closed?
+        format_content(LLM.File(content.path))
+      when LLM::File
+        file = content
+        [{inline_data: {mime_type: file.mime_type, data: file.to_b64}}]
+      when String
+        [{text: content}]
+      when LLM::Message
+        format_content(content.content)
+      when LLM::Function::Return
+        [{functionResponse: {name: content.name, response: content.value}}]
+      else
+        prompt_error!(content)
+      end
+    end
+
+    def format_response(response)
+      if response.file?
+        file = response
+        [{file_data: {mime_type: file.mime_type, file_uri: file.uri}}]
+      else
+        prompt_error!(content)
+      end
+    end
+
+    def prompt_error!(object)
+      raise LLM::PromptError, "The given object (an instance of #{object.class}) " \
+                              "is not supported by the Gemini API"
+    end
+
+    def message = @message
+    def content = message.content
+  end
+end
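For orientation, here is roughly what `format` produces for a plain text message versus a local file, per the branches above (output shapes are paraphrased from the code, not captured from a run):

```ruby
CompletionFormat.new({role: "user", content: "Hello"}).format
# => {role: "user", parts: [{text: "Hello"}]}

CompletionFormat.new({role: "user", content: LLM.File("cat.png")}).format
# => {role: "user", parts: [{inline_data: {mime_type: "image/png", data: "<base64>"}}]}
```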
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format.rb:

@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # @private
+  module Format
+    require_relative "format/completion_format"
+
+    ##
+    # @param [Array<LLM::Message>] messages
+    # The messages to format
+    # @return [Array<Hash>]
+    def format(messages)
+      messages.filter_map do |message|
+        CompletionFormat.new(message).format
+      end
+    end
+
+    private
+
+    ##
+    # @param [Hash] params
+    # @return [Hash]
+    def format_schema(params)
+      return {} unless params and params[:schema]
+      schema = params.delete(:schema)
+      {generationConfig: {response_mime_type: "application/json", response_schema: schema}}
+    end
+
+    ##
+    # @param [Hash] params
+    # @return [Hash]
+    def format_tools(tools)
+      return {} unless tools&.any?
+      platform, functions = [tools.grep(LLM::ServerTool), tools.grep(LLM::Function)]
+      {tools: [*platform, {functionDeclarations: functions.map { _1.format(self) }}]}
+    end
+  end
+end
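The two private helpers splice optional sections into the request payload: `format_schema` re-nests a `:schema` under `generationConfig` for JSON-mode output, and `format_tools` separates server tools from local functions. Illustrative input/output for the schema path, read off the code above:

```ruby
params = {schema: {type: "object"}}
format_schema(params)
# => {generationConfig: {response_mime_type: "application/json",
#                        response_schema: {type: "object"}}}
# Note: :schema is deleted from params as a side effect.
```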
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/images.rb:

@@ -0,0 +1,133 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # The {LLM::Gemini::Images LLM::Gemini::Images} class provides an images
+  # object for interacting with [Gemini's images API](https://ai.google.dev/gemini-api/docs/image-generation).
+  # Please note that unlike OpenAI, which can return either URLs or base64-encoded strings,
+  # Gemini's images API will always return an image as a base64 encoded string that
+  # can be decoded into binary.
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(key: ENV["KEY"])
+  #   res = llm.images.create prompt: "A dog on a rocket to the moon"
+  #   IO.copy_stream res.images[0], "rocket.png"
+  class Images
+    require_relative "response/image"
+    include Format
+
+    ##
+    # Returns a new Images object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Gemini::Responses]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # Create an image
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.images.create prompt: "A dog on a rocket to the moon"
+    #   IO.copy_stream res.images[0], "rocket.png"
+    # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
+    # @param [String] prompt The prompt
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @raise [LLM::NoImageError] when no images are returned
+    # @note
+    #  The prompt should make it clear you want to generate an image, or you
+    #  might unexpectedly receive a purely textual response. This is due to how
+    #  Gemini implements image generation under the hood.
+    # @return [LLM::Response]
+    def create(prompt:, model: "gemini-2.5-flash-image-preview", **params)
+      req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
+      body = JSON.dump({
+        contents: [{parts: [{text: create_prompt}, {text: prompt}]}],
+        generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
+      }.merge!(params))
+      req.body = body
+      res = execute(request: req)
+      validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+    end
+
+    ##
+    # Edit an image
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.images.edit image: "cat.png", prompt: "Add a hat to the cat"
+    #   IO.copy_stream res.images[0], "hatoncat.png"
+    # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
+    # @param [String, LLM::File] image The image to edit
+    # @param [String] prompt The prompt
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @raise [LLM::NoImageError] when no images are returned
+    # @note (see LLM::Gemini::Images#create)
+    # @return [LLM::Response]
+    def edit(image:, prompt:, model: "gemini-2.5-flash-image-preview", **params)
+      req = Net::HTTP::Post.new("/v1beta/models/#{model}:generateContent?key=#{key}", headers)
+      image = LLM.File(image)
+      body = JSON.dump({
+        contents: [{parts: [{text: edit_prompt}, {text: prompt}, format.format_content(image)]}],
+        generationConfig: {responseModalities: ["TEXT", "IMAGE"]}
+      }.merge!(params)).b
+      set_body_stream(req, StringIO.new(body))
+      res = execute(request: req)
+      validate LLM::Response.new(res).extend(LLM::Gemini::Response::Image)
+    end
+
+    ##
+    # @raise [NotImplementedError]
+    # This method is not implemented by Gemini
+    def create_variation
+      raise NotImplementedError
+    end
+
+    private
+
+    def format
+      @format ||= CompletionFormat.new(nil)
+    end
+
+    def key
+      @provider.instance_variable_get(:@key)
+    end
+
+    def create_prompt
+      <<~PROMPT
+        ## Context
+        Your task is to generate one or more image(s) based on the user's instructions.
+        The user will provide you with text only.
+
+        ## Instructions
+        1. The model *MUST* generate image(s) based on the user text alone.
+        2. The model *MUST NOT* generate anything else.
+      PROMPT
+    end
+
+    def edit_prompt
+      <<~PROMPT
+        ## Context
+        Your task is to edit the provided image based on the user's instructions.
+        The user will provide you with both text and an image.
+
+        ## Instructions
+        1. The model *MUST* edit the provided image based on the user's instructions
+        2. The model *MUST NOT* generate a new image.
+        3. The model *MUST NOT* generate anything else.
+      PROMPT
+    end
+
+    def validate(res)
+      return res unless res.images.empty?
+      raise LLM::NoImageError.new { _1.response = res.res }, "no images found in response"
+    end
+
+    [:headers, :execute, :set_body_stream].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end
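Since Gemini returns inline base64 data rather than URLs, `res.images` yields `StringIO` objects that can be streamed straight to disk. Usage per the doc comments above (paths and prompts are placeholders):

```ruby
llm = LLM.gemini(key: ENV["KEY"])

res = llm.images.create(prompt: "A dog on a rocket to the moon")
IO.copy_stream res.images[0], "rocket.png"

res = llm.images.edit(image: "rocket.png", prompt: "Add a hat to the dog")
IO.copy_stream res.images[0], "rocket-hat.png"
```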
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/models.rb:

@@ -0,0 +1,60 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # The {LLM::Gemini::Models LLM::Gemini::Models} class provides a model
+  # object for interacting with [Gemini's models API](https://ai.google.dev/api/models?hl=en#method:-models.list).
+  # The models API allows a client to query Gemini for a list of models
+  # that are available for use with the Gemini API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(key: ENV["KEY"])
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    require_relative "response/models"
+    include LLM::Utils
+
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Gemini::Models]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.gemini(key: ENV["KEY"])
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://ai.google.dev/api/models?hl=en#method:-models.list Gemini docs
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response]
+    def all(**params)
+      query = URI.encode_www_form(params.merge!(key: key))
+      req = Net::HTTP::Get.new("/v1beta/models?#{query}", headers)
+      res = execute(request: req)
+      LLM::Response.new(res).extend(LLM::Gemini::Response::Models)
+    end
+
+    private
+
+    def key
+      @provider.instance_variable_get(:@key)
+    end
+
+    [:headers, :execute].each do |m|
+      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+    end
+  end
+end
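Listing models, per the @example above; the response is extended with `Response::Models`, which is `Enumerable` (see the last hunk below):

```ruby
llm = LLM.gemini(key: ENV["KEY"])
llm.models.all.each { |model| print "id: ", model.id, "\n" }
```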
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/completion.rb:

@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module Completion
+    def model = body.modelVersion
+    def prompt_tokens = body.usageMetadata.promptTokenCount
+    def completion_tokens = body.usageMetadata.candidatesTokenCount
+    def total_tokens = body.usageMetadata.totalTokenCount
+    def choices = format_choices
+
+    private
+
+    def format_choices
+      candidates.map.with_index do |choice, index|
+        choice = LLM::Object.from_hash(choice)
+        content = choice.content || LLM::Object.new
+        role = content.role || "model"
+        parts = content.parts || [{"text" => choice.finishReason}]
+        text = parts.filter_map { _1["text"] }.join
+        tools = parts.filter_map { _1["functionCall"] }
+        extra = {index:, response: self, tool_calls: format_tool_calls(tools), original_tool_calls: tools}
+        LLM::Message.new(role, text, extra)
+      end
+    end
+
+    def format_tool_calls(tools)
+      (tools || []).map do |tool|
+        function = {name: tool.name, arguments: tool.args}
+        LLM::Object.new(function)
+      end
+    end
+
+    def candidates = body.candidates || []
+  end
+end
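These readers adapt Gemini's `candidates`/`usageMetadata` payload to the gem's provider-neutral accessors. A sketch of what a completion response then exposes (values are illustrative):

```ruby
res = llm.complete("Hello, world", role: :user)
res.model              # => "gemini-1.5-flash" (from body.modelVersion)
res.total_tokens       # => body.usageMetadata.totalTokenCount
res.choices[0].content # => joined text parts of the first candidate
```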
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/file.rb:

@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module File
+    def name = respond_to?(:file) ? file.name : body.name
+    def display_name = respond_to?(:file) ? file.displayName : body.displayName
+    def mime_type = respond_to?(:file) ? file.mimeType : body.mimeType
+    def uri = respond_to?(:file) ? file.uri : body.uri
+    def file? = true
+  end
+end
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/image.rb:

@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module Image
+    ##
+    # @return [Array<StringIO>]
+    def images
+      candidates.flat_map do |candidate|
+        parts = candidate&.dig(:content, :parts) || []
+        parts.filter_map do
+          data = _1.dig(:inlineData, :data)
+          next unless data
+          StringIO.new(data.unpack1("m0"))
+        end
+      end
+    end
+
+    ##
+    # Returns one or more image URLs, or an empty array
+    # @note
+    #  Gemini's image generation API does not return URLs, so this method
+    #  will always return an empty array.
+    # @return [Array<String>]
+    def urls = []
+
+    ##
+    # Returns one or more candidates, or an empty array
+    # @return [Array<Hash>]
+    def candidates = body.candidates || []
+  end
+end
data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/models.rb:

@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+module LLM::Gemini::Response
+  module Models
+    include ::Enumerable
+    def each(&)
+      return enum_for(:each) unless block_given?
+      models.each { yield(_1) }
+    end
+
+    def models
+      body.models || []
+    end
+  end
+end