llm-shell 0.9.2 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +61 -66
- data/lib/llm/shell/command.rb +40 -40
- data/lib/llm/shell/commands/clear_screen.rb +4 -18
- data/lib/llm/shell/commands/debug_mode.rb +12 -0
- data/lib/llm/shell/commands/dir_import.rb +4 -20
- data/lib/llm/shell/commands/disable_tool.rb +33 -0
- data/lib/llm/shell/commands/enable_tool.rb +33 -0
- data/lib/llm/shell/commands/file_import.rb +4 -20
- data/lib/llm/shell/commands/help.rb +23 -36
- data/lib/llm/shell/commands/show_chat.rb +4 -19
- data/lib/llm/shell/commands/show_version.rb +4 -20
- data/lib/llm/shell/commands/system_prompt.rb +4 -18
- data/lib/llm/shell/completion.rb +5 -5
- data/lib/llm/shell/config.rb +4 -5
- data/lib/llm/shell/formatter.rb +1 -2
- data/lib/llm/shell/internal/coderay/lib/coderay/duo.rb +81 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/_map.rb +17 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/comment_filter.rb +25 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/count.rb +39 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug.rb +49 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/debug_lint.rb +63 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/div.rb +23 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/encoder.rb +190 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/filter.rb +58 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/css.rb +65 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/numbering.rb +108 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html/output.rb +164 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/html.rb +333 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/json.rb +83 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lines_of_code.rb +45 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/lint.rb +59 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/null.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/page.rb +24 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/span.rb +23 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/statistic.rb +95 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/terminal.rb +195 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/text.rb +46 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/token_kind_filter.rb +111 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/xml.rb +72 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders/yaml.rb +50 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/encoders.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/for_redcloth.rb +95 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/file_type.rb +151 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin.rb +55 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/plugin_host.rb +221 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/helpers/word_list.rb +72 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/_map.rb +24 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/c.rb +189 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/clojure.rb +217 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/cpp.rb +217 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/css.rb +196 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/debug.rb +75 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/delphi.rb +144 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/diff.rb +221 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/erb.rb +81 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/go.rb +208 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/groovy.rb +268 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/haml.rb +168 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/html.rb +275 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java/builtin_types.rb +421 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java.rb +174 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/java_script.rb +236 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/json.rb +98 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/lua.rb +280 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/php.rb +527 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/python.rb +287 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/raydebug.rb +75 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/patterns.rb +178 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby/string_state.rb +79 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/ruby.rb +477 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sass.rb +232 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/scanner.rb +337 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/sql.rb +169 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/taskpaper.rb +36 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/text.rb +26 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/xml.rb +17 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners/yaml.rb +140 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/scanners.rb +27 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/_map.rb +7 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/alpha.rb +153 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles/style.rb +18 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/styles.rb +15 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/token_kinds.rb +85 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/tokens.rb +164 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/tokens_proxy.rb +55 -0
- data/lib/llm/shell/internal/coderay/lib/coderay/version.rb +3 -0
- data/lib/llm/shell/internal/coderay/lib/coderay.rb +284 -0
- data/lib/llm/shell/internal/io-line/lib/io/line/multiple.rb +19 -0
- data/lib/{io → llm/shell/internal/io-line/lib/io}/line.rb +2 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/builder.rb +31 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/conversable.rb +37 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/completion.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot/prompt/respond.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/bot.rb +150 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/buffer.rb +162 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/client.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/error.rb +49 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventhandler.rb +44 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/event.rb +69 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream/parser.rb +88 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/eventstream.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/file.rb +91 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/function.rb +177 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/message.rb +178 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/mime.rb +140 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/multipart.rb +101 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object/builder.rb +38 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object/kernel.rb +53 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/object.rb +89 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/provider.rb +352 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/error_handler.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/files.rb +155 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format/completion_format.rb +88 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/format.rb +29 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/models.rb +54 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/completion.rb +39 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/enumerable.rb +11 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/file.rb +23 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/response/web_search.rb +21 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic/stream_parser.rb +66 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/anthropic.rb +138 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek/format.rb +27 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/deepseek.rb +75 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/audio.rb +73 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/error_handler.rb +47 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/files.rb +146 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format/completion_format.rb +69 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/format.rb +39 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/images.rb +133 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/models.rb +60 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/completion.rb +35 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/embedding.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/file.rb +11 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/files.rb +15 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/image.rb +31 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/models.rb +15 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/response/web_search.rb +22 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini/stream_parser.rb +86 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/gemini.rb +173 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/llamacpp.rb +74 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/error_handler.rb +36 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format/completion_format.rb +77 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/format.rb +29 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/models.rb +56 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/completion.rb +28 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/response/embedding.rb +9 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama/stream_parser.rb +44 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/ollama.rb +116 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/audio.rb +91 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/error_handler.rb +46 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/files.rb +134 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/completion_format.rb +90 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/moderation_format.rb +35 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format/respond_format.rb +72 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/format.rb +54 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/images.rb +109 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/models.rb +55 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/moderations.rb +65 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/audio.rb +7 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/completion.rb +40 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/embedding.rb +9 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/enumerable.rb +23 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/file.rb +7 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/image.rb +16 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/moderations.rb +34 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/responds.rb +48 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/response/web_search.rb +21 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses/stream_parser.rb +76 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/responses.rb +99 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/stream_parser.rb +86 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai/vector_stores.rb +228 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/openai.rb +206 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai/images.rb +58 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/xai.rb +72 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/providers/zai.rb +74 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/response.rb +67 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/array.rb +26 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/boolean.rb +13 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/integer.rb +43 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/leaf.rb +78 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/null.rb +13 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/number.rb +43 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/object.rb +41 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/string.rb +34 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema/version.rb +8 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/schema.rb +81 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/server_tool.rb +32 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/tool/param.rb +75 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/tool.rb +78 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/utils.rb +19 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm/version.rb +5 -0
- data/lib/llm/shell/internal/llm.rb/lib/llm.rb +121 -0
- data/lib/llm/shell/internal/optparse/lib/optionparser.rb +2 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/ac.rb +70 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/date.rb +18 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/kwargs.rb +27 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/shellwords.rb +7 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/time.rb +11 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/uri.rb +7 -0
- data/lib/llm/shell/internal/optparse/lib/optparse/version.rb +80 -0
- data/lib/llm/shell/internal/optparse/lib/optparse.rb +2469 -0
- data/lib/llm/shell/internal/paint/lib/paint/constants.rb +104 -0
- data/lib/llm/shell/internal/paint/lib/paint/pa.rb +13 -0
- data/lib/llm/shell/internal/paint/lib/paint/rgb_colors.rb +14 -0
- data/lib/llm/shell/internal/paint/lib/paint/shortcuts.rb +100 -0
- data/lib/llm/shell/internal/paint/lib/paint/shortcuts_version.rb +5 -0
- data/lib/llm/shell/internal/paint/lib/paint/util.rb +16 -0
- data/lib/llm/shell/internal/paint/lib/paint/version.rb +5 -0
- data/lib/llm/shell/internal/paint/lib/paint.rb +261 -0
- data/lib/llm/shell/internal/reline/lib/reline/config.rb +378 -0
- data/lib/llm/shell/internal/reline/lib/reline/face.rb +199 -0
- data/lib/llm/shell/internal/reline/lib/reline/history.rb +76 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/ansi.rb +322 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/dumb.rb +120 -0
- data/lib/llm/shell/internal/reline/lib/reline/io/windows.rb +530 -0
- data/lib/llm/shell/internal/reline/lib/reline/io.rb +55 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/base.rb +37 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/composite.rb +17 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/emacs.rb +517 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_command.rb +518 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor/vi_insert.rb +517 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_actor.rb +8 -0
- data/lib/llm/shell/internal/reline/lib/reline/key_stroke.rb +119 -0
- data/lib/llm/shell/internal/reline/lib/reline/kill_ring.rb +125 -0
- data/lib/llm/shell/internal/reline/lib/reline/line_editor.rb +2356 -0
- data/lib/llm/shell/internal/reline/lib/reline/unicode/east_asian_width.rb +1292 -0
- data/lib/llm/shell/internal/reline/lib/reline/unicode.rb +421 -0
- data/lib/llm/shell/internal/reline/lib/reline/version.rb +3 -0
- data/lib/llm/shell/internal/reline/lib/reline.rb +527 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/generated_parser.rb +712 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/handler.rb +268 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date.rb +35 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_date_time.rb +42 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/local_time.rb +40 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/parser.rb +21 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/scanner.rb +92 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/string_utils.rb +40 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb/version.rb +5 -0
- data/lib/llm/shell/internal/tomlrb/lib/tomlrb.rb +49 -0
- data/lib/llm/shell/options.rb +1 -1
- data/lib/llm/shell/renderer.rb +2 -3
- data/lib/llm/shell/repl.rb +21 -16
- data/lib/llm/shell/tool.rb +42 -0
- data/lib/llm/shell/tools/read_file.rb +15 -0
- data/lib/llm/shell/tools/system.rb +17 -0
- data/lib/llm/shell/tools/write_file.rb +16 -0
- data/lib/llm/shell/version.rb +1 -1
- data/lib/llm/shell.rb +83 -39
- data/libexec/llm-shell/shell +4 -6
- data/llm-shell.gemspec +0 -4
- metadata +233 -63
- data/lib/llm/function.rb +0 -17
- data/lib/llm/shell/command/extension.rb +0 -42
- data/lib/llm/shell/commands/utils.rb +0 -21
- data/lib/llm/shell/functions/read_file.rb +0 -22
- data/lib/llm/shell/functions/write_file.rb +0 -22
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
# frozen_string_literal: true

class LLM::OpenAI
  ##
  # The {LLM::OpenAI::Images LLM::OpenAI::Images} class provides an interface
  # for [OpenAI's images API](https://platform.openai.com/docs/api-reference/images).
  # OpenAI supports multiple response formats: temporary URLs, or binary strings
  # encoded in base64. The default is to return temporary URLs.
  #
  # @example Temporary URLs
  #   #!/usr/bin/env ruby
  #   require "llm"
  #   require "open-uri"
  #   require "fileutils"
  #
  #   llm = LLM.openai(key: ENV["KEY"])
  #   res = llm.images.create prompt: "A dog on a rocket to the moon"
  #   FileUtils.mv OpenURI.open_uri(res.urls[0]).path,
  #                "rocket.png"
  #
  # @example Binary strings
  #   #!/usr/bin/env ruby
  #   require "llm"
  #
  #   llm = LLM.openai(key: ENV["KEY"])
  #   res = llm.images.create prompt: "A dog on a rocket to the moon",
  #                           response_format: "b64_json"
  #   IO.copy_stream res.images[0], "rocket.png"
  class Images
    require_relative "response/image"

    ##
    # Returns a new Images object
    # @param provider [LLM::Provider]
    # @return [LLM::OpenAI::Images]
    def initialize(provider)
      @provider = provider
    end

    ##
    # Create an image
    # @example
    #   llm = LLM.openai(key: ENV["KEY"])
    #   res = llm.images.create prompt: "A dog on a rocket to the moon"
    #   res.urls.each { print _1, "\n" }
    # @see https://platform.openai.com/docs/api-reference/images/create OpenAI docs
    # @param [String] prompt The prompt
    # @param [String] model The model to use
    # @param [Hash] params Other parameters (see OpenAI docs)
    # @raise (see LLM::Provider#request)
    # @return [LLM::Response]
    def create(prompt:, model: "dall-e-3", **params)
      req = Net::HTTP::Post.new("/v1/images/generations", headers)
      # Explicit keys come first so callers can override them via params.
      req.body = JSON.dump({prompt:, n: 1, model:}.merge!(params))
      res = execute(request: req)
      LLM::Response.new(res).extend(LLM::OpenAI::Response::Image)
    end

    ##
    # Create image variations
    # @example
    #   llm = LLM.openai(key: ENV["KEY"])
    #   res = llm.images.create_variation(image: "/images/hat.png", n: 5)
    #   p res.urls
    # @see https://platform.openai.com/docs/api-reference/images/createVariation OpenAI docs
    # @param [File] image The image to create variations from
    # @param [String] model The model to use
    # @param [Hash] params Other parameters (see OpenAI docs)
    # @raise (see LLM::Provider#request)
    # @return [LLM::Response]
    def create_variation(image:, model: "dall-e-2", **params)
      image = LLM.File(image)
      multi = LLM::Multipart.new(params.merge!(image:, model:))
      req = Net::HTTP::Post.new("/v1/images/variations", headers)
      req["content-type"] = multi.content_type
      # Stream the multipart body rather than buffering it in memory.
      set_body_stream(req, multi.body)
      res = execute(request: req)
      LLM::Response.new(res).extend(LLM::OpenAI::Response::Image)
    end

    ##
    # Edit an image
    # @example
    #   llm = LLM.openai(key: ENV["KEY"])
    #   res = llm.images.edit(image: "/images/hat.png", prompt: "A cat wearing this hat")
    #   p res.urls
    # @see https://platform.openai.com/docs/api-reference/images/createEdit OpenAI docs
    # @param [File] image The image to edit
    # @param [String] prompt The prompt
    # @param [String] model The model to use
    # @param [Hash] params Other parameters (see OpenAI docs)
    # @raise (see LLM::Provider#request)
    # @return [LLM::Response]
    def edit(image:, prompt:, model: "dall-e-2", **params)
      image = LLM.File(image)
      multi = LLM::Multipart.new(params.merge!(image:, prompt:, model:))
      req = Net::HTTP::Post.new("/v1/images/edits", headers)
      req["content-type"] = multi.content_type
      set_body_stream(req, multi.body)
      res = execute(request: req)
      LLM::Response.new(res).extend(LLM::OpenAI::Response::Image)
    end

    private

    # Delegate HTTP plumbing to the provider (these are private provider methods,
    # hence send rather than public_send).
    [:headers, :execute, :set_body_stream].each do |m|
      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
    end
  end
end
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
# frozen_string_literal: true

class LLM::OpenAI
  ##
  # The {LLM::OpenAI::Models LLM::OpenAI::Models} class provides a model
  # object for interacting with [OpenAI's models API](https://platform.openai.com/docs/api-reference/models/list).
  # The models API allows a client to query OpenAI for a list of models
  # that are available for use with the OpenAI API.
  #
  # @example
  #   #!/usr/bin/env ruby
  #   require "llm"
  #
  #   llm = LLM.openai(key: ENV["KEY"])
  #   res = llm.models.all
  #   res.each do |model|
  #     print "id: ", model.id, "\n"
  #   end
  class Models
    require_relative "response/enumerable"

    ##
    # Returns a new Models object
    # @param provider [LLM::Provider]
    # @return [LLM::OpenAI::Models]
    def initialize(provider)
      @provider = provider
    end

    ##
    # List all models
    # @example
    #   llm = LLM.openai(key: ENV["KEY"])
    #   res = llm.models.all
    #   res.each do |model|
    #     print "id: ", model.id, "\n"
    #   end
    # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
    # @param [Hash] params Other parameters (see OpenAI docs)
    # @raise (see LLM::Provider#request)
    # @return [LLM::Response]
    def all(**params)
      query = URI.encode_www_form(params)
      req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
      res = execute(request: req)
      LLM::Response.new(res).extend(LLM::OpenAI::Response::Enumerable)
    end

    private

    # Delegate HTTP plumbing to the provider (these are private provider methods,
    # hence send rather than public_send).
    [:headers, :execute, :set_body_stream].each do |m|
      define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
    end
  end
end
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
# frozen_string_literal: true

class LLM::OpenAI
  ##
  # The {LLM::OpenAI::Moderations LLM::OpenAI::Moderations} class provides a moderations
  # object for interacting with [OpenAI's moderations API](https://platform.openai.com/docs/api-reference/moderations).
  # The moderations API can categorize content into different categories, such as
  # hate speech, self-harm, and sexual content. It can also provide a confidence score
  # for each category.
  #
  # @example
  #   #!/usr/bin/env ruby
  #   require "llm"
  #
  #   llm = LLM.openai(key: ENV["KEY"])
  #   res = llm.moderations.create input: "I hate you"
  #   mod = res.moderations[0]
  #   print "categories: #{mod.categories}", "\n"
  #   print "scores: #{mod.scores}", "\n"
  #
  # @example
  #   #!/usr/bin/env ruby
  #   require "llm"
  #
  #   llm = LLM.openai(key: ENV["KEY"])
  #   res = llm.moderations.create input: URI.parse("https://example.com/image.png")
  #   mod = res.moderations[0]
  #   print "categories: #{mod.categories}", "\n"
  #   print "scores: #{mod.scores}", "\n"
  #
  # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
  # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
  class Moderations
    require_relative "response/moderations"

    ##
    # Returns a new Moderations object
    # @param [LLM::Provider] provider
    # @return [LLM::OpenAI::Moderations]
    def initialize(provider)
      @provider = provider
    end

    ##
    # Create a moderation
    # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
    # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
    # @param [String, URI, Array<String, URI>] input
    # @param [String, LLM::Model] model The model to use
    # @return [LLM::Response]
    def create(input:, model: "omni-moderation-latest", **params)
      request = Net::HTTP::Post.new("/v1/moderations", headers)
      # Normalize the input (string, URI, or an array of either) into the
      # wire format the moderations endpoint expects.
      formatted = Format::ModerationFormat.new(input).format
      payload = {input: formatted, model:}.merge(params)
      request.body = JSON.dump(payload)
      response = execute(request: request)
      LLM::Response.new(response).extend(LLM::OpenAI::Response::Moderations)
    end

    private

    # Forward HTTP plumbing to the provider; send is required because
    # these are private provider methods.
    %i[headers execute].each do |name|
      define_method(name) do |*args, **kwargs, &blk|
        @provider.send(name, *args, **kwargs, &blk)
      end
    end
  end
end
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
# frozen_string_literal: true

module LLM::OpenAI::Response
  ##
  # Maps an OpenAI chat-completion response body onto {LLM::Message} objects
  # and exposes token-usage accessors.
  module Completion
    ##
    # Returns one {LLM::Message} per choice in the response body.
    # @return [Array<LLM::Message>]
    def choices
      body.choices.each_with_index.map do |raw, position|
        wrapped = LLM::Object.from_hash(raw)
        msg = wrapped.message
        details = {
          index: position, response: self,
          logprobs: wrapped.logprobs,
          tool_calls: format_tool_calls(msg.tool_calls),
          original_tool_calls: msg.tool_calls
        }
        LLM::Message.new(msg.role, msg.content, details)
      end
    end
    alias_method :messages, :choices

    # Model name and token accounting, read straight from the response body.
    def model = body.model
    def prompt_tokens = usage["prompt_tokens"]
    def completion_tokens = usage["completion_tokens"]
    def total_tokens = usage["total_tokens"]
    def usage = body.usage || {}

    private

    # Normalizes raw tool-call entries into LLM::Object values with parsed
    # JSON arguments; entries without a function payload are dropped.
    def format_tool_calls(tools)
      (tools || []).filter_map do |call|
        fn = call.function
        next unless fn
        LLM::Object.new(
          id: call.id,
          name: fn.name,
          arguments: JSON.parse(fn.arguments)
        )
      end
    end
  end
end
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
# frozen_string_literal: true

module LLM::OpenAI::Response
  ##
  # Makes a list-style OpenAI response enumerable over the entries in
  # its "data" array.
  module Enumerable
    include ::Enumerable

    ##
    # Yields each entry in the response's data array.
    # @return [Enumerator] when no block is given
    def each(&block)
      return enum_for(:each) unless block
      data.each(&block)
    end

    ##
    # @return [Boolean]
    def empty? = data.empty?

    ##
    # @return [Integer]
    def size = data.size
  end
end
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# frozen_string_literal: true

module LLM::OpenAI::Response
  ##
  # Accessors for an images-API response: temporary URLs and/or
  # base64-encoded binaries, depending on the requested response_format.
  module Image
    ##
    # @return [Array<String>] the temporary URLs present in the response
    def urls
      data.filter_map { |entry| entry["url"] }
    end

    ##
    # @return [Array<StringIO>] decoded binary images from b64_json entries
    def images
      data.filter_map do |entry|
        encoded = entry["b64_json"]
        # "m0" decodes strict base64 (no embedded newlines).
        StringIO.new(encoded.unpack1("m0")) if encoded
      end
    end
  end
end
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# frozen_string_literal: true

module LLM::OpenAI::Response
  ##
  # Accessors for a moderations-API response.
  module Moderations
    ##
    # Returns the moderation results, each extended with {Moderation}.
    # @return [Array<LLM::Object>]
    def moderations
      @moderations ||= body.results.map { _1.extend(Moderation) }
    end
  end

  ##
  # Per-result accessors mixed into each entry of {Moderations#moderations}.
  module Moderation
    ##
    # Returns true if the moderation is flagged
    # @return [Boolean]
    def flagged?
      body.flagged
    end

    ##
    # Returns the moderation categories
    # @return [Array<String>]
    def categories
      # Keep only category names whose value is truthy.
      self["categories"].filter_map { _2 ? _1 : nil }
    end

    ##
    # Returns the moderation scores
    # @return [Hash]
    def scores
      # Hoisted so categories is computed once rather than per entry.
      flagged = categories
      self["category_scores"].select { |(key, _)| flagged.include?(key) }.to_h
    end
  end
end
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
# frozen_string_literal: true

module LLM::OpenAI::Response
  ##
  # Adapts an OpenAI Responses-API payload to the same interface exposed
  # by chat-completion responses (choices, token counts, etc).
  module Responds
    def model = body.model
    # Falls back to a top-level id when no nested response object exists.
    def response_id = respond_to?(:response) ? response["id"] : id
    # The Responses API yields a single aggregated assistant message.
    def choices = [format_message]
    def annotations = choices[0].annotations

    # Usage fields are named differently from the chat-completions API
    # (input/output instead of prompt/completion); nil-safe when absent.
    def prompt_tokens = body.usage&.input_tokens
    def completion_tokens = body.usage&.output_tokens
    def total_tokens = body.usage&.total_tokens

    ##
    # Returns the aggregated text content from the response outputs.
    # @return [String]
    def output_text
      choices.find(&:assistant?).content || ""
    end

    private

    # Builds one assistant LLM::Message by walking the output items:
    # function calls accumulate into extra[:tool_calls], output_text
    # fragments append to the mutable content string, and annotations
    # collect under extra["annotations"].
    # NOTE(review): `output` is not defined in this module — presumably
    # provided by the extended response object; verify against LLM::Response.
    def format_message
      # +"" yields a mutable string despite frozen_string_literal.
      message = LLM::Message.new("assistant", +"", {response: self, tool_calls: []})
      output.each.with_index do |choice, index|
        if choice.type == "function_call"
          message.extra[:tool_calls] << format_tool(choice)
        elsif choice.content
          choice.content.each do |c|
            next unless c["type"] == "output_text"
            message.content << c["text"] << "\n"
            next unless c["annotations"]
            message.extra["annotations"] = [*message.extra["annotations"], *c["annotations"]]
          end
        end
      end
      message
    end

    # Normalizes a function_call output item into the tool-call shape
    # shared with the chat-completions adapter.
    def format_tool(tool)
      LLM::Object.new(
        id: tool.call_id,
        name: tool.name,
        arguments: JSON.parse(tool.arguments)
      )
    end
  end
end
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module LLM::OpenAI::Response
|
|
4
|
+
##
|
|
5
|
+
# The {LLM::OpenAI::Response::WebSearch LLM::OpenAI::Response::WebSearch}
|
|
6
|
+
# module provides methods for accessing web search results from a web search
|
|
7
|
+
# tool call made via the {LLM::Provider#web_search LLM::Provider#web_search}
|
|
8
|
+
# method.
|
|
9
|
+
module WebSearch
|
|
10
|
+
##
|
|
11
|
+
# Returns one or more search results
|
|
12
|
+
# @return [Array<LLM::Object>]
|
|
13
|
+
def search_results
|
|
14
|
+
LLM::Object.from_hash(
|
|
15
|
+
choices[0]
|
|
16
|
+
.annotations
|
|
17
|
+
.map { _1.slice(:title, :url) }
|
|
18
|
+
)
|
|
19
|
+
end
|
|
20
|
+
end
|
|
21
|
+
end
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
class LLM::OpenAI
|
|
4
|
+
##
|
|
5
|
+
# @private
|
|
6
|
+
class Responses::StreamParser
|
|
7
|
+
##
|
|
8
|
+
# Returns the fully constructed response body
|
|
9
|
+
# @return [LLM::Object]
|
|
10
|
+
attr_reader :body
|
|
11
|
+
|
|
12
|
+
##
|
|
13
|
+
# @param [#<<] io An IO-like object
|
|
14
|
+
# @return [LLM::OpenAI::Responses::StreamParser]
|
|
15
|
+
def initialize(io)
|
|
16
|
+
@body = LLM::Object.new(output: []) # Initialize with an empty output array
|
|
17
|
+
@io = io
|
|
18
|
+
end
|
|
19
|
+
|
|
20
|
+
##
|
|
21
|
+
# @param [Hash] chunk
|
|
22
|
+
# @return [LLM::OpenAI::Responses::StreamParser]
|
|
23
|
+
def parse!(chunk)
|
|
24
|
+
tap { handle_event(chunk) }
|
|
25
|
+
end
|
|
26
|
+
|
|
27
|
+
private
|
|
28
|
+
|
|
29
|
+
def handle_event(chunk)
|
|
30
|
+
case chunk["type"]
|
|
31
|
+
when "response.created"
|
|
32
|
+
chunk.each do |k, v|
|
|
33
|
+
next if k == "type"
|
|
34
|
+
@body[k] = v
|
|
35
|
+
end
|
|
36
|
+
@body.output ||= []
|
|
37
|
+
when "response.output_item.added"
|
|
38
|
+
output_index = chunk["output_index"]
|
|
39
|
+
item = LLM::Object.from_hash(chunk["item"])
|
|
40
|
+
@body.output[output_index] = item
|
|
41
|
+
@body.output[output_index].content ||= []
|
|
42
|
+
when "response.content_part.added"
|
|
43
|
+
output_index = chunk["output_index"]
|
|
44
|
+
content_index = chunk["content_index"]
|
|
45
|
+
part = LLM::Object.from_hash(chunk["part"])
|
|
46
|
+
@body.output[output_index] ||= LLM::Object.new(content: [])
|
|
47
|
+
@body.output[output_index].content ||= []
|
|
48
|
+
@body.output[output_index].content[content_index] = part
|
|
49
|
+
when "response.output_text.delta"
|
|
50
|
+
output_index = chunk["output_index"]
|
|
51
|
+
content_index = chunk["content_index"]
|
|
52
|
+
delta_text = chunk["delta"]
|
|
53
|
+
output_item = @body.output[output_index]
|
|
54
|
+
if output_item&.content
|
|
55
|
+
content_part = output_item.content[content_index]
|
|
56
|
+
if content_part && content_part.type == "output_text"
|
|
57
|
+
content_part.text ||= ""
|
|
58
|
+
content_part.text << delta_text
|
|
59
|
+
@io << delta_text if @io.respond_to?(:<<)
|
|
60
|
+
end
|
|
61
|
+
end
|
|
62
|
+
when "response.output_item.done"
|
|
63
|
+
output_index = chunk["output_index"]
|
|
64
|
+
item = LLM::Object.from_hash(chunk["item"])
|
|
65
|
+
@body.output[output_index] = item
|
|
66
|
+
when "response.content_part.done"
|
|
67
|
+
output_index = chunk["output_index"]
|
|
68
|
+
content_index = chunk["content_index"]
|
|
69
|
+
part = LLM::Object.from_hash(chunk["part"])
|
|
70
|
+
@body.output[output_index] ||= LLM::Object.new(content: [])
|
|
71
|
+
@body.output[output_index].content ||= []
|
|
72
|
+
@body.output[output_index].content[content_index] = part
|
|
73
|
+
end
|
|
74
|
+
end
|
|
75
|
+
end
|
|
76
|
+
end
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
class LLM::OpenAI
|
|
4
|
+
##
|
|
5
|
+
# The {LLM::OpenAI::Responses LLM::OpenAI::Responses} class provides
|
|
6
|
+
# an interface for [OpenAI's response API](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).
|
|
7
|
+
#
|
|
8
|
+
# @example example #1
|
|
9
|
+
# #!/usr/bin/env ruby
|
|
10
|
+
# require "llm"
|
|
11
|
+
#
|
|
12
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
|
13
|
+
# res1 = llm.responses.create "Your task is to answer the user's questions", role: :developer
|
|
14
|
+
# res2 = llm.responses.create "5 + 5 = X ?", role: :user, previous_response_id: res1.id
|
|
15
|
+
# [res1, res2].each { llm.responses.delete(_1) }
|
|
16
|
+
class Responses
|
|
17
|
+
require_relative "response/responds"
|
|
18
|
+
require_relative "responses/stream_parser"
|
|
19
|
+
include Format
|
|
20
|
+
|
|
21
|
+
##
|
|
22
|
+
# Returns a new Responses object
|
|
23
|
+
# @param provider [LLM::Provider]
|
|
24
|
+
# @return [LLM::OpenAI::Responses]
|
|
25
|
+
def initialize(provider)
|
|
26
|
+
@provider = provider
|
|
27
|
+
end
|
|
28
|
+
|
|
29
|
+
##
|
|
30
|
+
# Create a response
|
|
31
|
+
# @see https://platform.openai.com/docs/api-reference/responses/create OpenAI docs
|
|
32
|
+
# @param prompt (see LLM::Provider#complete)
|
|
33
|
+
# @param params (see LLM::Provider#complete)
|
|
34
|
+
# @raise (see LLM::Provider#request)
|
|
35
|
+
# @raise [LLM::PromptError]
|
|
36
|
+
# When given an object a provider does not understand
|
|
37
|
+
# @return [LLM::Response]
|
|
38
|
+
def create(prompt, params = {})
|
|
39
|
+
params = {role: :user, model: @provider.default_model}.merge!(params)
|
|
40
|
+
tools = resolve_tools(params.delete(:tools))
|
|
41
|
+
params = [params, format_schema(params), format_tools(tools)].inject({}, &:merge!).compact
|
|
42
|
+
role, stream = params.delete(:role), params.delete(:stream)
|
|
43
|
+
params[:stream] = true if stream.respond_to?(:<<) || stream == true
|
|
44
|
+
req = Net::HTTP::Post.new("/v1/responses", headers)
|
|
45
|
+
messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
|
|
46
|
+
body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
|
|
47
|
+
set_body_stream(req, StringIO.new(body))
|
|
48
|
+
res = execute(request: req, stream:, stream_parser:)
|
|
49
|
+
LLM::Response.new(res)
|
|
50
|
+
.extend(LLM::OpenAI::Response::Responds)
|
|
51
|
+
.extend(Module.new { define_method(:__tools__) { tools } })
|
|
52
|
+
end
|
|
53
|
+
|
|
54
|
+
##
|
|
55
|
+
# Get a response
|
|
56
|
+
# @see https://platform.openai.com/docs/api-reference/responses/get OpenAI docs
|
|
57
|
+
# @param [#id, #to_s] response Response ID
|
|
58
|
+
# @raise (see LLM::Provider#request)
|
|
59
|
+
# @return [LLM::Response]
|
|
60
|
+
def get(response, **params)
|
|
61
|
+
response_id = response.respond_to?(:id) ? response.id : response
|
|
62
|
+
query = URI.encode_www_form(params)
|
|
63
|
+
req = Net::HTTP::Get.new("/v1/responses/#{response_id}?#{query}", headers)
|
|
64
|
+
res = execute(request: req)
|
|
65
|
+
LLM::Response.new(res).extend(LLM::OpenAI::Response::Responds)
|
|
66
|
+
end
|
|
67
|
+
|
|
68
|
+
##
|
|
69
|
+
# Deletes a response
|
|
70
|
+
# @see https://platform.openai.com/docs/api-reference/responses/delete OpenAI docs
|
|
71
|
+
# @param [#id, #to_s] response Response ID
|
|
72
|
+
# @raise (see LLM::Provider#request)
|
|
73
|
+
# @return [LLM::Object] Response body
|
|
74
|
+
def delete(response)
|
|
75
|
+
response_id = response.respond_to?(:id) ? response.id : response
|
|
76
|
+
req = Net::HTTP::Delete.new("/v1/responses/#{response_id}", headers)
|
|
77
|
+
res = execute(request: req)
|
|
78
|
+
LLM::Response.new(res)
|
|
79
|
+
end
|
|
80
|
+
|
|
81
|
+
private
|
|
82
|
+
|
|
83
|
+
[:headers, :execute, :set_body_stream, :resolve_tools].each do |m|
|
|
84
|
+
define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
|
|
85
|
+
end
|
|
86
|
+
|
|
87
|
+
def format_schema(params)
|
|
88
|
+
return {} unless params && params[:schema]
|
|
89
|
+
schema = params.delete(:schema)
|
|
90
|
+
schema = schema.to_h.merge(additionalProperties: false)
|
|
91
|
+
name = "JSONSchema"
|
|
92
|
+
{text: {format: {type: "json_schema", name:, schema:}}}
|
|
93
|
+
end
|
|
94
|
+
|
|
95
|
+
def stream_parser
|
|
96
|
+
LLM::OpenAI::Responses::StreamParser
|
|
97
|
+
end
|
|
98
|
+
end
|
|
99
|
+
end
|