dify_llm 1.8.1 → 1.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. checksums.yaml +4 -4
  2. data/README.md +12 -7
  3. data/lib/generators/ruby_llm/chat_ui/chat_ui_generator.rb +117 -69
  4. data/lib/generators/ruby_llm/chat_ui/templates/controllers/chats_controller.rb.tt +12 -12
  5. data/lib/generators/ruby_llm/chat_ui/templates/controllers/messages_controller.rb.tt +7 -7
  6. data/lib/generators/ruby_llm/chat_ui/templates/controllers/models_controller.rb.tt +4 -4
  7. data/lib/generators/ruby_llm/chat_ui/templates/jobs/chat_response_job.rb.tt +6 -6
  8. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_chat.html.erb.tt +4 -4
  9. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_form.html.erb.tt +5 -5
  10. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/index.html.erb.tt +5 -5
  11. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/new.html.erb.tt +4 -4
  12. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/show.html.erb.tt +8 -8
  13. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_form.html.erb.tt +5 -5
  14. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_message.html.erb.tt +9 -6
  15. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_tool_calls.html.erb.tt +7 -0
  16. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/create.turbo_stream.erb.tt +5 -5
  17. data/lib/generators/ruby_llm/chat_ui/templates/views/models/_model.html.erb.tt +9 -9
  18. data/lib/generators/ruby_llm/chat_ui/templates/views/models/index.html.erb.tt +4 -6
  19. data/lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt +11 -11
  20. data/lib/generators/ruby_llm/generator_helpers.rb +131 -87
  21. data/lib/generators/ruby_llm/install/install_generator.rb +75 -79
  22. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +3 -0
  23. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +1 -1
  24. data/lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb +88 -85
  25. data/lib/generators/ruby_llm/upgrade_to_v1_9/templates/add_v1_9_message_columns.rb.tt +15 -0
  26. data/lib/generators/ruby_llm/upgrade_to_v1_9/upgrade_to_v1_9_generator.rb +49 -0
  27. data/lib/ruby_llm/active_record/acts_as.rb +17 -8
  28. data/lib/ruby_llm/active_record/chat_methods.rb +41 -13
  29. data/lib/ruby_llm/active_record/message_methods.rb +11 -2
  30. data/lib/ruby_llm/active_record/model_methods.rb +1 -1
  31. data/lib/ruby_llm/aliases.json +62 -20
  32. data/lib/ruby_llm/attachment.rb +8 -0
  33. data/lib/ruby_llm/chat.rb +13 -2
  34. data/lib/ruby_llm/configuration.rb +6 -1
  35. data/lib/ruby_llm/connection.rb +4 -4
  36. data/lib/ruby_llm/content.rb +23 -0
  37. data/lib/ruby_llm/message.rb +11 -6
  38. data/lib/ruby_llm/model/info.rb +4 -0
  39. data/lib/ruby_llm/models.json +9410 -7793
  40. data/lib/ruby_llm/models.rb +14 -22
  41. data/lib/ruby_llm/provider.rb +23 -1
  42. data/lib/ruby_llm/providers/anthropic/chat.rb +22 -3
  43. data/lib/ruby_llm/providers/anthropic/content.rb +44 -0
  44. data/lib/ruby_llm/providers/anthropic/media.rb +2 -1
  45. data/lib/ruby_llm/providers/anthropic/models.rb +15 -0
  46. data/lib/ruby_llm/providers/anthropic/streaming.rb +2 -0
  47. data/lib/ruby_llm/providers/anthropic/tools.rb +20 -18
  48. data/lib/ruby_llm/providers/bedrock/media.rb +2 -1
  49. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +15 -0
  50. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +2 -0
  51. data/lib/ruby_llm/providers/dify/chat.rb +16 -5
  52. data/lib/ruby_llm/providers/gemini/chat.rb +352 -69
  53. data/lib/ruby_llm/providers/gemini/media.rb +59 -1
  54. data/lib/ruby_llm/providers/gemini/tools.rb +146 -25
  55. data/lib/ruby_llm/providers/gemini/transcription.rb +116 -0
  56. data/lib/ruby_llm/providers/gemini.rb +2 -1
  57. data/lib/ruby_llm/providers/gpustack/media.rb +1 -0
  58. data/lib/ruby_llm/providers/ollama/media.rb +1 -0
  59. data/lib/ruby_llm/providers/openai/chat.rb +7 -2
  60. data/lib/ruby_llm/providers/openai/media.rb +2 -1
  61. data/lib/ruby_llm/providers/openai/streaming.rb +7 -2
  62. data/lib/ruby_llm/providers/openai/tools.rb +26 -6
  63. data/lib/ruby_llm/providers/openai/transcription.rb +70 -0
  64. data/lib/ruby_llm/providers/openai.rb +1 -0
  65. data/lib/ruby_llm/providers/vertexai/transcription.rb +16 -0
  66. data/lib/ruby_llm/providers/vertexai.rb +3 -0
  67. data/lib/ruby_llm/stream_accumulator.rb +10 -4
  68. data/lib/ruby_llm/tool.rb +126 -0
  69. data/lib/ruby_llm/transcription.rb +35 -0
  70. data/lib/ruby_llm/utils.rb +46 -0
  71. data/lib/ruby_llm/version.rb +1 -1
  72. data/lib/ruby_llm.rb +6 -0
  73. metadata +25 -3
data/lib/ruby_llm/attachment.rb CHANGED
@@ -71,6 +71,14 @@ module RubyLLM
       Base64.strict_encode64(content)
     end
 
+    def save(path)
+      return unless io_like?
+
+      File.open(path, 'w') do |f|
+        f.puts(@source.read)
+      end
+    end
+
     def for_llm
       case type
       when :text
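
A minimal usage sketch of the new Attachment#save helper (not part of the diff; it assumes Attachment.new accepts an IO-like source plus an optional filename: keyword):

    require 'ruby_llm'
    require 'stringio'

    # Hypothetical example: persist an IO-backed attachment to disk.
    attachment = RubyLLM::Attachment.new(StringIO.new('transcript text'), filename: 'notes.txt')
    attachment.save('/tmp/notes.txt') # writes @source.read to the path; no-op unless io_like?
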
data/lib/ruby_llm/chat.rb CHANGED
@@ -31,7 +31,7 @@ module RubyLLM
     end
 
     def ask(message = nil, with: nil, &)
-      add_message role: :user, content: Content.new(message, with)
+      add_message role: :user, content: build_content(message, with)
       complete(&)
     end
 
@@ -193,7 +193,8 @@ module RubyLLM
       @on[:tool_call]&.call(tool_call)
       result = execute_tool tool_call
       @on[:tool_result]&.call(result)
-      content = result.is_a?(Content) ? result : result.to_s
+      tool_payload = result.is_a?(Tool::Halt) ? result.content : result
+      content = content_like?(tool_payload) ? tool_payload : tool_payload.to_s
       message = add_message role: :tool, content:, tool_call_id: tool_call.id
       @on[:end_message]&.call(message)
 
@@ -208,5 +209,15 @@ module RubyLLM
       args = tool_call.arguments
       tool.call(args)
     end
+
+    def build_content(message, attachments)
+      return message if content_like?(message)
+
+      Content.new(message, attachments)
+    end
+
+    def content_like?(object)
+      object.is_a?(Content) || object.is_a?(Content::Raw)
+    end
   end
 end
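
An illustrative sketch (not from the diff) of what build_content and content_like? enable: ask and tool results can now pass a pre-built Content or Content::Raw through untouched instead of always wrapping it in Content.new. The payload shape below is an assumption.

    chat = RubyLLM.chat

    # content_like?(raw) is true, so build_content returns it as-is.
    raw = RubyLLM::Content::Raw.new([{ type: 'text', text: 'hello' }])
    chat.ask(raw)

    # Plain strings still go through Content.new(message, attachments):
    chat.ask('Describe this file', with: 'report.pdf')
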
data/lib/ruby_llm/configuration.rb CHANGED
@@ -10,6 +10,7 @@ module RubyLLM
       :openai_use_system_role,
       :anthropic_api_key,
       :gemini_api_key,
+      :gemini_api_base,
       :vertexai_project_id,
       :vertexai_location,
       :deepseek_api_key,
@@ -31,7 +32,9 @@ module RubyLLM
       :default_embedding_model,
       :default_moderation_model,
       :default_image_model,
+      :default_transcription_model,
       # Model registry
+      :model_registry_file,
       :model_registry_class,
       # Rails integration
       :use_new_acts_as,
@@ -49,7 +52,7 @@ module RubyLLM
       :log_stream_debug
 
     def initialize
-      @request_timeout = 120
+      @request_timeout = 300
       @max_retries = 3
       @retry_interval = 0.1
       @retry_backoff_factor = 2
@@ -60,7 +63,9 @@ module RubyLLM
       @default_embedding_model = 'text-embedding-3-small'
       @default_moderation_model = 'omni-moderation-latest'
       @default_image_model = 'gpt-image-1'
+      @default_transcription_model = 'whisper-1'
 
+      @model_registry_file = File.expand_path('models.json', __dir__)
       @model_registry_class = 'Model'
       @use_new_acts_as = false
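
A sketch of the new configuration options introduced above, using the existing RubyLLM.configure entry point (values are placeholders):

    RubyLLM.configure do |config|
      config.gemini_api_base = 'https://gemini-proxy.internal.example' # assumed proxy endpoint
      config.default_transcription_model = 'whisper-1'                 # new default shown above
      config.model_registry_file = '/app/config/ruby_llm_models.json'  # overrides the bundled models.json
      config.request_timeout = 120                                     # restore the pre-1.9 default if 300s is too long
    end
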
data/lib/ruby_llm/connection.rb CHANGED
@@ -34,8 +34,7 @@ module RubyLLM
     end
 
     def post(url, payload, &)
-      body = payload.is_a?(Hash) ? JSON.generate(payload, ascii_only: false) : payload
-      @connection.post url, body do |req|
+      @connection.post url, payload do |req|
         req.headers.merge! @provider.headers if @provider.respond_to?(:headers)
         yield req if block_given?
       end
@@ -77,7 +76,7 @@ module RubyLLM
                        errors: true,
                        headers: false,
                        log_level: :debug do |logger|
-        logger.filter(%r{[A-Za-z0-9+/=]{100,}}, 'data":"[BASE64 DATA]"')
+        logger.filter(%r{[A-Za-z0-9+/=]{100,}}, '[BASE64 DATA]')
         logger.filter(/[-\d.e,\s]{100,}/, '[EMBEDDINGS ARRAY]')
       end
     end
@@ -94,9 +93,10 @@ module RubyLLM
     end
 
     def setup_middleware(faraday)
+      faraday.request :multipart
       faraday.request :json
       faraday.response :json
-      faraday.adapter Faraday.default_adapter
+      faraday.adapter :net_http
       faraday.use :llm_errors, provider: @provider
     end
 
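
In this change, JSON encoding is left to Faraday's json request middleware (hence the removal of the manual JSON.generate in post), multipart encoding is added ahead of it for file uploads such as the new transcription endpoints, and the adapter is pinned to net_http. A standalone sketch of an equivalent stack, assuming the faraday and faraday-multipart gems (:llm_errors is RubyLLM's own middleware and is omitted here):

    require 'faraday'
    require 'faraday/multipart'

    conn = Faraday.new(url: 'https://api.example.com') do |f|
      f.request :multipart   # encodes file parts as multipart/form-data
      f.request :json        # JSON-encodes Hash bodies
      f.response :json       # parses JSON responses
      f.adapter :net_http    # pinned adapter instead of Faraday.default_adapter
    end
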
data/lib/ruby_llm/content.rb CHANGED
@@ -48,3 +48,26 @@ module RubyLLM
     end
   end
 end
+
+module RubyLLM
+  class Content
+    # Represents provider-specific payloads that should bypass RubyLLM formatting.
+    class Raw
+      attr_reader :value
+
+      def initialize(value)
+        raise ArgumentError, 'Raw content payload cannot be nil' if value.nil?
+
+        @value = value
+      end
+
+      def format
+        @value
+      end
+
+      def to_h
+        @value
+      end
+    end
+  end
+end
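
Content::Raw is a thin wrapper: whatever non-nil value it is given comes back verbatim from both format and to_h, which lets callers hand a provider-native payload straight through (see the build_content change in chat.rb above). A short behavioral sketch:

    raw = RubyLLM::Content::Raw.new({ parts: [{ text: 'hi' }] }) # any non-nil payload
    raw.format # => { parts: [{ text: 'hi' }] }, returned untouched
    raw.to_h   # => the same object
    RubyLLM::Content::Raw.new(nil) # => ArgumentError: Raw content payload cannot be nil
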
data/lib/ruby_llm/message.rb CHANGED
@@ -5,18 +5,21 @@ module RubyLLM
   class Message
     ROLES = %i[system user assistant tool].freeze
 
-    attr_reader :role, :tool_calls, :tool_call_id, :input_tokens, :output_tokens, :model_id, :raw, :conversation_id
+    attr_reader :role, :model_id, :tool_calls, :tool_call_id, :input_tokens, :output_tokens,
+                :cached_tokens, :cache_creation_tokens, :raw, :conversation_id
     attr_writer :content
 
     def initialize(options = {})
       @role = options.fetch(:role).to_sym
       @content = normalize_content(options.fetch(:content))
+      @model_id = options[:model_id]
       @tool_calls = options[:tool_calls]
+      @tool_call_id = options[:tool_call_id]
+      @conversation_id = options[:conversation_id]
       @input_tokens = options[:input_tokens]
       @output_tokens = options[:output_tokens]
-      @model_id = options[:model_id]
-      @conversation_id = options[:conversation_id]
-      @tool_call_id = options[:tool_call_id]
+      @cached_tokens = options[:cached_tokens]
+      @cache_creation_tokens = options[:cache_creation_tokens]
       @raw = options[:raw]
 
       ensure_valid_role
@@ -46,12 +49,14 @@ module RubyLLM
       {
         role: role,
         content: content,
+        model_id: model_id,
         tool_calls: tool_calls,
         tool_call_id: tool_call_id,
+        conversation_id: conversation_id,
         input_tokens: input_tokens,
         output_tokens: output_tokens,
-        conversation_id: conversation_id,
-        model_id: model_id
+        cached_tokens: cached_tokens,
+        cache_creation_tokens: cache_creation_tokens
       }.compact
     end
 
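
Messages now carry prompt-caching usage (cached_tokens and cache_creation_tokens) alongside the existing token counts, and to_h serializes them. A small sketch with made-up numbers:

    message = RubyLLM::Message.new(
      role: :assistant,
      content: 'Done!',
      input_tokens: 1_200,
      output_tokens: 40,
      cached_tokens: 1_000,        # tokens served from the provider's prompt cache
      cache_creation_tokens: 200   # tokens written to the cache on this request
    )

    message.to_h.slice(:cached_tokens, :cache_creation_tokens)
    # => { cached_tokens: 1000, cache_creation_tokens: 200 }
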
data/lib/ruby_llm/model/info.rb CHANGED
@@ -72,6 +72,10 @@ module RubyLLM
         pricing.text_tokens.output
       end
 
+      def provider_class
+        RubyLLM::Provider.resolve provider
+      end
+
       def type # rubocop:disable Metrics/PerceivedComplexity
         if modalities.output.include?('embeddings') && !modalities.output.include?('text')
           'embedding'
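
provider_class resolves a model's provider slug to its provider class via RubyLLM::Provider.resolve. An illustrative sketch (the model id and resulting class are assumptions):

    model = RubyLLM.models.find('gpt-4.1') # assumed model id
    model.provider        # => "openai"
    model.provider_class  # => e.g. RubyLLM::Providers::OpenAI
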