dify_llm 1.7.0 → 1.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +16 -3
- data/lib/generators/ruby_llm/generator_helpers.rb +129 -0
- data/lib/generators/ruby_llm/install/install_generator.rb +12 -129
- data/lib/generators/ruby_llm/install/templates/add_references_to_chats_tool_calls_and_messages_migration.rb.tt +9 -0
- data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +0 -1
- data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +0 -3
- data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +1 -4
- data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +0 -1
- data/lib/generators/ruby_llm/upgrade_to_v1_7/templates/migration.rb.tt +8 -0
- data/lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb +47 -96
- data/lib/ruby_llm/attachment.rb +5 -0
- data/lib/ruby_llm/configuration.rb +2 -0
- data/lib/ruby_llm/mime_type.rb +4 -0
- data/lib/ruby_llm/model/info.rb +4 -0
- data/lib/ruby_llm/models.json +780 -511
- data/lib/ruby_llm/models.rb +7 -3
- data/lib/ruby_llm/moderation.rb +56 -0
- data/lib/ruby_llm/provider.rb +6 -0
- data/lib/ruby_llm/providers/gemini/capabilities.rb +5 -0
- data/lib/ruby_llm/providers/openai/moderation.rb +34 -0
- data/lib/ruby_llm/providers/openai.rb +1 -0
- data/lib/ruby_llm/railtie.rb +1 -1
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/ruby_llm.rb +4 -0
- metadata +8 -4
data/lib/ruby_llm/models.rb
CHANGED
@@ -194,15 +194,15 @@ module RubyLLM
     end

     def embedding_models
-      self.class.new(all.select { |m| m.type == 'embedding' })
+      self.class.new(all.select { |m| m.type == 'embedding' || m.modalities.output.include?('embeddings') })
     end

     def audio_models
-      self.class.new(all.select { |m| m.type == 'audio' })
+      self.class.new(all.select { |m| m.type == 'audio' || m.modalities.output.include?('audio') })
     end

     def image_models
-      self.class.new(all.select { |m| m.type == 'image' })
+      self.class.new(all.select { |m| m.type == 'image' || m.modalities.output.include?('image') })
     end

     def by_family(family)
@@ -217,6 +217,10 @@ module RubyLLM
       self.class.refresh!(remote_only: remote_only)
     end

+    def resolve(model_id, provider: nil, assume_exists: false, config: nil)
+      self.class.resolve(model_id, provider: provider, assume_exists: assume_exists, config: config)
+    end
+
     private

     def find_with_provider(model_id, provider)
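Note: the three filters above now match on a model's declared output modalities as well as the legacy type field, and Models gains an instance-level resolve that delegates to the class method. A minimal usage sketch (model ids are illustrative and depend on the local registry):

    # Sketch only: model ids are examples, not guaranteed to be registered.
    models = RubyLLM.models

    # Now also includes models whose modalities.output contains 'embeddings',
    # even when their type field is not 'embedding'.
    models.embedding_models.all.map(&:id)

    # New instance-level delegation to Models.resolve; like the class method,
    # it returns the model info together with a provider instance.
    model, provider = models.resolve('gpt-4o', provider: :openai)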
data/lib/ruby_llm/moderation.rb
ADDED
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  # Identify potentially harmful content in text.
+  # https://platform.openai.com/docs/guides/moderation
+  class Moderation
+    attr_reader :id, :model, :results
+
+    def initialize(id:, model:, results:)
+      @id = id
+      @model = model
+      @results = results
+    end
+
+    def self.moderate(input,
+                      model: nil,
+                      provider: nil,
+                      assume_model_exists: false,
+                      context: nil)
+      config = context&.config || RubyLLM.config
+      model ||= config.default_moderation_model || 'omni-moderation-latest'
+      model, provider_instance = Models.resolve(model, provider: provider, assume_exists: assume_model_exists,
+                                                config: config)
+      model_id = model.id
+
+      provider_instance.moderate(input, model: model_id)
+    end
+
+    # Convenience method to get content from moderation result
+    def content
+      results
+    end
+
+    # Check if any content was flagged
+    def flagged?
+      results.any? { |result| result['flagged'] }
+    end
+
+    # Get all flagged categories across all results
+    def flagged_categories
+      results.flat_map do |result|
+        result['categories']&.select { |_category, flagged| flagged }&.keys || []
+      end.uniq
+    end
+
+    # Get category scores for the first result (most common case)
+    def category_scores
+      results.first&.dig('category_scores') || {}
+    end
+
+    # Get categories for the first result (most common case)
+    def categories
+      results.first&.dig('categories') || {}
+    end
+  end
+end
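Note: RubyLLM::Moderation is the user-facing entry point for the new moderation support. A minimal sketch of how it might be called, assuming an OpenAI API key is configured (category names and scores shown are illustrative):

    RubyLLM.configure do |config|
      config.openai_api_key = ENV['OPENAI_API_KEY']
      # Optional: the new default_moderation_model setting; the class falls
      # back to 'omni-moderation-latest' when it is unset.
      config.default_moderation_model = 'omni-moderation-latest'
    end

    moderation = RubyLLM::Moderation.moderate('some user-generated text')
    moderation.flagged?            # => true or false
    moderation.flagged_categories  # => e.g. ['harassment']
    moderation.category_scores     # => e.g. { 'harassment' => 0.87, ... }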
data/lib/ruby_llm/provider.rb
CHANGED
@@ -70,6 +70,12 @@ module RubyLLM
       parse_embedding_response(response, model:, text:)
     end

+    def moderate(input, model:)
+      payload = render_moderation_payload(input, model:)
+      response = @connection.post moderation_url, payload
+      parse_moderation_response(response, model:)
+    end
+
     def paint(prompt, model:, size:)
       payload = render_image_payload(prompt, model:, size:)
       response = @connection.post images_url, payload
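Note: Provider#moderate is a template method; it only works for providers that also define moderation_url, render_moderation_payload, and parse_moderation_response, which is exactly what the OpenAI module added below supplies. A hypothetical provider module (not part of this release) would answer the same three hooks:

    # Hypothetical sketch, not shipped in the gem.
    module ExampleProviderModeration
      module_function

      def moderation_url
        'moderations'                   # POST target relative to the provider connection
      end

      def render_moderation_payload(input, model:)
        { model: model, input: input }  # request body
      end

      def parse_moderation_response(response, model:)
        RubyLLM::Moderation.new(id: response.body['id'], model: model,
                                results: response.body['results'] || [])
      end
    end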
data/lib/ruby_llm/providers/gemini/capabilities.rb
CHANGED
@@ -52,6 +52,10 @@ module RubyLLM
           model_id.match?(/gemini|flash|pro|imagen/)
         end

+        def supports_video?(model_id)
+          model_id.match?(/gemini/)
+        end
+
         def supports_functions?(model_id)
           return false if model_id.match?(/text-embedding|embedding-001|aqa|flash-lite|imagen|gemini-2\.0-flash-lite/)

@@ -217,6 +221,7 @@ module RubyLLM
             modalities[:input] << 'pdf'
           end

+          modalities[:input] << 'video' if supports_video?(model_id)
           modalities[:input] << 'audio' if model_id.match?(/audio/)
           modalities[:output] << 'embeddings' if model_id.match?(/embedding|gemini-embedding/)
           modalities[:output] = ['image'] if model_id.match?(/imagen/)
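Note: with supports_video? in place, Gemini models advertise 'video' among their input modalities, which feeds the modality-based filtering in Models. A small sketch (model id illustrative, assuming it exists in the local registry):

    model, _provider = RubyLLM.models.resolve('gemini-2.5-flash')
    model.modalities.input.include?('video')  # => true for Gemini chat models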
data/lib/ruby_llm/providers/openai/moderation.rb
ADDED
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class OpenAI
+      # Moderation methods of the OpenAI API integration
+      module Moderation
+        module_function
+
+        def moderation_url
+          'moderations'
+        end
+
+        def render_moderation_payload(input, model:)
+          {
+            model: model,
+            input: input
+          }
+        end
+
+        def parse_moderation_response(response, model:)
+          data = response.body
+          raise Error.new(response, data.dig('error', 'message')) if data.dig('error', 'message')
+
+          RubyLLM::Moderation.new(
+            id: data['id'],
+            model: model,
+            results: data['results'] || []
+          )
+        end
+      end
+    end
+  end
+end
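Note: parse_moderation_response hands the raw results array straight to RubyLLM::Moderation, so the helper methods on that class assume the OpenAI moderation response shape. An abridged, illustrative body (values made up):

    data = {
      'id' => 'modr-123',
      'results' => [
        { 'flagged' => true,
          'categories' => { 'violence' => true, 'hate' => false },
          'category_scores' => { 'violence' => 0.91, 'hate' => 0.02 } }
      ]
    }

    moderation = RubyLLM::Moderation.new(id: data['id'],
                                         model: 'omni-moderation-latest',
                                         results: data['results'])
    moderation.flagged?            # => true
    moderation.flagged_categories  # => ['violence']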
data/lib/ruby_llm/railtie.rb
CHANGED
data/lib/ruby_llm/version.rb
CHANGED
data/lib/ruby_llm.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: dify_llm
 version: !ruby/object:Gem::Version
-  version: 1.7.0
+  version: 1.8.0
 platform: ruby
 authors:
 - Carmine Paolino
@@ -152,7 +152,9 @@ files:
 - lib/generators/ruby_llm/chat_ui/templates/views/models/_model.html.erb.tt
 - lib/generators/ruby_llm/chat_ui/templates/views/models/index.html.erb.tt
 - lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt
+- lib/generators/ruby_llm/generator_helpers.rb
 - lib/generators/ruby_llm/install/install_generator.rb
+- lib/generators/ruby_llm/install/templates/add_references_to_chats_tool_calls_and_messages_migration.rb.tt
 - lib/generators/ruby_llm/install/templates/chat_model.rb.tt
 - lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt
 - lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt
@@ -193,6 +195,7 @@ files:
 - lib/ruby_llm/models.json
 - lib/ruby_llm/models.rb
 - lib/ruby_llm/models_schema.json
+- lib/ruby_llm/moderation.rb
 - lib/ruby_llm/provider.rb
 - lib/ruby_llm/providers/anthropic.rb
 - lib/ruby_llm/providers/anthropic/capabilities.rb
@@ -251,6 +254,7 @@ files:
 - lib/ruby_llm/providers/openai/images.rb
 - lib/ruby_llm/providers/openai/media.rb
 - lib/ruby_llm/providers/openai/models.rb
+- lib/ruby_llm/providers/openai/moderation.rb
 - lib/ruby_llm/providers/openai/streaming.rb
 - lib/ruby_llm/providers/openai/tools.rb
 - lib/ruby_llm/providers/openrouter.rb
@@ -275,14 +279,14 @@ files:
 - lib/tasks/release.rake
 - lib/tasks/ruby_llm.rake
 - lib/tasks/vcr.rake
-homepage: https://
+homepage: https://github.com/crmne/ruby_llm/pull/168
 licenses:
 - MIT
 metadata:
-  homepage_uri: https://
+  homepage_uri: https://github.com/crmne/ruby_llm/pull/168
   source_code_uri: https://github.com/crmne/ruby_llm
   changelog_uri: https://github.com/crmne/ruby_llm/commits/main
-  documentation_uri: https://
+  documentation_uri: https://github.com/crmne/ruby_llm/pull/168
   bug_tracker_uri: https://github.com/crmne/ruby_llm/issues
   rubygems_mfa_required: 'true'
 post_install_message: |