dify_llm 1.6.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +21 -0
- data/README.md +157 -0
- data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install/templates/create_chats_legacy_migration.rb.tt +8 -0
- data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
- data/lib/generators/ruby_llm/install/templates/create_messages_legacy_migration.rb.tt +16 -0
- data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +16 -0
- data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +43 -0
- data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +15 -0
- data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +9 -0
- data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +4 -0
- data/lib/generators/ruby_llm/install/templates/model_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
- data/lib/generators/ruby_llm/install_generator.rb +184 -0
- data/lib/generators/ruby_llm/migrate_model_fields/templates/migration.rb.tt +142 -0
- data/lib/generators/ruby_llm/migrate_model_fields_generator.rb +84 -0
- data/lib/ruby_llm/active_record/acts_as.rb +137 -0
- data/lib/ruby_llm/active_record/acts_as_legacy.rb +398 -0
- data/lib/ruby_llm/active_record/chat_methods.rb +315 -0
- data/lib/ruby_llm/active_record/message_methods.rb +72 -0
- data/lib/ruby_llm/active_record/model_methods.rb +84 -0
- data/lib/ruby_llm/aliases.json +274 -0
- data/lib/ruby_llm/aliases.rb +38 -0
- data/lib/ruby_llm/attachment.rb +191 -0
- data/lib/ruby_llm/chat.rb +212 -0
- data/lib/ruby_llm/chunk.rb +6 -0
- data/lib/ruby_llm/configuration.rb +69 -0
- data/lib/ruby_llm/connection.rb +137 -0
- data/lib/ruby_llm/content.rb +50 -0
- data/lib/ruby_llm/context.rb +29 -0
- data/lib/ruby_llm/embedding.rb +29 -0
- data/lib/ruby_llm/error.rb +76 -0
- data/lib/ruby_llm/image.rb +49 -0
- data/lib/ruby_llm/message.rb +76 -0
- data/lib/ruby_llm/mime_type.rb +67 -0
- data/lib/ruby_llm/model/info.rb +103 -0
- data/lib/ruby_llm/model/modalities.rb +22 -0
- data/lib/ruby_llm/model/pricing.rb +48 -0
- data/lib/ruby_llm/model/pricing_category.rb +46 -0
- data/lib/ruby_llm/model/pricing_tier.rb +33 -0
- data/lib/ruby_llm/model.rb +7 -0
- data/lib/ruby_llm/models.json +31418 -0
- data/lib/ruby_llm/models.rb +235 -0
- data/lib/ruby_llm/models_schema.json +168 -0
- data/lib/ruby_llm/provider.rb +215 -0
- data/lib/ruby_llm/providers/anthropic/capabilities.rb +134 -0
- data/lib/ruby_llm/providers/anthropic/chat.rb +106 -0
- data/lib/ruby_llm/providers/anthropic/embeddings.rb +20 -0
- data/lib/ruby_llm/providers/anthropic/media.rb +91 -0
- data/lib/ruby_llm/providers/anthropic/models.rb +48 -0
- data/lib/ruby_llm/providers/anthropic/streaming.rb +43 -0
- data/lib/ruby_llm/providers/anthropic/tools.rb +107 -0
- data/lib/ruby_llm/providers/anthropic.rb +36 -0
- data/lib/ruby_llm/providers/bedrock/capabilities.rb +167 -0
- data/lib/ruby_llm/providers/bedrock/chat.rb +63 -0
- data/lib/ruby_llm/providers/bedrock/media.rb +60 -0
- data/lib/ruby_llm/providers/bedrock/models.rb +98 -0
- data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
- data/lib/ruby_llm/providers/bedrock/streaming/base.rb +51 -0
- data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +56 -0
- data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +67 -0
- data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +78 -0
- data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +78 -0
- data/lib/ruby_llm/providers/bedrock/streaming.rb +18 -0
- data/lib/ruby_llm/providers/bedrock.rb +82 -0
- data/lib/ruby_llm/providers/deepseek/capabilities.rb +130 -0
- data/lib/ruby_llm/providers/deepseek/chat.rb +16 -0
- data/lib/ruby_llm/providers/deepseek.rb +30 -0
- data/lib/ruby_llm/providers/dify/capabilities.rb +16 -0
- data/lib/ruby_llm/providers/dify/chat.rb +59 -0
- data/lib/ruby_llm/providers/dify/media.rb +37 -0
- data/lib/ruby_llm/providers/dify/streaming.rb +28 -0
- data/lib/ruby_llm/providers/dify.rb +48 -0
- data/lib/ruby_llm/providers/gemini/capabilities.rb +276 -0
- data/lib/ruby_llm/providers/gemini/chat.rb +171 -0
- data/lib/ruby_llm/providers/gemini/embeddings.rb +37 -0
- data/lib/ruby_llm/providers/gemini/images.rb +47 -0
- data/lib/ruby_llm/providers/gemini/media.rb +54 -0
- data/lib/ruby_llm/providers/gemini/models.rb +40 -0
- data/lib/ruby_llm/providers/gemini/streaming.rb +61 -0
- data/lib/ruby_llm/providers/gemini/tools.rb +77 -0
- data/lib/ruby_llm/providers/gemini.rb +36 -0
- data/lib/ruby_llm/providers/gpustack/chat.rb +27 -0
- data/lib/ruby_llm/providers/gpustack/media.rb +45 -0
- data/lib/ruby_llm/providers/gpustack/models.rb +90 -0
- data/lib/ruby_llm/providers/gpustack.rb +34 -0
- data/lib/ruby_llm/providers/mistral/capabilities.rb +155 -0
- data/lib/ruby_llm/providers/mistral/chat.rb +24 -0
- data/lib/ruby_llm/providers/mistral/embeddings.rb +33 -0
- data/lib/ruby_llm/providers/mistral/models.rb +48 -0
- data/lib/ruby_llm/providers/mistral.rb +32 -0
- data/lib/ruby_llm/providers/ollama/chat.rb +27 -0
- data/lib/ruby_llm/providers/ollama/media.rb +45 -0
- data/lib/ruby_llm/providers/ollama/models.rb +36 -0
- data/lib/ruby_llm/providers/ollama.rb +30 -0
- data/lib/ruby_llm/providers/openai/capabilities.rb +291 -0
- data/lib/ruby_llm/providers/openai/chat.rb +83 -0
- data/lib/ruby_llm/providers/openai/embeddings.rb +33 -0
- data/lib/ruby_llm/providers/openai/images.rb +38 -0
- data/lib/ruby_llm/providers/openai/media.rb +80 -0
- data/lib/ruby_llm/providers/openai/models.rb +39 -0
- data/lib/ruby_llm/providers/openai/streaming.rb +41 -0
- data/lib/ruby_llm/providers/openai/tools.rb +78 -0
- data/lib/ruby_llm/providers/openai.rb +42 -0
- data/lib/ruby_llm/providers/openrouter/models.rb +73 -0
- data/lib/ruby_llm/providers/openrouter.rb +26 -0
- data/lib/ruby_llm/providers/perplexity/capabilities.rb +137 -0
- data/lib/ruby_llm/providers/perplexity/chat.rb +16 -0
- data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
- data/lib/ruby_llm/providers/perplexity.rb +48 -0
- data/lib/ruby_llm/providers/vertexai/chat.rb +14 -0
- data/lib/ruby_llm/providers/vertexai/embeddings.rb +32 -0
- data/lib/ruby_llm/providers/vertexai/models.rb +130 -0
- data/lib/ruby_llm/providers/vertexai/streaming.rb +14 -0
- data/lib/ruby_llm/providers/vertexai.rb +55 -0
- data/lib/ruby_llm/railtie.rb +41 -0
- data/lib/ruby_llm/stream_accumulator.rb +97 -0
- data/lib/ruby_llm/streaming.rb +153 -0
- data/lib/ruby_llm/tool.rb +83 -0
- data/lib/ruby_llm/tool_call.rb +22 -0
- data/lib/ruby_llm/utils.rb +45 -0
- data/lib/ruby_llm/version.rb +5 -0
- data/lib/ruby_llm.rb +97 -0
- data/lib/tasks/models.rake +525 -0
- data/lib/tasks/release.rake +67 -0
- data/lib/tasks/ruby_llm.rake +15 -0
- data/lib/tasks/vcr.rake +92 -0
- metadata +291 -0

data/lib/ruby_llm/providers/gemini/models.rb
@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class Gemini
+      # Models methods for the Gemini API integration
+      module Models
+        module_function
+
+        def models_url
+          'models'
+        end
+
+        def parse_list_models_response(response, slug, capabilities)
+          Array(response.body['models']).map do |model_data|
+            model_id = model_data['name'].gsub('models/', '')
+
+            Model::Info.new(
+              id: model_id,
+              name: model_data['displayName'],
+              provider: slug,
+              family: capabilities.model_family(model_id),
+              created_at: nil,
+              context_window: model_data['inputTokenLimit'] || capabilities.context_window_for(model_id),
+              max_output_tokens: model_data['outputTokenLimit'] || capabilities.max_tokens_for(model_id),
+              modalities: capabilities.modalities_for(model_id),
+              capabilities: capabilities.capabilities_for(model_id),
+              pricing: capabilities.pricing_for(model_id),
+              metadata: {
+                version: model_data['version'],
+                description: model_data['description'],
+                supported_generation_methods: model_data['supportedGenerationMethods']
+              }
+            )
+          end
+        end
+      end
+    end
+  end
+end
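
For reference, a minimal sketch of the payload shape this parser consumes, assuming a response object whose #body is the parsed JSON hash; the keys are the ones read above, the values are illustrative:

    body = {
      'models' => [{
        'name' => 'models/gemini-2.0-flash', # 'models/' prefix is stripped to form the id
        'displayName' => 'Gemini 2.0 Flash',
        'version' => '2.0',
        'description' => 'Illustrative description',
        'inputTokenLimit' => 1_048_576,  # preferred over capabilities.context_window_for
        'outputTokenLimit' => 8_192,     # preferred over capabilities.max_tokens_for
        'supportedGenerationMethods' => %w[generateContent streamGenerateContent]
      }]
    }
    body['models'][0]['name'].gsub('models/', '') # => "gemini-2.0-flash"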

data/lib/ruby_llm/providers/gemini/streaming.rb
@@ -0,0 +1,61 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class Gemini
+      # Streaming methods for the Gemini API implementation
+      module Streaming
+        def stream_url
+          "models/#{@model}:streamGenerateContent?alt=sse"
+        end
+
+        def build_chunk(data)
+          Chunk.new(
+            role: :assistant,
+            model_id: extract_model_id(data),
+            content: extract_content(data),
+            input_tokens: extract_input_tokens(data),
+            output_tokens: extract_output_tokens(data),
+            tool_calls: extract_tool_calls(data)
+          )
+        end
+
+        private
+
+        def extract_model_id(data)
+          data['modelVersion']
+        end
+
+        def extract_content(data)
+          return nil unless data['candidates']&.any?
+
+          candidate = data['candidates'][0]
+          parts = candidate.dig('content', 'parts')
+          return nil unless parts
+
+          text_parts = parts.select { |p| p['text'] }
+          text_parts.map { |p| p['text'] }.join if text_parts.any?
+        end
+
+        def extract_input_tokens(data)
+          data.dig('usageMetadata', 'promptTokenCount')
+        end
+
+        def extract_output_tokens(data)
+          candidates = data.dig('usageMetadata', 'candidatesTokenCount') || 0
+          thoughts = data.dig('usageMetadata', 'thoughtsTokenCount') || 0
+          total = candidates + thoughts
+          total.positive? ? total : nil
+        end
+
+        def parse_streaming_error(data)
+          error_data = JSON.parse(data)
+          [error_data['error']['code'], error_data['error']['message']]
+        rescue JSON::ParserError => e
+          RubyLLM.logger.debug "Failed to parse streaming error: #{e.message}"
+          [500, "Failed to parse error: #{data}"]
+        end
+      end
+    end
+  end
+end
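
An illustrative streaming event, limited to the fields the extractors above read (values hypothetical). Note that extract_output_tokens sums visible candidate tokens with reasoning ('thoughts') tokens:

    data = {
      'modelVersion' => 'gemini-2.0-flash',
      'candidates' => [
        { 'content' => { 'parts' => [{ 'text' => 'Hello' }, { 'text' => ', world' }] } }
      ],
      'usageMetadata' => {
        'promptTokenCount' => 12,    # extract_input_tokens  => 12
        'candidatesTokenCount' => 3, # visible output tokens
        'thoughtsTokenCount' => 5    # reasoning tokens
      }
    }
    # extract_content(data)       # => "Hello, world"
    # extract_output_tokens(data) # => 8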

data/lib/ruby_llm/providers/gemini/tools.rb
@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class Gemini
+      # Tools methods for the Gemini API implementation
+      module Tools
+        def format_tools(tools)
+          return [] if tools.empty?
+
+          [{
+            functionDeclarations: tools.values.map { |tool| function_declaration_for(tool) }
+          }]
+        end
+
+        def extract_tool_calls(data)
+          return nil unless data
+
+          candidate = data.is_a?(Hash) ? data.dig('candidates', 0) : nil
+          return nil unless candidate
+
+          parts = candidate.dig('content', 'parts')
+          return nil unless parts.is_a?(Array)
+
+          function_call_part = parts.find { |p| p['functionCall'] }
+          return nil unless function_call_part
+
+          function_data = function_call_part['functionCall']
+          return nil unless function_data
+
+          id = SecureRandom.uuid
+
+          {
+            id => ToolCall.new(
+              id: id,
+              name: function_data['name'],
+              arguments: function_data['args']
+            )
+          }
+        end
+
+        private
+
+        def function_declaration_for(tool)
+          {
+            name: tool.name,
+            description: tool.description,
+            parameters: tool.parameters.any? ? format_parameters(tool.parameters) : nil
+          }.compact
+        end
+
+        def format_parameters(parameters)
+          {
+            type: 'OBJECT',
+            properties: parameters.transform_values do |param|
+              {
+                type: param_type_for_gemini(param.type),
+                description: param.description
+              }.compact
+            end,
+            required: parameters.select { |_, p| p.required }.keys.map(&:to_s)
+          }
+        end
+
+        def param_type_for_gemini(type)
+          case type.to_s.downcase
+          when 'integer', 'number', 'float' then 'NUMBER'
+          when 'boolean' then 'BOOLEAN'
+          when 'array' then 'ARRAY'
+          when 'object' then 'OBJECT'
+          else 'STRING'
+          end
+        end
+      end
+    end
+  end
+end
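
A sketch of the wire format format_tools emits, assuming a hypothetical :weather tool object responding to #name, #description, and #parameters as used above. Gemini expects uppercase type names, hence param_type_for_gemini; on the way back, a freshly generated UUID stands in for the tool-call id Gemini does not supply:

    # format_tools(weather: weather_tool) would yield roughly:
    [{
      functionDeclarations: [{
        name: 'weather',
        description: 'Look up the current weather',
        parameters: {
          type: 'OBJECT',
          properties: { city: { type: 'STRING', description: 'City name' } },
          required: ['city']
        }
      }]
    }]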

data/lib/ruby_llm/providers/gemini.rb
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # Native Gemini API implementation
+    class Gemini < Provider
+      include Gemini::Chat
+      include Gemini::Embeddings
+      include Gemini::Images
+      include Gemini::Models
+      include Gemini::Streaming
+      include Gemini::Tools
+      include Gemini::Media
+
+      def api_base
+        'https://generativelanguage.googleapis.com/v1beta'
+      end
+
+      def headers
+        {
+          'x-goog-api-key' => @config.gemini_api_key
+        }
+      end
+
+      class << self
+        def capabilities
+          Gemini::Capabilities
+        end
+
+        def configuration_requirements
+          %i[gemini_api_key]
+        end
+      end
+    end
+  end
+end
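
Hooking the provider up presumably follows the gem's standard configuration block (a Configuration class ships in the file list above, and gemini_api_key is declared as this provider's only requirement); a hedged sketch:

    # Sketch only; assumes the usual RubyLLM.configure entry point.
    RubyLLM.configure do |config|
      config.gemini_api_key = ENV['GEMINI_API_KEY'] # sent as the x-goog-api-key header
    end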

data/lib/ruby_llm/providers/gpustack/chat.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class GPUStack
+      # Chat methods of the GPUStack API integration
+      module Chat
+        module_function
+
+        def format_messages(messages)
+          messages.map do |msg|
+            {
+              role: format_role(msg.role),
+              content: GPUStack::Media.format_content(msg.content),
+              tool_calls: format_tool_calls(msg.tool_calls),
+              tool_call_id: msg.tool_call_id
+            }.compact
+          end
+        end
+
+        def format_role(role)
+          role.to_s
+        end
+      end
+    end
+  end
+end
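
A sketch of what format_messages produces for a plain user message, using a hypothetical Struct in place of a real message object and assuming format_tool_calls(nil) returns nil, as in the OpenAI-style providers this builds on:

    Msg = Struct.new(:role, :content, :tool_calls, :tool_call_id)
    msg = Msg.new(:user, 'Hi there', nil, nil)
    # format_messages([msg]) # => [{ role: 'user', content: 'Hi there' }]
    # (.compact drops the nil tool_calls / tool_call_id keys)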

data/lib/ruby_llm/providers/gpustack/media.rb
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class GPUStack
+      # Handles formatting of media content (images, audio) for GPUStack APIs
+      module Media
+        extend OpenAI::Media
+
+        module_function
+
+        def format_content(content)
+          return content.to_json if content.is_a?(Hash) || content.is_a?(Array)
+          return content unless content.is_a?(Content)
+
+          parts = []
+          parts << format_text(content.text) if content.text
+
+          content.attachments.each do |attachment|
+            case attachment.type
+            when :image
+              parts << GPUStack::Media.format_image(attachment)
+            when :text
+              parts << format_text_file(attachment)
+            else
+              raise UnsupportedAttachmentError, attachment.mime_type
+            end
+          end
+
+          parts
+        end
+
+        def format_image(image)
+          {
+            type: 'image_url',
+            image_url: {
+              url: image.for_llm,
+              detail: 'auto'
+            }
+          }
+        end
+      end
+    end
+  end
+end
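
Note the first guard: structured Hash/Array content is serialized to JSON rather than wrapped in parts. A sketch of the three branches, with hypothetical values:

    GPUStack::Media.format_content({ answer: 42 }) # => '{"answer":42}' (JSON string)
    GPUStack::Media.format_content('plain text')   # => 'plain text' (passed through)
    # A Content with one image attachment becomes an array of parts, roughly:
    # [{ type: 'text', text: '...' },
    #  { type: 'image_url', image_url: { url: <attachment.for_llm>, detail: 'auto' } }]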

data/lib/ruby_llm/providers/gpustack/models.rb
@@ -0,0 +1,90 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class GPUStack
+      # Models methods of the GPUStack API integration
+      module Models
+        module_function
+
+        def models_url
+          'models'
+        end
+
+        def parse_list_models_response(response, slug, _capabilities)
+          items = response.body['items'] || []
+          items.map do |model|
+            Model::Info.new(
+              id: model['name'],
+              name: model['name'],
+              created_at: model['created_at'] ? Time.parse(model['created_at']) : nil,
+              provider: slug,
+              family: 'gpustack',
+              metadata: {
+                description: model['description'],
+                source: model['source'],
+                huggingface_repo_id: model['huggingface_repo_id'],
+                ollama_library_model_name: model['ollama_library_model_name'],
+                backend: model['backend'],
+                meta: model['meta'],
+                categories: model['categories']
+              },
+              context_window: model.dig('meta', 'n_ctx'),
+              max_output_tokens: model.dig('meta', 'n_ctx'),
+              capabilities: build_capabilities(model),
+              modalities: build_modalities(model),
+              pricing: {}
+            )
+          end
+        end
+
+        private
+
+        def determine_model_type(model)
+          return 'embedding' if model['categories']&.include?('embedding')
+          return 'chat' if model['categories']&.include?('llm')
+
+          'other'
+        end
+
+        def build_capabilities(model) # rubocop:disable Metrics/PerceivedComplexity
+          capabilities = []
+
+          # Add streaming by default for LLM models
+          capabilities << 'streaming' if model['categories']&.include?('llm')
+
+          # Map GPUStack metadata to standard capabilities
+          capabilities << 'function_calling' if model.dig('meta', 'support_tool_calls')
+          capabilities << 'vision' if model.dig('meta', 'support_vision')
+          capabilities << 'reasoning' if model.dig('meta', 'support_reasoning')
+
+          # GPUStack models generally support structured output and json mode
+          capabilities << 'structured_output' if model['categories']&.include?('llm')
+          capabilities << 'json_mode' if model['categories']&.include?('llm')
+
+          capabilities
+        end
+
+        def build_modalities(model)
+          input_modalities = []
+          output_modalities = []
+
+          if model['categories']&.include?('llm')
+            input_modalities << 'text'
+            input_modalities << 'image' if model.dig('meta', 'support_vision')
+            input_modalities << 'audio' if model.dig('meta', 'support_audio')
+            output_modalities << 'text'
+          elsif model['categories']&.include?('embedding')
+            input_modalities << 'text'
+            output_modalities << 'embeddings'
+          end
+
+          {
+            input: input_modalities,
+            output: output_modalities
+          }
+        end
+      end
+    end
+  end
+end
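
An illustrative GPUStack model item, limited to the keys inspected above (values hypothetical). Note that meta.n_ctx is used for both context_window and max_output_tokens:

    model = {
      'name' => 'qwen2.5-7b-instruct',
      'categories' => ['llm'],
      'created_at' => '2025-01-01T00:00:00Z',
      'meta' => {
        'n_ctx' => 32_768,
        'support_tool_calls' => true,
        'support_vision' => false,
        'support_reasoning' => false
      }
    }
    # build_capabilities(model) => ["streaming", "function_calling", "structured_output", "json_mode"]
    # build_modalities(model)   => { input: ["text"], output: ["text"] }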

data/lib/ruby_llm/providers/gpustack.rb
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # GPUStack API integration based on Ollama.
+    class GPUStack < OpenAI
+      include GPUStack::Chat
+      include GPUStack::Models
+      include GPUStack::Media
+
+      def api_base
+        @config.gpustack_api_base
+      end
+
+      def headers
+        return {} unless @config.gpustack_api_key
+
+        {
+          'Authorization' => "Bearer #{@config.gpustack_api_key}"
+        }
+      end
+
+      class << self
+        def local?
+          true
+        end
+
+        def configuration_requirements
+          %i[gpustack_api_base]
+        end
+      end
+    end
+  end
+end
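
Configuration sketch, assuming the usual RubyLLM.configure entry point: gpustack_api_base is the sole requirement (the provider is local, so there is no default endpoint), while the API key is optional and, when absent, no Authorization header is sent:

    RubyLLM.configure do |config|
      config.gpustack_api_base = 'http://gpustack.local/v1' # hypothetical endpoint
      config.gpustack_api_key  = ENV['GPUSTACK_API_KEY']    # optional
    end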

data/lib/ruby_llm/providers/mistral/capabilities.rb
@@ -0,0 +1,155 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class Mistral
+      # Determines capabilities for Mistral models
+      module Capabilities
+        module_function
+
+        def supports_streaming?(model_id)
+          !model_id.match?(/embed|moderation|ocr|transcriptions/)
+        end
+
+        def supports_tools?(model_id)
+          !model_id.match?(/embed|moderation|ocr|voxtral|transcriptions|mistral-(tiny|small)-(2312|2402)/)
+        end
+
+        def supports_vision?(model_id)
+          model_id.match?(/pixtral|mistral-small-(2503|2506)|mistral-medium/)
+        end
+
+        def supports_json_mode?(model_id)
+          !model_id.match?(/embed|moderation|ocr|voxtral|transcriptions/) && supports_tools?(model_id)
+        end
+
+        def format_display_name(model_id)
+          case model_id
+          when /mistral-large/ then 'Mistral Large'
+          when /mistral-medium/ then 'Mistral Medium'
+          when /mistral-small/ then 'Mistral Small'
+          when /ministral-3b/ then 'Ministral 3B'
+          when /ministral-8b/ then 'Ministral 8B'
+          when /codestral/ then 'Codestral'
+          when /pixtral-large/ then 'Pixtral Large'
+          when /pixtral-12b/ then 'Pixtral 12B'
+          when /mistral-embed/ then 'Mistral Embed'
+          when /mistral-moderation/ then 'Mistral Moderation'
+          else model_id.split('-').map(&:capitalize).join(' ')
+          end
+        end
+
+        def model_family(model_id)
+          case model_id
+          when /mistral-large/ then 'mistral-large'
+          when /mistral-medium/ then 'mistral-medium'
+          when /mistral-small/ then 'mistral-small'
+          when /ministral/ then 'ministral'
+          when /codestral/ then 'codestral'
+          when /pixtral/ then 'pixtral'
+          when /mistral-embed/ then 'mistral-embed'
+          when /mistral-moderation/ then 'mistral-moderation'
+          else 'mistral'
+          end
+        end
+
+        def context_window_for(_model_id)
+          32_768
+        end
+
+        def max_tokens_for(_model_id)
+          8192
+        end
+
+        def modalities_for(model_id)
+          case model_id
+          when /pixtral/
+            {
+              input: %w[text image],
+              output: ['text']
+            }
+          when /embed/
+            {
+              input: ['text'],
+              output: ['embeddings']
+            }
+          else
+            {
+              input: ['text'],
+              output: ['text']
+            }
+          end
+        end
+
+        def capabilities_for(model_id) # rubocop:disable Metrics/PerceivedComplexity
+          case model_id
+          when /moderation/ then ['moderation']
+          when /voxtral.*transcribe/ then ['transcription']
+          when /ocr/ then ['vision']
+          else
+            capabilities = []
+            capabilities << 'streaming' if supports_streaming?(model_id)
+            capabilities << 'function_calling' if supports_tools?(model_id)
+            capabilities << 'structured_output' if supports_json_mode?(model_id)
+            capabilities << 'vision' if supports_vision?(model_id)
+
+            capabilities << 'reasoning' if model_id.match?(/magistral/)
+            capabilities << 'batch' unless model_id.match?(/voxtral|ocr|embed|moderation/)
+            capabilities << 'fine_tuning' if model_id.match?(/mistral-(small|medium|large)|devstral/)
+            capabilities << 'distillation' if model_id.match?(/ministral/)
+            capabilities << 'predicted_outputs' if model_id.match?(/codestral/)
+
+            capabilities.uniq
+          end
+        end
+
+        def pricing_for(_model_id)
+          {
+            input: 0.0,
+            output: 0.0
+          }
+        end
+
+        def release_date_for(model_id)
+          case model_id
+          when 'open-mistral-7b', 'mistral-tiny' then '2023-09-27'
+          when 'mistral-medium-2312', 'mistral-small-2312', 'mistral-small',
+               'open-mixtral-8x7b', 'mistral-tiny-2312' then '2023-12-11'
+
+          when 'mistral-embed' then '2024-01-11'
+          when 'mistral-large-2402', 'mistral-small-2402' then '2024-02-26'
+          when 'open-mixtral-8x22b', 'open-mixtral-8x22b-2404' then '2024-04-17'
+          when 'codestral-2405' then '2024-05-22'
+          when 'codestral-mamba-2407', 'codestral-mamba-latest', 'open-codestral-mamba' then '2024-07-16'
+          when 'open-mistral-nemo', 'open-mistral-nemo-2407', 'mistral-tiny-2407',
+               'mistral-tiny-latest' then '2024-07-18'
+          when 'mistral-large-2407' then '2024-07-24'
+          when 'pixtral-12b-2409', 'pixtral-12b-latest', 'pixtral-12b' then '2024-09-17'
+          when 'mistral-small-2409' then '2024-09-18'
+          when 'ministral-3b-2410', 'ministral-3b-latest', 'ministral-8b-2410',
+               'ministral-8b-latest' then '2024-10-16'
+          when 'pixtral-large-2411', 'pixtral-large-latest', 'mistral-large-pixtral-2411' then '2024-11-12'
+          when 'mistral-large-2411', 'mistral-large-latest', 'mistral-large' then '2024-11-20'
+          when 'codestral-2411-rc5', 'mistral-moderation-2411', 'mistral-moderation-latest' then '2024-11-26'
+          when 'codestral-2412' then '2024-12-17'
+
+          when 'mistral-small-2501' then '2025-01-13'
+          when 'codestral-2501' then '2025-01-14'
+          when 'mistral-saba-2502', 'mistral-saba-latest' then '2025-02-18'
+          when 'mistral-small-2503' then '2025-03-03'
+          when 'mistral-ocr-2503' then '2025-03-21'
+          when 'mistral-medium', 'mistral-medium-latest', 'mistral-medium-2505' then '2025-05-06'
+          when 'codestral-embed', 'codestral-embed-2505' then '2025-05-21'
+          when 'mistral-ocr-2505', 'mistral-ocr-latest' then '2025-05-23'
+          when 'devstral-small-2505' then '2025-05-28'
+          when 'mistral-small-2506', 'mistral-small-latest', 'magistral-medium-2506',
+               'magistral-medium-latest' then '2025-06-10'
+          when 'devstral-small-2507', 'devstral-small-latest', 'devstral-medium-2507',
+               'devstral-medium-latest' then '2025-07-09'
+          when 'codestral-2508', 'codestral-latest' then '2025-08-30'
+          end
+        end
+      end
+    end
+  end
+end
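
A few spot checks, read directly off the regexes and tables above:

    supports_vision?('pixtral-12b-latest')   # => true  (matches /pixtral/)
    supports_tools?('mistral-embed')         # => false (/embed/ is excluded)
    capabilities_for('mistral-ocr-2503')     # => ["vision"]
    format_display_name('open-mistral-nemo') # => "Open Mistral Nemo" (fallback branch)
    release_date_for('mistral-large-2411')   # => "2024-11-20"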

data/lib/ruby_llm/providers/mistral/chat.rb
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class Mistral
+      # Chat methods for Mistral API
+      module Chat
+        module_function
+
+        def format_role(role)
+          role.to_s
+        end
+
+        # rubocop:disable Metrics/ParameterLists
+        def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil)
+          payload = super
+          payload.delete(:stream_options)
+          payload
+        end
+        # rubocop:enable Metrics/ParameterLists
+      end
+    end
+  end
+end
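
render_payload defers to the inherited OpenAI-style builder and then strips :stream_options, presumably because Mistral's endpoint rejects that OpenAI extension. The net effect, with a hypothetical payload from super:

    payload = { model: 'mistral-small-latest', messages: [], stream: true,
                stream_options: { include_usage: true } }
    payload.delete(:stream_options)
    payload # => { model: 'mistral-small-latest', messages: [], stream: true }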

data/lib/ruby_llm/providers/mistral/embeddings.rb
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class Mistral
+      # Embeddings methods for Mistral API
+      module Embeddings
+        module_function
+
+        def embedding_url(...)
+          'embeddings'
+        end
+
+        def render_embedding_payload(text, model:, dimensions:) # rubocop:disable Lint/UnusedMethodArgument
+          {
+            model: model,
+            input: text
+          }
+        end
+
+        def parse_embedding_response(response, model:, text:)
+          data = response.body
+          input_tokens = data.dig('usage', 'prompt_tokens') || 0
+          vectors = data['data'].map { |d| d['embedding'] }
+
+          vectors = vectors.first if vectors.length == 1 && !text.is_a?(Array)
+
+          Embedding.new(vectors:, model:, input_tokens:)
+        end
+      end
+    end
+  end
+end
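
parse_embedding_response unwraps single-input requests so callers get one vector back instead of a one-element array. A sketch with a stubbed response, shaped after the OpenAI-compatible payload the code reads:

    response = Struct.new(:body).new(
      'data' => [{ 'embedding' => [0.1, 0.2, 0.3] }],
      'usage' => { 'prompt_tokens' => 4 }
    )
    # With text = 'hello' (not an Array), vectors collapses to the single vector:
    # Embedding.new(vectors: [0.1, 0.2, 0.3], model: 'mistral-embed', input_tokens: 4)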

data/lib/ruby_llm/providers/mistral/models.rb
@@ -0,0 +1,48 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class Mistral
+      # Model information for Mistral
+      module Models
+        module_function
+
+        def models_url
+          'models'
+        end
+
+        def headers(config)
+          {
+            'Authorization' => "Bearer #{config.mistral_api_key}"
+          }
+        end
+
+        def parse_list_models_response(response, slug, capabilities)
+          Array(response.body['data']).map do |model_data|
+            model_id = model_data['id']
+
+            release_date = capabilities.release_date_for(model_id)
+            created_at = release_date ? Time.parse(release_date) : nil
+
+            Model::Info.new(
+              id: model_id,
+              name: capabilities.format_display_name(model_id),
+              provider: slug,
+              family: capabilities.model_family(model_id),
+              created_at: created_at,
+              context_window: capabilities.context_window_for(model_id),
+              max_output_tokens: capabilities.max_tokens_for(model_id),
+              modalities: capabilities.modalities_for(model_id),
+              capabilities: capabilities.capabilities_for(model_id),
+              pricing: capabilities.pricing_for(model_id),
+              metadata: {
+                object: model_data['object'],
+                owned_by: model_data['owned_by']
+              }
+            )
+          end
+        end
+      end
+    end
+  end
+end
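
Since Mistral's list response contributes only 'object' and 'owned_by' as metadata here, created_at comes from the hand-maintained release-date table in Capabilities rather than from the API:

    release_date = capabilities.release_date_for('mistral-large-2411') # => "2024-11-20"
    release_date ? Time.parse(release_date) : nil                      # => Time for 2024-11-20
    capabilities.release_date_for('unlisted-model')                    # => nil (created_at stays nil)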