ruby_llm 1.0.1 → 1.1.0rc1
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +28 -12
- data/lib/ruby_llm/active_record/acts_as.rb +46 -7
- data/lib/ruby_llm/aliases.json +65 -0
- data/lib/ruby_llm/aliases.rb +56 -0
- data/lib/ruby_llm/chat.rb +10 -9
- data/lib/ruby_llm/configuration.rb +4 -0
- data/lib/ruby_llm/error.rb +15 -4
- data/lib/ruby_llm/models.json +1163 -303
- data/lib/ruby_llm/models.rb +40 -11
- data/lib/ruby_llm/provider.rb +32 -39
- data/lib/ruby_llm/providers/anthropic/capabilities.rb +8 -9
- data/lib/ruby_llm/providers/anthropic/chat.rb +31 -4
- data/lib/ruby_llm/providers/anthropic/streaming.rb +12 -6
- data/lib/ruby_llm/providers/anthropic.rb +4 -0
- data/lib/ruby_llm/providers/bedrock/capabilities.rb +168 -0
- data/lib/ruby_llm/providers/bedrock/chat.rb +108 -0
- data/lib/ruby_llm/providers/bedrock/models.rb +84 -0
- data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
- data/lib/ruby_llm/providers/bedrock/streaming/base.rb +46 -0
- data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +63 -0
- data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +79 -0
- data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +90 -0
- data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +91 -0
- data/lib/ruby_llm/providers/bedrock/streaming.rb +36 -0
- data/lib/ruby_llm/providers/bedrock.rb +83 -0
- data/lib/ruby_llm/providers/deepseek/chat.rb +17 -0
- data/lib/ruby_llm/providers/deepseek.rb +5 -0
- data/lib/ruby_llm/providers/gemini/capabilities.rb +50 -34
- data/lib/ruby_llm/providers/gemini/chat.rb +8 -15
- data/lib/ruby_llm/providers/gemini/images.rb +5 -10
- data/lib/ruby_llm/providers/gemini/streaming.rb +35 -76
- data/lib/ruby_llm/providers/gemini/tools.rb +12 -12
- data/lib/ruby_llm/providers/gemini.rb +4 -0
- data/lib/ruby_llm/providers/openai/capabilities.rb +146 -206
- data/lib/ruby_llm/providers/openai/streaming.rb +9 -13
- data/lib/ruby_llm/providers/openai.rb +4 -0
- data/lib/ruby_llm/streaming.rb +96 -0
- data/lib/ruby_llm/version.rb +1 -1
- data/lib/ruby_llm.rb +6 -3
- data/lib/tasks/browser_helper.rb +97 -0
- data/lib/tasks/capability_generator.rb +123 -0
- data/lib/tasks/capability_scraper.rb +224 -0
- data/lib/tasks/cli_helper.rb +22 -0
- data/lib/tasks/code_validator.rb +29 -0
- data/lib/tasks/model_updater.rb +66 -0
- data/lib/tasks/models.rake +28 -193
- data/lib/tasks/vcr.rake +13 -30
- metadata +27 -19
- data/.github/workflows/cicd.yml +0 -158
- data/.github/workflows/docs.yml +0 -53
- data/.gitignore +0 -59
- data/.overcommit.yml +0 -26
- data/.rspec +0 -3
- data/.rubocop.yml +0 -10
- data/.yardopts +0 -12
- data/CONTRIBUTING.md +0 -207
- data/Gemfile +0 -33
- data/Rakefile +0 -9
- data/bin/console +0 -17
- data/bin/setup +0 -6
- data/ruby_llm.gemspec +0 -44
data/lib/ruby_llm/providers/bedrock/streaming/base.rb
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Bedrock
+      module Streaming
+        # Base module for AWS Bedrock streaming functionality.
+        # Serves as the core module that includes all other streaming-related modules
+        # and provides fundamental streaming operations.
+        #
+        # Responsibilities:
+        # - Stream URL management
+        # - Stream handling and error processing
+        # - Coordinating the functionality of other streaming modules
+        #
+        # @example
+        #   module MyStreamingImplementation
+        #     include RubyLLM::Providers::Bedrock::Streaming::Base
+        #   end
+        module Base
+          def self.included(base)
+            base.include ContentExtraction
+            base.include MessageProcessing
+            base.include PayloadProcessing
+            base.include PreludeHandling
+          end
+
+          def stream_url
+            "model/#{@model_id}/invoke-with-response-stream"
+          end
+
+          def handle_stream(&block)
+            buffer = String.new
+            proc do |chunk, _bytes, env|
+              if env && env.status != 200
+                handle_failed_response(chunk, buffer, env)
+              else
+                process_chunk(chunk, &block)
+              end
+            end
+          end
+        end
+      end
+    end
+  end
+end
data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Bedrock
+      module Streaming
+        # Module for handling content extraction from AWS Bedrock streaming responses.
+        # Provides methods to extract and process various types of content from the response data.
+        #
+        # Responsibilities:
+        # - Extracting content from different response formats
+        # - Processing JSON deltas and content blocks
+        # - Extracting metadata (tokens, model IDs, tool calls)
+        # - Handling different content structures (arrays, blocks, completions)
+        module ContentExtraction
+          def json_delta?(data)
+            data['type'] == 'content_block_delta' && data.dig('delta', 'type') == 'input_json_delta'
+          end
+
+          def extract_streaming_content(data)
+            return '' unless data.is_a?(Hash)
+
+            extract_content_by_type(data)
+          end
+
+          def extract_tool_calls(data)
+            data.dig('message', 'tool_calls') || data['tool_calls']
+          end
+
+          def extract_model_id(data)
+            data.dig('message', 'model') || @model_id
+          end
+
+          def extract_input_tokens(data)
+            data.dig('message', 'usage', 'input_tokens')
+          end
+
+          def extract_output_tokens(data)
+            data.dig('message', 'usage', 'output_tokens') || data.dig('usage', 'output_tokens')
+          end
+
+          private
+
+          def extract_content_by_type(data)
+            case data['type']
+            when 'content_block_start' then extract_block_start_content(data)
+            when 'content_block_delta' then extract_delta_content(data)
+            else ''
+            end
+          end
+
+          def extract_block_start_content(data)
+            data.dig('content_block', 'text').to_s
+          end
+
+          def extract_delta_content(data)
+            data.dig('delta', 'text').to_s
+          end
+        end
+      end
+    end
+  end
+end
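For orientation, the event hashes this module inspects are Anthropic-style streaming events after they have been decoded from the AWS event stream. The sketch below is illustrative only: the sample events are made up, and it assumes `require 'ruby_llm'` in 1.1.0rc1 loads the Bedrock provider files.

```ruby
require 'ruby_llm'

# Hypothetical decoded events, shaped like the hashes ContentExtraction expects.
start_event = { 'type' => 'content_block_start',
                'content_block' => { 'text' => 'Hello' } }
delta_event = { 'type' => 'content_block_delta',
                'delta' => { 'type' => 'text_delta', 'text' => ', world' } }

# Tiny harness for illustration only.
harness = Object.new.extend(RubyLLM::Providers::Bedrock::Streaming::ContentExtraction)

harness.extract_streaming_content(start_event) # => "Hello"
harness.extract_streaming_content(delta_event) # => ", world"
harness.json_delta?(delta_event)               # => false (only input_json_delta qualifies)
```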
data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb
@@ -0,0 +1,79 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Bedrock
+      module Streaming
+        # Module for processing streaming messages from AWS Bedrock.
+        # Handles the core message processing logic, including validation and chunking.
+        #
+        # Responsibilities:
+        # - Processing incoming message chunks
+        # - Validating message structure and content
+        # - Managing message offsets and boundaries
+        # - Error handling during message processing
+        #
+        # @example Processing a message chunk
+        #   offset = process_message(chunk, current_offset) do |processed_chunk|
+        #     handle_processed_chunk(processed_chunk)
+        #   end
+        module MessageProcessing
+          def process_chunk(chunk, &)
+            offset = 0
+            offset = process_message(chunk, offset, &) while offset < chunk.bytesize
+          rescue StandardError => e
+            RubyLLM.logger.debug "Error processing chunk: #{e.message}"
+            RubyLLM.logger.debug "Chunk size: #{chunk.bytesize}"
+          end
+
+          def process_message(chunk, offset, &)
+            return chunk.bytesize unless can_read_prelude?(chunk, offset)
+
+            message_info = extract_message_info(chunk, offset)
+            return find_next_message(chunk, offset) unless message_info
+
+            process_valid_message(chunk, offset, message_info, &)
+          end
+
+          def process_valid_message(chunk, offset, message_info, &)
+            payload = extract_payload(chunk, message_info[:headers_end], message_info[:payload_end])
+            return find_next_message(chunk, offset) unless valid_payload?(payload)
+
+            process_payload(payload, &)
+            offset + message_info[:total_length]
+          end
+
+          private
+
+          def extract_message_info(chunk, offset)
+            total_length, headers_length = read_prelude(chunk, offset)
+            return unless valid_lengths?(total_length, headers_length)
+
+            message_end = offset + total_length
+            return unless chunk.bytesize >= message_end
+
+            headers_end, payload_end = calculate_positions(offset, total_length, headers_length)
+            return unless valid_positions?(headers_end, payload_end, chunk.bytesize)
+
+            { total_length:, headers_length:, headers_end:, payload_end: }
+          end
+
+          def extract_payload(chunk, headers_end, payload_end)
+            chunk[headers_end...payload_end]
+          end
+
+          def valid_payload?(payload)
+            return false if payload.nil? || payload.empty?
+
+            json_start = payload.index('{')
+            json_end = payload.rindex('}')
+
+            return false if json_start.nil? || json_end.nil? || json_start >= json_end
+
+            true
+          end
+        end
+      end
+    end
+  end
+end
data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb
@@ -0,0 +1,90 @@
+# frozen_string_literal: true
+
+require 'base64'
+
+module RubyLLM
+  module Providers
+    module Bedrock
+      module Streaming
+        # Module for processing payloads from AWS Bedrock streaming responses.
+        # Handles JSON payload extraction, decoding, and chunk creation.
+        #
+        # Responsibilities:
+        # - Extracting and validating JSON payloads
+        # - Decoding Base64-encoded response data
+        # - Creating response chunks from processed data
+        # - Error handling for JSON parsing and processing
+        #
+        # @example Processing a payload
+        #   process_payload(raw_payload) do |chunk|
+        #     yield_chunk_to_client(chunk)
+        #   end
+        module PayloadProcessing
+          def process_payload(payload, &)
+            json_payload = extract_json_payload(payload)
+            parse_and_process_json(json_payload, &)
+          rescue JSON::ParserError => e
+            log_json_parse_error(e, json_payload)
+          rescue StandardError => e
+            log_general_error(e)
+          end
+
+          private
+
+          def extract_json_payload(payload)
+            json_start = payload.index('{')
+            json_end = payload.rindex('}')
+            payload[json_start..json_end]
+          end
+
+          def parse_and_process_json(json_payload, &)
+            json_data = JSON.parse(json_payload)
+            process_json_data(json_data, &)
+          end
+
+          def process_json_data(json_data, &)
+            return unless json_data['bytes']
+
+            data = decode_and_parse_data(json_data)
+            create_and_yield_chunk(data, &)
+          end
+
+          def decode_and_parse_data(json_data)
+            decoded_bytes = Base64.strict_decode64(json_data['bytes'])
+            JSON.parse(decoded_bytes)
+          end
+
+          def create_and_yield_chunk(data, &block)
+            block.call(build_chunk(data))
+          end
+
+          def build_chunk(data)
+            Chunk.new(
+              **extract_chunk_attributes(data)
+            )
+          end
+
+          def extract_chunk_attributes(data)
+            {
+              role: :assistant,
+              model_id: extract_model_id(data),
+              content: extract_streaming_content(data),
+              input_tokens: extract_input_tokens(data),
+              output_tokens: extract_output_tokens(data),
+              tool_calls: extract_tool_calls(data)
+            }
+          end
+
+          def log_json_parse_error(error, json_payload)
+            RubyLLM.logger.debug "Failed to parse payload as JSON: #{error.message}"
+            RubyLLM.logger.debug "Attempted JSON payload: #{json_payload.inspect}"
+          end
+
+          def log_general_error(error)
+            RubyLLM.logger.debug "Error processing payload: #{error.message}"
+          end
+        end
+      end
+    end
+  end
+end
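The `bytes` handling above follows the AWS event-stream convention of wrapping each event as Base64-encoded JSON inside an outer JSON object. A small standalone sketch of that round trip, using only the standard library and made-up sample data:

```ruby
require 'base64'
require 'json'

# Inner event as Bedrock would emit it, then wrapped the way it arrives on the wire.
inner_event = { 'type' => 'content_block_delta', 'delta' => { 'text' => 'Hi' } }
payload     = JSON.generate('bytes' => Base64.strict_encode64(JSON.generate(inner_event)))

# Mirrors extract_json_payload + decode_and_parse_data from the module above.
outer   = JSON.parse(payload[payload.index('{')..payload.rindex('}')])
decoded = JSON.parse(Base64.strict_decode64(outer['bytes']))
decoded.dig('delta', 'text') # => "Hi"
```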
data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb
@@ -0,0 +1,91 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Bedrock
+      module Streaming
+        # Module for handling message preludes in AWS Bedrock streaming responses.
+        # Manages the parsing and validation of message headers and prelude data.
+        #
+        # Responsibilities:
+        # - Reading and validating message preludes
+        # - Calculating message positions and boundaries
+        # - Finding and validating prelude positions in chunks
+        # - Ensuring message integrity through length validation
+        #
+        # @example Reading a prelude
+        #   if can_read_prelude?(chunk, offset)
+        #     total_length, headers_length = read_prelude(chunk, offset)
+        #     process_message_with_lengths(total_length, headers_length)
+        #   end
+        module PreludeHandling
+          def can_read_prelude?(chunk, offset)
+            chunk.bytesize - offset >= 12
+          end
+
+          def read_prelude(chunk, offset)
+            total_length = chunk[offset...offset + 4].unpack1('N')
+            headers_length = chunk[offset + 4...offset + 8].unpack1('N')
+            [total_length, headers_length]
+          end
+
+          def valid_lengths?(total_length, headers_length)
+            validate_length_constraints(total_length, headers_length)
+          end
+
+          def calculate_positions(offset, total_length, headers_length)
+            headers_end = offset + 12 + headers_length
+            payload_end = offset + total_length - 4 # Subtract 4 bytes for message CRC
+            [headers_end, payload_end]
+          end
+
+          def valid_positions?(headers_end, payload_end, chunk_size)
+            return false if headers_end >= payload_end
+            return false if headers_end >= chunk_size
+            return false if payload_end > chunk_size
+
+            true
+          end
+
+          def find_next_message(chunk, offset)
+            next_prelude = find_next_prelude(chunk, offset + 4)
+            next_prelude || chunk.bytesize
+          end
+
+          def find_next_prelude(chunk, start_offset)
+            scan_range(chunk, start_offset).each do |pos|
+              return pos if valid_prelude_at_position?(chunk, pos)
+            end
+            nil
+          end
+
+          private
+
+          def scan_range(chunk, start_offset)
+            (start_offset...(chunk.bytesize - 8))
+          end
+
+          def valid_prelude_at_position?(chunk, pos)
+            lengths = extract_potential_lengths(chunk, pos)
+            validate_length_constraints(*lengths)
+          end
+
+          def extract_potential_lengths(chunk, pos)
+            [
+              chunk[pos...pos + 4].unpack1('N'),
+              chunk[pos + 4...pos + 8].unpack1('N')
+            ]
+          end
+
+          def validate_length_constraints(total_length, headers_length)
+            return false if total_length.nil? || headers_length.nil?
+            return false if total_length <= 0 || total_length > 1_000_000
+            return false if headers_length <= 0 || headers_length >= total_length
+
+            true
+          end
+        end
+      end
+    end
+  end
+end
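The offsets used here come from the AWS event-stream framing: a 12-byte prelude (two big-endian 32-bit lengths plus a 4-byte prelude CRC) precedes the headers, and a 4-byte message CRC closes each message. A simplified sketch of that layout and of what `read_prelude` recovers; the header bytes and CRCs below are placeholders, not valid values:

```ruby
headers  = "\x00" * 20            # placeholder header block
payload  = '{"bytes":"..."}'      # placeholder payload
fake_crc = "\x00\x00\x00\x00"     # CRCs are not validated by this module

total_length = 12 + headers.bytesize + payload.bytesize + 4
message      = [total_length, headers.bytesize].pack('N2') +
               fake_crc + headers + payload + fake_crc

# read_prelude unpacks the first two big-endian 32-bit integers:
message[0...4].unpack1('N') # => total_length
message[4...8].unpack1('N') # => 20
# calculate_positions then derives headers_end = offset + 12 + 20 and
# payload_end = offset + total_length - 4, which bracket the JSON payload.
```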
data/lib/ruby_llm/providers/bedrock/streaming.rb
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+require_relative 'streaming/base'
+require_relative 'streaming/content_extraction'
+require_relative 'streaming/message_processing'
+require_relative 'streaming/payload_processing'
+require_relative 'streaming/prelude_handling'
+
+module RubyLLM
+  module Providers
+    module Bedrock
+      # Streaming implementation for the AWS Bedrock API.
+      # This module provides functionality for handling streaming responses from AWS Bedrock,
+      # including message processing, content extraction, and error handling.
+      #
+      # The implementation is split into several focused modules:
+      # - Base: Core streaming functionality and module coordination
+      # - ContentExtraction: Extracting content from response data
+      # - MessageProcessing: Processing streaming message chunks
+      # - PayloadProcessing: Handling JSON payloads and chunk creation
+      # - PreludeHandling: Managing message preludes and headers
+      #
+      # @example Using the streaming module
+      #   class BedrockClient
+      #     include RubyLLM::Providers::Bedrock::Streaming
+      #
+      #     def stream_response(&block)
+      #       handle_stream(&block)
+      #     end
+      #   end
+      module Streaming
+        include Base
+      end
+    end
+  end
+end
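From an application's point of view none of this plumbing is called directly; streaming still goes through the gem's block-based chat API. A minimal sketch assuming the README's usual pattern, with a hypothetical Bedrock model id:

```ruby
require 'ruby_llm'

chat = RubyLLM.chat(model: 'anthropic.claude-3-5-sonnet-20240620-v1:0') # hypothetical id

chat.ask 'Tell me a short story' do |chunk|
  print chunk.content # each Chunk is built by PayloadProcessing#build_chunk
end
```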
data/lib/ruby_llm/providers/bedrock.rb
@@ -0,0 +1,83 @@
+# frozen_string_literal: true
+
+require 'openssl'
+require 'time'
+
+module RubyLLM
+  module Providers
+    # AWS Bedrock API integration. Handles chat completion and streaming
+    # for Claude models.
+    module Bedrock
+      extend Provider
+      extend Bedrock::Chat
+      extend Bedrock::Streaming
+      extend Bedrock::Models
+      extend Bedrock::Signing
+
+      # This provider currently only supports Anthropic models, so the tools/media implementation is shared
+      extend Anthropic::Media
+      extend Anthropic::Tools
+
+      module_function
+
+      def api_base
+        @api_base ||= "https://bedrock-runtime.#{RubyLLM.config.bedrock_region}.amazonaws.com"
+      end
+
+      def post(url, payload)
+        signature = sign_request("#{connection.url_prefix}#{url}", payload:)
+        connection.post url, payload do |req|
+          req.headers.merge! build_headers(signature.headers, streaming: block_given?)
+
+          yield req if block_given?
+        end
+      end
+
+      def sign_request(url, method: :post, payload: nil)
+        signer = create_signer
+        request = build_request(url, method:, payload:)
+        signer.sign_request(request)
+      end
+
+      def create_signer
+        Signing::Signer.new({
+                              access_key_id: RubyLLM.config.bedrock_api_key,
+                              secret_access_key: RubyLLM.config.bedrock_secret_key,
+                              session_token: RubyLLM.config.bedrock_session_token,
+                              region: RubyLLM.config.bedrock_region,
+                              service: 'bedrock'
+                            })
+      end
+
+      def build_request(url, method: :post, payload: nil)
+        {
+          connection: connection,
+          http_method: method,
+          url: url || completion_url,
+          body: payload ? JSON.generate(payload, ascii_only: false) : nil
+        }
+      end
+
+      def build_headers(signature_headers, streaming: false)
+        accept_header = streaming ? 'application/vnd.amazon.eventstream' : 'application/json'
+
+        signature_headers.merge(
+          'Content-Type' => 'application/json',
+          'Accept' => accept_header
+        )
+      end
+
+      def capabilities
+        Bedrock::Capabilities
+      end
+
+      def slug
+        'bedrock'
+      end
+
+      def configuration_requirements
+        %i[bedrock_api_key bedrock_secret_key bedrock_region]
+      end
+    end
+  end
+end
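The `bedrock_*` settings read in `create_signer` and listed in `configuration_requirements` are presumably set through the gem's existing `RubyLLM.configure` block (the `configuration.rb` change in this release adds four attributes). A sketch; the environment-variable mapping and the optional session token are assumptions:

```ruby
require 'ruby_llm'

RubyLLM.configure do |config|
  config.bedrock_api_key       = ENV['AWS_ACCESS_KEY_ID']      # used as access_key_id when signing
  config.bedrock_secret_key    = ENV['AWS_SECRET_ACCESS_KEY']  # used as secret_access_key
  config.bedrock_session_token = ENV['AWS_SESSION_TOKEN']      # optional, for temporary credentials
  config.bedrock_region        = 'us-east-1'                   # used in api_base and request signing
end
```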
data/lib/ruby_llm/providers/deepseek/chat.rb
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module DeepSeek
+      # Chat methods of the DeepSeek API integration
+      module Chat
+        module_function
+
+        def format_role(role)
+          # DeepSeek doesn't use the new OpenAI convention for system prompts
+          role.to_s
+        end
+      end
+    end
+  end
+end
data/lib/ruby_llm/providers/deepseek.rb
@@ -5,6 +5,7 @@ module RubyLLM
     # DeepSeek API integration.
     module DeepSeek
       extend OpenAI
+      extend DeepSeek::Chat
 
       module_function
 
@@ -25,6 +26,10 @@ module RubyLLM
       def slug
         'deepseek'
       end
+
+      def configuration_requirements
+        %i[deepseek_api_key]
+      end
     end
   end
 end
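Likewise, the new `configuration_requirements` for DeepSeek names a single credential, presumably configured the same way; a hedged sketch with a hypothetical environment variable:

```ruby
RubyLLM.configure do |config|
  config.deepseek_api_key = ENV['DEEPSEEK_API_KEY']
end
```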