ruby_llm_community 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. checksums.yaml +4 -4
  2. data/LICENSE +22 -0
  3. data/README.md +172 -0
  4. data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +108 -0
  5. data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
  6. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
  7. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +15 -0
  8. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +14 -0
  9. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +6 -0
  10. data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +3 -0
  11. data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
  12. data/lib/generators/ruby_llm/install_generator.rb +121 -0
  13. data/lib/ruby_llm/active_record/acts_as.rb +382 -0
  14. data/lib/ruby_llm/aliases.json +217 -0
  15. data/lib/ruby_llm/aliases.rb +56 -0
  16. data/lib/ruby_llm/attachment.rb +164 -0
  17. data/lib/ruby_llm/chat.rb +226 -0
  18. data/lib/ruby_llm/chunk.rb +6 -0
  19. data/lib/ruby_llm/configuration.rb +73 -0
  20. data/lib/ruby_llm/connection.rb +126 -0
  21. data/lib/ruby_llm/content.rb +52 -0
  22. data/lib/ruby_llm/context.rb +29 -0
  23. data/lib/ruby_llm/embedding.rb +30 -0
  24. data/lib/ruby_llm/error.rb +84 -0
  25. data/lib/ruby_llm/image.rb +53 -0
  26. data/lib/ruby_llm/message.rb +81 -0
  27. data/lib/ruby_llm/mime_type.rb +67 -0
  28. data/lib/ruby_llm/model/info.rb +101 -0
  29. data/lib/ruby_llm/model/modalities.rb +22 -0
  30. data/lib/ruby_llm/model/pricing.rb +51 -0
  31. data/lib/ruby_llm/model/pricing_category.rb +48 -0
  32. data/lib/ruby_llm/model/pricing_tier.rb +34 -0
  33. data/lib/ruby_llm/model.rb +7 -0
  34. data/lib/ruby_llm/models.json +29924 -0
  35. data/lib/ruby_llm/models.rb +214 -0
  36. data/lib/ruby_llm/models_schema.json +168 -0
  37. data/lib/ruby_llm/provider.rb +221 -0
  38. data/lib/ruby_llm/providers/anthropic/capabilities.rb +179 -0
  39. data/lib/ruby_llm/providers/anthropic/chat.rb +120 -0
  40. data/lib/ruby_llm/providers/anthropic/embeddings.rb +20 -0
  41. data/lib/ruby_llm/providers/anthropic/media.rb +116 -0
  42. data/lib/ruby_llm/providers/anthropic/models.rb +56 -0
  43. data/lib/ruby_llm/providers/anthropic/streaming.rb +45 -0
  44. data/lib/ruby_llm/providers/anthropic/tools.rb +108 -0
  45. data/lib/ruby_llm/providers/anthropic.rb +37 -0
  46. data/lib/ruby_llm/providers/bedrock/capabilities.rb +167 -0
  47. data/lib/ruby_llm/providers/bedrock/chat.rb +76 -0
  48. data/lib/ruby_llm/providers/bedrock/media.rb +73 -0
  49. data/lib/ruby_llm/providers/bedrock/models.rb +82 -0
  50. data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
  51. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +63 -0
  52. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +71 -0
  53. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +79 -0
  54. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +92 -0
  55. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +91 -0
  56. data/lib/ruby_llm/providers/bedrock/streaming.rb +36 -0
  57. data/lib/ruby_llm/providers/bedrock.rb +83 -0
  58. data/lib/ruby_llm/providers/deepseek/capabilities.rb +131 -0
  59. data/lib/ruby_llm/providers/deepseek/chat.rb +17 -0
  60. data/lib/ruby_llm/providers/deepseek.rb +30 -0
  61. data/lib/ruby_llm/providers/gemini/capabilities.rb +351 -0
  62. data/lib/ruby_llm/providers/gemini/chat.rb +146 -0
  63. data/lib/ruby_llm/providers/gemini/embeddings.rb +39 -0
  64. data/lib/ruby_llm/providers/gemini/images.rb +48 -0
  65. data/lib/ruby_llm/providers/gemini/media.rb +55 -0
  66. data/lib/ruby_llm/providers/gemini/models.rb +41 -0
  67. data/lib/ruby_llm/providers/gemini/streaming.rb +66 -0
  68. data/lib/ruby_llm/providers/gemini/tools.rb +82 -0
  69. data/lib/ruby_llm/providers/gemini.rb +36 -0
  70. data/lib/ruby_llm/providers/gpustack/chat.rb +17 -0
  71. data/lib/ruby_llm/providers/gpustack/models.rb +55 -0
  72. data/lib/ruby_llm/providers/gpustack.rb +33 -0
  73. data/lib/ruby_llm/providers/mistral/capabilities.rb +163 -0
  74. data/lib/ruby_llm/providers/mistral/chat.rb +26 -0
  75. data/lib/ruby_llm/providers/mistral/embeddings.rb +36 -0
  76. data/lib/ruby_llm/providers/mistral/models.rb +49 -0
  77. data/lib/ruby_llm/providers/mistral.rb +32 -0
  78. data/lib/ruby_llm/providers/ollama/chat.rb +28 -0
  79. data/lib/ruby_llm/providers/ollama/media.rb +50 -0
  80. data/lib/ruby_llm/providers/ollama.rb +29 -0
  81. data/lib/ruby_llm/providers/openai/capabilities.rb +306 -0
  82. data/lib/ruby_llm/providers/openai/chat.rb +87 -0
  83. data/lib/ruby_llm/providers/openai/embeddings.rb +36 -0
  84. data/lib/ruby_llm/providers/openai/images.rb +38 -0
  85. data/lib/ruby_llm/providers/openai/media.rb +81 -0
  86. data/lib/ruby_llm/providers/openai/models.rb +39 -0
  87. data/lib/ruby_llm/providers/openai/response.rb +116 -0
  88. data/lib/ruby_llm/providers/openai/response_media.rb +76 -0
  89. data/lib/ruby_llm/providers/openai/streaming.rb +191 -0
  90. data/lib/ruby_llm/providers/openai/tools.rb +100 -0
  91. data/lib/ruby_llm/providers/openai.rb +44 -0
  92. data/lib/ruby_llm/providers/openai_base.rb +44 -0
  93. data/lib/ruby_llm/providers/openrouter/models.rb +88 -0
  94. data/lib/ruby_llm/providers/openrouter.rb +26 -0
  95. data/lib/ruby_llm/providers/perplexity/capabilities.rb +138 -0
  96. data/lib/ruby_llm/providers/perplexity/chat.rb +17 -0
  97. data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
  98. data/lib/ruby_llm/providers/perplexity.rb +52 -0
  99. data/lib/ruby_llm/railtie.rb +17 -0
  100. data/lib/ruby_llm/stream_accumulator.rb +103 -0
  101. data/lib/ruby_llm/streaming.rb +162 -0
  102. data/lib/ruby_llm/tool.rb +100 -0
  103. data/lib/ruby_llm/tool_call.rb +31 -0
  104. data/lib/ruby_llm/utils.rb +49 -0
  105. data/lib/ruby_llm/version.rb +5 -0
  106. data/lib/ruby_llm.rb +98 -0
  107. data/lib/tasks/aliases.rake +235 -0
  108. data/lib/tasks/models_docs.rake +224 -0
  109. data/lib/tasks/models_update.rake +108 -0
  110. data/lib/tasks/release.rake +32 -0
  111. data/lib/tasks/vcr.rake +99 -0
  112. metadata +128 -7
data/lib/ruby_llm/providers/anthropic/capabilities.rb
@@ -0,0 +1,179 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class Anthropic
+       # Determines capabilities and pricing for Anthropic models
+       module Capabilities
+         module_function
+
+         # Determines the context window size for a given model
+         # @param model_id [String] the model identifier
+         # @return [Integer] the context window size in tokens
+         def determine_context_window(_model_id)
+           # All Claude 3, 3.5, and 3.7 models have 200K token context windows
+           200_000
+         end
+
+         # Determines the maximum output tokens for a given model
+         # @param model_id [String] the model identifier
+         # @return [Integer] the maximum output tokens
+         def determine_max_tokens(model_id)
+           case model_id
+           when /claude-3-7-sonnet/, /claude-3-5/ then 8_192
+           else 4_096
+           end
+         end
+
+         # Gets the input price per million tokens for a given model
+         # @param model_id [String] the model identifier
+         # @return [Float] the price per million tokens for input
+         def get_input_price(model_id)
+           PRICES.dig(model_family(model_id), :input) || default_input_price
+         end
+
+         # Gets the output price per million tokens for a given model
+         # @param model_id [String] the model identifier
+         # @return [Float] the price per million tokens for output
+         def get_output_price(model_id)
+           PRICES.dig(model_family(model_id), :output) || default_output_price
+         end
+
+         # Determines if a model supports vision capabilities
+         # @param model_id [String] the model identifier
+         # @return [Boolean] true if the model supports vision
+         def supports_vision?(model_id)
+           # All Claude 3, 3.5, and 3.7 models support vision
+           !model_id.match?(/claude-[12]/)
+         end
+
+         # Determines if a model supports function calling
+         # @param model_id [String] the model identifier
+         # @return [Boolean] true if the model supports functions
+         def supports_functions?(model_id)
+           model_id.match?(/claude-3/)
+         end
+
+         # Determines if a model supports JSON mode
+         # @param model_id [String] the model identifier
+         # @return [Boolean] true if the model supports JSON mode
+         def supports_json_mode?(model_id)
+           model_id.match?(/claude-3/)
+         end
+
+         # Determines if a model supports extended thinking
+         # @param model_id [String] the model identifier
+         # @return [Boolean] true if the model supports extended thinking
+         def supports_extended_thinking?(model_id)
+           model_id.match?(/claude-3-7-sonnet/)
+         end
+
+         # Determines the model family for a given model ID
+         # @param model_id [String] the model identifier
+         # @return [String] the model family identifier
+         def model_family(model_id)
+           case model_id
+           when /claude-3-7-sonnet/ then 'claude-3-7-sonnet'
+           when /claude-3-5-sonnet/ then 'claude-3-5-sonnet'
+           when /claude-3-5-haiku/ then 'claude-3-5-haiku'
+           when /claude-3-opus/ then 'claude-3-opus'
+           when /claude-3-sonnet/ then 'claude-3-sonnet'
+           when /claude-3-haiku/ then 'claude-3-haiku'
+           else 'claude-2'
+           end
+         end
+
+         # Returns the model type
+         # @param model_id [String] the model identifier (unused but kept for API consistency)
+         # @return [String] the model type, always 'chat' for Anthropic models
+         def model_type(_)
+           'chat'
+         end
+
+         # Pricing information for Anthropic models (per million tokens)
+         PRICES = {
+           'claude-3-7-sonnet': { input: 3.0, output: 15.0 },
+           'claude-3-5-sonnet': { input: 3.0, output: 15.0 },
+           'claude-3-5-haiku': { input: 0.80, output: 4.0 },
+           'claude-3-opus': { input: 15.0, output: 75.0 },
+           'claude-3-haiku': { input: 0.25, output: 1.25 },
+           'claude-2': { input: 3.0, output: 15.0 }
+         }.freeze
+
+         # Default input price if model not found in PRICES
+         # @return [Float] default price per million tokens for input
+         def default_input_price
+           3.0
+         end
+
+         # Default output price if model not found in PRICES
+         # @return [Float] default price per million tokens for output
+         def default_output_price
+           15.0
+         end
+
+         def modalities_for(model_id)
+           modalities = {
+             input: ['text'],
+             output: ['text']
+           }
+
+           # All Claude 3+ models support vision
+           unless model_id.match?(/claude-[12]/)
+             modalities[:input] << 'image'
+             modalities[:input] << 'pdf'
+           end
+
+           modalities
+         end
+
+         def capabilities_for(model_id)
+           capabilities = ['streaming']
+
+           # Function calling for Claude 3+
+           if model_id.match?(/claude-3/)
+             capabilities << 'function_calling'
+             capabilities << 'batch'
+           end
+
+           # Extended thinking (reasoning) for Claude 3.7
+           capabilities << 'reasoning' if model_id.match?(/claude-3-7/)
+
+           # Citations
+           capabilities << 'citations' if model_id.match?(/claude-3\.5|claude-3-7/)
+
+           capabilities
+         end
+
+         def pricing_for(model_id)
+           family = model_family(model_id)
+           prices = PRICES.fetch(family.to_sym, { input: default_input_price, output: default_output_price })
+
+           standard_pricing = {
+             input_per_million: prices[:input],
+             output_per_million: prices[:output]
+           }
+
+           # Batch is typically half the price
+           batch_pricing = {
+             input_per_million: prices[:input] * 0.5,
+             output_per_million: prices[:output] * 0.5
+           }
+
+           # Add reasoning output pricing for 3.7 models
+           if model_id.match?(/claude-3-7/)
+             standard_pricing[:reasoning_output_per_million] = prices[:output] * 2.5
+             batch_pricing[:reasoning_output_per_million] = prices[:output] * 1.25
+           end
+
+           {
+             text_tokens: {
+               standard: standard_pricing,
+               batch: batch_pricing
+             }
+           }
+         end
+       end
+     end
+   end
+ end
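
Since Capabilities is defined with module_function, its helpers can be exercised directly once the gem is loaded. A minimal sketch of the behavior encoded above; the model ids are illustrative placeholders, not necessarily entries in models.json:

    require 'ruby_llm'

    caps = RubyLLM::Providers::Anthropic::Capabilities

    caps.model_family('claude-3-5-sonnet-latest')           # => 'claude-3-5-sonnet'
    caps.determine_max_tokens('claude-3-5-sonnet-latest')   # => 8192
    caps.supports_extended_thinking?('claude-3-7-sonnet-latest') # => true
    caps.pricing_for('claude-3-5-haiku-latest')[:text_tokens][:standard]
    # => { input_per_million: 0.8, output_per_million: 4.0 }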
data/lib/ruby_llm/providers/anthropic/chat.rb
@@ -0,0 +1,120 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class Anthropic
+       # Chat methods of the Anthropic API integration
+       module Chat
+         module_function
+
+         def completion_url
+           '/v1/messages'
+         end
+
+         def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil, # rubocop:disable Metrics/ParameterLists,Lint/UnusedMethodArgument
+                            cache_prompts: { system: false, user: false, tools: false })
+           system_messages, chat_messages = separate_messages(messages)
+           system_content = build_system_content(system_messages, cache: cache_prompts[:system])
+
+           build_base_payload(chat_messages, model, stream, cache: cache_prompts[:user]).tap do |payload|
+             add_optional_fields(payload, system_content:, tools:, temperature:,
+                                 cache_tools: cache_prompts[:tools])
+           end
+         end
+
+         def separate_messages(messages)
+           messages.partition { |msg| msg.role == :system }
+         end
+
+         def build_system_content(system_messages, cache: false)
+           system_messages.flat_map.with_index do |msg, idx|
+             message_cache = cache if idx == system_messages.size - 1
+             format_system_message(msg, cache: message_cache)
+           end
+         end
+
+         def build_base_payload(chat_messages, model, stream, cache: false)
+           messages = chat_messages.map.with_index do |msg, idx|
+             message_cache = cache if idx == chat_messages.size - 1
+             format_message(msg, cache: message_cache)
+           end
+
+           {
+             model: model,
+             messages:,
+             stream: stream,
+             max_tokens: RubyLLM.models.find(model)&.max_tokens || 4096
+           }
+         end
+
+         def add_optional_fields(payload, system_content:, tools:, temperature:, cache_tools: false)
+           if tools.any?
+             tool_definitions = tools.values.map { |t| Tools.function_for(t) }
+             tool_definitions[-1][:cache_control] = { type: 'ephemeral' } if cache_tools
+             payload[:tools] = tool_definitions
+           end
+
+           payload[:system] = system_content unless system_content.empty?
+           payload[:temperature] = temperature unless temperature.nil?
+         end
+
+         def parse_completion_response(response)
+           data = response.body
+           content_blocks = data['content'] || []
+
+           text_content = extract_text_content(content_blocks)
+           tool_use_blocks = Tools.find_tool_uses(content_blocks)
+
+           build_message(data, text_content, tool_use_blocks, response)
+         end
+
+         def extract_text_content(blocks)
+           text_blocks = blocks.select { |c| c['type'] == 'text' }
+           text_blocks.map { |c| c['text'] }.join
+         end
+
+         def build_message(data, content, tool_use_blocks, response)
+           Message.new(
+             role: :assistant,
+             content: content,
+             tool_calls: Tools.parse_tool_calls(tool_use_blocks),
+             input_tokens: data.dig('usage', 'input_tokens'),
+             output_tokens: data.dig('usage', 'output_tokens'),
+             model_id: data['model'],
+             cache_creation_tokens: data.dig('usage', 'cache_creation_input_tokens'),
+             cached_tokens: data.dig('usage', 'cache_read_input_tokens'),
+             raw: response
+           )
+         end
+
+         def format_message(msg, cache: false)
+           if msg.tool_call?
+             Tools.format_tool_call(msg)
+           elsif msg.tool_result?
+             Tools.format_tool_result(msg)
+           else
+             format_basic_message(msg, cache:)
+           end
+         end
+
+         def format_system_message(msg, cache: false)
+           Media.format_content(msg.content, cache:)
+         end
+
+         def format_basic_message(msg, cache: false)
+           {
+             role: convert_role(msg.role),
+             content: Media.format_content(msg.content, cache:)
+           }
+         end
+
+         def convert_role(role)
+           case role
+           when :tool, :user then 'user'
+           else 'assistant'
+           end
+         end
+       end
+     end
+   end
+ end
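
A rough illustration of the message-formatting helpers above, called as module functions. Constructing a RubyLLM::Message with role: and content: keywords is assumed from the gem's public message class; outputs are shown as comments:

    require 'ruby_llm'

    chat = RubyLLM::Providers::Anthropic::Chat

    chat.completion_url      # => '/v1/messages'
    chat.convert_role(:tool) # => 'user'

    # A plain user message becomes a single Anthropic text content block
    chat.format_basic_message(RubyLLM::Message.new(role: :user, content: 'Hello!'))
    # => { role: 'user', content: [{ type: 'text', text: 'Hello!' }] }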
data/lib/ruby_llm/providers/anthropic/embeddings.rb
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class Anthropic
+       # Embeddings methods of the Anthropic API integration
+       module Embeddings
+         private
+
+         def embed
+           raise Error, "Anthropic doesn't support embeddings"
+         end
+
+         alias render_embedding_payload embed
+         alias embedding_url embed
+         alias parse_embedding_response embed
+       end
+     end
+   end
+ end
data/lib/ruby_llm/providers/anthropic/media.rb
@@ -0,0 +1,116 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class Anthropic
+       # Handles formatting of media content (images, PDFs, audio) for Anthropic
+       module Media
+         module_function
+
+         def format_content(content, cache: false)
+           # Convert Hash/Array back to JSON string for API
+           return [format_text(content.to_json, cache:)] if content.is_a?(Hash) || content.is_a?(Array)
+           return [format_text(content, cache:)] unless content.is_a?(Content)
+
+           parts = []
+           parts << format_text(content.text, cache:) if content.text
+
+           content.attachments.each do |attachment|
+             case attachment.type
+             when :image
+               parts << format_image(attachment)
+             when :pdf
+               parts << format_pdf(attachment)
+             when :text
+               parts << format_text_file(attachment)
+             else
+               raise UnsupportedAttachmentError, attachment.mime_type
+             end
+           end
+
+           parts
+         end
+
+         def format_text(text, cache: false)
+           with_cache_control(
+             {
+               type: 'text',
+               text: text
+             },
+             cache:
+           )
+         end
+
+         def format_image(image, cache: false)
+           if image.url?
+             with_cache_control(
+               {
+                 type: 'image',
+                 source: {
+                   type: 'url',
+                   url: image.source
+                 }
+               },
+               cache:
+             )
+           else
+             with_cache_control(
+               {
+                 type: 'image',
+                 source: {
+                   type: 'base64',
+                   media_type: image.mime_type,
+                   data: image.encoded
+                 }
+               },
+               cache:
+             )
+           end
+         end
+
+         def format_pdf(pdf, cache: false)
+           if pdf.url?
+             with_cache_control(
+               {
+                 type: 'document',
+                 source: {
+                   type: 'url',
+                   url: pdf.source
+                 }
+               },
+               cache:
+             )
+           else
+             with_cache_control(
+               {
+                 type: 'document',
+                 source: {
+                   type: 'base64',
+                   media_type: pdf.mime_type,
+                   data: pdf.encoded
+                 }
+               },
+               cache:
+             )
+           end
+         end
+
+         def format_text_file(text_file, cache: false)
+           with_cache_control(
+             {
+               type: 'text',
+               text: Utils.format_text_file_for_llm(text_file)
+             },
+             cache:
+           )
+         end
+
+         def with_cache_control(hash, cache: false)
+           return hash unless cache
+
+           hash.merge(cache_control: { type: 'ephemeral' })
+         end
+       end
+     end
+   end
+ end
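
A quick sketch of the cache-control wrapping this module performs, using the module functions directly; expected return values are shown as comments:

    require 'ruby_llm'

    media = RubyLLM::Providers::Anthropic::Media

    media.format_text('What is in this file?')
    # => { type: 'text', text: 'What is in this file?' }

    media.format_text('Reusable system prompt', cache: true)
    # => { type: 'text', text: 'Reusable system prompt', cache_control: { type: 'ephemeral' } }

    media.format_content({ city: 'Berlin' })
    # => [{ type: 'text', text: '{"city":"Berlin"}' }]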
data/lib/ruby_llm/providers/anthropic/models.rb
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class Anthropic
+       # Models methods of the Anthropic API integration
+       module Models
+         module_function
+
+         def models_url
+           '/v1/models'
+         end
+
+         def parse_list_models_response(response, slug, capabilities)
+           Array(response.body['data']).map do |model_data|
+             model_id = model_data['id']
+
+             Model::Info.new(
+               id: model_id,
+               name: model_data['display_name'],
+               provider: slug,
+               family: capabilities.model_family(model_id),
+               created_at: Time.parse(model_data['created_at']),
+               context_window: capabilities.determine_context_window(model_id),
+               max_output_tokens: capabilities.determine_max_tokens(model_id),
+               modalities: capabilities.modalities_for(model_id),
+               capabilities: capabilities.capabilities_for(model_id),
+               pricing: capabilities.pricing_for(model_id),
+               metadata: {}
+             )
+           end
+         end
+
+         def extract_model_id(data)
+           data.dig('message', 'model')
+         end
+
+         def extract_input_tokens(data)
+           data.dig('message', 'usage', 'input_tokens')
+         end
+
+         def extract_output_tokens(data)
+           data.dig('message', 'usage', 'output_tokens') || data.dig('usage', 'output_tokens')
+         end
+
+         def extract_cached_tokens(data)
+           data.dig('message', 'usage', 'cache_read_input_tokens')
+         end
+
+         def extract_cache_creation_tokens(data)
+           data.dig('message', 'usage', 'cache_creation_input_tokens')
+         end
+       end
+     end
+   end
+ end
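
The extract_* helpers simply dig into Anthropic's streaming event payloads. A small sketch with hand-built hashes that mirror the message_start event shape:

    require 'ruby_llm'

    models = RubyLLM::Providers::Anthropic::Models

    models.extract_model_id({ 'message' => { 'model' => 'claude-3-5-haiku-20241022' } })
    # => 'claude-3-5-haiku-20241022'

    models.extract_output_tokens({ 'usage' => { 'output_tokens' => 12 } })
    # => 12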
data/lib/ruby_llm/providers/anthropic/streaming.rb
@@ -0,0 +1,45 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class Anthropic
+       # Streaming methods of the Anthropic API integration
+       module Streaming
+         private
+
+         def stream_url
+           completion_url
+         end
+
+         def build_chunk(data)
+           Chunk.new(
+             role: :assistant,
+             model_id: extract_model_id(data),
+             content: data.dig('delta', 'text'),
+             input_tokens: extract_input_tokens(data),
+             output_tokens: extract_output_tokens(data),
+             cached_tokens: extract_cached_tokens(data),
+             cache_creation_tokens: extract_cache_creation_tokens(data),
+             tool_calls: extract_tool_calls(data)
+           )
+         end
+
+         def json_delta?(data)
+           data['type'] == 'content_block_delta' && data.dig('delta', 'type') == 'input_json_delta'
+         end
+
+         def parse_streaming_error(data)
+           error_data = JSON.parse(data)
+           return unless error_data['type'] == 'error'
+
+           case error_data.dig('error', 'type')
+           when 'overloaded_error'
+             [529, error_data['error']['message']]
+           else
+             [500, error_data['error']['message']]
+           end
+         end
+       end
+     end
+   end
+ end
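
These are private helpers mixed into the provider instance, so they are not meant to be called directly; a throwaway harness (purely for illustration, using send to reach the private methods) shows the error mapping:

    require 'ruby_llm'

    # Hypothetical probe class that just mixes in the streaming helpers
    probe = Class.new { include RubyLLM::Providers::Anthropic::Streaming }.new

    probe.send(:json_delta?, { 'type' => 'content_block_delta', 'delta' => { 'type' => 'input_json_delta' } })
    # => true

    probe.send(:parse_streaming_error, '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"}}')
    # => [529, "Overloaded"]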
data/lib/ruby_llm/providers/anthropic/tools.rb
@@ -0,0 +1,108 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class Anthropic
+       # Tools methods of the Anthropic API integration
+       module Tools
+         module_function
+
+         def find_tool_uses(blocks)
+           blocks.select { |c| c['type'] == 'tool_use' }
+         end
+
+         def format_tool_call(msg)
+           content = []
+
+           content << Media.format_text(msg.content) unless msg.content.nil? || msg.content.empty?
+
+           msg.tool_calls.each_value do |tool_call|
+             content << format_tool_use_block(tool_call)
+           end
+
+           {
+             role: 'assistant',
+             content:
+           }
+         end
+
+         def format_tool_result(msg)
+           {
+             role: 'user',
+             content: [format_tool_result_block(msg)]
+           }
+         end
+
+         def format_tool_use_block(tool_call)
+           {
+             type: 'tool_use',
+             id: tool_call.id,
+             name: tool_call.name,
+             input: tool_call.arguments
+           }
+         end
+
+         def format_tool_result_block(msg)
+           {
+             type: 'tool_result',
+             tool_use_id: msg.tool_call_id,
+             content: msg.content
+           }
+         end
+
+         def function_for(tool)
+           {
+             name: tool.name,
+             description: tool.description,
+             input_schema: {
+               type: 'object',
+               properties: clean_parameters(tool.parameters),
+               required: required_parameters(tool.parameters)
+             }
+           }
+         end
+
+         def extract_tool_calls(data)
+           if json_delta?(data)
+             { nil => ToolCall.new(id: nil, name: nil, arguments: data.dig('delta', 'partial_json')) }
+           else
+             parse_tool_calls(data['content_block'])
+           end
+         end
+
+         def parse_tool_calls(content_blocks)
+           return nil if content_blocks.nil?
+
+           # Handle single content block (backward compatibility)
+           content_blocks = [content_blocks] unless content_blocks.is_a?(Array)
+
+           tool_calls = {}
+           content_blocks.each do |block|
+             next unless block && block['type'] == 'tool_use'
+
+             tool_calls[block['id']] = ToolCall.new(
+               id: block['id'],
+               name: block['name'],
+               arguments: block['input']
+             )
+           end
+
+           tool_calls.empty? ? nil : tool_calls
+         end
+
+         def clean_parameters(parameters)
+           parameters.transform_values do |param|
+             {
+               type: param.type,
+               description: param.description
+             }.compact
+           end
+         end
+
+         def required_parameters(parameters)
+           parameters.select { |_, param| param.required }.keys
+         end
+       end
+     end
+   end
+ end
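
To see how a tool definition is translated into Anthropic's tool schema, here is a sketch assuming ruby_llm's Tool subclass DSL (description / param); the Weather tool itself is made up and the exact default type string is an assumption:

    require 'ruby_llm'

    class Weather < RubyLLM::Tool
      description 'Looks up the current weather'
      param :city, desc: 'City name'

      def execute(city:)
        "Sunny in #{city}"
      end
    end

    RubyLLM::Providers::Anthropic::Tools.function_for(Weather.new)
    # => { name: 'weather',
    #      description: 'Looks up the current weather',
    #      input_schema: { type: 'object',
    #                      properties: { city: { type: 'string', description: 'City name' } },
    #                      required: [:city] } }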
data/lib/ruby_llm/providers/anthropic.rb
@@ -0,0 +1,37 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     # Anthropic Claude API integration. Handles the complexities of
+     # Claude's unique message format and tool calling conventions.
+     class Anthropic < Provider
+       include Anthropic::Chat
+       include Anthropic::Embeddings
+       include Anthropic::Media
+       include Anthropic::Models
+       include Anthropic::Streaming
+       include Anthropic::Tools
+
+       def api_base
+         'https://api.anthropic.com'
+       end
+
+       def headers
+         {
+           'x-api-key' => @config.anthropic_api_key,
+           'anthropic-version' => '2023-06-01'
+         }
+       end
+
+       class << self
+         def capabilities
+           Anthropic::Capabilities
+         end
+
+         def configuration_requirements
+           %i[anthropic_api_key]
+         end
+       end
+     end
+   end
+ end
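
In normal use the provider is reached through the gem's top-level API rather than instantiated directly. A minimal sketch; the model id is a placeholder and the environment variable name is only a convention:

    require 'ruby_llm'

    RubyLLM.configure do |config|
      config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY')
    end

    chat = RubyLLM.chat(model: 'claude-3-5-haiku-20241022')
    puts chat.ask('Say hello in five words.').content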