ruby_llm 1.9.2 → 1.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/README.md +5 -4
  3. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +3 -0
  4. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +1 -0
  5. data/lib/generators/ruby_llm/upgrade_to_v1_10/templates/add_v1_10_message_columns.rb.tt +19 -0
  6. data/lib/generators/ruby_llm/upgrade_to_v1_10/upgrade_to_v1_10_generator.rb +50 -0
  7. data/lib/ruby_llm/active_record/acts_as_legacy.rb +5 -1
  8. data/lib/ruby_llm/active_record/chat_methods.rb +12 -0
  9. data/lib/ruby_llm/active_record/message_methods.rb +41 -8
  10. data/lib/ruby_llm/aliases.json +4 -16
  11. data/lib/ruby_llm/chat.rb +10 -7
  12. data/lib/ruby_llm/configuration.rb +2 -1
  13. data/lib/ruby_llm/message.rb +37 -11
  14. data/lib/ruby_llm/models.json +1902 -1785
  15. data/lib/ruby_llm/models.rb +134 -12
  16. data/lib/ruby_llm/provider.rb +9 -4
  17. data/lib/ruby_llm/providers/anthropic/chat.rb +128 -13
  18. data/lib/ruby_llm/providers/anthropic/media.rb +2 -2
  19. data/lib/ruby_llm/providers/anthropic/streaming.rb +25 -1
  20. data/lib/ruby_llm/providers/bedrock/chat.rb +67 -15
  21. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +59 -2
  22. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +5 -0
  23. data/lib/ruby_llm/providers/gemini/chat.rb +69 -3
  24. data/lib/ruby_llm/providers/gemini/streaming.rb +32 -1
  25. data/lib/ruby_llm/providers/gemini/tools.rb +16 -3
  26. data/lib/ruby_llm/providers/gpustack/chat.rb +1 -1
  27. data/lib/ruby_llm/providers/mistral/chat.rb +58 -1
  28. data/lib/ruby_llm/providers/ollama/chat.rb +1 -1
  29. data/lib/ruby_llm/providers/openai/capabilities.rb +6 -2
  30. data/lib/ruby_llm/providers/openai/chat.rb +87 -3
  31. data/lib/ruby_llm/providers/openai/media.rb +1 -1
  32. data/lib/ruby_llm/providers/openai/streaming.rb +11 -3
  33. data/lib/ruby_llm/providers/openai/temperature.rb +28 -0
  34. data/lib/ruby_llm/providers/openai.rb +1 -1
  35. data/lib/ruby_llm/providers/openrouter/chat.rb +154 -0
  36. data/lib/ruby_llm/providers/openrouter/streaming.rb +74 -0
  37. data/lib/ruby_llm/providers/openrouter.rb +2 -0
  38. data/lib/ruby_llm/providers/vertexai.rb +5 -1
  39. data/lib/ruby_llm/providers/xai/chat.rb +15 -0
  40. data/lib/ruby_llm/providers/xai/models.rb +75 -0
  41. data/lib/ruby_llm/providers/xai.rb +28 -0
  42. data/lib/ruby_llm/stream_accumulator.rb +111 -14
  43. data/lib/ruby_llm/streaming.rb +54 -51
  44. data/lib/ruby_llm/thinking.rb +49 -0
  45. data/lib/ruby_llm/tokens.rb +47 -0
  46. data/lib/ruby_llm/tool_call.rb +6 -3
  47. data/lib/ruby_llm/version.rb +1 -1
  48. data/lib/ruby_llm.rb +10 -8
  49. data/lib/tasks/models.rake +20 -12
  50. metadata +15 -5
data/lib/ruby_llm/streaming.rb CHANGED
@@ -29,7 +29,7 @@ module RubyLLM
     end
 
     def handle_stream(&block)
-      to_json_stream do |data|
+      build_on_data_handler do |data|
         block.call(build_chunk(data)) if data
       end
     end
@@ -40,19 +40,15 @@ module RubyLLM
       Faraday::VERSION.start_with?('1')
     end
 
-    def to_json_stream(&)
+    def build_on_data_handler(&handler)
       buffer = +''
       parser = EventStreamParser::Parser.new
 
-      create_stream_processor(parser, buffer, &)
-    end
-
-    def create_stream_processor(parser, buffer, &)
-      if faraday_1?
-        legacy_stream_processor(parser, &)
-      else
-        stream_processor(parser, buffer, &)
-      end
+      FaradayHandlers.build(
+        faraday_v1: faraday_1?,
+        on_chunk: ->(chunk, env) { process_stream_chunk(chunk, parser, env, &handler) },
+        on_failed_response: ->(chunk, env) { handle_failed_response(chunk, buffer, env) }
+      )
     end
 
     def process_stream_chunk(chunk, parser, env, &)
@@ -67,22 +63,6 @@ module RubyLLM
       end
     end
 
-    def legacy_stream_processor(parser, &block)
-      proc do |chunk, _size|
-        process_stream_chunk(chunk, parser, nil, &block)
-      end
-    end
-
-    def stream_processor(parser, buffer, &block)
-      proc do |chunk, _bytes, env|
-        if env&.status == 200
-          process_stream_chunk(chunk, parser, env, &block)
-        else
-          handle_failed_response(chunk, buffer, env)
-        end
-      end
-    end
-
     def error_chunk?(chunk)
       chunk.start_with?('event: error')
     end
@@ -92,30 +72,18 @@ module RubyLLM
     end
 
     def handle_json_error_chunk(chunk, env)
-      parsed_data = JSON.parse(chunk)
-      status, _message = parse_streaming_error(parsed_data.to_json)
-      error_response = build_stream_error_response(parsed_data, env, status)
-      ErrorMiddleware.parse_error(provider: self, response: error_response)
-    rescue JSON::ParserError => e
-      RubyLLM.logger.debug "Failed to parse JSON error chunk: #{e.message}"
+      parse_error_from_json(chunk, env, 'Failed to parse JSON error chunk')
     end
 
     def handle_error_chunk(chunk, env)
       error_data = chunk.split("\n")[1].delete_prefix('data: ')
-      parsed_data = JSON.parse(error_data)
-      status, _message = parse_streaming_error(parsed_data.to_json)
-      error_response = build_stream_error_response(parsed_data, env, status)
-      ErrorMiddleware.parse_error(provider: self, response: error_response)
-    rescue JSON::ParserError => e
-      RubyLLM.logger.debug "Failed to parse error chunk: #{e.message}"
+      parse_error_from_json(error_data, env, 'Failed to parse error chunk')
     end
 
     def handle_failed_response(chunk, buffer, env)
       buffer << chunk
       error_data = JSON.parse(buffer)
-      status, _message = parse_streaming_error(error_data.to_json)
-      error_response = env.merge(body: error_data, status: status || env.status)
-      ErrorMiddleware.parse_error(provider: self, response: error_response)
+      handle_parsed_error(error_data, env)
     rescue JSON::ParserError
       RubyLLM.logger.debug "Accumulating error chunk: #{chunk}"
     end
@@ -135,20 +103,13 @@ module RubyLLM
       parsed = JSON.parse(data)
       return parsed unless parsed.is_a?(Hash) && parsed.key?('error')
 
-      status, _message = parse_streaming_error(parsed.to_json)
-      error_response = build_stream_error_response(parsed, env, status)
-      ErrorMiddleware.parse_error(provider: self, response: error_response)
+      handle_parsed_error(parsed, env)
     rescue JSON::ParserError => e
       RubyLLM.logger.debug "Failed to parse data chunk: #{e.message}"
     end
 
     def handle_error_event(data, env)
-      parsed_data = JSON.parse(data)
-      status, _message = parse_streaming_error(parsed_data.to_json)
-      error_response = build_stream_error_response(parsed_data, env, status)
-      ErrorMiddleware.parse_error(provider: self, response: error_response)
-    rescue JSON::ParserError => e
-      RubyLLM.logger.debug "Failed to parse error event: #{e.message}"
+      parse_error_from_json(data, env, 'Failed to parse error event')
     end
 
     def parse_streaming_error(data)
@@ -159,6 +120,19 @@ module RubyLLM
       [500, "Failed to parse error: #{data}"]
     end
 
+    def handle_parsed_error(parsed_data, env)
+      status, _message = parse_streaming_error(parsed_data.to_json)
+      error_response = build_stream_error_response(parsed_data, env, status)
+      ErrorMiddleware.parse_error(provider: self, response: error_response)
+    end
+
+    def parse_error_from_json(data, env, error_message)
+      parsed_data = JSON.parse(data)
+      handle_parsed_error(parsed_data, env)
+    rescue JSON::ParserError => e
+      RubyLLM.logger.debug "#{error_message}: #{e.message}"
+    end
+
     def build_stream_error_response(parsed_data, env, status)
       error_status = status || env&.status || 500
 
@@ -168,5 +142,34 @@ module RubyLLM
         env.merge(body: parsed_data, status: error_status)
       end
     end
+
+    # Builds Faraday on_data handlers for different major versions.
+    module FaradayHandlers
+      module_function
+
+      def build(faraday_v1:, on_chunk:, on_failed_response:)
+        if faraday_v1
+          v1_on_data(on_chunk)
+        else
+          v2_on_data(on_chunk, on_failed_response)
+        end
+      end
+
+      def v1_on_data(on_chunk)
+        proc do |chunk, _size|
+          on_chunk.call(chunk, nil)
+        end
+      end
+
+      def v2_on_data(on_chunk, on_failed_response)
+        proc do |chunk, _bytes, env|
+          if env&.status == 200
+            on_chunk.call(chunk, env)
+          else
+            on_failed_response.call(chunk, env)
+          end
+        end
+      end
+    end
   end
 end
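The refactor above separates "what to do with a chunk" from "which on_data arity Faraday expects": Faraday 1 yields (chunk, size) to on_data, while Faraday 2 yields (chunk, bytes, env), and only the latter exposes the response env for status-based failure routing. A minimal sketch of the dispatch, using only names visible in the diff (treating FaradayHandlers as reachable under RubyLLM::Streaming is an assumption; the callbacks are hypothetical stand-ins for process_stream_chunk and handle_failed_response):

    require 'faraday'
    require 'ruby_llm'

    # env is nil under Faraday 1, so only the v2 handler can branch on status.
    on_chunk  = ->(chunk, env) { puts "chunk (status=#{env&.status}): #{chunk.bytesize} bytes" }
    on_failed = ->(chunk, env) { warn "failed (#{env&.status}): #{chunk}" }

    handler = RubyLLM::Streaming::FaradayHandlers.build(
      faraday_v1: Faraday::VERSION.start_with?('1'),
      on_chunk: on_chunk,
      on_failed_response: on_failed
    )

    # Roughly how such a handler is attached in Faraday:
    # request.options.on_data = handler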
data/lib/ruby_llm/thinking.rb ADDED
@@ -0,0 +1,49 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  # Represents provider thinking output.
+  class Thinking
+    attr_reader :text, :signature
+
+    def initialize(text: nil, signature: nil)
+      @text = text
+      @signature = signature
+    end
+
+    def self.build(text: nil, signature: nil)
+      text = nil if text.is_a?(String) && text.empty?
+      signature = nil if signature.is_a?(String) && signature.empty?
+
+      return nil if text.nil? && signature.nil?
+
+      new(text: text, signature: signature)
+    end
+
+    def pretty_print(printer)
+      printer.object_group(self) do
+        printer.breakable
+        printer.text 'text='
+        printer.pp text
+        printer.comma_breakable
+        printer.text 'signature='
+        printer.pp(signature ? '[REDACTED]' : nil)
+      end
+    end
+  end
+
+  class Thinking
+    # Normalized config for thinking across providers.
+    class Config
+      attr_reader :effort, :budget
+
+      def initialize(effort: nil, budget: nil)
+        @effort = effort.is_a?(Symbol) ? effort.to_s : effort
+        @budget = budget
+      end
+
+      def enabled?
+        !effort.nil? || !budget.nil?
+      end
+    end
+  end
+end
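A short usage sketch of the new value object (return values inferred from the code above; the text is illustrative):

    RubyLLM::Thinking.build(text: '', signature: nil)
    # => nil — blank strings are normalized to nil, so callers can use a
    #    simple truthiness check instead of testing for empty text

    thinking = RubyLLM::Thinking.build(text: 'Weighing both options...')
    thinking.text       # => "Weighing both options..."
    thinking.signature  # => nil

    config = RubyLLM::Thinking::Config.new(effort: :high)
    config.effort    # => "high" — symbols are normalized to strings
    config.enabled?  # => true

Note that pretty_print deliberately prints '[REDACTED]' in place of a present signature, keeping opaque provider signatures out of console output.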
data/lib/ruby_llm/tokens.rb ADDED
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  # Represents token usage for a response.
+  class Tokens
+    attr_reader :input, :output, :cached, :cache_creation, :thinking
+
+    # rubocop:disable Metrics/ParameterLists
+    def initialize(input: nil, output: nil, cached: nil, cache_creation: nil, thinking: nil, reasoning: nil)
+      @input = input
+      @output = output
+      @cached = cached
+      @cache_creation = cache_creation
+      @thinking = thinking || reasoning
+    end
+    # rubocop:enable Metrics/ParameterLists
+
+    # rubocop:disable Metrics/ParameterLists
+    def self.build(input: nil, output: nil, cached: nil, cache_creation: nil, thinking: nil, reasoning: nil)
+      return nil if [input, output, cached, cache_creation, thinking, reasoning].all?(&:nil?)
+
+      new(
+        input: input,
+        output: output,
+        cached: cached,
+        cache_creation: cache_creation,
+        thinking: thinking,
+        reasoning: reasoning
+      )
+    end
+    # rubocop:enable Metrics/ParameterLists
+
+    def to_h
+      {
+        input_tokens: input,
+        output_tokens: output,
+        cached_tokens: cached,
+        cache_creation_tokens: cache_creation,
+        thinking_tokens: thinking
+      }.compact
+    end
+
+    def reasoning
+      thinking
+    end
+  end
+end
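The reasoning: keyword and the #reasoning reader make the class tolerant of both the "thinking" and "reasoning" vocabularies used by different providers; internally everything lands in @thinking. A usage sketch (token counts illustrative):

    tokens = RubyLLM::Tokens.build(input: 120, output: 45, reasoning: 30)
    tokens.thinking   # => 30 — reasoning: is accepted as an alias for thinking:
    tokens.to_h       # => { input_tokens: 120, output_tokens: 45, thinking_tokens: 30 }
                      #    nil fields are dropped by .compact

    RubyLLM::Tokens.build  # => nil when no counts were reported at all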
data/lib/ruby_llm/tool_call.rb CHANGED
@@ -4,19 +4,22 @@ module RubyLLM
   # Represents a function call from an AI model to a Tool.
   class ToolCall
     attr_reader :id, :name, :arguments
+    attr_accessor :thought_signature
 
-    def initialize(id:, name:, arguments: {})
+    def initialize(id:, name:, arguments: {}, thought_signature: nil)
       @id = id
       @name = name
       @arguments = arguments
+      @thought_signature = thought_signature
     end
 
     def to_h
       {
         id: @id,
         name: @name,
-        arguments: @arguments
-      }
+        arguments: @arguments,
+        thought_signature: @thought_signature
+      }.compact
     end
   end
 end
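Because to_h now ends in .compact, a nil thought_signature is dropped and the serialized hash keeps its pre-1.10 shape; only calls that carry a signature (the gemini/tools.rb changes above suggest that is the motivating case) gain the extra key. A sketch with illustrative values:

    call = RubyLLM::ToolCall.new(id: 'call_1', name: 'get_weather',
                                 arguments: { 'city' => 'Berlin' })
    call.to_h
    # => { id: "call_1", name: "get_weather", arguments: { "city" => "Berlin" } }

    call.thought_signature = 'sig-abc'   # writable via the new attr_accessor
    call.to_h[:thought_signature]        # => "sig-abc"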
data/lib/ruby_llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  VERSION = '1.9.2'
+  VERSION = '1.11.0'
 end
data/lib/ruby_llm.rb CHANGED
@@ -13,19 +13,20 @@ require 'zeitwerk'
 
 loader = Zeitwerk::Loader.for_gem
 loader.inflector.inflect(
-  'ruby_llm' => 'RubyLLM',
-  'llm' => 'LLM',
-  'openai' => 'OpenAI',
+  'UI' => 'UI',
   'api' => 'API',
-  'deepseek' => 'DeepSeek',
-  'perplexity' => 'Perplexity',
   'bedrock' => 'Bedrock',
-  'openrouter' => 'OpenRouter',
+  'deepseek' => 'DeepSeek',
   'gpustack' => 'GPUStack',
+  'llm' => 'LLM',
   'mistral' => 'Mistral',
-  'vertexai' => 'VertexAI',
+  'openai' => 'OpenAI',
+  'openrouter' => 'OpenRouter',
   'pdf' => 'PDF',
-  'UI' => 'UI'
+  'perplexity' => 'Perplexity',
+  'ruby_llm' => 'RubyLLM',
+  'vertexai' => 'VertexAI',
+  'xai' => 'XAI'
 )
 loader.ignore("#{__dir__}/tasks")
 loader.ignore("#{__dir__}/generators")
@@ -100,6 +101,7 @@ RubyLLM::Provider.register :openai, RubyLLM::Providers::OpenAI
 RubyLLM::Provider.register :openrouter, RubyLLM::Providers::OpenRouter
 RubyLLM::Provider.register :perplexity, RubyLLM::Providers::Perplexity
 RubyLLM::Provider.register :vertexai, RubyLLM::Providers::VertexAI
+RubyLLM::Provider.register :xai, RubyLLM::Providers::XAI
 
 if defined?(Rails::Railtie)
   require 'ruby_llm/railtie'
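With the inflection ('xai' => 'XAI') and registration in place, the new provider should be usable through the ordinary chat API; a hedged sketch (the xai_api_key setter appears in the configuration diff below, and the model name is illustrative):

    require 'ruby_llm'

    RubyLLM.configure do |config|
      config.xai_api_key = ENV.fetch('XAI_API_KEY')
    end

    chat = RubyLLM.chat(model: 'grok-4', provider: :xai)
    chat.ask('Hello from the new xAI provider')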
data/lib/tasks/models.rake CHANGED
@@ -45,6 +45,7 @@ def configure_from_env
   config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
   config.perplexity_api_key = ENV.fetch('PERPLEXITY_API_KEY', nil)
   config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
+  config.xai_api_key = ENV.fetch('XAI_API_KEY', nil)
   config.mistral_api_key = ENV.fetch('MISTRAL_API_KEY', nil)
   config.vertexai_location = ENV.fetch('GOOGLE_CLOUD_LOCATION', nil)
   config.vertexai_project_id = ENV.fetch('GOOGLE_CLOUD_PROJECT', nil)
@@ -61,7 +62,8 @@ def configure_bedrock(config)
 end
 
 def refresh_models
-  initial_count = RubyLLM.models.all.size
+  existing_models = RubyLLM::Models.read_from_json
+  initial_count = existing_models.size
   puts "Refreshing models (#{initial_count} cached)..."
 
   models = RubyLLM.models.refresh!
@@ -69,19 +71,29 @@ def refresh_models
   if models.all.empty? && initial_count.zero?
     puts 'Error: Failed to fetch models.'
     exit(1)
-  elsif models.all.size == initial_count && initial_count.positive?
-    puts 'Warning: Model list unchanged.'
   else
-    puts 'Validating models...'
-    validate_models!(models)
+    existing_data = sorted_models_data(existing_models)
+    new_data = sorted_models_data(models.all)
 
-    puts "Saving models.json (#{models.all.size} models)"
-    models.save_to_json
+    if new_data == existing_data && initial_count.positive?
+      puts 'Warning: Model list unchanged.'
+    else
+      puts 'Validating models...'
+      validate_models!(models)
+
+      puts "Saving models.json (#{models.all.size} models)"
+      models.save_to_json
+    end
   end
 
   @models = models
 end
 
+def sorted_models_data(models)
+  models.map(&:to_h)
+        .sort_by { |model| [model[:provider].to_s, model[:id].to_s] }
+end
+
 def validate_models!(models)
   schema_path = RubyLLM::Models.schema_file
   models_data = models.all.map(&:to_h)
@@ -154,11 +166,7 @@ def generate_models_markdown
 
   ---
 
-  ## Model Data Sources
-
-  - **OpenAI, Anthropic, DeepSeek, Gemini, VertexAI**: Enriched by [models.dev](https://models.dev/) *([LLM metadata API](https://models.dev/api.json))*
-  - **OpenRouter**: Direct API
-  - **Others**: Local capabilities files
+  _Model information enriched by [models.dev](https://models.dev) and our custom code._
 
   ## Last Updated
   {: .d-inline-block }
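refresh_models now compares sorted model payloads rather than a bare count, so renames and metadata edits are detected even when the total number of models is unchanged. A self-contained sketch of the comparison semantics (plain hashes standing in for model objects):

    old_data = [{ provider: 'openai', id: 'gpt-4o' }]
    new_data = [{ provider: 'openai', id: 'gpt-4.1' }]

    sort = ->(models) { models.sort_by { |m| [m[:provider].to_s, m[:id].to_s] } }

    # Same size, different content: the old `size == initial_count` check
    # would have reported "Model list unchanged"; the data comparison
    # correctly falls through to validate and save.
    sort.call(old_data) == sort.call(new_data)  # => false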
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 1.9.2
+  version: 1.11.0
 platform: ruby
 authors:
 - Carmine Paolino
@@ -52,7 +52,7 @@ dependencies:
     - !ruby/object:Gem::Version
       version: 1.10.0
 - !ruby/object:Gem::Dependency
-  name: faraday-multipart
+  name: faraday-retry
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
@@ -66,7 +66,7 @@ dependencies:
     - !ruby/object:Gem::Version
       version: '1'
 - !ruby/object:Gem::Dependency
-  name: faraday-net_http
+  name: faraday-multipart
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
@@ -80,7 +80,7 @@ dependencies:
     - !ruby/object:Gem::Version
       version: '1'
 - !ruby/object:Gem::Dependency
-  name: faraday-retry
+  name: faraday-net_http
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
@@ -180,6 +180,8 @@ files:
 - lib/generators/ruby_llm/install/templates/message_model.rb.tt
 - lib/generators/ruby_llm/install/templates/model_model.rb.tt
 - lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt
+- lib/generators/ruby_llm/upgrade_to_v1_10/templates/add_v1_10_message_columns.rb.tt
+- lib/generators/ruby_llm/upgrade_to_v1_10/upgrade_to_v1_10_generator.rb
 - lib/generators/ruby_llm/upgrade_to_v1_7/templates/migration.rb.tt
 - lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb
 - lib/generators/ruby_llm/upgrade_to_v1_9/templates/add_v1_9_message_columns.rb.tt
@@ -271,10 +273,13 @@ files:
 - lib/ruby_llm/providers/openai/models.rb
 - lib/ruby_llm/providers/openai/moderation.rb
 - lib/ruby_llm/providers/openai/streaming.rb
+- lib/ruby_llm/providers/openai/temperature.rb
 - lib/ruby_llm/providers/openai/tools.rb
 - lib/ruby_llm/providers/openai/transcription.rb
 - lib/ruby_llm/providers/openrouter.rb
+- lib/ruby_llm/providers/openrouter/chat.rb
 - lib/ruby_llm/providers/openrouter/models.rb
+- lib/ruby_llm/providers/openrouter/streaming.rb
 - lib/ruby_llm/providers/perplexity.rb
 - lib/ruby_llm/providers/perplexity/capabilities.rb
 - lib/ruby_llm/providers/perplexity/chat.rb
@@ -285,9 +290,14 @@ files:
 - lib/ruby_llm/providers/vertexai/models.rb
 - lib/ruby_llm/providers/vertexai/streaming.rb
 - lib/ruby_llm/providers/vertexai/transcription.rb
+- lib/ruby_llm/providers/xai.rb
+- lib/ruby_llm/providers/xai/chat.rb
+- lib/ruby_llm/providers/xai/models.rb
 - lib/ruby_llm/railtie.rb
 - lib/ruby_llm/stream_accumulator.rb
 - lib/ruby_llm/streaming.rb
+- lib/ruby_llm/thinking.rb
+- lib/ruby_llm/tokens.rb
 - lib/ruby_llm/tool.rb
 - lib/ruby_llm/tool_call.rb
 - lib/ruby_llm/transcription.rb
@@ -309,7 +319,7 @@ metadata:
   funding_uri: https://github.com/sponsors/crmne
   rubygems_mfa_required: 'true'
 post_install_message: |
-  Upgrading from RubyLLM <= 1.8.x? Check the upgrade guide for new features and migration instructions
+  Upgrading from RubyLLM < 1.10.x? Check the upgrade guide for new features and migration instructions
   --> https://rubyllm.com/upgrading/
 rdoc_options: []
 require_paths: