llm.rb 0.4.2 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. checksums.yaml +4 -4
  2. data/README.md +132 -84
  3. data/lib/json/schema/array.rb +5 -0
  4. data/lib/json/schema/boolean.rb +4 -0
  5. data/lib/json/schema/integer.rb +23 -1
  6. data/lib/json/schema/leaf.rb +11 -0
  7. data/lib/json/schema/null.rb +4 -0
  8. data/lib/json/schema/number.rb +23 -1
  9. data/lib/json/schema/object.rb +6 -2
  10. data/lib/json/schema/string.rb +26 -1
  11. data/lib/json/schema/version.rb +2 -0
  12. data/lib/json/schema.rb +10 -10
  13. data/lib/llm/buffer.rb +28 -10
  14. data/lib/llm/chat.rb +26 -1
  15. data/lib/llm/core_ext/ostruct.rb +14 -8
  16. data/lib/llm/file.rb +6 -1
  17. data/lib/llm/function.rb +81 -0
  18. data/lib/llm/message.rb +46 -1
  19. data/lib/llm/providers/anthropic/format/completion_format.rb +73 -0
  20. data/lib/llm/providers/anthropic/format.rb +7 -33
  21. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +51 -0
  22. data/lib/llm/providers/anthropic/response_parser.rb +1 -9
  23. data/lib/llm/providers/anthropic.rb +4 -3
  24. data/lib/llm/providers/gemini/audio.rb +4 -4
  25. data/lib/llm/providers/gemini/files.rb +5 -4
  26. data/lib/llm/providers/gemini/format/completion_format.rb +54 -0
  27. data/lib/llm/providers/gemini/format.rb +28 -27
  28. data/lib/llm/providers/gemini/images.rb +9 -4
  29. data/lib/llm/providers/gemini/response_parser/completion_parser.rb +46 -0
  30. data/lib/llm/providers/gemini/response_parser.rb +13 -20
  31. data/lib/llm/providers/gemini.rb +3 -12
  32. data/lib/llm/providers/ollama/format/completion_format.rb +72 -0
  33. data/lib/llm/providers/ollama/format.rb +10 -30
  34. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +42 -0
  35. data/lib/llm/providers/ollama/response_parser.rb +8 -11
  36. data/lib/llm/providers/ollama.rb +3 -11
  37. data/lib/llm/providers/openai/audio.rb +6 -6
  38. data/lib/llm/providers/openai/files.rb +3 -3
  39. data/lib/llm/providers/openai/format/completion_format.rb +81 -0
  40. data/lib/llm/providers/openai/format/respond_format.rb +69 -0
  41. data/lib/llm/providers/openai/format.rb +25 -58
  42. data/lib/llm/providers/openai/images.rb +4 -2
  43. data/lib/llm/providers/openai/response_parser/completion_parser.rb +55 -0
  44. data/lib/llm/providers/openai/response_parser/respond_parser.rb +56 -0
  45. data/lib/llm/providers/openai/response_parser.rb +8 -44
  46. data/lib/llm/providers/openai/responses.rb +10 -11
  47. data/lib/llm/providers/openai.rb +5 -16
  48. data/lib/llm/response/{output.rb → respond.rb} +2 -2
  49. data/lib/llm/response.rb +1 -1
  50. data/lib/llm/version.rb +1 -1
  51. data/lib/llm.rb +28 -0
  52. data/llm.gemspec +1 -0
  53. metadata +28 -3
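
The headline change in 0.5.0 is tool calling: a new LLM::Function class, the LLM.function helper, and a tools: keyword on the OpenAI chat-completions and responses endpoints. Message formatting and response parsing also move out of the provider modules into dedicated CompletionFormat/RespondFormat and CompletionParser/RespondParser classes, and Response::Output is renamed Response::Respond. A rough sketch of the new surface, pieced together from the hunks below (the API key and the command are placeholders):

    fn = LLM.function(:system) do |f|
      f.description "Run system command"
      f.params { |schema| schema.object(command: schema.string.required) }
      f.define { |params| system(params.command) }
    end
    llm = LLM.openai(ENV["KEY"])
    llm.complete("Run the date command", :user, tools: [fn])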
data/lib/llm/providers/openai/format/respond_format.rb ADDED
@@ -0,0 +1,69 @@
+ # frozen_string_literal: true
+
+ module LLM::OpenAI::Format
+   ##
+   # @private
+   class RespondFormat
+     def initialize(message)
+       @message = message
+     end
+
+     def format
+       catch(:abort) do
+         if Hash === message
+           {role: message[:role], content: format_content(message[:content])}
+         else
+           format_message
+         end
+       end
+     end
+
+     private
+
+     def format_content(content)
+       case content
+       when LLM::Response::File
+         format_file(content)
+       when String
+         [{type: :input_text, text: content.to_s}]
+       when LLM::Message
+         format_content(content.content)
+       else
+         raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                        "is not supported by the OpenAI responses API"
+       end
+     end
+
+     def format_message
+       case content
+       when Array
+         format_array
+       else
+         {role: message.role, content: format_content(content)}
+       end
+     end
+
+     def format_array
+       if content.empty?
+         nil
+       elsif returns.any?
+         returns.map { {type: "function_call_output", call_id: _1.id, output: JSON.dump(_1.value)} }
+       else
+         {role: message.role, content: content.flat_map { format_content(_1) }}
+       end
+     end
+
+     def format_file(content)
+       file = LLM::File(content.filename)
+       if file.image?
+         [{type: :input_image, file_id: content.id}]
+       else
+         [{type: :input_file, file_id: content.id}]
+       end
+     end
+
+     def message = @message
+     def content = message.content
+     def returns = content.grep(LLM::Function::Return)
+   end
+ end
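
In practice, RespondFormat maps each message onto the input items the responses API expects: strings become input_text parts, uploaded files become input_image/input_file references, and LLM::Function::Return values short-circuit into function_call_output items. An illustrative shape (not captured output):

    RespondFormat.new(LLM::Message.new(:user, "Hello")).format
    #=> {role: :user, content: [{type: :input_text, text: "Hello"}]}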
data/lib/llm/providers/openai/format.rb CHANGED
@@ -4,6 +4,9 @@ class LLM::OpenAI
   ##
   # @private
   module Format
+    require_relative "format/completion_format"
+    require_relative "format/respond_format"
+
     ##
     # @param [Array<LLM::Message>] messages
     #   The messages to format
@@ -11,11 +14,11 @@ class LLM::OpenAI
     #   The mode to format the messages for
     # @return [Array<Hash>]
     def format(messages, mode)
-      messages.map do
-        if Hash === _1
-          {role: _1[:role], content: format_content(_1[:content], mode)}
+      messages.filter_map do |message|
+        if mode == :complete
+          CompletionFormat.new(message).format
         else
-          {role: _1.role, content: format_content(_1.content, mode)}
+          RespondFormat.new(message).format
         end
       end
     end
@@ -23,62 +26,26 @@ class LLM::OpenAI
     private

     ##
-    # @param [String, URI] content
-    #   The content to format
-    # @return [String, Hash]
-    #   The formatted content
-    def format_content(content, mode)
-      if mode == :complete
-        format_complete(content)
-      elsif mode == :response
-        format_response(content)
-      end
+    # @param [JSON::Schema] schema
+    #   The schema to format
+    # @return [Hash]
+    def format_schema(schema)
+      return {} unless schema
+      {
+        response_format: {
+          type: "json_schema",
+          json_schema: {name: "JSONSchema", schema:}
+        }
+      }
     end

-    def format_complete(content)
-      case content
-      when Array
-        content.flat_map { format_complete(_1) }
-      when URI
-        [{type: :image_url, image_url: {url: content.to_s}}]
-      when LLM::File
-        file = content
-        if file.image?
-          [{type: :image_url, image_url: {url: file.to_data_uri}}]
-        else
-          [{type: :file, file: {filename: file.basename, file_data: file.to_data_uri}}]
-        end
-      when LLM::Response::File
-        [{type: :file, file: {file_id: content.id}}]
-      when String
-        [{type: :text, text: content.to_s}]
-      when LLM::Message
-        format_complete(content.content)
-      else
-        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-                                       "is not supported by the OpenAI chat completions API"
-      end
-    end
-
-    def format_response(content)
-      case content
-      when Array
-        content.flat_map { format_response(_1) }
-      when LLM::Response::File
-        file = LLM::File(content.filename)
-        if file.image?
-          [{type: :input_image, file_id: content.id}]
-        else
-          [{type: :input_file, file_id: content.id}]
-        end
-      when String
-        [{type: :input_text, text: content.to_s}]
-      when LLM::Message
-        format_response(content.content)
-      else
-        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-                                       "is not supported by the OpenAI responses API"
-      end
+    ##
+    # @param [Array<LLM::Function>] tools
+    #   The tools to format
+    # @return [Hash]
+    def format_tools(tools)
+      return {} unless tools
+      {tools: tools.map { _1.format(self) }}
     end
   end
 end
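
Both helpers return request-level options that callers can merge unconditionally, since each degrades to an empty hash when given nil. For instance (illustrative schema hash; a real caller would pass a JSON::Schema object):

    format_schema(nil) #=> {}
    format_tools(nil)  #=> {}
    format_schema({type: "object"})
    #=> {response_format: {type: "json_schema",
    #     json_schema: {name: "JSONSchema", schema: {type: "object"}}}}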
data/lib/llm/providers/openai/images.rb CHANGED
@@ -57,7 +57,7 @@ class LLM::OpenAI
     # Create image variations
     # @example
     #   llm = LLM.openai(ENV["KEY"])
-    #   res = llm.images.create_variation(image: LLM::File("/images/hat.png"), n: 5)
+    #   res = llm.images.create_variation(image: "/images/hat.png", n: 5)
     #   p res.urls
     # @see https://platform.openai.com/docs/api-reference/images/createVariation OpenAI docs
     # @param [File] image The image to create variations from
@@ -66,6 +66,7 @@ class LLM::OpenAI
     # @raise (see LLM::Provider#request)
     # @return [LLM::Response::Image]
     def create_variation(image:, model: "dall-e-2", **params)
+      image = LLM.File(image)
       multi = LLM::Multipart.new(params.merge!(image:, model:))
       req = Net::HTTP::Post.new("/v1/images/variations", headers)
       req["content-type"] = multi.content_type
@@ -78,7 +79,7 @@ class LLM::OpenAI
     # Edit an image
     # @example
     #   llm = LLM.openai(ENV["KEY"])
-    #   res = llm.images.edit(image: LLM::File("/images/hat.png"), prompt: "A cat wearing this hat")
+    #   res = llm.images.edit(image: "/images/hat.png", prompt: "A cat wearing this hat")
     #   p res.urls
     # @see https://platform.openai.com/docs/api-reference/images/createEdit OpenAI docs
     # @param [File] image The image to edit
@@ -88,6 +89,7 @@ class LLM::OpenAI
     # @raise (see LLM::Provider#request)
     # @return [LLM::Response::Image]
     def edit(image:, prompt:, model: "dall-e-2", **params)
+      image = LLM.File(image)
       multi = LLM::Multipart.new(params.merge!(image:, prompt:, model:))
       req = Net::HTTP::Post.new("/v1/images/edits", headers)
       req["content-type"] = multi.content_type
data/lib/llm/providers/openai/response_parser/completion_parser.rb ADDED
@@ -0,0 +1,55 @@
+ # frozen_string_literal: true
+
+ module LLM::OpenAI::ResponseParser
+   ##
+   # @private
+   class CompletionParser
+     def initialize(body)
+       @body = OpenStruct.from_hash(body)
+     end
+
+     def format(response)
+       {
+         model:,
+         prompt_tokens:,
+         completion_tokens:,
+         total_tokens:,
+         choices: format_choices(response)
+       }
+     end
+
+     private
+
+     def format_choices(response)
+       choices.map.with_index do |choice, index|
+         message = choice.message
+         extra = {
+           index:, response:,
+           logprobs: choice.logprobs,
+           tool_calls: format_tool_calls(message.tool_calls),
+           original_tool_calls: message.tool_calls
+         }
+         LLM::Message.new(message.role, message.content, extra)
+       end
+     end
+
+     def format_tool_calls(tools)
+       (tools || []).filter_map do |tool|
+         next unless tool.function
+         tool = {
+           id: tool.id,
+           name: tool.function.name,
+           arguments: JSON.parse(tool.function.arguments)
+         }
+         OpenStruct.new(tool)
+       end
+     end
+
+     def body = @body
+     def model = body.model
+     def prompt_tokens = body.usage.prompt_tokens
+     def completion_tokens = body.usage.completion_tokens
+     def total_tokens = body.usage.total_tokens
+     def choices = body.choices
+   end
+ end
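
The parser normalizes a raw chat-completions body into the hash that LLM::Response::Completion exposes, with any tool calls parsed into OpenStructs. Roughly, given an illustrative body:

    # Illustrative request body; `response` stands in for the
    # LLM::Response::Completion instance the parser formats for.
    body = {
      "model" => "gpt-4o-mini",
      "usage" => {"prompt_tokens" => 9, "completion_tokens" => 3, "total_tokens" => 12},
      "choices" => [{"message" => {"role" => "assistant", "content" => "Hi!"}}]
    }
    parsed = LLM::OpenAI::ResponseParser::CompletionParser.new(body).format(response)
    parsed[:model]                         #=> "gpt-4o-mini"
    parsed[:choices][0].content            #=> "Hi!"
    parsed[:choices][0].extra[:tool_calls] #=> []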
data/lib/llm/providers/openai/response_parser/respond_parser.rb ADDED
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ module LLM::OpenAI::ResponseParser
+   ##
+   # @private
+   class RespondParser
+     def initialize(body)
+       @body = OpenStruct.from_hash(body)
+     end
+
+     def format(response)
+       {
+         id:,
+         model:,
+         input_tokens:,
+         output_tokens:,
+         total_tokens:,
+         outputs: [format_message(response)]
+       }
+     end
+
+     private
+
+     def format_message(response)
+       message = LLM::Message.new("assistant", +"", {response:, tool_calls: []})
+       choices.each.with_index do |choice, index|
+         if choice.type == "function_call"
+           message.extra[:tool_calls] << format_tool(choice)
+         elsif choice.content
+           choice.content.each do |c|
+             next unless c["type"] == "output_text"
+             message.content << c["text"] << "\n"
+           end
+         end
+       end
+       message
+     end
+
+     def format_tool(tool)
+       OpenStruct.new(
+         id: tool.call_id,
+         name: tool.name,
+         arguments: JSON.parse(tool.arguments)
+       )
+     end
+
+     def body = @body
+     def id = body.id
+     def model = body.model
+     def input_tokens = body.usage.input_tokens
+     def output_tokens = body.usage.output_tokens
+     def total_tokens = body.usage.total_tokens
+     def choices = body.output
+     def tools = body.output.select { _1.type == "function_call" }
+   end
+ end
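
RespondParser folds a responses-API body, where text and function calls arrive as separate output items, into a single assistant message whose extra[:tool_calls] carries the calls. Illustrative body (stand-in data):

    # `response` stands in for the LLM::Response::Respond instance.
    body = {
      "id" => "resp_123", "model" => "gpt-4o-mini",
      "usage" => {"input_tokens" => 7, "output_tokens" => 5, "total_tokens" => 12},
      "output" => [
        {"type" => "message", "content" => [{"type" => "output_text", "text" => "Done."}]},
        {"type" => "function_call", "call_id" => "call_1", "name" => "system",
         "arguments" => '{"command":"ls"}'}
      ]
    }
    parsed = LLM::OpenAI::ResponseParser::RespondParser.new(body).format(response)
    parsed[:outputs][0].content                    #=> "Done.\n"
    parsed[:outputs][0].extra[:tool_calls][0].name #=> "system"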
data/lib/llm/providers/openai/response_parser.rb CHANGED
@@ -8,55 +8,28 @@ class LLM::OpenAI
     # @param [Hash] body
     #   The response body from the LLM provider
     # @return [Hash]
-    def parse_embedding(body)
-      {
-        model: body["model"],
-        embeddings: body["data"].map { _1["embedding"] },
-        prompt_tokens: body.dig("usage", "prompt_tokens"),
-        total_tokens: body.dig("usage", "total_tokens")
-      }
+    def parse_completion(body)
+      CompletionParser.new(body).format(self)
     end

     ##
     # @param [Hash] body
     #   The response body from the LLM provider
     # @return [Hash]
-    def parse_completion(body)
-      {
-        model: body["model"],
-        choices: body["choices"].map.with_index do
-          extra = {
-            index: _2, response: self,
-            logprobs: _1["logprobs"]
-          }
-          LLM::Message.new(*_1["message"].values_at("role", "content"), extra)
-        end,
-        prompt_tokens: body.dig("usage", "prompt_tokens"),
-        completion_tokens: body.dig("usage", "completion_tokens"),
-        total_tokens: body.dig("usage", "total_tokens")
-      }
+    def parse_respond_response(body)
+      RespondParser.new(body).format(self)
    end

     ##
     # @param [Hash] body
     #   The response body from the LLM provider
     # @return [Hash]
-    def parse_output_response(body)
+    def parse_embedding(body)
       {
-        id: body["id"],
         model: body["model"],
-        input_tokens: body.dig("usage", "input_tokens"),
-        output_tokens: body.dig("usage", "output_tokens"),
-        total_tokens: body.dig("usage", "total_tokens"),
-        outputs: body["output"].filter_map.with_index do |output, index|
-          next unless output["content"]
-          extra = {
-            index:, response: self,
-            contents: output["content"],
-            annotations: output["annotations"]
-          }
-          LLM::Message.new(output["role"], text(output), extra)
-        end
+        embeddings: body["data"].map { _1["embedding"] },
+        prompt_tokens: body.dig("usage", "prompt_tokens"),
+        total_tokens: body.dig("usage", "total_tokens")
       }
     end

@@ -73,14 +46,5 @@ class LLM::OpenAI
       end
     }
   end
-
-  private
-
-  def text(output)
-    output["content"]
-      .select { _1["type"] == "output_text" }
-      .map { _1["text"] }
-      .join("\n")
-  end
 end
 end
data/lib/llm/providers/openai/responses.rb CHANGED
@@ -21,14 +21,14 @@ class LLM::OpenAI
   #   require "llm"
   #
   #   llm = LLM.openai(ENV["KEY"])
-  #   file = llm.files.create file: LLM::File("/images/hat.png")
+  #   file = llm.files.create file: "/images/hat.png"
   #   res = llm.responses.create ["Describe the image", file]
   # @example
   #   #!/usr/bin/env ruby
   #   require "llm"
   #
   #   llm = LLM.openai(ENV["KEY"])
-  #   file = llm.files.create file: LLM::File("/documents/freebsd.pdf")
+  #   file = llm.files.create file: "/documents/freebsd.pdf"
   #   res = llm.responses.create ["Describe the document", file]
   class Responses
     include Format
@@ -52,17 +52,14 @@ class LLM::OpenAI
     # @raise [LLM::Error::PromptError]
     #   When given an object a provider does not understand
     # @return [LLM::Response::Output]
-    def create(prompt, role = :user, model: @provider.default_model, schema: nil, **params)
-      params = {model:}
-        .merge!(expand_schema(schema))
-        .merge!(params)
-        .compact
+    def create(prompt, role = :user, model: @provider.default_model, schema: nil, tools: nil, **params)
+      params = [{model:}, format_schema(schema), format_tools(tools), params].inject({}, &:merge!).compact
       req = Net::HTTP::Post.new("/v1/responses", headers)
       messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
-      body = JSON.dump({input: format(messages, :response)}.merge!(params))
+      body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
       res = request(http, req)
-      LLM::Response::Output.new(res).extend(response_parser)
+      LLM::Response::Respond.new(res).extend(response_parser)
     end

     ##
@@ -76,7 +73,7 @@ class LLM::OpenAI
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/responses/#{response_id}?#{query}", headers)
       res = request(http, req)
-      LLM::Response::Output.new(res).extend(response_parser)
+      LLM::Response::Respond.new(res).extend(response_parser)
     end

     ##
@@ -98,7 +95,9 @@ class LLM::OpenAI
       @provider.instance_variable_get(:@http)
     end

-    [:response_parser, :headers, :request, :set_body_stream, :expand_schema].each do |m|
+    [:response_parser, :headers,
+     :request, :set_body_stream,
+     :format_schema, :format_tools].each do |m|
      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
    end
  end
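
create now accepts tools: and threads it through the provider's format_tools, and any function_call items in the response surface on the message's extra[:tool_calls] (see RespondParser above). A hedged sketch; the outputs reader on LLM::Response::Respond is assumed from the parsed hash:

    llm = LLM.openai(ENV["KEY"])
    fn  = LLM.functions["system"] # registered via LLM.function (see the llm.rb hunk below)
    res = llm.responses.create("List the current directory", tools: [fn])
    res.outputs[0].extra[:tool_calls].each { |call| p [call.name, call.arguments] }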
data/lib/llm/providers/openai.rb CHANGED
@@ -8,6 +8,8 @@ module LLM
     require_relative "openai/format"
     require_relative "openai/error_handler"
     require_relative "openai/response_parser"
+    require_relative "openai/response_parser/completion_parser"
+    require_relative "openai/response_parser/respond_parser"
     require_relative "openai/responses"
     require_relative "openai/images"
     require_relative "openai/audio"
@@ -51,14 +53,11 @@ module LLM
     # @raise [LLM::Error::PromptError]
     #   When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model: default_model, schema: nil, **params)
-      params = {model:}
-        .merge!(expand_schema(schema))
-        .merge!(params)
-        .compact
+    def complete(prompt, role = :user, model: default_model, schema: nil, tools: nil, **params)
+      params = [{model:}, format_schema(schema), format_tools(tools), params].inject({}, &:merge!).compact
       req = Net::HTTP::Post.new("/v1/chat/completions", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-      body = JSON.dump({messages: format(messages, :complete)}.merge!(params))
+      body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
       res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
@@ -134,15 +133,5 @@ module LLM
     def error_handler
       LLM::OpenAI::ErrorHandler
     end
-
-    def expand_schema(schema)
-      return {} unless schema
-      {
-        response_format: {
-          type: "json_schema",
-          json_schema: {name: "JSONSchema", schema:}
-        }
-      }
-    end
   end
 end
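
complete gets the same treatment, with the provider-private expand_schema retired in favor of the shared format_schema/format_tools helpers in Format. A sketch of a completion-side tool call (key and prompt are placeholders):

    llm = LLM.openai(ENV["KEY"])
    res = llm.complete("Run the date command", :user, tools: [LLM.functions["system"]])
    res.choices[0].extra[:tool_calls] # OpenStructs with id/name/arguments, per CompletionParser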
data/lib/llm/response/{output.rb → respond.rb} RENAMED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true

 module LLM
-  class Response::Output < Response
+  class Response::Respond < Response
     ##
     # @return [String]
     #   Returns the id of the response
@@ -50,7 +50,7 @@ module LLM
     # @return [Hash]
     #   Returns the parsed response from the provider
     def parsed
-      @parsed ||= parse_output_response(body)
+      @parsed ||= parse_respond_response(body)
     end
   end
 end
data/lib/llm/response.rb CHANGED
@@ -5,7 +5,7 @@ module LLM
   require "json"
   require_relative "response/completion"
   require_relative "response/embedding"
-  require_relative "response/output"
+  require_relative "response/respond"
   require_relative "response/image"
   require_relative "response/audio"
   require_relative "response/audio_transcription"
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module LLM
-  VERSION = "0.4.2"
+  VERSION = "0.5.0"
 end
data/lib/llm.rb CHANGED
@@ -2,6 +2,7 @@

 module LLM
   require "stringio"
+  require_relative "json/schema"
   require_relative "llm/core_ext/ostruct"
   require_relative "llm/version"
   require_relative "llm/utils"
@@ -15,6 +16,7 @@ module LLM
   require_relative "llm/provider"
   require_relative "llm/chat"
   require_relative "llm/buffer"
+  require_relative "llm/function"

   module_function

@@ -58,4 +60,30 @@ module LLM
     require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
     LLM::OpenAI.new(secret, **options)
   end
+
+  ##
+  # Define a function
+  # @example
+  #   LLM.function(:system) do |fn|
+  #     fn.description "Run system command"
+  #     fn.params do |schema|
+  #       schema.object(command: schema.string.required)
+  #     end
+  #     fn.define do |params|
+  #       system(params.command)
+  #     end
+  #   end
+  # @param [Symbol] name The name of the function
+  # @param [Proc] b The block to define the function
+  # @return [LLM::Function] The function object
+  def function(name, &b)
+    functions[name.to_s] = LLM::Function.new(name, &b)
+  end
+
+  ##
+  # Returns all known functions
+  # @return [Hash<String,LLM::Function>]
+  def functions
+    @functions ||= {}
+  end
 end
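
Note that the registry keys are strings regardless of how the function was named, since function coerces with name.to_s:

    LLM.function(:system) { |fn| fn.description "Run system command" }
    LLM.functions.keys      #=> ["system"]
    LLM.functions["system"] #=> #<LLM::Function>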
data/llm.gemspec CHANGED
@@ -36,4 +36,5 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency "rspec", "~> 3.0"
   spec.add_development_dependency "standard", "~> 1.40"
   spec.add_development_dependency "vcr", "~> 6.0"
+  spec.add_development_dependency "dotenv", "~> 2.8"
 end