ticuna 0.2.1 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 91f1d0f3d86d36f73ee9127c48209b950a0a9e03d234ec0cf397015794797dda
4
- data.tar.gz: 3db96fa9f07cc35f3baf5fe3af99da54d8e41e7ed954d3add56ee488f4a15de1
3
+ metadata.gz: '058bd0a97b93a756f03650373f98ba045b49a7b72499d8fd259d3520ab375fcd'
4
+ data.tar.gz: 1377d3415e111d5ed72cee4e68f9ff9b98f4a1d947c6bbb854820d8abe82e765
5
5
  SHA512:
6
- metadata.gz: e33ed0a919eda4ba474d3fad6993640b01ddf8b09c754cf5a8492673fbd914a4db3cd387db618ff85dc3a04afad70bc9e8d8de4925eb4ac47d43c78d44b392e7
7
- data.tar.gz: aa329d1ad68be5fa446acbafd41eb3d6b99caf6a3d81f990950243439fbe5e359778ae375ff6f10d3133f9f919983c32d612f3183cdcb4cd34470663582b3b95
6
+ metadata.gz: 41144c5b7ba4226d026d055f33a960174dc7cde8e726dc68f19d2abc44dfda5cbf14fac3fe1f7bbdbd4c8b51d126712200fba3902f9d4e88dc1846f9eede7355
7
+ data.tar.gz: a1d5312ecfb22e1f7fa99ae35bc1057a9aff4e5aae60faa227dbbe8d95f59eb31526126da0aa0cbdf2ac51e34b9958abfe03749064fe59058dfd42686f01bf07
data/CHANGELOG.md CHANGED
@@ -3,3 +3,16 @@
3
3
  ## [0.0.1.0] - 2025-10-02
4
4
 
5
5
  - Initial release
6
+ - Started developing this gem
7
+
8
+ ## [0.1.0] - 2025-10-06
9
+ - Add support for OpenAI
10
+
11
+ ## [0.2.0] - 2025-10-06
12
+ - Add Ticuna::Response [commit here](https://github.com/thiagochirana/ticuna/commit/04ee12ee4caa3c7d30962a117777ef101607eba6)
13
+
14
+ ## [0.2.1] - 2025-10-06
15
+ - Add Ticuna::Response to handle hash and string responses, [commit here](https://github.com/thiagochirana/ticuna/commit/0c7cb8a9e843f37eab41cda35252c10163dc3c9b)
16
+
17
+ ## [0.2.2] - 2025-10-06
18
+ - Add support for OpenAI with stream and output_format (json or text), [PR here](https://github.com/thiagochirana/ticuna/pull/2)
data/lib/ticuna/llm.rb CHANGED
@@ -38,16 +38,13 @@ module Ticuna
38
38
  self
39
39
  end
40
40
 
41
- def ask(message, stream: false, model: "gpt-4.1-nano", &block)
41
+ def ask(message, stream: false, model: "gpt-4.1-nano", output_format: :text, &block)
42
42
  tool_contexts = @tools.map(&:context).compact.join("\n\n")
43
43
 
44
44
  system_message = if tool_contexts.empty?
45
45
  nil
46
46
  else
47
- {
48
- role: "system",
49
- content: "Tools contexts:\n\n#{tool_contexts}"
50
- }
47
+ { role: "system", content: "Tools contexts:\n\n#{tool_contexts}" }
51
48
  end
52
49
 
53
50
  messages = if system_message
@@ -56,7 +53,9 @@ module Ticuna
56
53
  [{ role: "user", content: message }]
57
54
  end
58
55
 
59
- Ticuna::Response.new(@provider.ask_with_messages(messages, stream: stream, model: model, &block))
56
+ Ticuna::Response.new(
57
+ @provider.ask_with_messages(messages, stream: stream, model: model, output_format: output_format, &block)
58
+ )
60
59
  end
61
60
 
62
61
  private
@@ -9,24 +9,36 @@ module Ticuna
9
9
  super(api_key: api_key, base_url: "https://api.openai.com/v1/")
10
10
  end
11
11
 
12
- def ask(message, stream: false, model: "gpt-4.1-nano", &block)
13
- send_request_to(messages: [{ role: "user", content: message }], stream:, model:, &block)
12
+ def ask(message, stream: false, model: "gpt-4.1-nano", output_format: :text, &block)
13
+ send_request_to(
14
+ messages: [{ role: "user", content: message }],
15
+ stream:,
16
+ model:,
17
+ output_format:,
18
+ &block
19
+ )
14
20
  end
15
21
 
16
- def ask_with_messages(messages, stream: false, model: "gpt-4.1-nano", &block)
17
- send_request_to(messages:, stream:, model:, &block)
22
+ def ask_with_messages(messages, stream: false, model: "gpt-4.1-nano", output_format: :text, &block)
23
+ send_request_to(messages:, stream:, model:, output_format:, &block)
18
24
  end
19
25
 
20
26
  private
21
27
 
22
- def send_request_to(messages:, model: nil, stream: false, &block)
28
+ def send_request_to(messages:, model: nil, stream: false, output_format: :text, &block)
29
+ raise ArgumentError, "Invalid output_format: #{output_format}" unless %i[text json].include?(output_format)
30
+
23
31
  body = {
24
32
  model: model,
25
33
  messages: messages,
26
34
  stream: stream
27
35
  }
28
36
 
37
+ body[:response_format] = { type: "json_object" } if output_format == :json
38
+
29
39
  post("chat/completions", body, stream: stream, &block)
40
+ rescue Faraday::BadRequestError => e
41
+ raise "response_format not supported by #{model}" unless e.message.include?("response_format")
30
42
  end
31
43
 
32
44
  def post(path, body, stream: false, &block)
@@ -35,7 +47,6 @@ module Ticuna
35
47
 
36
48
  @connection.post(path) do |req|
37
49
  req.body = body.to_json
38
-
39
50
  req.options.on_data = proc do |chunk, _bytes|
40
51
  chunk.each_line do |line|
41
52
  next unless line.start_with?("data:")
@@ -51,7 +62,6 @@ module Ticuna
51
62
 
52
63
  delta = json.dig("choices", 0, "delta", "content")
53
64
  full_text << delta if delta
54
-
55
65
  yield json if block_given?
56
66
  end
57
67
  end
@@ -64,10 +74,7 @@ module Ticuna
64
74
  "choices" => [
65
75
  {
66
76
  "index" => 0,
67
- "message" => {
68
- "role" => "assistant",
69
- "content" => full_text
70
- },
77
+ "message" => { "role" => "assistant", "content" => full_text },
71
78
  "finish_reason" => "stop"
72
79
  }
73
80
  ]
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Ticuna
4
- VERSION = "0.2.1"
4
+ VERSION = "0.2.2"
5
5
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: ticuna
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.2.1
4
+ version: 0.2.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Chirana