openaiext 0.0.8 → 0.0.9

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 001ae93d35a8aa13a2e2fe167e027849068c67e3b67e5691b0a1486ba32038b1
-  data.tar.gz: 4bee70a359b8b751a02cb293c0a200bb2775e303e2f96914bec7b8dd42bf3733
+  metadata.gz: 851fc779d886d1a1da353129d64c8203801faca4a34e83fd8a1829a49320ea37
+  data.tar.gz: 199b29d1dc970da2cbc9cf41d22fccd42d1aa42831b1c6b2b675e2d1de28457d
 SHA512:
-  metadata.gz: 25aacf4d31e2919c1d5e09d5c25335e29dfc2b0823b14e53087d26fa1172d5bd3671eb2006ec2c218220062328c4e51999d580bc0999c7732b2cf24c96ea8dd7
-  data.tar.gz: 97330fe2fc52929de29f8fb13dd46d2831e1b6a28761997552731f9a9abb87957504f36241ea83d7f6cdebe37891dafe45065494f0f1b4ab3029303b1f4088dd
+  metadata.gz: 2e48ece09e1d0b48f87a52d3d061eade56bba4736330566af4c82974575724585dc165ab98c8719c248ad003d27f2a439387f9e60b841a29b47b81857d5defde
+  data.tar.gz: 9a63dc77d5d19e94940a9b33ce1f17b67786f13ddcc7b8fe6fbf3a80934e9d4217a6e97886bbc9e4db27abc4ffb6de49adb6226d9c9c988fcee1eeca4e5c0559
data/lib/openaiext/messages.rb CHANGED
@@ -1,34 +1,57 @@
 module OpenAIExt
   class Messages < Array
-    def initialize messages = nil
-      super parse_messages(messages)
+    VALID_ROLES = %w[system user assistant tool].freeze
+
+    def initialize(messages = nil)
+      super(parse_messages(messages))
     end
 
-    def add(message) = concat(parse_messages(message))
+    def add(message)
+      concat(parse_messages(message))
+    end
 
     private
+
     def parse_messages(messages)
       return [] if messages.nil?
 
       messages = [messages] unless messages.is_a?(Array)
-
-      # if first element is ok, then do not parse the rest
-      return messages if messages.first in { role: String | Symbol, content: String | Array | Hash }
-
-      messages.flat_map do |msg|
-        if msg.is_a?(Hash)
-          if msg.keys.size == 1
-            role, content = msg.first
-            { role: role.to_s, content: content }
-          elsif msg.key?(:role) && msg.key?(:content)
-            { role: msg[:role].to_s, content: msg[:content] }
-          else
-            msg.map { |role, content| { role: role.to_s, content: content } }
-          end
-        else
-          raise ArgumentError, "Invalid message format: #{msg}"
-        end
+
+      # Check whether the structure is already in the expected format
+      return messages if messages.first.is_a?(Hash) &&
+                         messages.first.key?(:role) &&
+                         messages.first.key?(:content)
+
+      messages.flat_map { |msg| parse_message(msg) }
+    end
+
+    def parse_message(msg)
+      return parse_hash_message(msg) if msg.is_a?(Hash)
+      raise ArgumentError, "Invalid message format: #{msg.inspect}"
+    end
+
+    def parse_hash_message(msg)
+      if msg.size == 1
+        role, content = msg.first
+        validate_and_format_message(role, content)
+      elsif msg.key?(:role) && msg.key?(:content)
+        validate_and_format_message(msg[:role], msg[:content])
+      else
+        msg.map { |role, content| validate_and_format_message(role, content) }
+      end
+    end
+
+    def validate_and_format_message(role, content)
+      role_str = role.to_s
+      unless VALID_ROLES.include?(role_str)
+        raise ArgumentError, "Invalid role: #{role_str}. Valid roles: #{VALID_ROLES.join(', ')}"
       end
+
+      unless content.is_a?(String) || content.is_a?(Array) || content.is_a?(Hash)
+        raise ArgumentError, "Invalid content: #{content.inspect}"
+      end
+
+      { role: role_str, content: content }
     end
   end
 end
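
A minimal usage sketch of the reworked Messages class (the single-key shorthand comes from the diff above; return values assume the code behaves as written):

    require 'openaiext/messages'

    # Single-key shorthand hashes expand into role/content pairs.
    msgs = OpenAIExt::Messages.new([{ system: 'You are terse.' }, { user: 'Hi!' }])
    # => [{ role: 'system', content: 'You are terse.' }, { role: 'user', content: 'Hi!' }]

    # Explicit role/content hashes take the early-return fast path.
    msgs.add(role: :assistant, content: 'Hello!')

    # New in 0.0.9: unknown roles raise instead of passing through silently.
    OpenAIExt::Messages.new(moderator: 'nope')
    # => ArgumentError: Invalid role: moderator. Valid roles: system, user, assistant, tool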

data/lib/openaiext/model.rb CHANGED
@@ -1,27 +1,22 @@
 module OpenAIExt
   module Model
-    GPT_BASIC_MODEL = ENV.fetch('OPENAI_GPT_BASIC_MODEL', 'gpt-4o-mini')
-    GPT_ADVANCED_MODEL = ENV.fetch('OPENAI_GPT_ADVANCED_MODEL', 'gpt-4o')
+    GPT_BASIC_MODEL = ENV.fetch('OPENAI_GPT_BASIC_MODEL', 'gpt-4o-mini')
+    GPT_ADVANCED_MODEL = ENV.fetch('OPENAI_GPT_ADVANCED_MODEL', 'gpt-4o')
     GPT_ADVANCED_MODEL_LATEST = ENV.fetch('OPENAI_GPT_ADVANCED_MODEL_LATEST', 'chatgpt-4o-latest')
 
-    O1_BASIC_MODEL = ENV.fetch('OPENAI_O1_BASIC_MODEL', 'o1-mini')
-    O1_ADVANCED_MODEL = ENV.fetch('OPENAI_O1_ADVANCED_MODEL', 'o1-preview')
+    BASIC_REASONING_MODEL = ENV.fetch('OPENAI_BASIC_REASONING_MODEL', 'o1-mini')
+    ADVANCED_REASONING_MODEL = ENV.fetch('OPENAI_ADVANCED_REASONING_MODEL', 'o1-preview')
+
+    MODEL_MAP = {
+      gpt_basic: GPT_BASIC_MODEL,
+      gpt_advanced: GPT_ADVANCED_MODEL,
+      gpt_advanced_latest: GPT_ADVANCED_MODEL_LATEST,
+      reasoning_basic: BASIC_REASONING_MODEL,
+      reasoning_advanced: ADVANCED_REASONING_MODEL
+    }.freeze
 
     def self.select(model)
-      case model
-      when :gpt_basic
-        GPT_BASIC_MODEL
-      when :gpt_advanced
-        GPT_ADVANCED_MODEL
-      when :gpt_advanced_latest
-        GPT_ADVANCED_MODEL_LATEST
-      when :o1_basic
-        O1_BASIC_MODEL
-      when :o1_advanced
-        O1_ADVANCED_MODEL
-      else
-        model
-      end
+      MODEL_MAP.fetch(model, model)
     end
   end
 end
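
Model.select now reduces to a frozen hash lookup with pass-through for unknown values, and the aliases were renamed: :o1_basic / :o1_advanced give way to :reasoning_basic / :reasoning_advanced. A quick sketch, assuming the ENV defaults above:

    OpenAIExt::Model.select(:gpt_basic)           # => 'gpt-4o-mini'
    OpenAIExt::Model.select(:reasoning_advanced)  # => 'o1-preview'
    OpenAIExt::Model.select('gpt-4-turbo')        # => 'gpt-4-turbo' (unknown values pass through)
    OpenAIExt::Model.select(:o1_basic)            # => :o1_basic (the old alias no longer resolves)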

data/lib/openaiext/response_extender.rb CHANGED
@@ -1,44 +1,62 @@
 module ResponseExtender
-  def chat_params = self[:chat_params]
+  def chat_params
+    self[:chat_params]
+  end
+
+  def message
+    dig('choices', 0, 'message')
+  end
 
-  def message = dig('choices', 0, 'message')
+  def content
+    dig('choices', 0, 'message', 'content')
+  end
 
-  def content = dig('choices', 0, 'message', 'content')
-  def content? = !content.nil?
+  def content?
+    !content.nil?
+  end
+
+  def tool_calls
+    dig('choices', 0, 'message', 'tool_calls')
+  end
 
-  def tool_calls = dig('choices', 0, 'message', 'tool_calls')
-  def tool_calls? = !tool_calls.nil?
+  def tool_calls?
+    !tool_calls.nil?
+  end
 
   def functions
-    return if tool_calls.nil?
-
-    functions = tool_calls.filter { |tool| tool['type'].eql? 'function' }
-    return if functions.empty?
-
-    functions_list = []
-    functions.map.with_index do |function, function_index|
-      function_info = tool_calls.dig(function_index, 'function')
-      function_def = { id: function['id'], name: function_info['name'], arguments: Oj.load(function_info['arguments'], symbol_keys: true) }
-
-      def function_def.run(context:)
+    return [] unless tool_calls&.any?
+
+    tool_functions = tool_calls.select { |tool| tool['type'] == 'function' }
+    return [] if tool_functions.empty?
+
+    tool_functions.map do |function|
+      function_info = function['function']
+      function_def = {
+        id: function['id'],
+        name: function_info['name'],
+        arguments: Oj.load(function_info['arguments'], symbol_keys: true)
+      }
+
+      function_def.define_singleton_method(:run) do |context:|
         {
           tool_call_id: self[:id],
-          role: :tool,
-          name: self[:name],
-          content: context.send(self[:name], **self[:arguments])
+          role: :tool,
+          name: self[:name],
+          content: Oj.dump(context.send(self[:name], **self[:arguments]))
         }
       end
 
-      functions_list << function_def
+      function_def
     end
-
-    functions_list
   end
 
   def functions_run_all(context:)
-    raise 'No functions to run' if functions.nil?
-    functions.map { |function| function.run(context:) }
+    raise 'No functions to run' if functions.empty?
+
+    functions.map { |function| function.run(context: context) }
   end
 
-  def functions? = !functions.nil?
+  def functions?
+    functions.any?
+  end
 end
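
The function list is now always an Array ([] rather than nil when there are no tool calls), run is attached with define_singleton_method instead of def on the instance, and tool results are serialized with Oj.dump before being returned as tool content. A hedged sketch of consuming it; WeatherContext, get_weather, and the weather_tools schema are hypothetical names for illustration:

    # Hypothetical context object whose method names match the tool names.
    class WeatherContext
      def get_weather(city:)
        { city: city, temp_c: 21 }
      end
    end

    # Hypothetical OpenAI tools schema matching the context method above.
    weather_tools = [{
      type: 'function',
      function: {
        name: 'get_weather',
        parameters: { type: 'object', properties: { city: { type: 'string' } }, required: ['city'] }
      }
    }]

    response = OpenAIExt.chat(messages: [{ user: 'Weather in Lisbon?' }], tools: weather_tools)
    if response.functions?  # now means "any function tool calls present"
      tool_messages = response.functions_run_all(context: WeatherContext.new)
      # => [{ tool_call_id: '...', role: :tool, name: 'get_weather', content: '{"city":"Lisbon",...}' }]
    end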
data/lib/openaiext.rb CHANGED
@@ -1,82 +1,139 @@
 require 'openai'
-
 require 'openaiext/model'
 require 'openaiext/messages'
 require 'openaiext/response_extender'
-require 'openaiext/agent'
 
 module OpenAIExt
   MAX_TOKENS = ENV.fetch('OPENAI_MAX_TOKENS', 16_383).to_i
 
   def self.embeddings(input, model: 'text-embedding-3-large')
-    response = OpenAI::Client.new.embeddings(parameters: { input:, model: })
-    def response.embeddings = dig('data', 0, 'embedding')
+    client = OpenAI::Client.new
+    response = client.embeddings(parameters: { input: input, model: model })
+    def response.embeddings
+      dig('data', 0, 'embedding')
+    end
     response
   end
 
-  def self.vision(prompt:, image_url:, model: :gpt_advanced, response_format: nil, max_tokens: MAX_TOKENS, store: true, metadata: nil, tools: nil, auto_run_functions: false, function_context: nil)
-    message_content = [{ type: :text, text: prompt }, { type: :image_url, image_url: { url: image_url } }]
-    chat(messages: [{ role: :user, content: message_content }], model:, response_format:, max_tokens:, store:, tools:, auto_run_functions:, function_context:)
-  end
+  def self.vision(prompt:, image_url:, model: :gpt_advanced, response_format: nil,
+                  max_tokens: MAX_TOKENS, store: true, metadata: nil, tools: nil,
+                  auto_run_functions: false, function_context: nil)
+    message_content = [
+      { type: :text, text: prompt },
+      { type: :image_url, image_url: { url: image_url } }
+    ]
+    chat(
+      messages: [{ role: :user, content: message_content }],
+      model: model,
+      response_format: response_format,
+      max_tokens: max_tokens,
+      store: store,
+      tools: tools,
+      auto_run_functions: auto_run_functions,
+      function_context: function_context
+    )
+  end
 
-  def self.single_prompt(prompt:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS, store: true, metadata: nil, tools: nil, auto_run_functions: false, function_context: nil, temperature: nil, top_p: nil, frequency_penalty: nil, presence_penalty: nil, prediction: nil)
-    chat(messages: [{ user: prompt }], model:, response_format:, max_tokens:, store:, tools:, auto_run_functions:, function_context:, temperature:, top_p:, frequency_penalty:, presence_penalty:, prediction:)
+  def self.single_prompt(prompt:, model: :gpt_basic, response_format: nil,
+                         max_tokens: MAX_TOKENS, store: true, metadata: nil, tools: nil,
+                         auto_run_functions: false, function_context: nil, temperature: nil,
+                         top_p: nil, frequency_penalty: nil, presence_penalty: nil, prediction: nil)
+    chat(
+      messages: [{ user: prompt }],
+      model: model,
+      response_format: response_format,
+      max_tokens: max_tokens,
+      store: store,
+      tools: tools,
+      auto_run_functions: auto_run_functions,
+      function_context: function_context,
+      temperature: temperature,
+      top_p: top_p,
+      frequency_penalty: frequency_penalty,
+      presence_penalty: presence_penalty,
+      prediction: prediction
+    )
   end
 
-  def self.single_chat(system:, user:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS, store: true, metadata: nil, tools: nil, auto_run_functions: false, function_context: nil, temperature: nil, top_p: nil, frequency_penalty: nil, presence_penalty: nil, prediction: nil)
-    chat(messages: [{ system: }, { user: }], model:, response_format:, max_tokens:, store:, tools:, auto_run_functions:, function_context:, temperature:, top_p:, frequency_penalty:, presence_penalty:, prediction:)
+  def self.single_chat(system:, user:, model: :gpt_basic, response_format: nil,
+                       max_tokens: MAX_TOKENS, store: true, metadata: nil, tools: nil,
+                       auto_run_functions: false, function_context: nil, temperature: nil,
+                       top_p: nil, frequency_penalty: nil, presence_penalty: nil, prediction: nil)
+    chat(
+      messages: [{ system: system }, { user: user }],
+      model: model,
+      response_format: response_format,
+      max_tokens: max_tokens,
+      store: store,
+      tools: tools,
+      auto_run_functions: auto_run_functions,
+      function_context: function_context,
+      temperature: temperature,
+      top_p: top_p,
+      frequency_penalty: frequency_penalty,
+      presence_penalty: presence_penalty,
+      prediction: prediction
+    )
   end
 
-  def self.chat(messages:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS, store: true, metadata: nil, tools: nil, auto_run_functions: false, function_context: nil, temperature: nil, top_p: nil, frequency_penalty: nil, presence_penalty: nil, prediction: nil)
-    model = OpenAIExt::Model.select(model)
-    is_o1_model = model.start_with?('o1')
+  def self.chat(messages:, model: :gpt_basic, response_format: nil, max_tokens: MAX_TOKENS,
+                store: true, metadata: nil, tools: nil, auto_run_functions: false,
+                function_context: nil, temperature: nil, top_p: nil, frequency_penalty: nil,
+                presence_penalty: nil, prediction: nil)
+    selected_model = OpenAIExt::Model.select(model)
+    is_reasoning_model = selected_model.start_with?('o')
 
     messages = OpenAIExt::Messages.new(messages) unless messages.is_a?(OpenAIExt::Messages)
-
-    parameters = { model:, messages:, store: }
-    parameters[:metadata] = metadata if metadata
 
-    # o1 family models doesn't support max_tokens params. Instead, use max_completion_tokens
-    parameters[:max_completion_tokens] = max_tokens if is_o1_model
-    parameters[:max_tokens] = max_tokens unless is_o1_model
+    parameters = { model: selected_model, messages: messages, store: store }
+    parameters[:metadata] = metadata if metadata
 
-    parameters[:response_format] = { type: 'json_object' } if response_format.eql?(:json)
-    parameters[:tools] = tools if tools
+    if is_reasoning_model
+      parameters[:max_completion_tokens] = max_tokens
+    else
+      parameters[:max_tokens] = max_tokens
+    end
 
-    parameters[:temperature] = temperature if temperature
-    parameters[:top_p] = top_p if top_p
-    parameters[:frequency_penalty] = frequency_penalty if frequency_penalty
-    parameters[:presence_penalty] = presence_penalty if presence_penalty
-    parameters[:prediction] = prediction if prediction
+    parameters[:response_format] = { type: 'json_object' } if response_format == :json
+    parameters[:tools] = tools if tools
+    parameters[:temperature] = temperature if temperature
+    parameters[:top_p] = top_p if top_p
+    parameters[:frequency_penalty] = frequency_penalty if frequency_penalty
+    parameters[:presence_penalty] = presence_penalty if presence_penalty
+    parameters[:prediction] = prediction if prediction
 
     begin
-      response = OpenAI::Client.new.chat(parameters:)
-    rescue => e
-      raise "Error in OpenAI chat: #{e.message}\nParameters: #{parameters.inspect}"
+      client = OpenAI::Client.new
+      response = client.chat(parameters: parameters)
+    rescue StandardError => e
+      raise "Error communicating with OpenAI: #{e.message}\nParameters: #{parameters.inspect}"
    end
-
-    response[:chat_params] = parameters
+
+    response[:chat_params] = parameters
     response.extend(ResponseExtender)
 
     if response.functions? && auto_run_functions
-      raise 'Function context not provided for auto-running functions' if function_context.nil?
+      raise 'Function context not provided for automatic execution' if function_context.nil?
+
       parameters[:messages] << response.message
       parameters[:messages] += response.functions_run_all(context: function_context)
 
-      response = chat(**parameters.except(:chat_params))
+      response = chat(**parameters.reject { |k, _| k == :chat_params })
     end
-
+
     response
   end
 
-  def self.models = OpenAI::Client.new.models.list
+  def self.models
+    OpenAI::Client.new.models.list
+  end
 
   def self.load_config
     OpenAI.configure do |config|
-      config.access_token = ENV.fetch('OPENAI_ACCESS_TOKEN')
-      config.organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID')
-      config.request_timeout = 300
-      config.log_errors = true
+      config.access_token = ENV.fetch('OPENAI_ACCESS_TOKEN')
+      config.organization_id = ENV.fetch('OPENAI_ORGANIZATION_ID')
+      config.request_timeout = 300
+      config.log_errors = true
     end
   end
 end
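
The chat entry point now branches on the resolved model name: anything starting with 'o' is treated as a reasoning model and gets max_completion_tokens instead of max_tokens (a broader test than the old o1-only prefix check). A minimal sketch of the public API under that assumption, with valid credentials in the environment:

    require 'openaiext'

    OpenAIExt.load_config  # reads OPENAI_ACCESS_TOKEN and OPENAI_ORGANIZATION_ID

    # GPT alias resolves to 'gpt-4o-mini' and sends max_tokens.
    reply = OpenAIExt.single_chat(system: 'Answer in one word.', user: 'Color of the sky?')
    puts reply.content

    # Reasoning alias resolves to 'o1-mini' and sends max_completion_tokens instead.
    answer = OpenAIExt.single_prompt(prompt: 'What is 17 * 24?', model: :reasoning_basic)
    puts answer.content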
metadata CHANGED
@@ -1,11 +1,10 @@
 --- !ruby/object:Gem::Specification
 name: openaiext
 version: !ruby/object:Gem::Version
-  version: 0.0.8
+  version: 0.0.9
 platform: ruby
 authors:
 - Gedean Dias
-autorequire:
 bindir: bin
 cert_chain: []
 date: 2024-11-07 00:00:00.000000000 Z
@@ -46,7 +45,6 @@ extra_rdoc_files: []
 files:
 - README.md
 - lib/openaiext.rb
-- lib/openaiext/agent.rb
 - lib/openaiext/messages.rb
 - lib/openaiext/model.rb
 - lib/openaiext/response_extender.rb
@@ -54,7 +52,6 @@ homepage: https://github.com/gedean/openaiext
 licenses:
 - MIT
 metadata: {}
-post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -69,8 +66,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.5.23
-signing_key:
+rubygems_version: 3.6.3
 specification_version: 4
 summary: Ruby OpenAI Extended
 test_files: []
data/lib/openaiext/agent.rb DELETED
@@ -1,64 +0,0 @@
-module OpenAIExt
-  class Agent
-    extend OpenAI
-
-    attr_reader :assistant, :thread, :instructions, :vector_store_id
-
-    def initialize(assistant_id: nil, thread_id: nil, thread_instructions: nil, vector_store_id: nil)
-      @openai_client = OpenAI::Client.new
-
-      assistant_id ||= ENV.fetch('OPENAI_ASSISTANT_ID')
-      @assistant = @openai_client.assistants.retrieve(id: assistant_id)
-
-      thread_params = {}
-
-      # Only one vector store can be attached, according to the OpenAI API documentation
-      @vector_store_id = vector_store_id
-      thread_params = { tool_resources: { file_search: { vector_store_ids: [vector_store_id] } } } if @vector_store_id
-
-      thread_id ||= @openai_client.threads.create(parameters: thread_params)['id']
-      @thread = @openai_client.threads.retrieve(id: thread_id)
-
-      @instructions = thread_instructions || @assistant['instructions']
-    end
-
-    def add_message(text, role: 'user') = @openai_client.messages.create(thread_id: @thread['id'], parameters: { role: role, content: text })
-    def messages = @openai_client.messages.list(thread_id: @thread['id'])
-    def last_message = messages['data'].first['content'].first['text']['value']
-    def runs = @openai_client.runs.list(thread_id: @thread['id'])
-
-    def run(instructions: nil, additional_instructions: nil, additional_message: nil, model: nil, tool_choice: nil)
-      params = { assistant_id: @assistant['id'] }
-
-      params[:instructions] = instructions || @instructions
-      params[:additional_instructions] = additional_instructions unless additional_instructions.nil?
-      params[:tool_choice] = tool_choice unless tool_choice.nil?
-
-      params[:additional_messages] = [{ role: :user, content: additional_message }] unless additional_message.nil?
-
-      params[:model] = OpenAIExt::Model.select(model) || @assistant['model']
-
-      run_id = @openai_client.runs.create(thread_id: @thread['id'], parameters: params)['id']
-
-      loop do
-        response = @openai_client.runs.retrieve(id: run_id, thread_id: @thread['id'])
-
-        case response['status']
-        when 'queued', 'in_progress', 'cancelling'
-          puts 'Status: Waiting AI Processing finish'
-          sleep 1
-        when 'completed'
-          puts last_message
-          break
-        when 'requires_action'
-          # Handle tool calls (see below)
-        when 'cancelled', 'failed', 'expired'
-          puts response['last_error'].inspect
-          break # or `exit`
-        else
-          puts "Unknown status response: #{status}"
-        end
-      end
-    end
-  end
-end