openai 0.14.0 → 0.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +18 -0
- data/README.md +3 -3
- data/lib/openai/models/audio/speech_create_params.rb +0 -9
- data/lib/openai/models/chat/chat_completion.rb +2 -2
- data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
- data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
- data/lib/openai/models/chat/completion_create_params.rb +2 -2
- data/lib/openai/models/function_definition.rb +1 -1
- data/lib/openai/models/image_edit_params.rb +4 -1
- data/lib/openai/models/image_generate_params.rb +4 -1
- data/lib/openai/models/images_response.rb +2 -5
- data/lib/openai/models/responses/response.rb +2 -2
- data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
- data/lib/openai/models/responses/response_create_params.rb +2 -2
- data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
- data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
- data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
- data/lib/openai/models/responses/response_stream_event.rb +1 -7
- data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
- data/lib/openai/models/responses/response_text_done_event.rb +66 -1
- data/lib/openai/resources/images.rb +6 -6
- data/lib/openai/resources/responses.rb +2 -2
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +0 -2
- data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
- data/rbi/openai/models/chat/chat_completion.rbi +3 -3
- data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
- data/rbi/openai/models/chat/completion_create_params.rbi +3 -3
- data/rbi/openai/models/function_definition.rbi +2 -2
- data/rbi/openai/models/image_edit_params.rbi +6 -0
- data/rbi/openai/models/image_generate_params.rbi +6 -0
- data/rbi/openai/models/images_response.rbi +2 -2
- data/rbi/openai/models/responses/response.rbi +3 -3
- data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
- data/rbi/openai/models/responses/response_create_params.rbi +3 -3
- data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
- data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
- data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
- data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
- data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
- data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
- data/rbi/openai/resources/chat/completions.rbi +2 -2
- data/rbi/openai/resources/images.rbi +22 -10
- data/rbi/openai/resources/responses.rbi +2 -2
- data/sig/openai/models/audio/speech_create_params.rbs +0 -6
- data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
- data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
- data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
- data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
- data/sig/openai/models/responses/response_stream_event.rbs +0 -2
- data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
- data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
- metadata +2 -8
- data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
- data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
- data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
- data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
- data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
- data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
@@ -12,8 +12,8 @@ module OpenAI
   )
 end

-#
-sig { returns(
+# A JSON string containing the finalized arguments for the MCP tool call.
+sig { returns(String) }
 attr_accessor :arguments

 # The unique identifier of the MCP tool call item being processed.
@@ -35,7 +35,7 @@ module OpenAI
 # Emitted when the arguments for an MCP tool call are finalized.
 sig do
   params(
-    arguments:
+    arguments: String,
     item_id: String,
     output_index: Integer,
     sequence_number: Integer,
@@ -43,7 +43,7 @@ module OpenAI
   ).returns(T.attached_class)
 end
 def self.new(
-  #
+  # A JSON string containing the finalized arguments for the MCP tool call.
   arguments:,
   # The unique identifier of the MCP tool call item being processed.
   item_id:,
@@ -59,7 +59,7 @@ module OpenAI
 sig do
   override.returns(
     {
-      arguments:
+      arguments: String,
       item_id: String,
       output_index: Integer,
       sequence_number: Integer,
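The `arguments` payload on the finalized-arguments event is now typed as a plain `String` of JSON, so consumers parse it themselves. A minimal sketch, assuming `event` is an already-received `OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent`; the handler name and the surrounding stream loop are illustrative, not part of this diff:

```ruby
require "json"

# Hypothetical handler; `event` is assumed to be an
# OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent as typed above.
def handle_mcp_arguments_done(event)
  # `arguments` is a JSON string per the new signature, so decode it explicitly.
  args = JSON.parse(event.arguments)
  puts "MCP call #{event.item_id} (output ##{event.output_index}) " \
       "finalized with arguments: #{args.inspect}"
end
```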
@@ -12,6 +12,14 @@ module OpenAI
   )
 end

+# The ID of the MCP tool call item that completed.
+sig { returns(String) }
+attr_accessor :item_id
+
+# The index of the output item that completed.
+sig { returns(Integer) }
+attr_accessor :output_index
+
 # The sequence number of this event.
 sig { returns(Integer) }
 attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI

 # Emitted when an MCP tool call has completed successfully.
 sig do
-  params(
-
-
+  params(
+    item_id: String,
+    output_index: Integer,
+    sequence_number: Integer,
+    type: Symbol
+  ).returns(T.attached_class)
 end
 def self.new(
+  # The ID of the MCP tool call item that completed.
+  item_id:,
+  # The index of the output item that completed.
+  output_index:,
   # The sequence number of this event.
   sequence_number:,
   # The type of the event. Always 'response.mcp_call.completed'.
@@ -34,7 +49,16 @@ module OpenAI
 )
 end

-sig
+sig do
+  override.returns(
+    {
+      item_id: String,
+      output_index: Integer,
+      sequence_number: Integer,
+      type: Symbol
+    }
+  )
+end
 def to_hash
 end
 end
@@ -12,6 +12,14 @@ module OpenAI
   )
 end

+# The ID of the MCP tool call item that failed.
+sig { returns(String) }
+attr_accessor :item_id
+
+# The index of the output item that failed.
+sig { returns(Integer) }
+attr_accessor :output_index
+
 # The sequence number of this event.
 sig { returns(Integer) }
 attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI

 # Emitted when an MCP tool call has failed.
 sig do
-  params(
-
-
+  params(
+    item_id: String,
+    output_index: Integer,
+    sequence_number: Integer,
+    type: Symbol
+  ).returns(T.attached_class)
 end
 def self.new(
+  # The ID of the MCP tool call item that failed.
+  item_id:,
+  # The index of the output item that failed.
+  output_index:,
   # The sequence number of this event.
   sequence_number:,
   # The type of the event. Always 'response.mcp_call.failed'.
@@ -34,7 +49,16 @@ module OpenAI
 )
 end

-sig
+sig do
+  override.returns(
+    {
+      item_id: String,
+      output_index: Integer,
+      sequence_number: Integer,
+      type: Symbol
+    }
+  )
+end
 def to_hash
 end
 end
@@ -12,6 +12,14 @@ module OpenAI
   )
 end

+# The ID of the MCP tool call item that produced this output.
+sig { returns(String) }
+attr_accessor :item_id
+
+# The index of the output item that was processed.
+sig { returns(Integer) }
+attr_accessor :output_index
+
 # The sequence number of this event.
 sig { returns(Integer) }
 attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI

 # Emitted when the list of available MCP tools has been successfully retrieved.
 sig do
-  params(
-
-
+  params(
+    item_id: String,
+    output_index: Integer,
+    sequence_number: Integer,
+    type: Symbol
+  ).returns(T.attached_class)
 end
 def self.new(
+  # The ID of the MCP tool call item that produced this output.
+  item_id:,
+  # The index of the output item that was processed.
+  output_index:,
   # The sequence number of this event.
   sequence_number:,
   # The type of the event. Always 'response.mcp_list_tools.completed'.
@@ -34,7 +49,16 @@ module OpenAI
 )
 end

-sig
+sig do
+  override.returns(
+    {
+      item_id: String,
+      output_index: Integer,
+      sequence_number: Integer,
+      type: Symbol
+    }
+  )
+end
 def to_hash
 end
 end
@@ -12,6 +12,14 @@ module OpenAI
   )
 end

+# The ID of the MCP tool call item that failed.
+sig { returns(String) }
+attr_accessor :item_id
+
+# The index of the output item that failed.
+sig { returns(Integer) }
+attr_accessor :output_index
+
 # The sequence number of this event.
 sig { returns(Integer) }
 attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI

 # Emitted when the attempt to list available MCP tools has failed.
 sig do
-  params(
-
-
+  params(
+    item_id: String,
+    output_index: Integer,
+    sequence_number: Integer,
+    type: Symbol
+  ).returns(T.attached_class)
 end
 def self.new(
+  # The ID of the MCP tool call item that failed.
+  item_id:,
+  # The index of the output item that failed.
+  output_index:,
   # The sequence number of this event.
   sequence_number:,
   # The type of the event. Always 'response.mcp_list_tools.failed'.
@@ -34,7 +49,16 @@ module OpenAI
 )
 end

-sig
+sig do
+  override.returns(
+    {
+      item_id: String,
+      output_index: Integer,
+      sequence_number: Integer,
+      type: Symbol
+    }
+  )
+end
 def to_hash
 end
 end
@@ -12,6 +12,14 @@ module OpenAI
   )
 end

+# The ID of the MCP tool call item that is being processed.
+sig { returns(String) }
+attr_accessor :item_id
+
+# The index of the output item that is being processed.
+sig { returns(Integer) }
+attr_accessor :output_index
+
 # The sequence number of this event.
 sig { returns(Integer) }
 attr_accessor :sequence_number
@@ -23,11 +31,18 @@ module OpenAI
 # Emitted when the system is in the process of retrieving the list of available
 # MCP tools.
 sig do
-  params(
-
-
+  params(
+    item_id: String,
+    output_index: Integer,
+    sequence_number: Integer,
+    type: Symbol
+  ).returns(T.attached_class)
 end
 def self.new(
+  # The ID of the MCP tool call item that is being processed.
+  item_id:,
+  # The index of the output item that is being processed.
+  output_index:,
   # The sequence number of this event.
   sequence_number:,
   # The type of the event. Always 'response.mcp_list_tools.in_progress'.
@@ -35,7 +50,16 @@ module OpenAI
 )
 end

-sig
+sig do
+  override.returns(
+    {
+      item_id: String,
+      output_index: Integer,
+      sequence_number: Integer,
+      type: Symbol
+    }
+  )
+end
 def to_hash
 end
 end
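All of the MCP lifecycle events above gain `item_id` and `output_index` accessors alongside `sequence_number`, so stream consumers can tie each event back to the output item it describes. A rough sketch, assuming `event` is one of these already-parsed event objects; the dispatcher itself is an assumption, not part of the gem's documented API:

```ruby
# Hypothetical dispatcher over already-received Responses stream events.
def log_mcp_lifecycle(event)
  case event
  when OpenAI::Responses::ResponseMcpCallCompletedEvent
    puts "MCP call #{event.item_id} completed (output ##{event.output_index})"
  when OpenAI::Responses::ResponseMcpCallFailedEvent
    puts "MCP call #{event.item_id} failed (output ##{event.output_index})"
  when OpenAI::Responses::ResponseMcpListToolsInProgressEvent
    puts "Listing MCP tools for item #{event.item_id}..."
  when OpenAI::Responses::ResponseMcpListToolsFailedEvent
    puts "Could not list MCP tools for item #{event.item_id}"
  end
end
```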
@@ -59,8 +59,6 @@ module OpenAI
 OpenAI::Responses::ResponseMcpListToolsInProgressEvent,
 OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent,
 OpenAI::Responses::ResponseQueuedEvent,
-OpenAI::Responses::ResponseReasoningDeltaEvent,
-OpenAI::Responses::ResponseReasoningDoneEvent,
 OpenAI::Responses::ResponseReasoningSummaryDeltaEvent,
 OpenAI::Responses::ResponseReasoningSummaryDoneEvent
 )
@@ -24,6 +24,12 @@ module OpenAI
 sig { returns(String) }
 attr_accessor :item_id

+# The log probabilities of the tokens in the delta.
+sig do
+  returns(T::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob])
+end
+attr_accessor :logprobs
+
 # The index of the output item that the text delta was added to.
 sig { returns(Integer) }
 attr_accessor :output_index
@@ -42,6 +48,10 @@ module OpenAI
   content_index: Integer,
   delta: String,
   item_id: String,
+  logprobs:
+    T::Array[
+      OpenAI::Responses::ResponseTextDeltaEvent::Logprob::OrHash
+    ],
   output_index: Integer,
   sequence_number: Integer,
   type: Symbol
@@ -54,6 +64,8 @@ module OpenAI
   delta:,
   # The ID of the output item that the text delta was added to.
   item_id:,
+  # The log probabilities of the tokens in the delta.
+  logprobs:,
   # The index of the output item that the text delta was added to.
   output_index:,
   # The sequence number for this event.
@@ -69,6 +81,8 @@ module OpenAI
   content_index: Integer,
   delta: String,
   item_id: String,
+  logprobs:
+    T::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob],
   output_index: Integer,
   sequence_number: Integer,
   type: Symbol
@@ -77,6 +91,123 @@ module OpenAI
 end
 def to_hash
 end
+
+class Logprob < OpenAI::Internal::Type::BaseModel
+  OrHash =
+    T.type_alias do
+      T.any(
+        OpenAI::Responses::ResponseTextDeltaEvent::Logprob,
+        OpenAI::Internal::AnyHash
+      )
+    end
+
+  # A possible text token.
+  sig { returns(String) }
+  attr_accessor :token
+
+  # The log probability of this token.
+  sig { returns(Float) }
+  attr_accessor :logprob
+
+  # The log probability of the top 20 most likely tokens.
+  sig do
+    returns(
+      T.nilable(
+        T::Array[
+          OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob
+        ]
+      )
+    )
+  end
+  attr_reader :top_logprobs
+
+  sig do
+    params(
+      top_logprobs:
+        T::Array[
+          OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob::OrHash
+        ]
+    ).void
+  end
+  attr_writer :top_logprobs
+
+  # A logprob is the logarithmic probability that the model assigns to producing a
+  # particular token at a given position in the sequence. Less-negative (higher)
+  # logprob values indicate greater model confidence in that token choice.
+  sig do
+    params(
+      token: String,
+      logprob: Float,
+      top_logprobs:
+        T::Array[
+          OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob::OrHash
+        ]
+    ).returns(T.attached_class)
+  end
+  def self.new(
+    # A possible text token.
+    token:,
+    # The log probability of this token.
+    logprob:,
+    # The log probability of the top 20 most likely tokens.
+    top_logprobs: nil
+  )
+  end
+
+  sig do
+    override.returns(
+      {
+        token: String,
+        logprob: Float,
+        top_logprobs:
+          T::Array[
+            OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob
+          ]
+      }
+    )
+  end
+  def to_hash
+  end
+
+  class TopLogprob < OpenAI::Internal::Type::BaseModel
+    OrHash =
+      T.type_alias do
+        T.any(
+          OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob,
+          OpenAI::Internal::AnyHash
+        )
+      end
+
+    # A possible text token.
+    sig { returns(T.nilable(String)) }
+    attr_reader :token
+
+    sig { params(token: String).void }
+    attr_writer :token
+
+    # The log probability of this token.
+    sig { returns(T.nilable(Float)) }
+    attr_reader :logprob
+
+    sig { params(logprob: Float).void }
+    attr_writer :logprob
+
+    sig do
+      params(token: String, logprob: Float).returns(T.attached_class)
+    end
+    def self.new(
+      # A possible text token.
+      token: nil,
+      # The log probability of this token.
+      logprob: nil
+    )
+    end
+
+    sig { override.returns({ token: String, logprob: Float }) }
+    def to_hash
+    end
+  end
+end
 end
 end
 end
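Text delta events now carry a `logprobs` array whose entries expose `token`, `logprob`, and an optional `top_logprobs` list (the done event below gains the same shape). A short sketch of reading them, assuming `event` is an already-received `OpenAI::Responses::ResponseTextDeltaEvent`; the handler is illustrative only:

```ruby
# Hypothetical handler for a single text delta event.
def print_delta_logprobs(event)
  event.logprobs.each do |lp|
    # Each Logprob has a token, its log probability, and optionally the
    # top alternative tokens (top_logprobs may be nil per the signature).
    line = format("%-12s %.4f", lp.token.inspect, lp.logprob)
    alts = (lp.top_logprobs || []).map { |t| "#{t.token}=#{t.logprob&.round(4)}" }
    line += "  alternatives: #{alts.join(', ')}" unless alts.empty?
    puts line
  end
end
```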
@@ -20,6 +20,12 @@ module OpenAI
 sig { returns(String) }
 attr_accessor :item_id

+# The log probabilities of the tokens in the delta.
+sig do
+  returns(T::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob])
+end
+attr_accessor :logprobs
+
 # The index of the output item that the text content is finalized.
 sig { returns(Integer) }
 attr_accessor :output_index
@@ -41,6 +47,10 @@ module OpenAI
 params(
   content_index: Integer,
   item_id: String,
+  logprobs:
+    T::Array[
+      OpenAI::Responses::ResponseTextDoneEvent::Logprob::OrHash
+    ],
   output_index: Integer,
   sequence_number: Integer,
   text: String,
@@ -52,6 +62,8 @@ module OpenAI
   content_index:,
   # The ID of the output item that the text content is finalized.
   item_id:,
+  # The log probabilities of the tokens in the delta.
+  logprobs:,
   # The index of the output item that the text content is finalized.
   output_index:,
   # The sequence number for this event.
@@ -68,6 +80,8 @@ module OpenAI
 {
   content_index: Integer,
   item_id: String,
+  logprobs:
+    T::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob],
   output_index: Integer,
   sequence_number: Integer,
   text: String,
@@ -77,6 +91,123 @@ module OpenAI
 end
 def to_hash
 end
+
+class Logprob < OpenAI::Internal::Type::BaseModel
+  OrHash =
+    T.type_alias do
+      T.any(
+        OpenAI::Responses::ResponseTextDoneEvent::Logprob,
+        OpenAI::Internal::AnyHash
+      )
+    end
+
+  # A possible text token.
+  sig { returns(String) }
+  attr_accessor :token
+
+  # The log probability of this token.
+  sig { returns(Float) }
+  attr_accessor :logprob
+
+  # The log probability of the top 20 most likely tokens.
+  sig do
+    returns(
+      T.nilable(
+        T::Array[
+          OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob
+        ]
+      )
+    )
+  end
+  attr_reader :top_logprobs
+
+  sig do
+    params(
+      top_logprobs:
+        T::Array[
+          OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob::OrHash
+        ]
+    ).void
+  end
+  attr_writer :top_logprobs
+
+  # A logprob is the logarithmic probability that the model assigns to producing a
+  # particular token at a given position in the sequence. Less-negative (higher)
+  # logprob values indicate greater model confidence in that token choice.
+  sig do
+    params(
+      token: String,
+      logprob: Float,
+      top_logprobs:
+        T::Array[
+          OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob::OrHash
+        ]
+    ).returns(T.attached_class)
+  end
+  def self.new(
+    # A possible text token.
+    token:,
+    # The log probability of this token.
+    logprob:,
+    # The log probability of the top 20 most likely tokens.
+    top_logprobs: nil
+  )
+  end
+
+  sig do
+    override.returns(
+      {
+        token: String,
+        logprob: Float,
+        top_logprobs:
+          T::Array[
+            OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob
+          ]
+      }
+    )
+  end
+  def to_hash
+  end
+
+  class TopLogprob < OpenAI::Internal::Type::BaseModel
+    OrHash =
+      T.type_alias do
+        T.any(
+          OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob,
+          OpenAI::Internal::AnyHash
+        )
+      end
+
+    # A possible text token.
+    sig { returns(T.nilable(String)) }
+    attr_reader :token
+
+    sig { params(token: String).void }
+    attr_writer :token
+
+    # The log probability of this token.
+    sig { returns(T.nilable(Float)) }
+    attr_reader :logprob
+
+    sig { params(logprob: Float).void }
+    attr_writer :logprob
+
+    sig do
+      params(token: String, logprob: Float).returns(T.attached_class)
+    end
+    def self.new(
+      # A possible text token.
+      token: nil,
+      # The log probability of this token.
+      logprob: nil
+    )
+    end
+
+    sig { override.returns({ token: String, logprob: Float }) }
+    def to_hash
+    end
+  end
+end
 end
 end
 end
@@ -233,7 +233,7 @@ module OpenAI
 # - If set to 'auto', then the request will be processed with the service tier
 # configured in the Project settings. Unless otherwise configured, the Project
 # will use 'default'.
-# - If set to 'default', then the
+# - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
 # 'priority', then the request will be processed with the corresponding service
@@ -522,7 +522,7 @@ module OpenAI
 # - If set to 'auto', then the request will be processed with the service tier
 # configured in the Project settings. Unless otherwise configured, the Project
 # will use 'default'.
-# - If set to 'default', then the
+# - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
 # 'priority', then the request will be processed with the corresponding service
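Both hunks fix the truncated description of the 'default' service tier in the doc comments. For reference, a hypothetical example of selecting a tier explicitly via the `service_tier` parameter; the client setup, model name, and message are assumptions for illustration, not taken from this diff:

```ruby
# Illustrative only: request the standard ('default') service tier explicitly.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

completion = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{ role: "user", content: "Hello!" }],
  service_tier: "default" # standard pricing and performance, per the doc fix above
)
puts completion.service_tier
```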