openai 0.4.1 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +27 -0
- data/README.md +1 -1
- data/lib/openai/internal/util.rb +5 -1
- data/lib/openai/models/audio/transcription_text_delta_event.rb +3 -3
- data/lib/openai/models/audio/transcription_text_done_event.rb +3 -3
- data/lib/openai/models/chat/chat_completion.rb +4 -4
- data/lib/openai/models/chat/chat_completion_chunk.rb +4 -4
- data/lib/openai/models/chat/completion_create_params.rb +4 -4
- data/lib/openai/models/fine_tuning/alpha/grader_run_params.rb +17 -30
- data/lib/openai/models/fine_tuning/fine_tuning_job.rb +3 -5
- data/lib/openai/models/graders/multi_grader.rb +11 -4
- data/lib/openai/models/image_edit_params.rb +2 -2
- data/lib/openai/models/responses/response.rb +4 -4
- data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +5 -5
- data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +5 -5
- data/lib/openai/models/responses/response_create_params.rb +6 -4
- data/lib/openai/models/responses/response_includable.rb +3 -0
- data/lib/openai/models/responses/response_output_text.rb +120 -4
- data/lib/openai/models/responses/response_retrieve_params.rb +11 -1
- data/lib/openai/models/responses/response_stream_event.rb +2 -2
- data/lib/openai/resources/beta/threads/messages.rb +11 -0
- data/lib/openai/resources/beta/threads/runs/steps.rb +5 -0
- data/lib/openai/resources/beta/threads/runs.rb +17 -0
- data/lib/openai/resources/beta/threads.rb +15 -2
- data/lib/openai/resources/containers/files/content.rb +3 -2
- data/lib/openai/resources/fine_tuning/alpha/graders.rb +6 -3
- data/lib/openai/resources/responses.rb +49 -3
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +3 -1
- data/rbi/openai/models/audio/transcription_text_delta_event.rbi +4 -4
- data/rbi/openai/models/audio/transcription_text_done_event.rbi +4 -4
- data/rbi/openai/models/chat/chat_completion.rbi +6 -6
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +6 -6
- data/rbi/openai/models/chat/completion_create_params.rbi +6 -6
- data/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi +24 -43
- data/rbi/openai/models/fine_tuning/fine_tuning_job.rbi +2 -3
- data/rbi/openai/models/graders/multi_grader.rbi +27 -32
- data/rbi/openai/models/image_edit_params.rbi +3 -3
- data/rbi/openai/models/responses/response.rbi +6 -6
- data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +3 -3
- data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +3 -3
- data/rbi/openai/models/responses/response_create_params.rbi +10 -6
- data/rbi/openai/models/responses/response_includable.rbi +7 -0
- data/rbi/openai/models/responses/response_output_text.rbi +189 -1
- data/rbi/openai/models/responses/response_retrieve_params.rbi +11 -0
- data/rbi/openai/resources/chat/completions.rbi +4 -4
- data/rbi/openai/resources/containers/files/content.rbi +1 -1
- data/rbi/openai/resources/fine_tuning/alpha/graders.rbi +10 -5
- data/rbi/openai/resources/images.rbi +1 -1
- data/rbi/openai/resources/responses.rbi +49 -5
- data/sig/openai/models/audio/transcription_text_delta_event.rbs +10 -5
- data/sig/openai/models/audio/transcription_text_done_event.rbs +10 -5
- data/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs +6 -14
- data/sig/openai/models/fine_tuning/fine_tuning_job.rbs +1 -1
- data/sig/openai/models/graders/multi_grader.rbs +7 -7
- data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +4 -4
- data/sig/openai/models/responses/response_includable.rbs +2 -0
- data/sig/openai/models/responses/response_output_text.rbs +104 -2
- data/sig/openai/models/responses/response_retrieve_params.rbs +10 -1
- data/sig/openai/resources/containers/files/content.rbs +1 -1
- data/sig/openai/resources/fine_tuning/alpha/graders.rbs +1 -1
- data/sig/openai/resources/responses.rbs +9 -1
- metadata +2 -2
data/rbi/openai/models/responses/response_retrieve_params.rbi

@@ -31,9 +31,17 @@ module OpenAI
       end

       attr_writer :include

+      # The sequence number of the event after which to start streaming.
+      sig { returns(T.nilable(Integer)) }
+      attr_reader :starting_after
+
+      sig { params(starting_after: Integer).void }
+      attr_writer :starting_after
+
       sig do
         params(
           include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol],
+          starting_after: Integer,
           request_options: OpenAI::RequestOptions::OrHash
         ).returns(T.attached_class)
       end
@@ -41,6 +49,8 @@ module OpenAI
         # Additional fields to include in the response. See the `include` parameter for
         # Response creation above for more information.
         include: nil,
+        # The sequence number of the event after which to start streaming.
+        starting_after: nil,
         request_options: {}
       )
       end
@@ -50,6 +60,7 @@ module OpenAI
         {
           include:
             T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol],
+          starting_after: Integer,
           request_options: OpenAI::RequestOptions
         }
       )
data/rbi/openai/resources/chat/completions.rbi

@@ -235,9 +235,9 @@ module OpenAI
         # utilize scale tier credits until they are exhausted.
         # - If set to 'auto', and the Project is not Scale tier enabled, the request will
         # be processed using the default service tier with a lower uptime SLA and no
-        # latency
+        # latency guarantee.
         # - If set to 'default', the request will be processed using the default service
-        # tier with a lower uptime SLA and no latency
+        # tier with a lower uptime SLA and no latency guarantee.
         # - If set to 'flex', the request will be processed with the Flex Processing
         # service tier.
         # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -522,9 +522,9 @@ module OpenAI
         # utilize scale tier credits until they are exhausted.
         # - If set to 'auto', and the Project is not Scale tier enabled, the request will
         # be processed using the default service tier with a lower uptime SLA and no
-        # latency
+        # latency guarantee.
         # - If set to 'default', the request will be processed using the default service
-        # tier with a lower uptime SLA and no latency
+        # tier with a lower uptime SLA and no latency guarantee.
         # - If set to 'flex', the request will be processed with the Flex Processing
         # service tier.
         # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
data/rbi/openai/resources/fine_tuning/alpha/graders.rbi

@@ -17,18 +17,23 @@ module OpenAI
             OpenAI::Graders::MultiGrader::OrHash
           ),
           model_sample: String,
-          reference_answer:
-            OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants,
+          item: T.anything,
           request_options: OpenAI::RequestOptions::OrHash
         ).returns(OpenAI::Models::FineTuning::Alpha::GraderRunResponse)
       end
       def run(
         # The grader used for the fine-tuning job.
         grader:,
-        # The model sample to be evaluated.
+        # The model sample to be evaluated. This value will be used to populate the
+        # `sample` namespace. See
+        # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+        # The `output_json` variable will be populated if the model sample is a valid JSON
+        # string.
         model_sample:,
-        # The
-
+        # The dataset item provided to the grader. This will be used to populate the
+        # `item` namespace. See
+        # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+        item: nil,
         request_options: {}
       )
       end
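The `reference_answer` argument is gone; grader inputs now flow through the optional `item` hash, which populates the `item` namespace referenced in grader templates. A minimal sketch of the new call shape, assuming an `OPENAI_API_KEY` in the environment; the string-check grader definition and the `expected` field are illustrative, not taken from this diff:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Run a grader against one model sample plus one dataset item.
result = client.fine_tuning.alpha.graders.run(
  grader: {
    type: "string_check",
    name: "exact_match",                 # hypothetical grader definition
    input: "{{sample.output_text}}",
    reference: "{{item.expected}}",
    operation: "eq"
  },
  model_sample: "fuzzy wuzzy was a bear",
  item: {expected: "fuzzy wuzzy was a bear"} # populates the `item` namespace
)

puts result.reward
```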
data/rbi/openai/resources/images.rbi

@@ -64,7 +64,7 @@ module OpenAI
       # The image(s) to edit. Must be a supported image file or an array of images.
       #
       # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
-      #
+      # 50MB. You can provide up to 16 images.
       #
       # For `dall-e-2`, you can only provide one image, and it should be a square `png`
       # file less than 4MB.
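The corrected doc comment spells out the `gpt-image-1` limits: up to 16 input images, each under 50MB. A sketch of passing an array, reusing `client` from the grader sketch above and assuming the two PNG files exist locally:

```ruby
# Multiple images can be supplied as an array for gpt-image-1.
edited = client.images.edit(
  model: "gpt-image-1",
  image: [Pathname("room.png"), Pathname("sofa.png")], # hypothetical files, up to 16
  prompt: "Place the sofa in the room"
)
```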
data/rbi/openai/resources/responses.rbi

@@ -112,6 +112,8 @@ module OpenAI
         # multi-turn conversations when using the Responses API statelessly (like when
         # the `store` parameter is set to `false`, or when an organization is enrolled
         # in the zero data retention program).
+        # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+        #   in code interpreter tool call items.
         include: nil,
         # Inserts a system (or developer) message as the first item in the model's
         # context.
@@ -149,9 +151,9 @@ module OpenAI
         # utilize scale tier credits until they are exhausted.
         # - If set to 'auto', and the Project is not Scale tier enabled, the request will
         # be processed using the default service tier with a lower uptime SLA and no
-        # latency
+        # latency guarantee.
         # - If set to 'default', the request will be processed using the default service
-        # tier with a lower uptime SLA and no latency
+        # tier with a lower uptime SLA and no latency guarantee.
         # - If set to 'flex', the request will be processed with the Flex Processing
         # service tier.
         # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -323,6 +325,8 @@ module OpenAI
         # multi-turn conversations when using the Responses API statelessly (like when
         # the `store` parameter is set to `false`, or when an organization is enrolled
         # in the zero data retention program).
+        # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+        #   in code interpreter tool call items.
         include: nil,
         # Inserts a system (or developer) message as the first item in the model's
         # context.
@@ -360,9 +364,9 @@ module OpenAI
         # utilize scale tier credits until they are exhausted.
         # - If set to 'auto', and the Project is not Scale tier enabled, the request will
         # be processed using the default service tier with a lower uptime SLA and no
-        # latency
+        # latency guarantee.
         # - If set to 'default', the request will be processed using the default service
-        # tier with a lower uptime SLA and no latency
+        # tier with a lower uptime SLA and no latency guarantee.
         # - If set to 'flex', the request will be processed with the Flex Processing
         # service tier.
         # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
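The new `code_interpreter_call.outputs` includable attaches python execution output to code interpreter tool call items. A hedged creation sketch; only the `include` value comes from this diff, the model name and tool configuration are assumptions:

```ruby
response = client.responses.create(
  model: "gpt-4.1",
  input: "Use python to compute 2**32.",
  tools: [{type: "code_interpreter", container: {type: "auto"}}],
  include: [:"code_interpreter_call.outputs"]
)
```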
data/rbi/openai/resources/responses.rbi (continued)

@@ -428,11 +432,15 @@ module OpenAI
         )
       end

+      # See {OpenAI::Resources::Responses#retrieve_streaming} for streaming counterpart.
+      #
       # Retrieves a model response with the given ID.
       sig do
         params(
           response_id: String,
           include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol],
+          starting_after: Integer,
+          stream: T.noreturn,
           request_options: OpenAI::RequestOptions::OrHash
         ).returns(OpenAI::Responses::Response)
       end
@@ -442,6 +450,42 @@ module OpenAI
         # Additional fields to include in the response. See the `include` parameter for
         # Response creation above for more information.
         include: nil,
+        # The sequence number of the event after which to start streaming.
+        starting_after: nil,
+        # There is no need to provide `stream:`. Instead, use `#retrieve_streaming` or
+        # `#retrieve` for streaming and non-streaming use cases, respectively.
+        stream: false,
+        request_options: {}
+      )
+      end
+
+      # See {OpenAI::Resources::Responses#retrieve} for non-streaming counterpart.
+      #
+      # Retrieves a model response with the given ID.
+      sig do
+        params(
+          response_id: String,
+          include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol],
+          starting_after: Integer,
+          stream: T.noreturn,
+          request_options: OpenAI::RequestOptions::OrHash
+        ).returns(
+          OpenAI::Internal::Stream[
+            OpenAI::Responses::ResponseStreamEvent::Variants
+          ]
+        )
+      end
+      def retrieve_streaming(
+        # The ID of the response to retrieve.
+        response_id,
+        # Additional fields to include in the response. See the `include` parameter for
+        # Response creation above for more information.
+        include: nil,
+        # The sequence number of the event after which to start streaming.
+        starting_after: nil,
+        # There is no need to provide `stream:`. Instead, use `#retrieve_streaming` or
+        # `#retrieve` for streaming and non-streaming use cases, respectively.
+        stream: true,
         request_options: {}
       )
       end
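`retrieve_streaming` is the headline addition: together with `starting_after`, it lets you reopen the event stream of an in-flight response and resume after the last sequence number you processed. A resumption sketch with a hypothetical response ID:

```ruby
stream = client.responses.retrieve_streaming(
  "resp_abc123",        # hypothetical ID of an in-progress response
  starting_after: 42    # sequence number of the last event already handled
)

# The returned OpenAI::Internal::Stream is enumerable; events arrive as emitted.
stream.each do |event|
  puts event.type
end
```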
data/rbi/openai/resources/responses.rbi (continued)

@@ -467,7 +511,7 @@ module OpenAI
         params(
           response_id: String,
           request_options: OpenAI::RequestOptions::OrHash
-        ).
+        ).returns(OpenAI::Responses::Response)
       end
       def cancel(
         # The ID of the response to cancel.
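The `cancel` signature also gains a concrete return type, so the cancelled `Response` comes back directly:

```ruby
response = client.responses.cancel("resp_abc123") # hypothetical ID
puts response.status # cancelled, once the cancellation lands
```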
data/sig/openai/models/audio/transcription_text_delta_event.rbs

@@ -31,16 +31,17 @@ module OpenAI
         logprobs: ::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]
       }

-      type logprob =
+      type logprob =
+        { token: String, bytes: ::Array[Integer], logprob: Float }

       class Logprob < OpenAI::Internal::Type::BaseModel
         attr_reader token: String?

         def token=: (String) -> String

-        attr_reader bytes: ::Array[
+        attr_reader bytes: ::Array[Integer]?

-        def bytes=: (::Array[
+        def bytes=: (::Array[Integer]) -> ::Array[Integer]

         attr_reader logprob: Float?

@@ -48,11 +49,15 @@ module OpenAI

         def initialize: (
           ?token: String,
-          ?bytes: ::Array[
+          ?bytes: ::Array[Integer],
           ?logprob: Float
         ) -> void

-        def to_hash: -> {
+        def to_hash: -> {
+          token: String,
+          bytes: ::Array[Integer],
+          logprob: Float
+        }
       end
     end
   end
data/sig/openai/models/audio/transcription_text_done_event.rbs

@@ -31,16 +31,17 @@ module OpenAI
         logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
       }

-      type logprob =
+      type logprob =
+        { token: String, bytes: ::Array[Integer], logprob: Float }

       class Logprob < OpenAI::Internal::Type::BaseModel
         attr_reader token: String?

         def token=: (String) -> String

-        attr_reader bytes: ::Array[
+        attr_reader bytes: ::Array[Integer]?

-        def bytes=: (::Array[
+        def bytes=: (::Array[Integer]) -> ::Array[Integer]

         attr_reader logprob: Float?

@@ -48,11 +49,15 @@ module OpenAI

         def initialize: (
           ?token: String,
-          ?bytes: ::Array[
+          ?bytes: ::Array[Integer],
           ?logprob: Float
         ) -> void

-        def to_hash: -> {
+        def to_hash: -> {
+          token: String,
+          bytes: ::Array[Integer],
+          logprob: Float
+        }
       end
     end
   end
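Both transcription logprob signatures now pin `bytes` to `::Array[Integer]`. A small reading sketch built from these types; constructing the event by hand here is only for illustration:

```ruby
require "openai"

event = OpenAI::Audio::TranscriptionTextDoneEvent.new(
  text: "hello",
  logprobs: [{token: "hello", bytes: [104, 101, 108, 108, 111], logprob: -0.01}]
)

# logprobs is optional, hence the safe navigation.
event.logprobs&.each do |lp|
  printf("%-8s %8.4f %s\n", lp.token, lp.logprob, lp.bytes.inspect)
end
```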
data/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs

@@ -6,7 +6,7 @@ module OpenAI
       {
         grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
         model_sample: String,
-        reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer
+        item: top
       }
       & OpenAI::Internal::Type::request_parameters

@@ -18,19 +18,21 @@ module OpenAI

       attr_accessor model_sample: String

-      attr_accessor reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer
+      attr_reader item: top?
+
+      def item=: (top) -> top

       def initialize: (
         grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
         model_sample: String,
-        reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer,
+        ?item: top,
         ?request_options: OpenAI::request_opts
       ) -> void

       def to_hash: -> {
         grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
         model_sample: String,
-        reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer,
+        item: top,
         request_options: OpenAI::RequestOptions
       }

@@ -46,16 +48,6 @@ module OpenAI

       def self?.variants: -> ::Array[OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader]
     end
-
-    type reference_answer = String | top | ::Array[top] | Float
-
-    module ReferenceAnswer
-      extend OpenAI::Internal::Type::Union
-
-      def self?.variants: -> ::Array[OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer]
-
-      UnionMember2Array: OpenAI::Internal::Type::Converter
-    end
   end
 end
end
data/sig/openai/models/fine_tuning/fine_tuning_job.rbs

@@ -165,7 +165,7 @@ module OpenAI
         n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs
       }

-      type batch_size =
+      type batch_size = :auto | Integer

       module BatchSize
         extend OpenAI::Internal::Type::Union
data/sig/openai/models/graders/multi_grader.rbs

@@ -6,7 +6,7 @@ module OpenAI
     type multi_grader =
       {
         calculate_output: String,
-        graders:
+        graders: OpenAI::Models::Graders::MultiGrader::graders,
         name: String,
         type: :multi
       }
@@ -14,7 +14,7 @@ module OpenAI
     class MultiGrader < OpenAI::Internal::Type::BaseModel
       attr_accessor calculate_output: String

-      attr_accessor graders:
+      attr_accessor graders: OpenAI::Models::Graders::MultiGrader::graders

       attr_accessor name: String

@@ -22,29 +22,29 @@ module OpenAI

       def initialize: (
         calculate_output: String,
-        graders:
+        graders: OpenAI::Models::Graders::MultiGrader::graders,
         name: String,
         ?type: :multi
       ) -> void

       def to_hash: -> {
         calculate_output: String,
-        graders:
+        graders: OpenAI::Models::Graders::MultiGrader::graders,
         name: String,
         type: :multi
       }

-      type
+      type graders =
         OpenAI::Graders::StringCheckGrader
         | OpenAI::Graders::TextSimilarityGrader
         | OpenAI::Graders::PythonGrader
         | OpenAI::Graders::ScoreModelGrader
         | OpenAI::Graders::LabelModelGrader

-      module
+      module Graders
         extend OpenAI::Internal::Type::Union

-        def self?.variants: -> ::Array[OpenAI::Models::Graders::MultiGrader::
+        def self?.variants: -> ::Array[OpenAI::Models::Graders::MultiGrader::graders]
       end
     end
   end
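Note the shape change: `graders` is now typed as a single grader union (`OpenAI::Models::Graders::MultiGrader::graders`), and `calculate_output` references that grader by name. A construction sketch under that reading; the grader definition itself is illustrative:

```ruby
multi = OpenAI::Graders::MultiGrader.new(
  name: "example_multi",
  calculate_output: "exact_match",   # formula referencing the grader's name
  graders: {
    type: "string_check",
    name: "exact_match",
    input: "{{sample.output_text}}",
    reference: "{{item.expected}}",
    operation: "eq"
  }
)
```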
data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs

@@ -6,7 +6,7 @@ module OpenAI
       delta: String,
       output_index: Integer,
       sequence_number: Integer,
-      type: :"response.
+      type: :"response.code_interpreter_call_code.delta"
     }

     class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel
@@ -16,20 +16,20 @@ module OpenAI

       attr_accessor sequence_number: Integer

-      attr_accessor type: :"response.
+      attr_accessor type: :"response.code_interpreter_call_code.delta"

       def initialize: (
         delta: String,
         output_index: Integer,
         sequence_number: Integer,
-        ?type: :"response.
+        ?type: :"response.code_interpreter_call_code.delta"
       ) -> void

       def to_hash: -> {
         delta: String,
         output_index: Integer,
         sequence_number: Integer,
-        type: :"response.
+        type: :"response.code_interpreter_call_code.delta"
       }
     end
   end
data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs

@@ -6,7 +6,7 @@ module OpenAI
       code: String,
       output_index: Integer,
       sequence_number: Integer,
-      type: :"response.
+      type: :"response.code_interpreter_call_code.done"
     }

     class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseModel
@@ -16,20 +16,20 @@ module OpenAI

       attr_accessor sequence_number: Integer

-      attr_accessor type: :"response.
+      attr_accessor type: :"response.code_interpreter_call_code.done"

       def initialize: (
         code: String,
         output_index: Integer,
         sequence_number: Integer,
-        ?type: :"response.
+        ?type: :"response.code_interpreter_call_code.done"
       ) -> void

       def to_hash: -> {
         code: String,
         output_index: Integer,
         sequence_number: Integer,
-        type: :"response.
+        type: :"response.code_interpreter_call_code.done"
       }
     end
   end
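Both code events now carry `response.code_interpreter_call_code.*` type symbols (the old values are truncated in this diff view). A dispatch sketch over a resumed stream, using only names grounded in this diff:

```ruby
stream = client.responses.retrieve_streaming("resp_abc123") # hypothetical ID

stream.each do |event|
  case event.type
  when :"response.code_interpreter_call_code.delta"
    print event.delta                       # code arrives incrementally
  when :"response.code_interpreter_call_code.done"
    puts "\n-- final code --\n#{event.code}"
  end
end
```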
data/sig/openai/models/responses/response_includable.rbs

@@ -6,6 +6,7 @@ module OpenAI
       | :"message.input_image.image_url"
       | :"computer_call_output.output.image_url"
       | :"reasoning.encrypted_content"
+      | :"code_interpreter_call.outputs"

     module ResponseIncludable
       extend OpenAI::Internal::Type::Enum
@@ -14,6 +15,7 @@ module OpenAI
       MESSAGE_INPUT_IMAGE_IMAGE_URL: :"message.input_image.image_url"
       COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL: :"computer_call_output.output.image_url"
       REASONING_ENCRYPTED_CONTENT: :"reasoning.encrypted_content"
+      CODE_INTERPRETER_CALL_OUTPUTS: :"code_interpreter_call.outputs"

       def self?.values: -> ::Array[OpenAI::Models::Responses::response_includable]
     end
data/sig/openai/models/responses/response_output_text.rbs

@@ -5,7 +5,8 @@ module OpenAI
     {
       annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation],
       text: String,
-      type: :output_text
+      type: :output_text,
+      logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob]
     }

     class ResponseOutputText < OpenAI::Internal::Type::BaseModel
@@ -15,21 +16,30 @@ module OpenAI

       attr_accessor type: :output_text

+      attr_reader logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob]?
+
+      def logprobs=: (
+        ::Array[OpenAI::Responses::ResponseOutputText::Logprob]
+      ) -> ::Array[OpenAI::Responses::ResponseOutputText::Logprob]
+
       def initialize: (
         annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation],
         text: String,
+        ?logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob],
         ?type: :output_text
       ) -> void

       def to_hash: -> {
         annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation],
         text: String,
-        type: :output_text
+        type: :output_text,
+        logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob]
       }

       type annotation =
         OpenAI::Responses::ResponseOutputText::Annotation::FileCitation
         | OpenAI::Responses::ResponseOutputText::Annotation::URLCitation
+        | OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation
         | OpenAI::Responses::ResponseOutputText::Annotation::FilePath

       module Annotation
@@ -95,6 +105,43 @@ module OpenAI
         }
       end

+      type container_file_citation =
+        {
+          container_id: String,
+          end_index: Integer,
+          file_id: String,
+          start_index: Integer,
+          type: :container_file_citation
+        }
+
+      class ContainerFileCitation < OpenAI::Internal::Type::BaseModel
+        attr_accessor container_id: String
+
+        attr_accessor end_index: Integer
+
+        attr_accessor file_id: String
+
+        attr_accessor start_index: Integer
+
+        attr_accessor type: :container_file_citation
+
+        def initialize: (
+          container_id: String,
+          end_index: Integer,
+          file_id: String,
+          start_index: Integer,
+          ?type: :container_file_citation
+        ) -> void
+
+        def to_hash: -> {
+          container_id: String,
+          end_index: Integer,
+          file_id: String,
+          start_index: Integer,
+          type: :container_file_citation
+        }
+      end
+
       type file_path = { file_id: String, index: Integer, type: :file_path }

       class FilePath < OpenAI::Internal::Type::BaseModel
@@ -119,6 +166,61 @@ module OpenAI

       def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation]
     end
+
+      type logprob =
+        {
+          token: String,
+          bytes: ::Array[Integer],
+          logprob: Float,
+          top_logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob]
+        }
+
+      class Logprob < OpenAI::Internal::Type::BaseModel
+        attr_accessor token: String
+
+        attr_accessor bytes: ::Array[Integer]
+
+        attr_accessor logprob: Float
+
+        attr_accessor top_logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob]
+
+        def initialize: (
+          token: String,
+          bytes: ::Array[Integer],
+          logprob: Float,
+          top_logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob]
+        ) -> void
+
+        def to_hash: -> {
+          token: String,
+          bytes: ::Array[Integer],
+          logprob: Float,
+          top_logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob]
+        }
+
+        type top_logprob =
+          { token: String, bytes: ::Array[Integer], logprob: Float }
+
+        class TopLogprob < OpenAI::Internal::Type::BaseModel
+          attr_accessor token: String
+
+          attr_accessor bytes: ::Array[Integer]
+
+          attr_accessor logprob: Float
+
+          def initialize: (
+            token: String,
+            bytes: ::Array[Integer],
+            logprob: Float
+          ) -> void
+
+          def to_hash: -> {
+            token: String,
+            bytes: ::Array[Integer],
+            logprob: Float
+          }
+        end
+      end
     end
   end
 end
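`ResponseOutputText` picks up optional `logprobs` and a `ContainerFileCitation` annotation variant. A reading sketch over a retrieved response; the guards are defensive because output items vary by type:

```ruby
response = client.responses.retrieve("resp_abc123") # hypothetical ID

response.output.each do |item|
  next unless item.respond_to?(:content) && item.content

  item.content.each do |part|
    next unless part.is_a?(OpenAI::Responses::ResponseOutputText)

    part.annotations.each do |ann|
      if ann.is_a?(OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation)
        puts "cites #{ann.file_id} in container #{ann.container_id}"
      end
    end

    part.logprobs&.each { |lp| puts "#{lp.token}: #{lp.logprob}" }
  end
end
```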
data/sig/openai/models/responses/response_retrieve_params.rbs

@@ -2,7 +2,10 @@ module OpenAI
 module Models
   module Responses
     type response_retrieve_params =
-      {
+      {
+        include: ::Array[OpenAI::Models::Responses::response_includable],
+        starting_after: Integer
+      }
       & OpenAI::Internal::Type::request_parameters

     class ResponseRetrieveParams < OpenAI::Internal::Type::BaseModel
@@ -15,13 +18,19 @@ module OpenAI
         ::Array[OpenAI::Models::Responses::response_includable]
       ) -> ::Array[OpenAI::Models::Responses::response_includable]

+      attr_reader starting_after: Integer?
+
+      def starting_after=: (Integer) -> Integer
+
       def initialize: (
         ?include: ::Array[OpenAI::Models::Responses::response_includable],
+        ?starting_after: Integer,
         ?request_options: OpenAI::request_opts
       ) -> void

       def to_hash: -> {
         include: ::Array[OpenAI::Models::Responses::response_includable],
+        starting_after: Integer,
         request_options: OpenAI::RequestOptions
       }
     end