openai 0.23.3 → 0.25.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +17 -0
  3. data/README.md +1 -1
  4. data/lib/openai/errors.rb +25 -11
  5. data/lib/openai/internal/conversation_cursor_page.rb +1 -1
  6. data/lib/openai/internal/cursor_page.rb +1 -1
  7. data/lib/openai/internal/page.rb +1 -1
  8. data/lib/openai/internal/stream.rb +1 -0
  9. data/lib/openai/internal/transport/base_client.rb +11 -7
  10. data/lib/openai/internal/type/base_page.rb +1 -1
  11. data/lib/openai/internal/type/base_stream.rb +9 -1
  12. data/lib/openai/internal/util.rb +1 -1
  13. data/lib/openai/models/conversations/computer_screenshot_content.rb +2 -0
  14. data/lib/openai/models/conversations/conversation_item.rb +1 -0
  15. data/lib/openai/models/conversations/input_file_content.rb +1 -34
  16. data/lib/openai/models/conversations/input_image_content.rb +1 -54
  17. data/lib/openai/models/conversations/input_text_content.rb +1 -18
  18. data/lib/openai/models/conversations/message.rb +43 -8
  19. data/lib/openai/models/conversations/output_text_content.rb +1 -49
  20. data/lib/openai/models/conversations/refusal_content.rb +1 -18
  21. data/lib/openai/models/conversations/summary_text_content.rb +7 -2
  22. data/lib/openai/models/conversations/text_content.rb +2 -0
  23. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +13 -1
  24. data/lib/openai/models/evals/run_cancel_response.rb +13 -1
  25. data/lib/openai/models/evals/run_create_params.rb +13 -1
  26. data/lib/openai/models/evals/run_create_response.rb +13 -1
  27. data/lib/openai/models/evals/run_list_response.rb +13 -1
  28. data/lib/openai/models/evals/run_retrieve_response.rb +13 -1
  29. data/lib/openai/models/evals/runs/output_item_list_response.rb +49 -4
  30. data/lib/openai/models/evals/runs/output_item_retrieve_response.rb +49 -4
  31. data/lib/openai/models/graders/score_model_grader.rb +56 -3
  32. data/lib/openai/models/responses/response_content.rb +25 -1
  33. data/lib/openai/models/responses/response_content_part_added_event.rb +27 -3
  34. data/lib/openai/models/responses/response_content_part_done_event.rb +27 -3
  35. data/lib/openai/models/responses/response_reasoning_item.rb +6 -8
  36. data/lib/openai/version.rb +1 -1
  37. data/lib/openai.rb +5 -10
  38. data/rbi/openai/errors.rbi +29 -2
  39. data/rbi/openai/internal/transport/base_client.rbi +4 -5
  40. data/rbi/openai/internal/type/base_page.rbi +1 -1
  41. data/rbi/openai/internal/type/base_stream.rbi +16 -1
  42. data/rbi/openai/internal/util.rbi +1 -1
  43. data/rbi/openai/models/conversations/computer_screenshot_content.rbi +1 -0
  44. data/rbi/openai/models/conversations/input_file_content.rbi +1 -64
  45. data/rbi/openai/models/conversations/input_image_content.rbi +1 -105
  46. data/rbi/openai/models/conversations/input_text_content.rbi +1 -30
  47. data/rbi/openai/models/conversations/message.rbi +46 -10
  48. data/rbi/openai/models/conversations/output_text_content.rbi +1 -102
  49. data/rbi/openai/models/conversations/refusal_content.rbi +1 -30
  50. data/rbi/openai/models/conversations/summary_text_content.rbi +9 -1
  51. data/rbi/openai/models/conversations/text_content.rbi +1 -0
  52. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +16 -0
  53. data/rbi/openai/models/evals/run_cancel_response.rbi +18 -0
  54. data/rbi/openai/models/evals/run_create_params.rbi +18 -0
  55. data/rbi/openai/models/evals/run_create_response.rbi +18 -0
  56. data/rbi/openai/models/evals/run_list_response.rbi +18 -0
  57. data/rbi/openai/models/evals/run_retrieve_response.rbi +18 -0
  58. data/rbi/openai/models/evals/runs/output_item_list_response.rbi +88 -5
  59. data/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi +88 -5
  60. data/rbi/openai/models/graders/score_model_grader.rbi +88 -4
  61. data/rbi/openai/models/responses/response_content.rbi +34 -1
  62. data/rbi/openai/models/responses/response_content_part_added_event.rbi +36 -2
  63. data/rbi/openai/models/responses/response_content_part_done_event.rbi +36 -2
  64. data/rbi/openai/models/responses/response_reasoning_item.rbi +6 -4
  65. data/sig/openai/errors.rbs +7 -0
  66. data/sig/openai/internal/type/base_stream.rbs +5 -0
  67. data/sig/openai/models/conversations/input_file_content.rbs +1 -35
  68. data/sig/openai/models/conversations/input_image_content.rbs +1 -43
  69. data/sig/openai/models/conversations/input_text_content.rbs +1 -11
  70. data/sig/openai/models/conversations/message.rbs +18 -5
  71. data/sig/openai/models/conversations/output_text_content.rbs +1 -46
  72. data/sig/openai/models/conversations/refusal_content.rbs +1 -11
  73. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +5 -0
  74. data/sig/openai/models/evals/run_cancel_response.rbs +5 -0
  75. data/sig/openai/models/evals/run_create_params.rbs +5 -0
  76. data/sig/openai/models/evals/run_create_response.rbs +5 -0
  77. data/sig/openai/models/evals/run_list_response.rbs +5 -0
  78. data/sig/openai/models/evals/run_retrieve_response.rbs +5 -0
  79. data/sig/openai/models/evals/runs/output_item_list_response.rbs +43 -4
  80. data/sig/openai/models/evals/runs/output_item_retrieve_response.rbs +43 -4
  81. data/sig/openai/models/graders/score_model_grader.rbs +44 -5
  82. data/sig/openai/models/responses/response_content.rbs +13 -0
  83. data/sig/openai/models/responses/response_content_part_added_event.rbs +13 -0
  84. data/sig/openai/models/responses/response_content_part_done_event.rbs +13 -0
  85. metadata +2 -17
  86. data/lib/openai/models/conversations/container_file_citation_body.rb +0 -58
  87. data/lib/openai/models/conversations/file_citation_body.rb +0 -42
  88. data/lib/openai/models/conversations/lob_prob.rb +0 -35
  89. data/lib/openai/models/conversations/top_log_prob.rb +0 -29
  90. data/lib/openai/models/conversations/url_citation_body.rb +0 -50
  91. data/rbi/openai/models/conversations/container_file_citation_body.rbi +0 -82
  92. data/rbi/openai/models/conversations/file_citation_body.rbi +0 -61
  93. data/rbi/openai/models/conversations/lob_prob.rbi +0 -50
  94. data/rbi/openai/models/conversations/top_log_prob.rbi +0 -41
  95. data/rbi/openai/models/conversations/url_citation_body.rbi +0 -74
  96. data/sig/openai/models/conversations/container_file_citation_body.rbs +0 -47
  97. data/sig/openai/models/conversations/file_citation_body.rbs +0 -37
  98. data/sig/openai/models/conversations/lob_prob.rbs +0 -37
  99. data/sig/openai/models/conversations/top_log_prob.rbs +0 -28
  100. data/sig/openai/models/conversations/url_citation_body.rbs +0 -42
@@ -5,18 +5,23 @@ module OpenAI
5
5
  module Conversations
6
6
  class SummaryTextContent < OpenAI::Internal::Type::BaseModel
7
7
  # @!attribute text
8
+ # A summary of the reasoning output from the model so far.
8
9
  #
9
10
  # @return [String]
10
11
  required :text, String
11
12
 
12
13
  # @!attribute type
14
+ # The type of the object. Always `summary_text`.
13
15
  #
14
16
  # @return [Symbol, :summary_text]
15
17
  required :type, const: :summary_text
16
18
 
17
19
  # @!method initialize(text:, type: :summary_text)
18
- # @param text [String]
19
- # @param type [Symbol, :summary_text]
20
+ # A summary text from the model.
21
+ #
22
+ # @param text [String] A summary of the reasoning output from the model so far.
23
+ #
24
+ # @param type [Symbol, :summary_text] The type of the object. Always `summary_text`.
20
25
  end
21
26
  end
22
27
  end
@@ -15,6 +15,8 @@ module OpenAI
15
15
  required :type, const: :text
16
16
 
17
17
  # @!method initialize(text:, type: :text)
18
+ # A text content.
19
+ #
18
20
  # @param text [String]
19
21
  # @param type [Symbol, :text]
20
22
  end
@@ -459,6 +459,16 @@ module OpenAI
459
459
  # @return [Integer, nil]
460
460
  optional :max_completion_tokens, Integer
461
461
 
462
+ # @!attribute reasoning_effort
463
+ # Constrains effort on reasoning for
464
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
465
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
466
+ # effort can result in faster responses and fewer tokens used on reasoning in a
467
+ # response.
468
+ #
469
+ # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
470
+ optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
471
+
462
472
  # @!attribute response_format
463
473
  # An object specifying the format that the model must output.
464
474
  #
@@ -501,13 +511,15 @@ module OpenAI
501
511
  # @return [Float, nil]
502
512
  optional :top_p, Float
503
513
 
504
- # @!method initialize(max_completion_tokens: nil, response_format: nil, seed: nil, temperature: nil, tools: nil, top_p: nil)
514
+ # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, response_format: nil, seed: nil, temperature: nil, tools: nil, top_p: nil)
505
515
  # Some parameter documentation has been truncated, see
506
516
  # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams} for
507
517
  # more details.
508
518
  #
509
519
  # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
510
520
  #
521
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
522
+ #
511
523
  # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
512
524
  #
513
525
  # @param seed [Integer] A seed value to initialize the randomness, during sampling.
@@ -654,6 +654,16 @@ module OpenAI
654
654
  # @return [Integer, nil]
655
655
  optional :max_completion_tokens, Integer
656
656
 
657
+ # @!attribute reasoning_effort
658
+ # Constrains effort on reasoning for
659
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
660
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
661
+ # effort can result in faster responses and fewer tokens used on reasoning in a
662
+ # response.
663
+ #
664
+ # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
665
+ optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
666
+
657
667
  # @!attribute seed
658
668
  # A seed value to initialize the randomness, during sampling.
659
669
  #
@@ -702,13 +712,15 @@ module OpenAI
702
712
  # @return [Float, nil]
703
713
  optional :top_p, Float
704
714
 
705
- # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
715
+ # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
706
716
  # Some parameter documentation has been truncated, see
707
717
  # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams}
708
718
  # for more details.
709
719
  #
710
720
  # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
711
721
  #
722
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
723
+ #
712
724
  # @param seed [Integer] A seed value to initialize the randomness, during sampling.
713
725
  #
714
726
  # @param temperature [Float] A higher temperature increases randomness in the outputs.
@@ -582,6 +582,16 @@ module OpenAI
582
582
  # @return [Integer, nil]
583
583
  optional :max_completion_tokens, Integer
584
584
 
585
+ # @!attribute reasoning_effort
586
+ # Constrains effort on reasoning for
587
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
588
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
589
+ # effort can result in faster responses and fewer tokens used on reasoning in a
590
+ # response.
591
+ #
592
+ # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
593
+ optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
594
+
585
595
  # @!attribute seed
586
596
  # A seed value to initialize the randomness, during sampling.
587
597
  #
@@ -630,13 +640,15 @@ module OpenAI
630
640
  # @return [Float, nil]
631
641
  optional :top_p, Float
632
642
 
633
- # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
643
+ # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
634
644
  # Some parameter documentation has been truncated, see
635
645
  # {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams}
636
646
  # for more details.
637
647
  #
638
648
  # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
639
649
  #
650
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
651
+ #
640
652
  # @param seed [Integer] A seed value to initialize the randomness, during sampling.
641
653
  #
642
654
  # @param temperature [Float] A higher temperature increases randomness in the outputs.
@@ -654,6 +654,16 @@ module OpenAI
654
654
  # @return [Integer, nil]
655
655
  optional :max_completion_tokens, Integer
656
656
 
657
+ # @!attribute reasoning_effort
658
+ # Constrains effort on reasoning for
659
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
660
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
661
+ # effort can result in faster responses and fewer tokens used on reasoning in a
662
+ # response.
663
+ #
664
+ # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
665
+ optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
666
+
657
667
  # @!attribute seed
658
668
  # A seed value to initialize the randomness, during sampling.
659
669
  #
@@ -702,13 +712,15 @@ module OpenAI
702
712
  # @return [Float, nil]
703
713
  optional :top_p, Float
704
714
 
705
- # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
715
+ # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
706
716
  # Some parameter documentation has been truncated, see
707
717
  # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams}
708
718
  # for more details.
709
719
  #
710
720
  # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
711
721
  #
722
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
723
+ #
712
724
  # @param seed [Integer] A seed value to initialize the randomness, during sampling.
713
725
  #
714
726
  # @param temperature [Float] A higher temperature increases randomness in the outputs.
@@ -654,6 +654,16 @@ module OpenAI
654
654
  # @return [Integer, nil]
655
655
  optional :max_completion_tokens, Integer
656
656
 
657
+ # @!attribute reasoning_effort
658
+ # Constrains effort on reasoning for
659
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
660
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
661
+ # effort can result in faster responses and fewer tokens used on reasoning in a
662
+ # response.
663
+ #
664
+ # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
665
+ optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
666
+
657
667
  # @!attribute seed
658
668
  # A seed value to initialize the randomness, during sampling.
659
669
  #
@@ -701,13 +711,15 @@ module OpenAI
701
711
  # @return [Float, nil]
702
712
  optional :top_p, Float
703
713
 
704
- # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
714
+ # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
705
715
  # Some parameter documentation has been truncated, see
706
716
  # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams}
707
717
  # for more details.
708
718
  #
709
719
  # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
710
720
  #
721
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
722
+ #
711
723
  # @param seed [Integer] A seed value to initialize the randomness, during sampling.
712
724
  #
713
725
  # @param temperature [Float] A higher temperature increases randomness in the outputs.
@@ -658,6 +658,16 @@ module OpenAI
658
658
  # @return [Integer, nil]
659
659
  optional :max_completion_tokens, Integer
660
660
 
661
+ # @!attribute reasoning_effort
662
+ # Constrains effort on reasoning for
663
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
664
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
665
+ # effort can result in faster responses and fewer tokens used on reasoning in a
666
+ # response.
667
+ #
668
+ # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
669
+ optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
670
+
661
671
  # @!attribute seed
662
672
  # A seed value to initialize the randomness, during sampling.
663
673
  #
@@ -706,13 +716,15 @@ module OpenAI
706
716
  # @return [Float, nil]
707
717
  optional :top_p, Float
708
718
 
709
- # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
719
+ # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
710
720
  # Some parameter documentation has been truncated, see
711
721
  # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams}
712
722
  # for more details.
713
723
  #
714
724
  # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
715
725
  #
726
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
727
+ #
716
728
  # @param seed [Integer] A seed value to initialize the randomness, during sampling.
717
729
  #
718
730
  # @param temperature [Float] A higher temperature increases randomness in the outputs.
@@ -43,11 +43,11 @@ module OpenAI
43
43
  required :object, const: :"eval.run.output_item"
44
44
 
45
45
  # @!attribute results
46
- # A list of results from the evaluation run.
46
+ # A list of grader results for this output item.
47
47
  #
48
- # @return [Array<Hash{Symbol=>Object}>]
48
+ # @return [Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Result>]
49
49
  required :results,
50
- OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]
50
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemListResponse::Result] }
51
51
 
52
52
  # @!attribute run_id
53
53
  # The identifier of the evaluation run associated with this output item.
@@ -80,7 +80,7 @@ module OpenAI
80
80
  #
81
81
  # @param eval_id [String] The identifier of the evaluation group.
82
82
  #
83
- # @param results [Array<Hash{Symbol=>Object}>] A list of results from the evaluation run.
83
+ # @param results [Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Result>] A list of grader results for this output item.
84
84
  #
85
85
  # @param run_id [String] The identifier of the evaluation run associated with this output item.
86
86
  #
@@ -90,6 +90,51 @@ module OpenAI
90
90
  #
91
91
  # @param object [Symbol, :"eval.run.output_item"] The type of the object. Always "eval.run.output_item".
92
92
 
93
+ class Result < OpenAI::Internal::Type::BaseModel
94
+ # @!attribute name
95
+ # The name of the grader.
96
+ #
97
+ # @return [String]
98
+ required :name, String
99
+
100
+ # @!attribute passed
101
+ # Whether the grader considered the output a pass.
102
+ #
103
+ # @return [Boolean]
104
+ required :passed, OpenAI::Internal::Type::Boolean
105
+
106
+ # @!attribute score
107
+ # The numeric score produced by the grader.
108
+ #
109
+ # @return [Float]
110
+ required :score, Float
111
+
112
+ # @!attribute sample
113
+ # Optional sample or intermediate data produced by the grader.
114
+ #
115
+ # @return [Hash{Symbol=>Object}, nil]
116
+ optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown], nil?: true
117
+
118
+ # @!attribute type
119
+ # The grader type (for example, "string-check-grader").
120
+ #
121
+ # @return [String, nil]
122
+ optional :type, String
123
+
124
+ # @!method initialize(name:, passed:, score:, sample: nil, type: nil)
125
+ # A single grader result for an evaluation run output item.
126
+ #
127
+ # @param name [String] The name of the grader.
128
+ #
129
+ # @param passed [Boolean] Whether the grader considered the output a pass.
130
+ #
131
+ # @param score [Float] The numeric score produced by the grader.
132
+ #
133
+ # @param sample [Hash{Symbol=>Object}, nil] Optional sample or intermediate data produced by the grader.
134
+ #
135
+ # @param type [String] The grader type (for example, "string-check-grader").
136
+ end
137
+
93
138
  # @see OpenAI::Models::Evals::Runs::OutputItemListResponse#sample
94
139
  class Sample < OpenAI::Internal::Type::BaseModel
95
140
  # @!attribute error
@@ -43,11 +43,11 @@ module OpenAI
43
43
  required :object, const: :"eval.run.output_item"
44
44
 
45
45
  # @!attribute results
46
- # A list of results from the evaluation run.
46
+ # A list of grader results for this output item.
47
47
  #
48
- # @return [Array<Hash{Symbol=>Object}>]
48
+ # @return [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result>]
49
49
  required :results,
50
- OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]
50
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result] }
51
51
 
52
52
  # @!attribute run_id
53
53
  # The identifier of the evaluation run associated with this output item.
@@ -80,7 +80,7 @@ module OpenAI
80
80
  #
81
81
  # @param eval_id [String] The identifier of the evaluation group.
82
82
  #
83
- # @param results [Array<Hash{Symbol=>Object}>] A list of results from the evaluation run.
83
+ # @param results [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result>] A list of grader results for this output item.
84
84
  #
85
85
  # @param run_id [String] The identifier of the evaluation run associated with this output item.
86
86
  #
@@ -90,6 +90,51 @@ module OpenAI
90
90
  #
91
91
  # @param object [Symbol, :"eval.run.output_item"] The type of the object. Always "eval.run.output_item".
92
92
 
93
+ class Result < OpenAI::Internal::Type::BaseModel
94
+ # @!attribute name
95
+ # The name of the grader.
96
+ #
97
+ # @return [String]
98
+ required :name, String
99
+
100
+ # @!attribute passed
101
+ # Whether the grader considered the output a pass.
102
+ #
103
+ # @return [Boolean]
104
+ required :passed, OpenAI::Internal::Type::Boolean
105
+
106
+ # @!attribute score
107
+ # The numeric score produced by the grader.
108
+ #
109
+ # @return [Float]
110
+ required :score, Float
111
+
112
+ # @!attribute sample
113
+ # Optional sample or intermediate data produced by the grader.
114
+ #
115
+ # @return [Hash{Symbol=>Object}, nil]
116
+ optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown], nil?: true
117
+
118
+ # @!attribute type
119
+ # The grader type (for example, "string-check-grader").
120
+ #
121
+ # @return [String, nil]
122
+ optional :type, String
123
+
124
+ # @!method initialize(name:, passed:, score:, sample: nil, type: nil)
125
+ # A single grader result for an evaluation run output item.
126
+ #
127
+ # @param name [String] The name of the grader.
128
+ #
129
+ # @param passed [Boolean] Whether the grader considered the output a pass.
130
+ #
131
+ # @param score [Float] The numeric score produced by the grader.
132
+ #
133
+ # @param sample [Hash{Symbol=>Object}, nil] Optional sample or intermediate data produced by the grader.
134
+ #
135
+ # @param type [String] The grader type (for example, "string-check-grader").
136
+ end
137
+
93
138
  # @see OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse#sample
94
139
  class Sample < OpenAI::Internal::Type::BaseModel
95
140
  # @!attribute error
@@ -37,8 +37,8 @@ module OpenAI
37
37
  # @!attribute sampling_params
38
38
  # The sampling parameters for the model.
39
39
  #
40
- # @return [Object, nil]
41
- optional :sampling_params, OpenAI::Internal::Type::Unknown
40
+ # @return [OpenAI::Models::Graders::ScoreModelGrader::SamplingParams, nil]
41
+ optional :sampling_params, -> { OpenAI::Graders::ScoreModelGrader::SamplingParams }
42
42
 
43
43
  # @!method initialize(input:, model:, name:, range: nil, sampling_params: nil, type: :score_model)
44
44
  # A ScoreModelGrader object that uses a model to assign a score to the input.
@@ -51,7 +51,7 @@ module OpenAI
51
51
  #
52
52
  # @param range [Array<Float>] The range of the score. Defaults to `[0, 1]`.
53
53
  #
54
- # @param sampling_params [Object] The sampling parameters for the model.
54
+ # @param sampling_params [OpenAI::Models::Graders::ScoreModelGrader::SamplingParams] The sampling parameters for the model.
55
55
  #
56
56
  # @param type [Symbol, :score_model] The object type, which is always `score_model`.
57
57
 
@@ -210,6 +210,59 @@ module OpenAI
210
210
  # @return [Array<Symbol>]
211
211
  end
212
212
  end
213
+
214
+ # @see OpenAI::Models::Graders::ScoreModelGrader#sampling_params
215
+ class SamplingParams < OpenAI::Internal::Type::BaseModel
216
+ # @!attribute max_completions_tokens
217
+ # The maximum number of tokens the grader model may generate in its response.
218
+ #
219
+ # @return [Integer, nil]
220
+ optional :max_completions_tokens, Integer, nil?: true
221
+
222
+ # @!attribute reasoning_effort
223
+ # Constrains effort on reasoning for
224
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
225
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
226
+ # effort can result in faster responses and fewer tokens used on reasoning in a
227
+ # response.
228
+ #
229
+ # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
230
+ optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
231
+
232
+ # @!attribute seed
233
+ # A seed value to initialize the randomness, during sampling.
234
+ #
235
+ # @return [Integer, nil]
236
+ optional :seed, Integer, nil?: true
237
+
238
+ # @!attribute temperature
239
+ # A higher temperature increases randomness in the outputs.
240
+ #
241
+ # @return [Float, nil]
242
+ optional :temperature, Float, nil?: true
243
+
244
+ # @!attribute top_p
245
+ # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
246
+ #
247
+ # @return [Float, nil]
248
+ optional :top_p, Float, nil?: true
249
+
250
+ # @!method initialize(max_completions_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, top_p: nil)
251
+ # Some parameter documentation has been truncated, see
252
+ # {OpenAI::Models::Graders::ScoreModelGrader::SamplingParams} for more details.
253
+ #
254
+ # The sampling parameters for the model.
255
+ #
256
+ # @param max_completions_tokens [Integer, nil] The maximum number of tokens the grader model may generate in its response.
257
+ #
258
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
259
+ #
260
+ # @param seed [Integer, nil] A seed value to initialize the randomness, during sampling.
261
+ #
262
+ # @param temperature [Float, nil] A higher temperature increases randomness in the outputs.
263
+ #
264
+ # @param top_p [Float, nil] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
265
+ end
213
266
  end
214
267
  end
215
268
 
@@ -25,8 +25,32 @@ module OpenAI
25
25
  # A refusal from the model.
26
26
  variant -> { OpenAI::Responses::ResponseOutputRefusal }
27
27
 
28
+ # Reasoning text from the model.
29
+ variant -> { OpenAI::Responses::ResponseContent::ReasoningTextContent }
30
+
31
+ class ReasoningTextContent < OpenAI::Internal::Type::BaseModel
32
+ # @!attribute text
33
+ # The reasoning text from the model.
34
+ #
35
+ # @return [String]
36
+ required :text, String
37
+
38
+ # @!attribute type
39
+ # The type of the reasoning text. Always `reasoning_text`.
40
+ #
41
+ # @return [Symbol, :reasoning_text]
42
+ required :type, const: :reasoning_text
43
+
44
+ # @!method initialize(text:, type: :reasoning_text)
45
+ # Reasoning text from the model.
46
+ #
47
+ # @param text [String] The reasoning text from the model.
48
+ #
49
+ # @param type [Symbol, :reasoning_text] The type of the reasoning text. Always `reasoning_text`.
50
+ end
51
+
28
52
  # @!method self.variants
29
- # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
53
+ # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContent::ReasoningTextContent)]
30
54
  end
31
55
  end
32
56
  end
@@ -25,7 +25,7 @@ module OpenAI
25
25
  # @!attribute part
26
26
  # The content part that was added.
27
27
  #
28
- # @return [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal]
28
+ # @return [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContentPartAddedEvent::Part::ReasoningText]
29
29
  required :part, union: -> { OpenAI::Responses::ResponseContentPartAddedEvent::Part }
30
30
 
31
31
  # @!attribute sequence_number
@@ -52,7 +52,7 @@ module OpenAI
52
52
  #
53
53
  # @param output_index [Integer] The index of the output item that the content part was added to.
54
54
  #
55
- # @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] The content part that was added.
55
+ # @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContentPartAddedEvent::Part::ReasoningText] The content part that was added.
56
56
  #
57
57
  # @param sequence_number [Integer] The sequence number of this event.
58
58
  #
@@ -72,8 +72,32 @@ module OpenAI
72
72
  # A refusal from the model.
73
73
  variant :refusal, -> { OpenAI::Responses::ResponseOutputRefusal }
74
74
 
75
+ # Reasoning text from the model.
76
+ variant :reasoning_text, -> { OpenAI::Responses::ResponseContentPartAddedEvent::Part::ReasoningText }
77
+
78
+ class ReasoningText < OpenAI::Internal::Type::BaseModel
79
+ # @!attribute text
80
+ # The reasoning text from the model.
81
+ #
82
+ # @return [String]
83
+ required :text, String
84
+
85
+ # @!attribute type
86
+ # The type of the reasoning text. Always `reasoning_text`.
87
+ #
88
+ # @return [Symbol, :reasoning_text]
89
+ required :type, const: :reasoning_text
90
+
91
+ # @!method initialize(text:, type: :reasoning_text)
92
+ # Reasoning text from the model.
93
+ #
94
+ # @param text [String] The reasoning text from the model.
95
+ #
96
+ # @param type [Symbol, :reasoning_text] The type of the reasoning text. Always `reasoning_text`.
97
+ end
98
+
75
99
  # @!method self.variants
76
- # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
100
+ # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContentPartAddedEvent::Part::ReasoningText)]
77
101
  end
78
102
  end
79
103
  end
@@ -25,7 +25,7 @@ module OpenAI
25
25
  # @!attribute part
26
26
  # The content part that is done.
27
27
  #
28
- # @return [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal]
28
+ # @return [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContentPartDoneEvent::Part::ReasoningText]
29
29
  required :part, union: -> { OpenAI::Responses::ResponseContentPartDoneEvent::Part }
30
30
 
31
31
  # @!attribute sequence_number
@@ -52,7 +52,7 @@ module OpenAI
52
52
  #
53
53
  # @param output_index [Integer] The index of the output item that the content part was added to.
54
54
  #
55
- # @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] The content part that is done.
55
+ # @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContentPartDoneEvent::Part::ReasoningText] The content part that is done.
56
56
  #
57
57
  # @param sequence_number [Integer] The sequence number of this event.
58
58
  #
@@ -72,8 +72,32 @@ module OpenAI
72
72
  # A refusal from the model.
73
73
  variant :refusal, -> { OpenAI::Responses::ResponseOutputRefusal }
74
74
 
75
+ # Reasoning text from the model.
76
+ variant :reasoning_text, -> { OpenAI::Responses::ResponseContentPartDoneEvent::Part::ReasoningText }
77
+
78
+ class ReasoningText < OpenAI::Internal::Type::BaseModel
79
+ # @!attribute text
80
+ # The reasoning text from the model.
81
+ #
82
+ # @return [String]
83
+ required :text, String
84
+
85
+ # @!attribute type
86
+ # The type of the reasoning text. Always `reasoning_text`.
87
+ #
88
+ # @return [Symbol, :reasoning_text]
89
+ required :type, const: :reasoning_text
90
+
91
+ # @!method initialize(text:, type: :reasoning_text)
92
+ # Reasoning text from the model.
93
+ #
94
+ # @param text [String] The reasoning text from the model.
95
+ #
96
+ # @param type [Symbol, :reasoning_text] The type of the reasoning text. Always `reasoning_text`.
97
+ end
98
+
75
99
  # @!method self.variants
76
- # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
100
+ # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContentPartDoneEvent::Part::ReasoningText)]
77
101
  end
78
102
  end
79
103
  end
@@ -79,8 +79,7 @@ module OpenAI
79
79
  required :type, const: :summary_text
80
80
 
81
81
  # @!method initialize(text:, type: :summary_text)
82
- # Some parameter documentations has been truncated, see
83
- # {OpenAI::Models::Responses::ResponseReasoningItem::Summary} for more details.
82
+ # A summary text from the model.
84
83
  #
85
84
  # @param text [String] A summary of the reasoning output from the model so far.
86
85
  #
@@ -89,24 +88,23 @@ module OpenAI
89
88
 
90
89
  class Content < OpenAI::Internal::Type::BaseModel
91
90
  # @!attribute text
92
- # Reasoning text output from the model.
91
+ # The reasoning text from the model.
93
92
  #
94
93
  # @return [String]
95
94
  required :text, String
96
95
 
97
96
  # @!attribute type
98
- # The type of the object. Always `reasoning_text`.
97
+ # The type of the reasoning text. Always `reasoning_text`.
99
98
  #
100
99
  # @return [Symbol, :reasoning_text]
101
100
  required :type, const: :reasoning_text
102
101
 
103
102
  # @!method initialize(text:, type: :reasoning_text)
104
- # Some parameter documentations has been truncated, see
105
- # {OpenAI::Models::Responses::ResponseReasoningItem::Content} for more details.
103
+ # Reasoning text from the model.
106
104
  #
107
- # @param text [String] Reasoning text output from the model.
105
+ # @param text [String] The reasoning text from the model.
108
106
  #
109
- # @param type [Symbol, :reasoning_text] The type of the object. Always `reasoning_text`.
107
+ # @param type [Symbol, :reasoning_text] The type of the reasoning text. Always `reasoning_text`.
110
108
  end
111
109
 
112
110
  # The status of the item. One of `in_progress`, `completed`, or `incomplete`.