scale-gp-beta 0.1.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. scale_gp/__init__.py +96 -0
  2. scale_gp/_base_client.py +2058 -0
  3. scale_gp/_client.py +544 -0
  4. scale_gp/_compat.py +219 -0
  5. scale_gp/_constants.py +14 -0
  6. scale_gp/_exceptions.py +108 -0
  7. scale_gp/_files.py +123 -0
  8. scale_gp/_models.py +801 -0
  9. scale_gp/_qs.py +150 -0
  10. scale_gp/_resource.py +43 -0
  11. scale_gp/_response.py +830 -0
  12. scale_gp/_streaming.py +333 -0
  13. scale_gp/_types.py +217 -0
  14. scale_gp/_utils/__init__.py +57 -0
  15. scale_gp/_utils/_logs.py +25 -0
  16. scale_gp/_utils/_proxy.py +62 -0
  17. scale_gp/_utils/_reflection.py +42 -0
  18. scale_gp/_utils/_streams.py +12 -0
  19. scale_gp/_utils/_sync.py +86 -0
  20. scale_gp/_utils/_transform.py +402 -0
  21. scale_gp/_utils/_typing.py +149 -0
  22. scale_gp/_utils/_utils.py +414 -0
  23. scale_gp/_version.py +4 -0
  24. scale_gp/lib/.keep +4 -0
  25. scale_gp/pagination.py +83 -0
  26. scale_gp/py.typed +0 -0
  27. scale_gp/resources/__init__.py +103 -0
  28. scale_gp/resources/chat/__init__.py +33 -0
  29. scale_gp/resources/chat/chat.py +102 -0
  30. scale_gp/resources/chat/completions.py +1054 -0
  31. scale_gp/resources/completions.py +765 -0
  32. scale_gp/resources/files/__init__.py +33 -0
  33. scale_gp/resources/files/content.py +162 -0
  34. scale_gp/resources/files/files.py +558 -0
  35. scale_gp/resources/inference.py +210 -0
  36. scale_gp/resources/models.py +834 -0
  37. scale_gp/resources/question_sets.py +680 -0
  38. scale_gp/resources/questions.py +396 -0
  39. scale_gp/types/__init__.py +33 -0
  40. scale_gp/types/chat/__init__.py +8 -0
  41. scale_gp/types/chat/chat_completion.py +257 -0
  42. scale_gp/types/chat/chat_completion_chunk.py +240 -0
  43. scale_gp/types/chat/completion_create_params.py +156 -0
  44. scale_gp/types/chat/completion_create_response.py +11 -0
  45. scale_gp/types/completion.py +116 -0
  46. scale_gp/types/completion_create_params.py +108 -0
  47. scale_gp/types/file.py +30 -0
  48. scale_gp/types/file_create_params.py +13 -0
  49. scale_gp/types/file_delete_response.py +16 -0
  50. scale_gp/types/file_list.py +27 -0
  51. scale_gp/types/file_list_params.py +16 -0
  52. scale_gp/types/file_update_params.py +12 -0
  53. scale_gp/types/files/__init__.py +3 -0
  54. scale_gp/types/inference_create_params.py +25 -0
  55. scale_gp/types/inference_create_response.py +11 -0
  56. scale_gp/types/inference_model.py +167 -0
  57. scale_gp/types/inference_model_list.py +27 -0
  58. scale_gp/types/inference_response.py +14 -0
  59. scale_gp/types/inference_response_chunk.py +14 -0
  60. scale_gp/types/model_create_params.py +165 -0
  61. scale_gp/types/model_delete_response.py +16 -0
  62. scale_gp/types/model_list_params.py +20 -0
  63. scale_gp/types/model_update_params.py +161 -0
  64. scale_gp/types/question.py +68 -0
  65. scale_gp/types/question_create_params.py +59 -0
  66. scale_gp/types/question_list.py +27 -0
  67. scale_gp/types/question_list_params.py +16 -0
  68. scale_gp/types/question_set.py +106 -0
  69. scale_gp/types/question_set_create_params.py +115 -0
  70. scale_gp/types/question_set_delete_response.py +16 -0
  71. scale_gp/types/question_set_list.py +27 -0
  72. scale_gp/types/question_set_list_params.py +20 -0
  73. scale_gp/types/question_set_retrieve_params.py +12 -0
  74. scale_gp/types/question_set_update_params.py +23 -0
  75. scale_gp_beta-0.1.0a2.dist-info/METADATA +440 -0
  76. scale_gp_beta-0.1.0a2.dist-info/RECORD +78 -0
  77. scale_gp_beta-0.1.0a2.dist-info/WHEEL +4 -0
  78. scale_gp_beta-0.1.0a2.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,240 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ import builtins
4
+ from typing import TYPE_CHECKING, List, Optional
5
+ from typing_extensions import Literal
6
+
7
+ from ..._models import BaseModel
8
+
9
# Public API of this module: the streamed chunk model and its nested components.
__all__ = [
    "ChatCompletionChunk",
    "Choice",
    "ChoiceDelta",
    "ChoiceDeltaFunctionCall",
    "ChoiceDeltaToolCall",
    "ChoiceDeltaToolCallFunction",
    "ChoiceLogprobs",
    "ChoiceLogprobsContent",
    "ChoiceLogprobsContentTopLogprob",
    "ChoiceLogprobsRefusal",
    "ChoiceLogprobsRefusalTopLogprob",
    "Usage",
    "UsageCompletionTokensDetails",
    "UsagePromptTokensDetails",
]
25
+
26
+
27
class ChoiceDeltaFunctionCall(BaseModel):
    """Function-call data attached to a streamed `ChoiceDelta`.

    Both fields are optional: a single chunk may carry only part of the call
    (NOTE(review): fragments are presumably accumulated client-side — confirm).
    """

    arguments: Optional[str] = None

    name: Optional[str] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
37
+
38
+
39
class ChoiceDeltaToolCallFunction(BaseModel):
    """Function payload of a streamed tool call (`ChoiceDeltaToolCall.function`)."""

    arguments: Optional[str] = None

    name: Optional[str] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
49
+
50
+
51
class ChoiceDeltaToolCall(BaseModel):
    """One (possibly partial) tool call carried on a streamed `ChoiceDelta`."""

    # Position of this tool call in the delta's tool-call list; the only
    # required field, so fragments can be matched up across chunks.
    index: int

    id: Optional[str] = None

    function: Optional[ChoiceDeltaToolCallFunction] = None

    # Only "function" tool calls are modeled here.
    type: Optional[Literal["function"]] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
65
+
66
+
67
class ChoiceDelta(BaseModel):
    """Incremental message content for one choice of a chat completion chunk.

    Every field is optional — each streamed chunk carries only what changed.
    """

    content: Optional[str] = None

    # Legacy single-function form; `tool_calls` is the multi-call equivalent.
    function_call: Optional[ChoiceDeltaFunctionCall] = None

    refusal: Optional[str] = None

    role: Optional[Literal["system", "user", "assistant", "tool"]] = None

    tool_calls: Optional[List[ChoiceDeltaToolCall]] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
83
+
84
+
85
class ChoiceLogprobsContentTopLogprob(BaseModel):
    """A candidate token and its log probability at one content position."""

    token: str

    logprob: float

    # Raw UTF-8 bytes of the token, when available.
    bytes: Optional[List[int]] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
97
+
98
+
99
class ChoiceLogprobsContent(BaseModel):
    """Log-probability information for one generated content token."""

    token: str

    logprob: float

    # The most likely alternatives considered at this position.
    top_logprobs: List[ChoiceLogprobsContentTopLogprob]

    # Raw UTF-8 bytes of the token, when available.
    bytes: Optional[List[int]] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
113
+
114
+
115
class ChoiceLogprobsRefusalTopLogprob(BaseModel):
    """A candidate token and its log probability at one refusal position."""

    token: str

    logprob: float

    # Raw UTF-8 bytes of the token, when available.
    bytes: Optional[List[int]] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
127
+
128
+
129
class ChoiceLogprobsRefusal(BaseModel):
    """Log-probability information for one refusal token."""

    token: str

    logprob: float

    # The most likely alternatives considered at this position.
    top_logprobs: List[ChoiceLogprobsRefusalTopLogprob]

    # Raw UTF-8 bytes of the token, when available.
    bytes: Optional[List[int]] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
143
+
144
+
145
class ChoiceLogprobs(BaseModel):
    """Log-probability data for a choice, split into content and refusal tokens."""

    content: Optional[List[ChoiceLogprobsContent]] = None

    refusal: Optional[List[ChoiceLogprobsRefusal]] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
155
+
156
+
157
class Choice(BaseModel):
    """One alternative completion within a streamed chunk."""

    # The incremental content produced since the previous chunk.
    delta: ChoiceDelta

    index: int

    # None until the choice has finished generating.
    finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter", "function_call"]] = None

    logprobs: Optional[ChoiceLogprobs] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
171
+
172
+
173
class UsageCompletionTokensDetails(BaseModel):
    """Breakdown of the tokens counted in `Usage.completion_tokens`."""

    accepted_prediction_tokens: Optional[int] = None

    audio_tokens: Optional[int] = None

    reasoning_tokens: Optional[int] = None

    rejected_prediction_tokens: Optional[int] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
187
+
188
+
189
class UsagePromptTokensDetails(BaseModel):
    """Breakdown of the tokens counted in `Usage.prompt_tokens`."""

    audio_tokens: Optional[int] = None

    cached_tokens: Optional[int] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
199
+
200
+
201
class Usage(BaseModel):
    """Token accounting for the request."""

    completion_tokens: int

    prompt_tokens: int

    total_tokens: int

    completion_tokens_details: Optional[UsageCompletionTokensDetails] = None

    prompt_tokens_details: Optional[UsagePromptTokensDetails] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
217
+
218
+
219
class ChatCompletionChunk(BaseModel):
    """One streamed event of a chat completion (`object == "chat.completion.chunk"`)."""

    id: str

    choices: List[Choice]

    # Unix timestamp (NOTE(review): presumably seconds — confirm with API docs).
    created: int

    model: str

    object: Optional[Literal["chat.completion.chunk"]] = None

    service_tier: Optional[Literal["scale", "default"]] = None

    system_fingerprint: Optional[str] = None

    usage: Optional[Usage] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        # `builtins.object` is spelled out because the `object` field above shadows the builtin.
        def __getattr__(self, attr: str) -> builtins.object: ...
@@ -0,0 +1,156 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Dict, List, Union, Iterable
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
# `CompletionCreateParams` (the union alias defined at the bottom of this
# module and referenced by callers) was missing from the export list, so it was
# not re-exported by wildcard imports; add it alongside the three variants.
__all__ = [
    "CompletionCreateParamsBase",
    "CompletionCreateParamsNonStreaming",
    "CompletionCreateParamsStreaming",
    "CompletionCreateParams",
]
9
+
10
+
11
class CompletionCreateParamsBase(TypedDict, total=False):
    """Shared request parameters for creating a chat completion.

    `messages` and `model` are required; every other key is optional
    (`total=False`).
    """

    messages: Required[Iterable[Dict[str, object]]]
    """openai standard message format"""

    model: Required[str]
    """model specified as `model_vendor/model`, for example `openai/gpt-4o`"""

    audio: Dict[str, object]
    """Parameters for audio output.

    Required when audio output is requested with modalities: ['audio'].
    """

    frequency_penalty: float
    """Number between -2.0 and 2.0.

    Positive values penalize new tokens based on their existing frequency in the
    text so far.
    """

    function_call: Dict[str, object]
    """Deprecated in favor of tool_choice.

    Controls which function is called by the model.
    """

    functions: Iterable[Dict[str, object]]
    """Deprecated in favor of tools.

    A list of functions the model may generate JSON inputs for.
    """

    logit_bias: Dict[str, int]
    """Modify the likelihood of specified tokens appearing in the completion.

    Maps tokens to bias values from -100 to 100.
    """

    logprobs: bool
    """Whether to return log probabilities of the output tokens or not."""

    max_completion_tokens: int
    """
    An upper bound for the number of tokens that can be generated, including visible
    output tokens and reasoning tokens.
    """

    max_tokens: int
    """Deprecated in favor of max_completion_tokens.

    The maximum number of tokens to generate.
    """

    metadata: Dict[str, str]
    """
    Developer-defined tags and values used for filtering completions in the
    dashboard.
    """

    modalities: List[str]
    """Output types that you would like the model to generate for this request."""

    n: int
    """How many chat completion choices to generate for each input message."""

    parallel_tool_calls: bool
    """Whether to enable parallel function calling during tool use."""

    prediction: Dict[str, object]
    """
    Static predicted output content, such as the content of a text file being
    regenerated.
    """

    presence_penalty: float
    """Number between -2.0 and 2.0.

    Positive values penalize tokens based on whether they appear in the text so far.
    """

    reasoning_effort: str
    """For o1 models only. Constrains effort on reasoning. Values: low, medium, high."""

    response_format: Dict[str, object]
    """An object specifying the format that the model must output."""

    seed: int
    """
    If specified, system will attempt to sample deterministically for repeated
    requests with same seed.
    """

    stop: Union[str, List[str]]
    """Up to 4 sequences where the API will stop generating further tokens."""

    store: bool
    """Whether to store the output for use in model distillation or evals products."""

    stream_options: Dict[str, object]
    """Options for streaming response. Only set this when stream is true."""

    temperature: float
    """What sampling temperature to use.

    Higher values make output more random, lower more focused.
    """

    tool_choice: Union[str, Dict[str, object]]
    """Controls which tool is called by the model.

    Values: none, auto, required, or specific tool.
    """

    tools: Iterable[Dict[str, object]]
    """A list of tools the model may call.

    Currently, only functions are supported. Max 128 functions.
    """

    top_k: int
    """Only sample from the top K options for each subsequent token"""

    top_logprobs: int
    """
    Number of most likely tokens to return at each position, with associated log
    probability.
    """

    top_p: float
    """Alternative to temperature.

    Only tokens comprising top_p probability mass are considered.
    """
144
+
145
+
146
class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
    """Variant used when `stream` is omitted or False."""

    stream: Literal[False]
    """If true, partial message deltas will be sent as server-sent events."""
149
+
150
+
151
class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
    """Variant used when streaming; `stream` must be exactly True."""

    stream: Required[Literal[True]]
    """If true, partial message deltas will be sent as server-sent events."""
154
+
155
+
156
# Callers pass one of the two variants; the `stream` key selects which applies.
CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
@@ -0,0 +1,11 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Union
4
+ from typing_extensions import TypeAlias
5
+
6
+ from .chat_completion import ChatCompletion
7
+ from .chat_completion_chunk import ChatCompletionChunk
8
+
9
__all__ = ["CompletionCreateResponse"]

# A create call resolves to either a full `ChatCompletion` or, when streamed,
# `ChatCompletionChunk` items (NOTE(review): confirm against the resource method).
CompletionCreateResponse: TypeAlias = Union[ChatCompletion, ChatCompletionChunk]
@@ -0,0 +1,116 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ import builtins
4
+ from typing import TYPE_CHECKING, Dict, List, Optional
5
+ from typing_extensions import Literal
6
+
7
+ from .._models import BaseModel
8
+
9
# Public API of this module: the `Completion` model and its nested components.
__all__ = [
    "Completion",
    "Choice",
    "ChoiceLogprobs",
    "Usage",
    "UsageCompletionTokensDetails",
    "UsagePromptTokensDetails",
]
17
+
18
+
19
class ChoiceLogprobs(BaseModel):
    """Per-token log-probability arrays for a text-completion choice.

    The four lists run in parallel, one entry per generated token
    (NOTE(review): parallelism inferred from the classic completions format — confirm).
    """

    text_offset: Optional[List[int]] = None

    token_logprobs: Optional[List[float]] = None

    tokens: Optional[List[str]] = None

    top_logprobs: Optional[List[Dict[str, float]]] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
33
+
34
+
35
class Choice(BaseModel):
    """One generated text alternative in a `Completion`."""

    finish_reason: Literal["stop", "length", "content_filter"]

    index: int

    # The generated text itself.
    text: str

    logprobs: Optional[ChoiceLogprobs] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
49
+
50
+
51
class UsageCompletionTokensDetails(BaseModel):
    """Breakdown of the tokens counted in `Usage.completion_tokens`."""

    accepted_prediction_tokens: Optional[int] = None

    audio_tokens: Optional[int] = None

    reasoning_tokens: Optional[int] = None

    rejected_prediction_tokens: Optional[int] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
65
+
66
+
67
class UsagePromptTokensDetails(BaseModel):
    """Breakdown of the tokens counted in `Usage.prompt_tokens`."""

    audio_tokens: Optional[int] = None

    cached_tokens: Optional[int] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
77
+
78
+
79
class Usage(BaseModel):
    """Token accounting for the request."""

    completion_tokens: int

    prompt_tokens: int

    total_tokens: int

    completion_tokens_details: Optional[UsageCompletionTokensDetails] = None

    prompt_tokens_details: Optional[UsagePromptTokensDetails] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        def __getattr__(self, attr: str) -> object: ...
95
+
96
+
97
class Completion(BaseModel):
    """A text completion response (`object == "text_completion"`)."""

    id: str

    choices: List[Choice]

    # Unix timestamp (NOTE(review): presumably seconds — confirm with API docs).
    created: int

    model: str

    object: Optional[Literal["text_completion"]] = None

    system_fingerprint: Optional[str] = None

    usage: Optional[Usage] = None

    if TYPE_CHECKING:
        # Stub to indicate that arbitrary properties are accepted.
        # To access properties that are not valid identifiers you can use `getattr`, e.g.
        # `getattr(obj, '$type')`
        # `builtins.object` is spelled out because the `object` field above shadows the builtin.
        def __getattr__(self, attr: str) -> builtins.object: ...
@@ -0,0 +1,108 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Dict, List, Union
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
# `CompletionCreateParams` (the union alias defined at the bottom of this
# module and referenced by callers) was missing from the export list, so it was
# not re-exported by wildcard imports; add it alongside the three variants.
__all__ = [
    "CompletionCreateParamsBase",
    "CompletionCreateParamsNonStreaming",
    "CompletionCreateParamsStreaming",
    "CompletionCreateParams",
]
9
+
10
+
11
class CompletionCreateParamsBase(TypedDict, total=False):
    """Shared request parameters for creating a text completion.

    `model` and `prompt` are required; every other key is optional
    (`total=False`).
    """

    model: Required[str]
    """model specified as `model_vendor/model`, for example `openai/gpt-4o`"""

    prompt: Required[Union[str, List[str]]]
    """The prompt to generate completions for, encoded as a string"""

    best_of: int
    """Generates best_of completions server-side and returns the best one.

    Must be greater than n when used together.
    """

    echo: bool
    """Echo back the prompt in addition to the completion"""

    frequency_penalty: float
    """Number between -2.0 and 2.0.

    Positive values penalize new tokens based on their existing frequency in the
    text.
    """

    logit_bias: Dict[str, int]
    """Modify the likelihood of specified tokens appearing in the completion.

    Maps tokens to bias values from -100 to 100.
    """

    logprobs: int
    """Include log probabilities of the most likely tokens. Maximum value is 5."""

    max_tokens: int
    """The maximum number of tokens that can be generated in the completion."""

    n: int
    """How many completions to generate for each prompt."""

    presence_penalty: float
    """Number between -2.0 and 2.0.

    Positive values penalize new tokens based on their presence in the text so far.
    """

    seed: int
    """If specified, attempts to generate deterministic samples.

    Determinism is not guaranteed.
    """

    stop: Union[str, List[str]]
    """Up to 4 sequences where the API will stop generating further tokens."""

    stream_options: Dict[str, object]
    """Options for streaming response. Only set this when stream is True."""

    suffix: str
    """The suffix that comes after a completion of inserted text.

    Only supported for gpt-3.5-turbo-instruct.
    """

    temperature: float
    """Sampling temperature between 0 and 2.

    Higher values make output more random, lower more focused.
    """

    top_p: float
    """Alternative to temperature.

    Consider only tokens with top_p probability mass. Range 0-1.
    """

    user: str
    """
    A unique identifier representing your end-user, which can help OpenAI monitor
    and detect abuse.
    """
90
+
91
+
92
class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
    """Variant used when `stream` is omitted or False."""

    stream: Literal[False]
    """Whether to stream back partial progress.

    If set, tokens will be sent as data-only server-sent events.
    """
98
+
99
+
100
class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
    """Variant used when streaming; `stream` must be exactly True."""

    stream: Required[Literal[True]]
    """Whether to stream back partial progress.

    If set, tokens will be sent as data-only server-sent events.
    """
106
+
107
+
108
# Callers pass one of the two variants; the `stream` key selects which applies.
CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
scale_gp/types/file.py ADDED
@@ -0,0 +1,30 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ import builtins
4
+ from typing import Dict, Optional
5
+ from datetime import datetime
6
+ from typing_extensions import Literal
7
+
8
+ from .._models import BaseModel
9
+
10
+ __all__ = ["File"]
11
+
12
+
13
class File(BaseModel):
    """A stored file record returned by the files API."""

    id: str

    created_at: datetime

    created_by_user_id: str

    filename: str

    md5_checksum: str

    mime_type: str

    # File size (NOTE(review): presumably bytes — confirm with API docs).
    size: int

    # Payload discriminator; always "file" when present.
    object: Optional[Literal["file"]] = None

    # `builtins.object` is spelled out because the `object` field above shadows the builtin.
    tags: Optional[Dict[str, builtins.object]] = None
@@ -0,0 +1,13 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Required, TypedDict
6
+
7
+ from .._types import FileTypes
8
+
9
+ __all__ = ["FileCreateParams"]
10
+
11
+
12
class FileCreateParams(TypedDict, total=False):
    """Request body for creating (uploading) a file."""

    # The file content; accepted forms are defined by `FileTypes` in `.._types`.
    file: Required[FileTypes]
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Optional
4
+ from typing_extensions import Literal
5
+
6
+ from .._models import BaseModel
7
+
8
+ __all__ = ["FileDeleteResponse"]
9
+
10
+
11
class FileDeleteResponse(BaseModel):
    """Acknowledgement returned after deleting a file."""

    id: str

    # True when the file was deleted.
    deleted: bool

    # Payload discriminator; always "file" when present.
    object: Optional[Literal["file"]] = None