scale-gp-beta 0.1.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. scale_gp/__init__.py +96 -0
  2. scale_gp/_base_client.py +2058 -0
  3. scale_gp/_client.py +544 -0
  4. scale_gp/_compat.py +219 -0
  5. scale_gp/_constants.py +14 -0
  6. scale_gp/_exceptions.py +108 -0
  7. scale_gp/_files.py +123 -0
  8. scale_gp/_models.py +801 -0
  9. scale_gp/_qs.py +150 -0
  10. scale_gp/_resource.py +43 -0
  11. scale_gp/_response.py +830 -0
  12. scale_gp/_streaming.py +333 -0
  13. scale_gp/_types.py +217 -0
  14. scale_gp/_utils/__init__.py +57 -0
  15. scale_gp/_utils/_logs.py +25 -0
  16. scale_gp/_utils/_proxy.py +62 -0
  17. scale_gp/_utils/_reflection.py +42 -0
  18. scale_gp/_utils/_streams.py +12 -0
  19. scale_gp/_utils/_sync.py +86 -0
  20. scale_gp/_utils/_transform.py +402 -0
  21. scale_gp/_utils/_typing.py +149 -0
  22. scale_gp/_utils/_utils.py +414 -0
  23. scale_gp/_version.py +4 -0
  24. scale_gp/lib/.keep +4 -0
  25. scale_gp/pagination.py +83 -0
  26. scale_gp/py.typed +0 -0
  27. scale_gp/resources/__init__.py +103 -0
  28. scale_gp/resources/chat/__init__.py +33 -0
  29. scale_gp/resources/chat/chat.py +102 -0
  30. scale_gp/resources/chat/completions.py +1054 -0
  31. scale_gp/resources/completions.py +765 -0
  32. scale_gp/resources/files/__init__.py +33 -0
  33. scale_gp/resources/files/content.py +162 -0
  34. scale_gp/resources/files/files.py +558 -0
  35. scale_gp/resources/inference.py +210 -0
  36. scale_gp/resources/models.py +834 -0
  37. scale_gp/resources/question_sets.py +680 -0
  38. scale_gp/resources/questions.py +396 -0
  39. scale_gp/types/__init__.py +33 -0
  40. scale_gp/types/chat/__init__.py +8 -0
  41. scale_gp/types/chat/chat_completion.py +257 -0
  42. scale_gp/types/chat/chat_completion_chunk.py +240 -0
  43. scale_gp/types/chat/completion_create_params.py +156 -0
  44. scale_gp/types/chat/completion_create_response.py +11 -0
  45. scale_gp/types/completion.py +116 -0
  46. scale_gp/types/completion_create_params.py +108 -0
  47. scale_gp/types/file.py +30 -0
  48. scale_gp/types/file_create_params.py +13 -0
  49. scale_gp/types/file_delete_response.py +16 -0
  50. scale_gp/types/file_list.py +27 -0
  51. scale_gp/types/file_list_params.py +16 -0
  52. scale_gp/types/file_update_params.py +12 -0
  53. scale_gp/types/files/__init__.py +3 -0
  54. scale_gp/types/inference_create_params.py +25 -0
  55. scale_gp/types/inference_create_response.py +11 -0
  56. scale_gp/types/inference_model.py +167 -0
  57. scale_gp/types/inference_model_list.py +27 -0
  58. scale_gp/types/inference_response.py +14 -0
  59. scale_gp/types/inference_response_chunk.py +14 -0
  60. scale_gp/types/model_create_params.py +165 -0
  61. scale_gp/types/model_delete_response.py +16 -0
  62. scale_gp/types/model_list_params.py +20 -0
  63. scale_gp/types/model_update_params.py +161 -0
  64. scale_gp/types/question.py +68 -0
  65. scale_gp/types/question_create_params.py +59 -0
  66. scale_gp/types/question_list.py +27 -0
  67. scale_gp/types/question_list_params.py +16 -0
  68. scale_gp/types/question_set.py +106 -0
  69. scale_gp/types/question_set_create_params.py +115 -0
  70. scale_gp/types/question_set_delete_response.py +16 -0
  71. scale_gp/types/question_set_list.py +27 -0
  72. scale_gp/types/question_set_list_params.py +20 -0
  73. scale_gp/types/question_set_retrieve_params.py +12 -0
  74. scale_gp/types/question_set_update_params.py +23 -0
  75. scale_gp_beta-0.1.0a2.dist-info/METADATA +440 -0
  76. scale_gp_beta-0.1.0a2.dist-info/RECORD +78 -0
  77. scale_gp_beta-0.1.0a2.dist-info/WHEEL +4 -0
  78. scale_gp_beta-0.1.0a2.dist-info/licenses/LICENSE +201 -0
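
The single file diff shown below appears to correspond to scale_gp/resources/completions.py (item 31, +765 lines); that is the only file in the list with 765 added lines, and the code defines the sync and async completions resources. A hedged usage sketch for this resource follows the diff.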
@@ -0,0 +1,765 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ..types import completion_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import (
+    required_args,
+    maybe_transform,
+    async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from .._streaming import Stream, AsyncStream
+from .._base_client import make_request_options
+from ..types.completion import Completion
+
+__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
+
+
+class CompletionsResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> CompletionsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/scaleapi/sgp-python-beta#accessing-raw-response-data-eg-headers
+        """
+        return CompletionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/scaleapi/sgp-python-beta#with_streaming_response
+        """
+        return CompletionsResourceWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        *,
+        model: str,
+        prompt: Union[str, List[str]],
+        best_of: int | NotGiven = NOT_GIVEN,
+        echo: bool | NotGiven = NOT_GIVEN,
+        frequency_penalty: float | NotGiven = NOT_GIVEN,
+        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
+        logprobs: int | NotGiven = NOT_GIVEN,
+        max_tokens: int | NotGiven = NOT_GIVEN,
+        n: int | NotGiven = NOT_GIVEN,
+        presence_penalty: float | NotGiven = NOT_GIVEN,
+        seed: int | NotGiven = NOT_GIVEN,
+        stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
+        stream: Literal[False] | NotGiven = NOT_GIVEN,
+        stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
+        suffix: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion:
+        """
+        Completions
+
+        Args:
+          model: model specified as `model_vendor/model`, for example `openai/gpt-4o`
+
+          prompt: The prompt to generate completions for, encoded as a string
+
+          best_of: Generates best_of completions server-side and returns the best one. Must be
+              greater than n when used together.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion. Maps
+              tokens to bias values from -100 to 100.
+
+          logprobs: Include log probabilities of the most likely tokens. Maximum value is 5.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+          n: How many completions to generate for each prompt.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              presence in the text so far.
+
+          seed: If specified, attempts to generate deterministic samples. Determinism is not
+              guaranteed.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only server-sent events.
+
+          stream_options: Options for streaming response. Only set this when stream is True.
+
+          suffix: The suffix that comes after a completion of inserted text. Only supported for
+              gpt-3.5-turbo-instruct.
+
+          temperature: Sampling temperature between 0 and 2. Higher values make output more random,
+              lower more focused.
+
+          top_p: Alternative to temperature. Consider only tokens with top_p probability mass.
+              Range 0-1.
+
+          user: A unique identifier representing your end-user, which can help OpenAI monitor
+              and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        *,
+        model: str,
+        prompt: Union[str, List[str]],
+        stream: Literal[True],
+        best_of: int | NotGiven = NOT_GIVEN,
+        echo: bool | NotGiven = NOT_GIVEN,
+        frequency_penalty: float | NotGiven = NOT_GIVEN,
+        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
+        logprobs: int | NotGiven = NOT_GIVEN,
+        max_tokens: int | NotGiven = NOT_GIVEN,
+        n: int | NotGiven = NOT_GIVEN,
+        presence_penalty: float | NotGiven = NOT_GIVEN,
+        seed: int | NotGiven = NOT_GIVEN,
+        stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
+        stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
+        suffix: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[Completion]:
+        """
+        Completions
+
+        Args:
+          model: model specified as `model_vendor/model`, for example `openai/gpt-4o`
+
+          prompt: The prompt to generate completions for, encoded as a string
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only server-sent events.
+
+          best_of: Generates best_of completions server-side and returns the best one. Must be
+              greater than n when used together.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion. Maps
+              tokens to bias values from -100 to 100.
+
+          logprobs: Include log probabilities of the most likely tokens. Maximum value is 5.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+          n: How many completions to generate for each prompt.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              presence in the text so far.
+
+          seed: If specified, attempts to generate deterministic samples. Determinism is not
+              guaranteed.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens.
+
+          stream_options: Options for streaming response. Only set this when stream is True.
+
+          suffix: The suffix that comes after a completion of inserted text. Only supported for
+              gpt-3.5-turbo-instruct.
+
+          temperature: Sampling temperature between 0 and 2. Higher values make output more random,
+              lower more focused.
+
+          top_p: Alternative to temperature. Consider only tokens with top_p probability mass.
+              Range 0-1.
+
+          user: A unique identifier representing your end-user, which can help OpenAI monitor
+              and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        *,
+        model: str,
+        prompt: Union[str, List[str]],
+        stream: bool,
+        best_of: int | NotGiven = NOT_GIVEN,
+        echo: bool | NotGiven = NOT_GIVEN,
+        frequency_penalty: float | NotGiven = NOT_GIVEN,
+        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
+        logprobs: int | NotGiven = NOT_GIVEN,
+        max_tokens: int | NotGiven = NOT_GIVEN,
+        n: int | NotGiven = NOT_GIVEN,
+        presence_penalty: float | NotGiven = NOT_GIVEN,
+        seed: int | NotGiven = NOT_GIVEN,
+        stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
+        stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
+        suffix: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion | Stream[Completion]:
+        """
+        Completions
+
+        Args:
+          model: model specified as `model_vendor/model`, for example `openai/gpt-4o`
+
+          prompt: The prompt to generate completions for, encoded as a string
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only server-sent events.
+
+          best_of: Generates best_of completions server-side and returns the best one. Must be
+              greater than n when used together.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion. Maps
+              tokens to bias values from -100 to 100.
+
+          logprobs: Include log probabilities of the most likely tokens. Maximum value is 5.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+          n: How many completions to generate for each prompt.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              presence in the text so far.
+
+          seed: If specified, attempts to generate deterministic samples. Determinism is not
+              guaranteed.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens.
+
+          stream_options: Options for streaming response. Only set this when stream is True.
+
+          suffix: The suffix that comes after a completion of inserted text. Only supported for
+              gpt-3.5-turbo-instruct.
+
+          temperature: Sampling temperature between 0 and 2. Higher values make output more random,
+              lower more focused.
+
+          top_p: Alternative to temperature. Consider only tokens with top_p probability mass.
+              Range 0-1.
+
+          user: A unique identifier representing your end-user, which can help OpenAI monitor
+              and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
+    def create(
+        self,
+        *,
+        model: str,
+        prompt: Union[str, List[str]],
+        best_of: int | NotGiven = NOT_GIVEN,
+        echo: bool | NotGiven = NOT_GIVEN,
+        frequency_penalty: float | NotGiven = NOT_GIVEN,
+        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
+        logprobs: int | NotGiven = NOT_GIVEN,
+        max_tokens: int | NotGiven = NOT_GIVEN,
+        n: int | NotGiven = NOT_GIVEN,
+        presence_penalty: float | NotGiven = NOT_GIVEN,
+        seed: int | NotGiven = NOT_GIVEN,
+        stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
+        stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
+        suffix: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion | Stream[Completion]:
+        return self._post(
+            "/v5/completions",
+            body=maybe_transform(
+                {
+                    "model": model,
+                    "prompt": prompt,
+                    "best_of": best_of,
+                    "echo": echo,
+                    "frequency_penalty": frequency_penalty,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_tokens": max_tokens,
+                    "n": n,
+                    "presence_penalty": presence_penalty,
+                    "seed": seed,
+                    "stop": stop,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "suffix": suffix,
+                    "temperature": temperature,
+                    "top_p": top_p,
+                    "user": user,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Completion,
+            stream=stream or False,
+            stream_cls=Stream[Completion],
+        )
+
+
+class AsyncCompletionsResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/scaleapi/sgp-python-beta#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCompletionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/scaleapi/sgp-python-beta#with_streaming_response
+        """
+        return AsyncCompletionsResourceWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        *,
+        model: str,
+        prompt: Union[str, List[str]],
+        best_of: int | NotGiven = NOT_GIVEN,
+        echo: bool | NotGiven = NOT_GIVEN,
+        frequency_penalty: float | NotGiven = NOT_GIVEN,
+        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
+        logprobs: int | NotGiven = NOT_GIVEN,
+        max_tokens: int | NotGiven = NOT_GIVEN,
+        n: int | NotGiven = NOT_GIVEN,
+        presence_penalty: float | NotGiven = NOT_GIVEN,
+        seed: int | NotGiven = NOT_GIVEN,
+        stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
+        stream: Literal[False] | NotGiven = NOT_GIVEN,
+        stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
+        suffix: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion:
+        """
+        Completions
+
+        Args:
+          model: model specified as `model_vendor/model`, for example `openai/gpt-4o`
+
+          prompt: The prompt to generate completions for, encoded as a string
+
+          best_of: Generates best_of completions server-side and returns the best one. Must be
+              greater than n when used together.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion. Maps
+              tokens to bias values from -100 to 100.
+
+          logprobs: Include log probabilities of the most likely tokens. Maximum value is 5.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+          n: How many completions to generate for each prompt.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              presence in the text so far.
+
+          seed: If specified, attempts to generate deterministic samples. Determinism is not
+              guaranteed.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only server-sent events.
+
+          stream_options: Options for streaming response. Only set this when stream is True.
+
+          suffix: The suffix that comes after a completion of inserted text. Only supported for
+              gpt-3.5-turbo-instruct.
+
+          temperature: Sampling temperature between 0 and 2. Higher values make output more random,
+              lower more focused.
+
+          top_p: Alternative to temperature. Consider only tokens with top_p probability mass.
+              Range 0-1.
+
+          user: A unique identifier representing your end-user, which can help OpenAI monitor
+              and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        model: str,
+        prompt: Union[str, List[str]],
+        stream: Literal[True],
+        best_of: int | NotGiven = NOT_GIVEN,
+        echo: bool | NotGiven = NOT_GIVEN,
+        frequency_penalty: float | NotGiven = NOT_GIVEN,
+        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
+        logprobs: int | NotGiven = NOT_GIVEN,
+        max_tokens: int | NotGiven = NOT_GIVEN,
+        n: int | NotGiven = NOT_GIVEN,
+        presence_penalty: float | NotGiven = NOT_GIVEN,
+        seed: int | NotGiven = NOT_GIVEN,
+        stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
+        stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
+        suffix: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[Completion]:
+        """
+        Completions
+
+        Args:
+          model: model specified as `model_vendor/model`, for example `openai/gpt-4o`
+
+          prompt: The prompt to generate completions for, encoded as a string
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only server-sent events.
+
+          best_of: Generates best_of completions server-side and returns the best one. Must be
+              greater than n when used together.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion. Maps
+              tokens to bias values from -100 to 100.
+
+          logprobs: Include log probabilities of the most likely tokens. Maximum value is 5.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+          n: How many completions to generate for each prompt.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              presence in the text so far.
+
+          seed: If specified, attempts to generate deterministic samples. Determinism is not
+              guaranteed.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens.
+
+          stream_options: Options for streaming response. Only set this when stream is True.
+
+          suffix: The suffix that comes after a completion of inserted text. Only supported for
+              gpt-3.5-turbo-instruct.
+
+          temperature: Sampling temperature between 0 and 2. Higher values make output more random,
+              lower more focused.
+
+          top_p: Alternative to temperature. Consider only tokens with top_p probability mass.
+              Range 0-1.
+
+          user: A unique identifier representing your end-user, which can help OpenAI monitor
+              and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        model: str,
+        prompt: Union[str, List[str]],
+        stream: bool,
+        best_of: int | NotGiven = NOT_GIVEN,
+        echo: bool | NotGiven = NOT_GIVEN,
+        frequency_penalty: float | NotGiven = NOT_GIVEN,
+        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
+        logprobs: int | NotGiven = NOT_GIVEN,
+        max_tokens: int | NotGiven = NOT_GIVEN,
+        n: int | NotGiven = NOT_GIVEN,
+        presence_penalty: float | NotGiven = NOT_GIVEN,
+        seed: int | NotGiven = NOT_GIVEN,
+        stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
+        stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
+        suffix: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion | AsyncStream[Completion]:
+        """
+        Completions
+
+        Args:
+          model: model specified as `model_vendor/model`, for example `openai/gpt-4o`
+
+          prompt: The prompt to generate completions for, encoded as a string
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only server-sent events.
+
+          best_of: Generates best_of completions server-side and returns the best one. Must be
+              greater than n when used together.
+
+          echo: Echo back the prompt in addition to the completion
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion. Maps
+              tokens to bias values from -100 to 100.
+
+          logprobs: Include log probabilities of the most likely tokens. Maximum value is 5.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+          n: How many completions to generate for each prompt.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              presence in the text so far.
+
+          seed: If specified, attempts to generate deterministic samples. Determinism is not
+              guaranteed.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens.
+
+          stream_options: Options for streaming response. Only set this when stream is True.
+
+          suffix: The suffix that comes after a completion of inserted text. Only supported for
+              gpt-3.5-turbo-instruct.
+
+          temperature: Sampling temperature between 0 and 2. Higher values make output more random,
+              lower more focused.
+
+          top_p: Alternative to temperature. Consider only tokens with top_p probability mass.
+              Range 0-1.
+
+          user: A unique identifier representing your end-user, which can help OpenAI monitor
+              and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
+    async def create(
+        self,
+        *,
+        model: str,
+        prompt: Union[str, List[str]],
+        best_of: int | NotGiven = NOT_GIVEN,
+        echo: bool | NotGiven = NOT_GIVEN,
+        frequency_penalty: float | NotGiven = NOT_GIVEN,
+        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
+        logprobs: int | NotGiven = NOT_GIVEN,
+        max_tokens: int | NotGiven = NOT_GIVEN,
+        n: int | NotGiven = NOT_GIVEN,
+        presence_penalty: float | NotGiven = NOT_GIVEN,
+        seed: int | NotGiven = NOT_GIVEN,
+        stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
+        stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
+        suffix: str | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        top_p: float | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Completion | AsyncStream[Completion]:
+        return await self._post(
+            "/v5/completions",
+            body=await async_maybe_transform(
+                {
+                    "model": model,
+                    "prompt": prompt,
+                    "best_of": best_of,
+                    "echo": echo,
+                    "frequency_penalty": frequency_penalty,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_tokens": max_tokens,
+                    "n": n,
+                    "presence_penalty": presence_penalty,
+                    "seed": seed,
+                    "stop": stop,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "suffix": suffix,
+                    "temperature": temperature,
+                    "top_p": top_p,
+                    "user": user,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Completion,
+            stream=stream or False,
+            stream_cls=AsyncStream[Completion],
+        )
+
+
+class CompletionsResourceWithRawResponse:
+    def __init__(self, completions: CompletionsResource) -> None:
+        self._completions = completions
+
+        self.create = to_raw_response_wrapper(
+            completions.create,
+        )
+
+
+class AsyncCompletionsResourceWithRawResponse:
+    def __init__(self, completions: AsyncCompletionsResource) -> None:
+        self._completions = completions
+
+        self.create = async_to_raw_response_wrapper(
+            completions.create,
+        )
+
+
+class CompletionsResourceWithStreamingResponse:
+    def __init__(self, completions: CompletionsResource) -> None:
+        self._completions = completions
+
+        self.create = to_streamed_response_wrapper(
+            completions.create,
+        )
+
+
+class AsyncCompletionsResourceWithStreamingResponse:
+    def __init__(self, completions: AsyncCompletionsResource) -> None:
+        self._completions = completions
+
+        self.create = async_to_streamed_response_wrapper(
+            completions.create,
+        )
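
For orientation, below is a minimal usage sketch of the resource defined in this diff. The client class name (SGPClient) and its constructor arguments are assumptions based on the usual layout of Stainless-generated SDKs and are not confirmed by this diff; check scale_gp/_client.py or the package README for the real name. The create() signatures, the stream=True overload, and with_raw_response come from the code above.

    # Hypothetical usage sketch -- SGPClient and api_key handling are assumptions,
    # not confirmed by this diff.
    from scale_gp import SGPClient  # assumed client export

    client = SGPClient(api_key="...")

    # Non-streaming call: returns a Completion (scale_gp/types/completion.py).
    completion = client.completions.create(
        model="openai/gpt-4o",
        prompt="Say hello",
        max_tokens=16,
    )

    # Streaming call: stream=True selects the Stream[Completion] overload,
    # so the result is iterated chunk by chunk.
    stream = client.completions.create(
        model="openai/gpt-4o",
        prompt="Say hello",
        stream=True,
    )
    for chunk in stream:
        print(chunk)

    # Raw response access, per the with_raw_response docstring; parse() is the
    # usual Stainless accessor for the typed body (assumption).
    raw = client.completions.with_raw_response.create(
        model="openai/gpt-4o",
        prompt="Say hello",
    )
    completion = raw.parse()

An AsyncCompletionsResource with the same surface is available through the async client variant, with awaited calls and AsyncStream iteration.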