frogml-core 0.0.113-py3-none-any.whl → 0.0.115-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. frogml_core/__init__.py +1 -1
  2. frogml_core/clients/administration/authentication/client.py +2 -2
  3. frogml_core/clients/batch_job_management/client.py +4 -4
  4. frogml_core/clients/build_orchestrator/build_model_request_getter.py +6 -6
  5. frogml_core/clients/build_orchestrator/client.py +12 -12
  6. frogml_core/clients/build_orchestrator/internal_client.py +10 -10
  7. frogml_core/frogml_client/build_api_helpers/build_api_steps.py +3 -3
  8. frogml_core/inner/build_logic/constants/upload_tag.py +7 -7
  9. frogml_core/inner/build_logic/interface/context_interface.py +1 -1
  10. frogml_core/inner/build_logic/phases/phase_010_fetch_model/fetch_strategy_manager/strategy/strategy.py +4 -4
  11. frogml_core/inner/build_logic/phases/phase_010_fetch_model/set_version_step.py +3 -3
  12. frogml_core/inner/build_logic/phases/phase_020_remote_register_frogml_build/start_remote_build_step.py +3 -3
  13. frogml_core/inner/build_logic/phases/phase_020_remote_register_frogml_build/upload_step.py +11 -9
  14. frogml_core/inner/build_logic/tools/ignore_files.py +3 -3
  15. frogml_core/inner/di_configuration/__init__.py +0 -6
  16. frogml_core/model/adapters/__init__.py +1 -1
  17. frogml_core/model/analytics_logging.py +1 -1
  18. frogml_core/model/tools/adapters/input.py +6 -6
  19. frogml_core/model/tools/adapters/output.py +8 -8
  20. frogml_core/model/tools/run_model_locally.py +2 -2
  21. frogml_core/model/utils/feature_utils.py +1 -1
  22. {frogml_core-0.0.113.dist-info → frogml_core-0.0.115.dist-info}/METADATA +1 -1
  23. {frogml_core-0.0.113.dist-info → frogml_core-0.0.115.dist-info}/RECORD +30 -137
  24. frogml_services_mock/mocks/analytics_api.py +6 -6
  25. frogml_services_mock/mocks/ecosystem_service_api.py +2 -2
  26. frogml_services_mock/mocks/frogml_mocks.py +0 -11
  27. frogml_services_mock/services_mock.py +4 -52
  28. frogml_storage/__init__.py +1 -1
  29. frogml_core/clients/prompt_manager/__init__.py +0 -0
  30. frogml_core/clients/prompt_manager/model_descriptor_mapper.py +0 -196
  31. frogml_core/clients/prompt_manager/prompt_manager_client.py +0 -190
  32. frogml_core/clients/prompt_manager/prompt_proto_mapper.py +0 -264
  33. frogml_core/clients/vector_store/__init__.py +0 -2
  34. frogml_core/clients/vector_store/management_client.py +0 -127
  35. frogml_core/clients/vector_store/serving_client.py +0 -157
  36. frogml_core/clients/workspace_manager/__init__.py +0 -1
  37. frogml_core/clients/workspace_manager/client.py +0 -224
  38. frogml_core/llmops/__init__.py +0 -0
  39. frogml_core/llmops/generation/__init__.py +0 -0
  40. frogml_core/llmops/generation/_steaming.py +0 -78
  41. frogml_core/llmops/generation/base.py +0 -5
  42. frogml_core/llmops/generation/chat/__init__.py +0 -0
  43. frogml_core/llmops/generation/chat/openai/LICENSE.txt +0 -201
  44. frogml_core/llmops/generation/chat/openai/types/__init__.py +0 -0
  45. frogml_core/llmops/generation/chat/openai/types/chat/__init__.py +0 -0
  46. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion.py +0 -88
  47. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_assistant_message_param.py +0 -65
  48. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_chunk.py +0 -153
  49. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_content_part_text_param.py +0 -28
  50. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_function_call_option_param.py +0 -25
  51. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_function_message_param.py +0 -33
  52. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_message.py +0 -56
  53. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_message_param.py +0 -34
  54. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_message_tool_call.py +0 -46
  55. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_message_tool_call_param.py +0 -44
  56. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_named_tool_choice_param.py +0 -32
  57. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_role.py +0 -20
  58. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_system_message_param.py +0 -35
  59. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_token_logprob.py +0 -71
  60. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_tool_choice_option_param.py +0 -28
  61. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_tool_message_param.py +0 -31
  62. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_tool_param.py +0 -29
  63. frogml_core/llmops/generation/chat/openai/types/chat/chat_completion_user_message_param.py +0 -35
  64. frogml_core/llmops/generation/chat/openai/types/chat/completion_create_params.py +0 -279
  65. frogml_core/llmops/generation/chat/openai/types/completion_choice.py +0 -47
  66. frogml_core/llmops/generation/chat/openai/types/completion_create_params.py +0 -209
  67. frogml_core/llmops/generation/chat/openai/types/completion_usage.py +0 -30
  68. frogml_core/llmops/generation/chat/openai/types/model.py +0 -35
  69. frogml_core/llmops/generation/chat/openai/types/shared/__init__.py +0 -3
  70. frogml_core/llmops/generation/chat/openai/types/shared/error_object.py +0 -27
  71. frogml_core/llmops/generation/chat/openai/types/shared/function_definition.py +0 -49
  72. frogml_core/llmops/generation/chat/openai/types/shared/function_parameters.py +0 -20
  73. frogml_core/llmops/generation/chat/openai/types/shared_params/__init__.py +0 -2
  74. frogml_core/llmops/generation/chat/openai/types/shared_params/function_definition.py +0 -49
  75. frogml_core/llmops/generation/chat/openai/types/shared_params/function_parameters.py +0 -22
  76. frogml_core/llmops/generation/streaming.py +0 -26
  77. frogml_core/llmops/model/__init__.py +0 -0
  78. frogml_core/llmops/model/descriptor.py +0 -40
  79. frogml_core/llmops/prompt/__init__.py +0 -0
  80. frogml_core/llmops/prompt/base.py +0 -136
  81. frogml_core/llmops/prompt/chat/__init__.py +0 -0
  82. frogml_core/llmops/prompt/chat/message.py +0 -24
  83. frogml_core/llmops/prompt/chat/template.py +0 -113
  84. frogml_core/llmops/prompt/chat/value.py +0 -10
  85. frogml_core/llmops/prompt/manager.py +0 -138
  86. frogml_core/llmops/prompt/template.py +0 -24
  87. frogml_core/llmops/prompt/value.py +0 -14
  88. frogml_core/llmops/provider/__init__.py +0 -0
  89. frogml_core/llmops/provider/chat.py +0 -44
  90. frogml_core/llmops/provider/openai/__init__.py +0 -0
  91. frogml_core/llmops/provider/openai/client.py +0 -126
  92. frogml_core/llmops/provider/openai/provider.py +0 -93
  93. frogml_core/vector_store/__init__.py +0 -4
  94. frogml_core/vector_store/client.py +0 -151
  95. frogml_core/vector_store/collection.py +0 -429
  96. frogml_core/vector_store/filters.py +0 -359
  97. frogml_core/vector_store/inference_client.py +0 -105
  98. frogml_core/vector_store/rest_helpers.py +0 -81
  99. frogml_core/vector_store/utils/__init__.py +0 -0
  100. frogml_core/vector_store/utils/filter_utils.py +0 -23
  101. frogml_core/vector_store/utils/upsert_utils.py +0 -218
  102. frogml_proto/qwak/prompt/v1/prompt/prompt_manager_service_pb2.py +0 -77
  103. frogml_proto/qwak/prompt/v1/prompt/prompt_manager_service_pb2.pyi +0 -417
  104. frogml_proto/qwak/prompt/v1/prompt/prompt_manager_service_pb2_grpc.py +0 -441
  105. frogml_proto/qwak/prompt/v1/prompt/prompt_pb2.py +0 -69
  106. frogml_proto/qwak/prompt/v1/prompt/prompt_pb2.pyi +0 -415
  107. frogml_proto/qwak/prompt/v1/prompt/prompt_pb2_grpc.py +0 -4
  108. frogml_proto/qwak/vectors/v1/collection/collection_pb2.py +0 -46
  109. frogml_proto/qwak/vectors/v1/collection/collection_pb2.pyi +0 -287
  110. frogml_proto/qwak/vectors/v1/collection/collection_pb2_grpc.py +0 -4
  111. frogml_proto/qwak/vectors/v1/collection/collection_service_pb2.py +0 -60
  112. frogml_proto/qwak/vectors/v1/collection/collection_service_pb2.pyi +0 -258
  113. frogml_proto/qwak/vectors/v1/collection/collection_service_pb2_grpc.py +0 -304
  114. frogml_proto/qwak/vectors/v1/collection/event/collection_event_pb2.py +0 -28
  115. frogml_proto/qwak/vectors/v1/collection/event/collection_event_pb2.pyi +0 -41
  116. frogml_proto/qwak/vectors/v1/collection/event/collection_event_pb2_grpc.py +0 -4
  117. frogml_proto/qwak/vectors/v1/filters_pb2.py +0 -52
  118. frogml_proto/qwak/vectors/v1/filters_pb2.pyi +0 -297
  119. frogml_proto/qwak/vectors/v1/filters_pb2_grpc.py +0 -4
  120. frogml_proto/qwak/vectors/v1/vector_pb2.py +0 -38
  121. frogml_proto/qwak/vectors/v1/vector_pb2.pyi +0 -142
  122. frogml_proto/qwak/vectors/v1/vector_pb2_grpc.py +0 -4
  123. frogml_proto/qwak/vectors/v1/vector_service_pb2.py +0 -53
  124. frogml_proto/qwak/vectors/v1/vector_service_pb2.pyi +0 -243
  125. frogml_proto/qwak/vectors/v1/vector_service_pb2_grpc.py +0 -201
  126. frogml_proto/qwak/workspace/workspace_pb2.py +0 -50
  127. frogml_proto/qwak/workspace/workspace_pb2.pyi +0 -331
  128. frogml_proto/qwak/workspace/workspace_pb2_grpc.py +0 -4
  129. frogml_proto/qwak/workspace/workspace_service_pb2.py +0 -84
  130. frogml_proto/qwak/workspace/workspace_service_pb2.pyi +0 -393
  131. frogml_proto/qwak/workspace/workspace_service_pb2_grpc.py +0 -507
  132. frogml_services_mock/mocks/prompt_manager_service.py +0 -281
  133. frogml_services_mock/mocks/vector_serving_api.py +0 -159
  134. frogml_services_mock/mocks/vectors_management_api.py +0 -97
  135. frogml_services_mock/mocks/workspace_manager_service_mock.py +0 -202
  136. /frogml_core/model/adapters/output_adapters/{qwak_with_default_fallback.py → frogml_with_default_fallback.py} +0 -0
  137. {frogml_core-0.0.113.dist-info → frogml_core-0.0.115.dist-info}/WHEEL +0 -0
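Taken together, the list shows that this release is primarily a removal: the entire `frogml_core.llmops` package (including the vendored OpenAI chat and completion types whose deleted contents appear below), the vector store clients, the prompt manager, the workspace manager, and their matching `frogml_proto` stubs and service mocks are all deleted, with only small touch-ups elsewhere. A quick way to check whether code depending on frogml-core touches any of the removed surface is to probe the import paths; below is a minimal sketch (the module paths are taken from the file list above; the probing helper itself is hypothetical):

```python
import importlib.util

# Package roots removed between 0.0.113 and 0.0.115, per the file list above.
REMOVED_MODULES = [
    "frogml_core.llmops",
    "frogml_core.vector_store",
    "frogml_core.clients.prompt_manager",
    "frogml_core.clients.vector_store",
    "frogml_core.clients.workspace_manager",
]

def still_importable(name: str) -> bool:
    """Return True if the installed frogml-core still ships this module."""
    return importlib.util.find_spec(name) is not None

for name in REMOVED_MODULES:
    print(f"{name}: {'present' if still_importable(name) else 'removed'}")
```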
frogml_core/llmops/generation/chat/openai/types/chat/completion_create_params.py
@@ -1,279 +0,0 @@
- #
- # Copyright 2024 OpenAI
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from __future__ import annotations
-
- from dataclasses import dataclass
- from typing import Dict, Iterable, List, Optional, Union
-
- from typing_extensions import Literal, Required, TypedDict
-
- from ...types import shared_params
- from .chat_completion_function_call_option_param import (
-     ChatCompletionFunctionCallOptionParam,
- )
- from .chat_completion_message_param import ChatCompletionMessageParam
- from .chat_completion_tool_choice_option_param import (
-     ChatCompletionToolChoiceOptionParam,
- )
- from .chat_completion_tool_param import ChatCompletionToolParam
-
- __all__ = [
-     "CompletionCreateParamsBase",
-     "FunctionCall",
-     "Function",
-     "ResponseFormat",
-     "CompletionCreateParamsNonStreaming",
-     "CompletionCreateParamsStreaming",
- ]
-
-
- class CompletionCreateParamsBase(TypedDict, total=False):
-     messages: Required[Iterable[ChatCompletionMessageParam]]
-     """A list of messages comprising the conversation so far.
-
-     [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
-     """
-
-     model: Required[str]
-     """ID of the model to use.
-
-     See the
-     [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
-     table for details on which models work with the Chat API.
-     """
-
-     frequency_penalty: Optional[float]
-     """Number between -2.0 and 2.0.
-
-     Positive values penalize new tokens based on their existing frequency in the
-     text so far, decreasing the model's likelihood to repeat the same line verbatim.
-
-     [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
-     """
-
-     function_call: FunctionCall
-     """Deprecated in favor of `tool_choice`.
-
-     Controls which (if any) function is called by the model. `none` means the model
-     will not call a function and instead generates a message. `auto` means the model
-     can pick between generating a message or calling a function. Specifying a
-     particular function via `{"name": "my_function"}` forces the model to call that
-     function.
-
-     `none` is the default when no functions are present. `auto` is the default if
-     functions are present.
-     """
-
-     functions: Iterable[Function]
-     """Deprecated in favor of `tools`.
-
-     A list of functions the model may generate JSON inputs for.
-     """
-
-     logit_bias: Optional[Dict[str, int]]
-     """Modify the likelihood of specified tokens appearing in the completion.
-
-     Accepts a JSON object that maps tokens (specified by their token ID in the
-     tokenizer) to an associated bias value from -100 to 100. Mathematically, the
-     bias is added to the logits generated by the model prior to sampling. The exact
-     effect will vary per model, but values between -1 and 1 should decrease or
-     increase likelihood of selection; values like -100 or 100 should result in a ban
-     or exclusive selection of the relevant token.
-     """
-
-     logprobs: Optional[bool]
-     """Whether to return log probabilities of the output tokens or not.
-
-     If true, returns the log probabilities of each output token returned in the
-     `content` of `message`.
-     """
-
-     max_tokens: Optional[int]
-     """
-     The maximum number of [tokens](/tokenizer) that can be generated in the chat
-     completion.
-
-     The total length of input tokens and generated tokens is limited by the model's
-     context length.
-     [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
-     for counting tokens.
-     """
-
-     n: Optional[int]
-     """How many chat completion choices to generate for each input message.
-
-     Note that you will be charged based on the number of generated tokens across all
-     of the choices. Keep `n` as `1` to minimize costs.
-     """
-
-     presence_penalty: Optional[float]
-     """Number between -2.0 and 2.0.
-
-     Positive values penalize new tokens based on whether they appear in the text so
-     far, increasing the model's likelihood to talk about new topics.
-
-     [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
-     """
-
-     response_format: ResponseFormat
-     """An object specifying the format that the model must output.
-
-     Compatible with
-     [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
-     all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
-
-     Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
-     message the model generates is valid JSON.
-
-     **Important:** when using JSON mode, you **must** also instruct the model to
-     produce JSON yourself via a system or user message. Without this, the model may
-     generate an unending stream of whitespace until the generation reaches the token
-     limit, resulting in a long-running and seemingly "stuck" request. Also note that
-     the message content may be partially cut off if `finish_reason="length"`, which
-     indicates the generation exceeded `max_tokens` or the conversation exceeded the
-     max context length.
-     """
-
-     seed: Optional[int]
-     """
-     This feature is in Beta. If specified, our system will make a best effort to
-     sample deterministically, such that repeated requests with the same `seed` and
-     parameters should return the same result. Determinism is not guaranteed, and you
-     should refer to the `system_fingerprint` response parameter to monitor changes
-     in the backend.
-     """
-
-     stop: Union[Optional[str], List[str]]
-     """Up to 4 sequences where the API will stop generating further tokens."""
-
-     temperature: Optional[float]
-     """What sampling temperature to use, between 0 and 2.
-
-     Higher values like 0.8 will make the output more random, while lower values like
-     0.2 will make it more focused and deterministic.
-
-     We generally recommend altering this or `top_p` but not both.
-     """
-
-     tool_choice: ChatCompletionToolChoiceOptionParam
-     """
-     Controls which (if any) function is called by the model. `none` means the model
-     will not call a function and instead generates a message. `auto` means the model
-     can pick between generating a message or calling a function. Specifying a
-     particular function via
-     `{"type": "function", "function": {"name": "my_function"}}` forces the model to
-     call that function.
-
-     `none` is the default when no functions are present. `auto` is the default if
-     functions are present.
-     """
-
-     tools: Iterable[ChatCompletionToolParam]
-     """A list of tools the model may call.
-
-     Currently, only functions are supported as a tool. Use this to provide a list of
-     functions the model may generate JSON inputs for. A max of 128 functions are
-     supported.
-     """
-
-     top_logprobs: Optional[int]
-     """
-     An integer between 0 and 20 specifying the number of most likely tokens to
-     return at each token position, each with an associated log probability.
-     `logprobs` must be set to `true` if this parameter is used.
-     """
-
-     top_p: Optional[float]
-     """
-     An alternative to sampling with temperature, called nucleus sampling, where the
-     model considers the results of the tokens with top_p probability mass. So 0.1
-     means only the tokens comprising the top 10% probability mass are considered.
-
-     We generally recommend altering this or `temperature` but not both.
-     """
-
-     user: str
-     """
-     A unique identifier representing your end-user, which can help OpenAI to monitor
-     and detect abuse.
-     [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
-     """
-
-
- FunctionCall = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam]
-
-
- class Function(TypedDict, total=False):
-     name: Required[str]
-     """The name of the function to be called.
-
-     Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-     of 64.
-     """
-
-     description: str
-     """
-     A description of what the function does, used by the model to choose when and
-     how to call the function.
-     """
-
-     parameters: shared_params.FunctionParameters
-     """The parameters the functions accepts, described as a JSON Schema object.
-
-     See the
-     [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
-     for examples, and the
-     [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-     documentation about the format.
-
-     Omitting `parameters` defines a function with an empty parameter list.
-     """
-
-
- class ResponseFormat(TypedDict, total=False):
-     type: Literal["text", "json_object"]
-     """Must be one of `text` or `json_object`."""
-
-
- @dataclass
- class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase):
-     stream: Optional[Literal[False]]
-     """If set, partial message deltas will be sent, like in ChatGPT.
-
-     Tokens will be sent as data-only
-     [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-     as they become available, with the stream terminated by a `data: [DONE]`
-     message.
-     [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
-     """
-
-
- @dataclass
- class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
-     stream: Required[Literal[True]]
-     """If set, partial message deltas will be sent, like in ChatGPT.
-
-     Tokens will be sent as data-only
-     [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-     as they become available, with the stream terminated by a `data: [DONE]`
-     message.
-     [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
-     """
-
-
- CompletionCreateParams = Union[
-     CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming
- ]
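For readers tracing downstream breakage: because `CompletionCreateParamsBase` is a `TypedDict` with `total=False`, callers of the removed module built plain dictionaries in which only `messages` and `model` were required. A minimal sketch of such a call site, using only the field names defined in the hunk above (the values are illustrative):

```python
from typing import Any, Dict

# Shaped like the removed CompletionCreateParamsNonStreaming:
# `messages` and `model` are Required; every other key is optional.
params: Dict[str, Any] = {
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "Summarize this build log."}],
    "temperature": 0.2,  # tune this or top_p, not both
    "max_tokens": 256,   # bounded by the model's context length
    "stream": False,     # the non-streaming variant pins stream to False
}
```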
frogml_core/llmops/generation/chat/openai/types/completion_choice.py
@@ -1,47 +0,0 @@
- #
- # Copyright 2024 OpenAI
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from dataclasses import dataclass
- from typing import Dict, List, Optional
-
- from typing_extensions import Literal
-
- __all__ = ["CompletionChoice", "Logprobs"]
-
-
- @dataclass
- class Logprobs:
-     text_offset: Optional[List[int]] = None
-
-     token_logprobs: Optional[List[float]] = None
-
-     tokens: Optional[List[str]] = None
-
-     top_logprobs: Optional[List[Dict[str, float]]] = None
-
-
- @dataclass
- class CompletionChoice:
-     finish_reason: Literal["stop", "length", "content_filter"]
-     """The reason the model stopped generating tokens.
-
-     This will be `stop` if the model hit a natural stop point or a provided stop
-     sequence, `length` if the maximum number of tokens specified in the request was
-     reached, or `content_filter` if content was omitted due to a flag from our
-     content filters.
-     """
-     index: int
-     text: str
-     logprobs: Optional[Logprobs] = None
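Both removed types are plain dataclasses, so code could construct them directly after parsing a response. A short sketch assuming `CompletionChoice` from the hunk above is importable (the field values are illustrative):

```python
choice = CompletionChoice(
    finish_reason="length",  # generation stopped because max_tokens was reached
    index=0,
    text="The build completed succ",
    logprobs=None,           # only populated when log probabilities were requested
)

if choice.finish_reason == "length":
    print("Output was truncated; consider raising max_tokens.")
```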
frogml_core/llmops/generation/chat/openai/types/completion_create_params.py
@@ -1,209 +0,0 @@
- #
- # Copyright 2024 OpenAI
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from __future__ import annotations
-
- from dataclasses import dataclass
- from typing import Dict, Iterable, List, Optional, Union
-
- from typing_extensions import Literal, Required, TypedDict
-
- __all__ = [
-     "CompletionCreateParamsBase",
-     "CompletionCreateParamsNonStreaming",
-     "CompletionCreateParamsStreaming",
- ]
-
-
- class CompletionCreateParamsBase(TypedDict, total=False):
-     model: Required[
-         Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]
-     ]
-     """ID of the model to use.
-
-     You can use the
-     [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-     see all of your available models, or see our
-     [Model overview](https://platform.openai.com/docs/models/overview) for
-     descriptions of them.
-     """
-
-     prompt: Required[
-         Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]
-     ]
-     """
-     The prompt(s) to generate completions for, encoded as a string, array of
-     strings, array of tokens, or array of token arrays.
-
-     Note that <|endoftext|> is the document separator that the model sees during
-     training, so if a prompt is not specified the model will generate as if from the
-     beginning of a new document.
-     """
-
-     best_of: Optional[int]
-     """
-     Generates `best_of` completions server-side and returns the "best" (the one with
-     the highest log probability per token). Results cannot be streamed.
-
-     When used with `n`, `best_of` controls the number of candidate completions and
-     `n` specifies how many to return – `best_of` must be greater than `n`.
-
-     **Note:** Because this parameter generates many completions, it can quickly
-     consume your token quota. Use carefully and ensure that you have reasonable
-     settings for `max_tokens` and `stop`.
-     """
-
-     echo: Optional[bool]
-     """Echo back the prompt in addition to the completion"""
-
-     frequency_penalty: Optional[float]
-     """Number between -2.0 and 2.0.
-
-     Positive values penalize new tokens based on their existing frequency in the
-     text so far, decreasing the model's likelihood to repeat the same line verbatim.
-
-     [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
-     """
-
-     logit_bias: Optional[Dict[str, int]]
-     """Modify the likelihood of specified tokens appearing in the completion.
-
-     Accepts a JSON object that maps tokens (specified by their token ID in the GPT
-     tokenizer) to an associated bias value from -100 to 100. You can use this
-     [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
-     Mathematically, the bias is added to the logits generated by the model prior to
-     sampling. The exact effect will vary per model, but values between -1 and 1
-     should decrease or increase likelihood of selection; values like -100 or 100
-     should result in a ban or exclusive selection of the relevant token.
-
-     As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
-     from being generated.
-     """
-
-     logprobs: Optional[int]
-     """
-     Include the log probabilities on the `logprobs` most likely output tokens, as
-     well the chosen tokens. For example, if `logprobs` is 5, the API will return a
-     list of the 5 most likely tokens. The API will always return the `logprob` of
-     the sampled token, so there may be up to `logprobs+1` elements in the response.
-
-     The maximum value for `logprobs` is 5.
-     """
-
-     max_tokens: Optional[int]
-     """
-     The maximum number of [tokens](/tokenizer) that can be generated in the
-     completion.
-
-     The token count of your prompt plus `max_tokens` cannot exceed the model's
-     context length.
-     [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
-     for counting tokens.
-     """
-
-     n: Optional[int]
-     """How many completions to generate for each prompt.
-
-     **Note:** Because this parameter generates many completions, it can quickly
-     consume your token quota. Use carefully and ensure that you have reasonable
-     settings for `max_tokens` and `stop`.
-     """
-
-     presence_penalty: Optional[float]
-     """Number between -2.0 and 2.0.
-
-     Positive values penalize new tokens based on whether they appear in the text so
-     far, increasing the model's likelihood to talk about new topics.
-
-     [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
-     """
-
-     seed: Optional[int]
-     """
-     If specified, our system will make a best effort to sample deterministically,
-     such that repeated requests with the same `seed` and parameters should return
-     the same result.
-
-     Determinism is not guaranteed, and you should refer to the `system_fingerprint`
-     response parameter to monitor changes in the backend.
-     """
-
-     stop: Union[Optional[str], List[str], None]
-     """Up to 4 sequences where the API will stop generating further tokens.
-
-     The returned text will not contain the stop sequence.
-     """
-
-     suffix: Optional[str]
-     """The suffix that comes after a completion of inserted text.
-
-     This parameter is only supported for `gpt-3.5-turbo-instruct`.
-     """
-
-     temperature: Optional[float]
-     """What sampling temperature to use, between 0 and 2.
-
-     Higher values like 0.8 will make the output more random, while lower values like
-     0.2 will make it more focused and deterministic.
-
-     We generally recommend altering this or `top_p` but not both.
-     """
-
-     top_p: Optional[float]
-     """
-     An alternative to sampling with temperature, called nucleus sampling, where the
-     model considers the results of the tokens with top_p probability mass. So 0.1
-     means only the tokens comprising the top 10% probability mass are considered.
-
-     We generally recommend altering this or `temperature` but not both.
-     """
-
-     user: str
-     """
-     A unique identifier representing your end-user, which can help OpenAI to monitor
-     and detect abuse.
-     [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
-     """
-
-
- @dataclass
- class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase):
-     stream: Optional[Literal[False]]
-     """Whether to stream back partial progress.
-
-     If set, tokens will be sent as data-only
-     [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-     as they become available, with the stream terminated by a `data: [DONE]`
-     message.
-     [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
-     """
-
-
- @dataclass
- class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
-     stream: Required[Literal[True]]
-     """Whether to stream back partial progress.
-
-     If set, tokens will be sent as data-only
-     [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
-     as they become available, with the stream terminated by a `data: [DONE]`
-     message.
-     [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
-     """
-
-
- CompletionCreateParams = Union[
-     CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming
- ]
frogml_core/llmops/generation/chat/openai/types/completion_usage.py
@@ -1,30 +0,0 @@
- #
- # Copyright 2024 OpenAI
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- __all__ = ["CompletionUsage"]
-
- from dataclasses import dataclass
-
-
- @dataclass
- class CompletionUsage:
-     completion_tokens: int
-     """Number of tokens in the generated completion."""
-
-     prompt_tokens: int
-     """Number of tokens in the prompt."""
-
-     total_tokens: int
-     """Total number of tokens used in the request (prompt + completion)."""
frogml_core/llmops/generation/chat/openai/types/model.py
@@ -1,35 +0,0 @@
- #
- # Copyright 2024 OpenAI
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from dataclasses import dataclass
-
- from typing_extensions import Literal
-
- __all__ = ["Model"]
-
-
- @dataclass
- class Model:
-     id: str
-     """The model identifier, which can be referenced in the API endpoints."""
-
-     created: int
-     """The Unix timestamp (in seconds) when the model was created."""
-
-     object: Literal["model"]
-     """The object type, which is always "model"."""
-
-     owned_by: str
-     """The organization that owns the model."""
frogml_core/llmops/generation/chat/openai/types/shared/__init__.py
@@ -1,3 +0,0 @@
- from .error_object import ErrorObject as ErrorObject
- from .function_definition import FunctionDefinition as FunctionDefinition
- from .function_parameters import FunctionParameters as FunctionParameters
frogml_core/llmops/generation/chat/openai/types/shared/error_object.py
@@ -1,27 +0,0 @@
- #
- # Copyright 2024 OpenAI
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from dataclasses import dataclass
- from typing import Optional
-
- __all__ = ["ErrorObject"]
-
-
- @dataclass
- class ErrorObject:
-     type: str
-     message: str
-     param: Optional[str] = None
-     code: Optional[str] = None
frogml_core/llmops/generation/chat/openai/types/shared/function_definition.py
@@ -1,49 +0,0 @@
- #
- # Copyright 2024 OpenAI
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from dataclasses import dataclass
- from typing import Optional
-
- from .function_parameters import FunctionParameters
-
- __all__ = ["FunctionDefinition"]
-
-
- @dataclass
- class FunctionDefinition:
-     name: str
-     """The name of the function to be called.
-
-     Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-     of 64.
-     """
-
-     description: Optional[str] = None
-     """
-     A description of what the function does, used by the model to choose when and
-     how to call the function.
-     """
-
-     parameters: Optional[FunctionParameters] = None
-     """The parameters the functions accepts, described as a JSON Schema object.
-
-     See the
-     [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
-     for examples, and the
-     [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-     documentation about the format.
-
-     Omitting `parameters` defines a function with an empty parameter list.
-     """