c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python 0.1.0a7__py3-none-any.whl → 0.1.0a9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a7.dist-info → c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a9.dist-info}/METADATA +1 -1
- {c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a7.dist-info → c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a9.dist-info}/RECORD +15 -13
- gradientai/_client.py +4 -0
- gradientai/_streaming.py +40 -3
- gradientai/_version.py +1 -1
- gradientai/resources/agents/chat/completions.py +530 -5
- gradientai/resources/chat/completions.py +532 -5
- gradientai/types/agents/chat/__init__.py +1 -0
- gradientai/types/agents/chat/agent_chat_completion_chunk.py +93 -0
- gradientai/types/agents/chat/completion_create_params.py +23 -8
- gradientai/types/chat/__init__.py +1 -0
- gradientai/types/chat/chat_completion_chunk.py +93 -0
- gradientai/types/chat/completion_create_params.py +23 -8
- {c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a7.dist-info → c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a9.dist-info}/WHEEL +0 -0
- {c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a7.dist-info → c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a9.dist-info}/licenses/LICENSE +0 -0
gradientai/types/agents/chat/agent_chat_completion_chunk.py (new file)
@@ -0,0 +1,93 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = ["AgentChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"]
+
+
+class ChoiceDelta(BaseModel):
+    content: Optional[str] = None
+    """The contents of the chunk message."""
+
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
+    role: Optional[Literal["developer", "user", "assistant"]] = None
+    """The role of the author of this message."""
+
+
+class ChoiceLogprobs(BaseModel):
+    content: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message content tokens with log probability information."""
+
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+    delta: ChoiceDelta
+    """A chat completion delta generated by streamed model responses."""
+
+    finish_reason: Optional[Literal["stop", "length"]] = None
+    """The reason the model stopped generating tokens.
+
+    This will be `stop` if the model hit a natural stop point or a provided stop
+    sequence, or `length` if the maximum number of tokens specified in the request
+    was reached
+    """
+
+    index: int
+    """The index of the choice in the list of choices."""
+
+    logprobs: Optional[ChoiceLogprobs] = None
+    """Log probability information for the choice."""
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+    """Number of tokens in the generated completion."""
+
+    prompt_tokens: int
+    """Number of tokens in the prompt."""
+
+    total_tokens: int
+    """Total number of tokens used in the request (prompt + completion)."""
+
+
+class AgentChatCompletionChunk(BaseModel):
+    id: str
+    """A unique identifier for the chat completion. Each chunk has the same ID."""
+
+    choices: List[Choice]
+    """A list of chat completion choices.
+
+    Can contain more than one elements if `n` is greater than 1. Can also be empty
+    for the last chunk if you set `stream_options: {"include_usage": true}`.
+    """
+
+    created: int
+    """The Unix timestamp (in seconds) of when the chat completion was created.
+
+    Each chunk has the same timestamp.
+    """
+
+    model: str
+    """The model to generate the completion."""
+
+    object: Literal["chat.completion.chunk"]
+    """The object type, which is always `chat.completion.chunk`."""
+
+    usage: Optional[Usage] = None
+    """
+    An optional field that will only be present when you set
+    `stream_options: {"include_usage": true}` in your request. When present, it
+    contains a null value **except for the last chunk** which contains the token
+    usage statistics for the entire request.
+
+    **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+    final usage chunk which contains the total token usage for the request.
+    """
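The model above is what streamed agent chat completion responses decode into. As a rough, hedged illustration of how its fields fit together (this sketch is not code from the package), the snippet below accumulates the streamed text and reads the optional trailing usage chunk; how the `stream` iterable is obtained, for example from a `create(..., stream=True)` call, is assumed rather than shown in this diff.

```python
# Hedged sketch: accumulating streamed text from AgentChatCompletionChunk objects.
# The `stream` iterable is assumed; this diff does not show how it is produced.
from typing import Iterable, List

from gradientai.types.agents.chat.agent_chat_completion_chunk import AgentChatCompletionChunk


def collect_text(stream: Iterable[AgentChatCompletionChunk]) -> str:
    parts: List[str] = []
    for chunk in stream:
        # `choices` can be empty on the final chunk when
        # stream_options={"include_usage": True} was requested.
        for choice in chunk.choices:
            if choice.delta.content is not None:
                parts.append(choice.delta.content)
        # `usage` is only populated on that final usage chunk.
        if chunk.usage is not None:
            print(
                f"tokens: prompt={chunk.usage.prompt_tokens}, "
                f"completion={chunk.usage.completion_tokens}, "
                f"total={chunk.usage.total_tokens}"
            )
    return "".join(parts)
```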
gradientai/types/agents/chat/completion_create_params.py
@@ -6,17 +6,19 @@ from typing import Dict, List, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 __all__ = [
-    "CompletionCreateParams",
+    "CompletionCreateParamsBase",
     "Message",
     "MessageChatCompletionRequestSystemMessage",
     "MessageChatCompletionRequestDeveloperMessage",
     "MessageChatCompletionRequestUserMessage",
     "MessageChatCompletionRequestAssistantMessage",
     "StreamOptions",
+    "CompletionCreateParamsNonStreaming",
+    "CompletionCreateParamsStreaming",
 ]
 
 
-class CompletionCreateParams(TypedDict, total=False):
+class CompletionCreateParamsBase(TypedDict, total=False):
     messages: Required[Iterable[Message]]
     """A list of messages comprising the conversation so far."""
 
@@ -92,12 +94,6 @@ class CompletionCreateParams(TypedDict, total=False):
     The returned text will not contain the stop sequence.
     """
 
-    stream: Optional[bool]
-    """
-    If set to true, the model response data will be streamed to the client as it is
-    generated using server-sent events.
-    """
-
     stream_options: Optional[StreamOptions]
     """Options for streaming response. Only set this when you set `stream: true`."""
 
@@ -183,3 +179,22 @@ class StreamOptions(TypedDict, total=False):
     **NOTE:** If the stream is interrupted, you may not receive the final usage
     chunk which contains the total token usage for the request.
     """
+
+
+class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using server-sent events.
+    """
+
+
+class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using server-sent events.
+    """
+
+
+CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
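The net effect of the three hunks above is that the single `CompletionCreateParams` TypedDict is split into a shared base plus non-streaming and streaming variants, with `stream` narrowed from a plain `bool` to `Literal[False]` / `Literal[True]`. A hedged sketch of the two request shapes follows; the message content is a placeholder, and any required fields outside these hunks (for example a model selector) are omitted.

```python
# Hedged sketch: the two request shapes implied by the new TypedDict split.
# Values are placeholders; fields not visible in this diff may also be required.
from gradientai.types.agents.chat.completion_create_params import (
    CompletionCreateParamsNonStreaming,
    CompletionCreateParamsStreaming,
)

non_streaming: CompletionCreateParamsNonStreaming = {
    "messages": [{"role": "user", "content": "Hello"}],
    # "stream" may be omitted or set to False in the non-streaming variant.
}

streaming: CompletionCreateParamsStreaming = {
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": True,  # Required[Literal[True]] selects the streaming variant.
    "stream_options": {"include_usage": True},
}
```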
gradientai/types/chat/__init__.py
@@ -2,5 +2,6 @@
 
 from __future__ import annotations
 
+from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
 from .completion_create_params import CompletionCreateParams as CompletionCreateParams
 from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
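With this re-export in place, the new chunk model can be imported from the package-level types namespace rather than its module file, for example:

```python
# Both names are re-exported by gradientai/types/chat/__init__.py in this version.
from gradientai.types.chat import ChatCompletionChunk, CompletionCreateParams
```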
gradientai/types/chat/chat_completion_chunk.py (new file)
@@ -0,0 +1,93 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = ["ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"]
+
+
+class ChoiceDelta(BaseModel):
+    content: Optional[str] = None
+    """The contents of the chunk message."""
+
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
+    role: Optional[Literal["developer", "user", "assistant"]] = None
+    """The role of the author of this message."""
+
+
+class ChoiceLogprobs(BaseModel):
+    content: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message content tokens with log probability information."""
+
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+    delta: ChoiceDelta
+    """A chat completion delta generated by streamed model responses."""
+
+    finish_reason: Optional[Literal["stop", "length"]] = None
+    """The reason the model stopped generating tokens.
+
+    This will be `stop` if the model hit a natural stop point or a provided stop
+    sequence, or `length` if the maximum number of tokens specified in the request
+    was reached
+    """
+
+    index: int
+    """The index of the choice in the list of choices."""
+
+    logprobs: Optional[ChoiceLogprobs] = None
+    """Log probability information for the choice."""
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+    """Number of tokens in the generated completion."""
+
+    prompt_tokens: int
+    """Number of tokens in the prompt."""
+
+    total_tokens: int
+    """Total number of tokens used in the request (prompt + completion)."""
+
+
+class ChatCompletionChunk(BaseModel):
+    id: str
+    """A unique identifier for the chat completion. Each chunk has the same ID."""
+
+    choices: List[Choice]
+    """A list of chat completion choices.
+
+    Can contain more than one elements if `n` is greater than 1. Can also be empty
+    for the last chunk if you set `stream_options: {"include_usage": true}`.
+    """
+
+    created: int
+    """The Unix timestamp (in seconds) of when the chat completion was created.
+
+    Each chunk has the same timestamp.
+    """
+
+    model: str
+    """The model to generate the completion."""
+
+    object: Literal["chat.completion.chunk"]
+    """The object type, which is always `chat.completion.chunk`."""
+
+    usage: Optional[Usage] = None
+    """
+    An optional field that will only be present when you set
+    `stream_options: {"include_usage": true}` in your request. When present, it
+    contains a null value **except for the last chunk** which contains the token
+    usage statistics for the entire request.
+
+    **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+    final usage chunk which contains the total token usage for the request.
+    """
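`ChatCompletionChunk` mirrors the agent-scoped model above field for field. One detail worth illustrating is `finish_reason` on each choice, which is typically `None` on intermediate chunks and becomes `"stop"` or `"length"` on the final content chunk. A small hedged sketch (not package code; the `stream` iterable is assumed):

```python
# Hedged sketch: watching finish_reason while consuming ChatCompletionChunk objects.
from typing import Iterable, Optional

from gradientai.types.chat import ChatCompletionChunk


def last_finish_reason(stream: Iterable[ChatCompletionChunk]) -> Optional[str]:
    reason: Optional[str] = None
    for chunk in stream:
        for choice in chunk.choices:
            if choice.finish_reason is not None:
                # "stop": natural stop point or a provided stop sequence was hit;
                # "length": the requested token limit was reached, so the output
                # may be truncated.
                reason = choice.finish_reason
    return reason
```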
gradientai/types/chat/completion_create_params.py
@@ -6,17 +6,19 @@ from typing import Dict, List, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 __all__ = [
-    "CompletionCreateParams",
+    "CompletionCreateParamsBase",
     "Message",
     "MessageChatCompletionRequestSystemMessage",
     "MessageChatCompletionRequestDeveloperMessage",
     "MessageChatCompletionRequestUserMessage",
     "MessageChatCompletionRequestAssistantMessage",
     "StreamOptions",
+    "CompletionCreateParamsNonStreaming",
+    "CompletionCreateParamsStreaming",
 ]
 
 
-class CompletionCreateParams(TypedDict, total=False):
+class CompletionCreateParamsBase(TypedDict, total=False):
     messages: Required[Iterable[Message]]
    """A list of messages comprising the conversation so far."""
 
@@ -92,12 +94,6 @@ class CompletionCreateParams(TypedDict, total=False):
     The returned text will not contain the stop sequence.
     """
 
-    stream: Optional[bool]
-    """
-    If set to true, the model response data will be streamed to the client as it is
-    generated using server-sent events.
-    """
-
     stream_options: Optional[StreamOptions]
     """Options for streaming response. Only set this when you set `stream: true`."""
 
@@ -183,3 +179,22 @@ class StreamOptions(TypedDict, total=False):
     **NOTE:** If the stream is interrupted, you may not receive the final usage
     chunk which contains the total token usage for the request.
     """
+
+
+class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using server-sent events.
+    """
+
+
+class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using server-sent events.
+    """
+
+
+CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
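Because `CompletionCreateParams` is now a union of the two variants rather than a single TypedDict, code that handles arbitrary request params can branch on the `stream` key at runtime. A hedged sketch with an illustrative helper name (not part of the package):

```python
# Hedged sketch: runtime handling of the CompletionCreateParams union.
from gradientai.types.chat.completion_create_params import CompletionCreateParams


def wants_streaming(params: CompletionCreateParams) -> bool:
    # At runtime both variants are plain dicts; the streaming variant carries a
    # required `stream: True`, the non-streaming one omits it or sets False/None.
    return bool(params.get("stream"))
```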
File without changes