decart 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- decart-0.0.1/PKG-INFO +18 -0
- decart-0.0.1/decart/README.md +0 -0
- decart-0.0.1/decart/__init__.py +11 -0
- decart-0.0.1/decart/clients/base_client.py +86 -0
- decart-0.0.1/decart/clients/client.py +46 -0
- decart-0.0.1/decart/exceptions.py +2 -0
- decart-0.0.1/decart/resources/base_resource.py +75 -0
- decart-0.0.1/decart/resources/chat.py +13 -0
- decart-0.0.1/decart/resources/chat_completions.py +158 -0
- decart-0.0.1/decart/resources/completions.py +11 -0
- decart-0.0.1/decart/types/chat_completion.py +36 -0
- decart-0.0.1/decart/types/chat_completion_chunk.py +21 -0
- decart-0.0.1/decart/types/chat_completion_message.py +14 -0
- decart-0.0.1/decart/types/completion_usage.py +7 -0
- decart-0.0.1/decart/types/finish_reason.py +6 -0
- decart-0.0.1/decart/types/model.py +9 -0
- decart-0.0.1/decart/types/models.py +14 -0
- decart-0.0.1/pyproject.toml +22 -0
decart-0.0.1/PKG-INFO
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: decart
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary:
|
|
5
|
+
Author: Decart
|
|
6
|
+
Author-email: support@decart.ai
|
|
7
|
+
Requires-Python: >=3.8,<4.0
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
13
|
+
Requires-Dist: httpx (>=0.25.2,<0.26.0)
|
|
14
|
+
Requires-Dist: httpx-sse (>=0.3.1,<0.4.0)
|
|
15
|
+
Requires-Dist: pydantic (>=2.5.2,<3.0.0)
|
|
16
|
+
Description-Content-Type: text/markdown
|
|
17
|
+
|
|
18
|
+
|
|
File without changes
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
from typing import AsyncIterator, Iterable, Mapping, TypeVar
|
|
2
|
+
import httpx
|
|
3
|
+
import httpx_sse
|
|
4
|
+
import pydantic
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
_ResponseT = TypeVar("_ResponseT", bound=pydantic.BaseModel)
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class BaseClient:
    """Thin synchronous HTTP wrapper around ``httpx.Client``.

    Subclasses must override ``_make_status_error`` to map failed
    responses onto their own exception hierarchy.
    """

    def __init__(
        self,
        headers: Mapping[str, str],
        base_url: httpx.URL,
    ) -> None:
        self._client = httpx.Client(
            headers=headers,
            base_url=base_url,
        )

    # ``type[_ResponseT]`` is quoted so the method defines cleanly on
    # Python 3.8 (the package declares >=3.8), where ``type`` is not
    # subscriptable at runtime.
    def post(
        self, url: httpx.URL, request: pydantic.BaseModel, cls: "type[_ResponseT]"
    ) -> _ResponseT:
        """POST *request* as JSON and validate the response body into *cls*.

        Raises the subclass-specific error for any non-200 status.
        """
        response = self._client.post(
            url,
            json=request.model_dump(),
        )

        if response.status_code != httpx.codes.OK:
            raise self._make_status_error(response)

        return cls.model_validate(response.json())

    def stream(
        self, url: httpx.URL, request: pydantic.BaseModel, cls: "type[_ResponseT]"
    ) -> Iterable[_ResponseT]:
        """POST *request* and lazily yield each SSE event validated into *cls*."""
        with httpx_sse.connect_sse(
            self._client,
            method="POST",
            url=str(url),
            json=request.model_dump(),
        ) as event_source:
            # Surface HTTP failures as API errors instead of letting the
            # SSE parser choke on a non-event-stream error body.
            if event_source.response.status_code != httpx.codes.OK:
                raise self._make_status_error(event_source.response)
            for response in event_source.iter_sse():
                yield cls.model_validate(response.json())

    def _make_status_error(self, response: httpx.Response) -> Exception:
        """Build the exception for a failed response; overridden by subclasses."""
        raise NotImplementedError()
48
|
+
|
|
49
|
+
class AsyncBaseClient:
    """Thin asynchronous HTTP wrapper around ``httpx.AsyncClient``.

    Subclasses must override ``_make_status_error`` to map failed
    responses onto their own exception hierarchy.
    """

    def __init__(
        self,
        headers: Mapping[str, str],
        base_url: httpx.URL,
    ) -> None:
        self._client = httpx.AsyncClient(
            headers=headers,
            base_url=base_url,
        )

    # ``type[_ResponseT]`` is quoted so the method defines cleanly on
    # Python 3.8 (the package declares >=3.8), where ``type`` is not
    # subscriptable at runtime.
    async def post(
        self, url: httpx.URL, request: pydantic.BaseModel, cls: "type[_ResponseT]"
    ) -> _ResponseT:
        """POST *request* as JSON and validate the response body into *cls*.

        Raises the subclass-specific error for any non-200 status.
        """
        response = await self._client.post(
            url,
            json=request.model_dump(),
        )

        if response.status_code != httpx.codes.OK:
            raise self._make_status_error(response)

        return cls.model_validate(response.json())

    async def stream(
        self, url: httpx.URL, request: pydantic.BaseModel, cls: "type[_ResponseT]"
    ) -> AsyncIterator[_ResponseT]:
        """POST *request* and lazily yield each SSE event validated into *cls*."""
        async with httpx_sse.aconnect_sse(
            self._client,
            method="POST",
            url=str(url),
            json=request.model_dump(),
        ) as event_source:
            # Surface HTTP failures as API errors instead of letting the
            # SSE parser choke on a non-event-stream error body.
            if event_source.response.status_code != httpx.codes.OK:
                raise self._make_status_error(event_source.response)
            async for response in event_source.aiter_sse():
                yield cls.model_validate(response.json())

    def _make_status_error(self, response: httpx.Response) -> Exception:
        """Build the exception for a failed response; overridden by subclasses."""
        raise NotImplementedError()
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import httpx
|
|
3
|
+
from ..resources import chat
|
|
4
|
+
from ..resources import completions
|
|
5
|
+
from . import base_client
|
|
6
|
+
from .. import exceptions
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class _ClientMixin:
    """Shared configuration hooks for ``Client`` and ``AsyncClient``.

    Concrete client classes are expected to provide the two class
    attributes declared below; neither is assigned here, so an
    instance of a class that omits them will fail at runtime.
    """

    # Base URL of the target API, consumed by the client ``__init__``.
    _base_url: httpx.URL
    # Name of the environment variable holding the API key fallback.
    _key_variable: str

    def _make_status_error(self, response: httpx.Response) -> exceptions.APIError:
        """Map a failed HTTP response onto an ``APIError``.

        NOTE(review): the *response* argument is currently ignored and
        every failure collapses to a bare ``APIError`` — presumably this
        should branch on ``response.status_code``; confirm the intended
        exception hierarchy in ``exceptions``.
        """
        # TODO(vova): based on code
        return exceptions.APIError()
|
|
17
|
+
class Client(_ClientMixin, base_client.BaseClient):
    """Synchronous API client.

    Concrete subclasses must define ``_base_url`` and ``_key_variable``
    (see ``_ClientMixin``).
    """

    # The annotation is quoted so the def executes on Python 3.8/3.9,
    # where the ``X | Y`` union syntax is unavailable at runtime (the
    # package declares support for >=3.8).
    def __init__(self, api_key: "str | None" = None) -> None:
        """Build the client.

        Args:
            api_key: Bearer token for authentication.  When omitted it is
                read from the environment variable named by
                ``self._key_variable``.

        Raises:
            ValueError: if no key is given and the environment variable
                is unset (previously this silently sent ``Bearer None``).
        """
        if api_key is None:
            api_key = os.getenv(self._key_variable)
            if api_key is None:
                raise ValueError(
                    f"No api_key given and environment variable "
                    f"{self._key_variable} is not set"
                )

        self.chat = chat.Chat(self)
        self.completion = completions.Completions(self)

        super().__init__(
            headers={
                "Authorization": f"Bearer {api_key}",
            },
            base_url=httpx.URL(self._base_url),
        )
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class AsyncClient(_ClientMixin, base_client.AsyncBaseClient):
    """Asynchronous API client.

    Concrete subclasses must define ``_base_url`` and ``_key_variable``
    (see ``_ClientMixin``).
    """

    # The annotation is quoted so the def executes on Python 3.8/3.9,
    # where the ``X | Y`` union syntax is unavailable at runtime (the
    # package declares support for >=3.8).
    def __init__(self, api_key: "str | None" = None) -> None:
        """Build the client.

        Args:
            api_key: Bearer token for authentication.  When omitted it is
                read from the environment variable named by
                ``self._key_variable``.

        Raises:
            ValueError: if no key is given and the environment variable
                is unset (previously this silently sent ``Bearer None``).
        """
        if api_key is None:
            api_key = os.getenv(self._key_variable)
            if api_key is None:
                raise ValueError(
                    f"No api_key given and environment variable "
                    f"{self._key_variable} is not set"
                )

        self.chat = chat.AsyncChat(self)
        self.completion = completions.AsyncCompletions(self)

        super().__init__(
            headers={
                "Authorization": f"Bearer {api_key}",
            },
            base_url=httpx.URL(self._base_url),
        )
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
from typing import AsyncIterator, Iterable, Literal, TypeVar, overload
|
|
2
|
+
import httpx
|
|
3
|
+
import pydantic
|
|
4
|
+
from ..clients import base_client
|
|
5
|
+
|
|
6
|
+
_ResponseT = TypeVar("_ResponseT", bound=pydantic.BaseModel)
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class BaseResource:
    """Base class for synchronous API resources.

    Holds the underlying ``BaseClient`` and exposes a typed ``_post``
    helper that returns either a parsed response or a lazy SSE stream.
    """

    def __init__(self, client: base_client.BaseClient):
        self._client = client

    # Annotations below are quoted so the defs execute on Python 3.8/3.9
    # (the package declares >=3.8): ``type[...]`` subscription needs 3.9
    # and ``X | Y`` unions need 3.10 at runtime.
    @overload
    def _post(
        self,
        url: str,
        model: pydantic.BaseModel,
        stream: Literal[False],
        cls: "type[_ResponseT]",
    ) -> _ResponseT:
        ...

    @overload
    def _post(
        self,
        url: str,
        model: pydantic.BaseModel,
        stream: Literal[True],
        cls: "type[_ResponseT]",
    ) -> Iterable[_ResponseT]:
        ...

    def _post(
        self, url: str, model: pydantic.BaseModel, stream: bool, cls: "type[_ResponseT]"
    ) -> "_ResponseT | Iterable[_ResponseT]":
        """POST *model* to *url*, optionally streaming the response.

        Args:
            url: Path relative to the client's base URL.
            model: Request body, serialized via ``model_dump``.
            stream: When true, return a lazy iterable of SSE events.
            cls: Pydantic model used to validate each response payload.
        """
        url_httpx = httpx.URL(url)

        if stream:
            return self._client.stream(url_httpx, model, cls)
        return self._client.post(url_httpx, model, cls)
|
+
|
|
43
|
+
class AsyncBaseResource:
    """Base class for asynchronous API resources.

    Holds the underlying ``AsyncBaseClient`` and exposes a typed
    ``_post`` helper that returns either a parsed response or a lazy
    async SSE stream.
    """

    def __init__(self, client: base_client.AsyncBaseClient):
        self._client = client

    # Annotations below are quoted so the defs execute on Python 3.8/3.9
    # (the package declares >=3.8): ``type[...]`` subscription needs 3.9
    # and ``X | Y`` unions need 3.10 at runtime.
    @overload
    async def _post(
        self,
        url: str,
        model: pydantic.BaseModel,
        stream: Literal[False],
        cls: "type[_ResponseT]",
    ) -> _ResponseT:
        ...

    @overload
    async def _post(
        self,
        url: str,
        model: pydantic.BaseModel,
        stream: Literal[True],
        cls: "type[_ResponseT]",
    ) -> AsyncIterator[_ResponseT]:
        ...

    async def _post(
        self, url: str, model: pydantic.BaseModel, stream: bool, cls: "type[_ResponseT]"
    ) -> "_ResponseT | AsyncIterator[_ResponseT]":
        """POST *model* to *url*, optionally streaming the response.

        Args:
            url: Path relative to the client's base URL.
            model: Request body, serialized via ``model_dump``.
            stream: When true, return an async iterator of SSE events
                (the async-generator call itself is not awaited).
            cls: Pydantic model used to validate each response payload.
        """
        url_httpx = httpx.URL(url)

        if stream:
            return self._client.stream(url_httpx, model, cls)
        return await self._client.post(url_httpx, model, cls)
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
from . import chat_completions
|
|
2
|
+
from ..clients import base_client
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class Chat:
    """Namespace grouping the synchronous chat endpoints."""

    def __init__(self, client: base_client.BaseClient):
        """Bind the chat namespace to *client* and expose its endpoints."""
        self.completions = chat_completions.Completions(client)
        self._client = client
+
|
|
10
|
+
class AsyncChat:
    """Namespace grouping the asynchronous chat endpoints."""

    def __init__(self, client: base_client.AsyncBaseClient):
        """Bind the chat namespace to *client* and expose its endpoints."""
        self.completions = chat_completions.AsyncCompletions(client)
        self._client = client
|
|
@@ -0,0 +1,158 @@
|
|
|
1
|
+
from typing import AsyncIterator, Iterable, Iterator, Literal, Optional, Sequence, overload
|
|
2
|
+
|
|
3
|
+
from ..types.chat_completion_chunk import ChatCompletionChunk
|
|
4
|
+
from . import base_resource
|
|
5
|
+
from ..clients import base_client
|
|
6
|
+
from ..types.model import Model
|
|
7
|
+
from ..types.chat_completion import (
|
|
8
|
+
ChatCompletion,
|
|
9
|
+
CreateChatCompletion,
|
|
10
|
+
ChatCompletionMessage,
|
|
11
|
+
)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class Completions(base_resource.BaseResource):
    """Synchronous ``/chat/completions`` endpoint."""

    def __init__(self, client: base_client.BaseClient) -> None:
        super().__init__(client)

    # Fixes relative to the original:
    # * the non-streaming overload now defaults ``stream`` to ``False`` so
    #   calls that omit it still match an overload;
    # * ``Optional[int]`` replaces ``int | None`` so the defs execute on
    #   Python 3.8/3.9 (the package declares >=3.8).
    @overload
    def create(
        self,
        *,
        messages: Sequence[ChatCompletionMessage],
        model: Model,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
        stop: Sequence[str] = (),
        max_tokens: Optional[int] = None,
        stream: Literal[False] = False,
        temperature: float = 0.0,
        top_p: float = 1.0,
        top_k: Optional[int] = None,
    ) -> ChatCompletion:
        ...

    @overload
    def create(
        self,
        *,
        messages: Sequence[ChatCompletionMessage],
        model: Model,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
        stop: Sequence[str] = (),
        max_tokens: Optional[int] = None,
        stream: Literal[True],
        temperature: float = 0.0,
        top_p: float = 1.0,
        top_k: Optional[int] = None,
    ) -> Iterator[ChatCompletionChunk]:
        ...

    def create(
        self,
        *,
        messages: Sequence[ChatCompletionMessage],
        model: Model,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
        stop: Sequence[str] = (),
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: float = 0.0,
        top_p: float = 1.0,
        top_k: Optional[int] = None,
    ) -> "ChatCompletion | Iterable[ChatCompletionChunk]":
        """Create a chat completion.

        Args:
            messages: The conversation so far.
            model: Target model.
            frequency_penalty: Sampling parameter forwarded to the server.
            presence_penalty: Sampling parameter forwarded to the server.
            stop: Sequences at which generation stops.
            max_tokens: Generation cap; ``None`` leaves it to the server.
            stream: When true, return a lazy iterable of SSE chunks
                instead of one parsed response.
            temperature: Sampling temperature forwarded to the server.
            top_p: Sampling parameter forwarded to the server.
            top_k: Sampling parameter; ``None`` omits it client-side.

        Returns:
            A ``ChatCompletion``, or an iterable of
            ``ChatCompletionChunk`` when ``stream`` is true.
        """
        create_chat_completion = CreateChatCompletion(
            messages=messages,
            model=model,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            max_tokens=max_tokens,
            stop=stop,
            stream=stream,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )
        if stream:
            return self._post(
                "/chat/completions", create_chat_completion, True, ChatCompletionChunk
            )
        return self._post(
            "/chat/completions", create_chat_completion, False, ChatCompletion
        )
86
|
+
|
|
87
|
+
class AsyncCompletions(base_resource.AsyncBaseResource):
    """Asynchronous ``/chat/completions`` endpoint."""

    def __init__(self, client: base_client.AsyncBaseClient) -> None:
        super().__init__(client)

    # Fixes relative to the original:
    # * the non-streaming overload now defaults ``stream`` to ``False`` so
    #   calls that omit it still match an overload;
    # * ``Optional[int]`` replaces ``int | None`` so the defs execute on
    #   Python 3.8/3.9 (the package declares >=3.8).
    @overload
    async def create(
        self,
        *,
        messages: Sequence[ChatCompletionMessage],
        model: Model,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
        stop: Sequence[str] = (),
        max_tokens: Optional[int] = None,
        stream: Literal[False] = False,
        temperature: float = 0.0,
        top_p: float = 1.0,
        top_k: Optional[int] = None,
    ) -> ChatCompletion:
        ...

    @overload
    async def create(
        self,
        *,
        messages: Sequence[ChatCompletionMessage],
        model: Model,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
        stop: Sequence[str] = (),
        max_tokens: Optional[int] = None,
        stream: Literal[True],
        temperature: float = 0.0,
        top_p: float = 1.0,
        top_k: Optional[int] = None,
    ) -> AsyncIterator[ChatCompletionChunk]:
        ...

    async def create(
        self,
        *,
        messages: Sequence[ChatCompletionMessage],
        model: Model,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
        stop: Sequence[str] = (),
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: float = 0.0,
        top_p: float = 1.0,
        top_k: Optional[int] = None,
    ) -> "ChatCompletion | AsyncIterator[ChatCompletionChunk]":
        """Create a chat completion.

        Args:
            messages: The conversation so far.
            model: Target model.
            frequency_penalty: Sampling parameter forwarded to the server.
            presence_penalty: Sampling parameter forwarded to the server.
            stop: Sequences at which generation stops.
            max_tokens: Generation cap; ``None`` leaves it to the server.
            stream: When true, return an async iterator of SSE chunks
                instead of one parsed response.
            temperature: Sampling temperature forwarded to the server.
            top_p: Sampling parameter forwarded to the server.
            top_k: Sampling parameter; ``None`` omits it client-side.

        Returns:
            A ``ChatCompletion``, or an ``AsyncIterator`` of
            ``ChatCompletionChunk`` when ``stream`` is true.
        """
        create_chat_completion = CreateChatCompletion(
            messages=messages,
            model=model,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            max_tokens=max_tokens,
            stop=stop,
            stream=stream,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )
        if stream:
            return await self._post(
                "/chat/completions", create_chat_completion, True, ChatCompletionChunk
            )
        return await self._post(
            "/chat/completions", create_chat_completion, False, ChatCompletion
        )
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from . import base_resource
|
|
2
|
+
from ..clients import base_client
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class Completions(base_resource.BaseResource):
    """Synchronous text-completions resource.

    No endpoint methods are implemented here yet; construction is
    inherited unchanged from ``base_resource.BaseResource`` (a
    pure-delegation ``__init__`` would be redundant).
    """
+
|
|
9
|
+
class AsyncCompletions(base_resource.AsyncBaseResource):
    """Asynchronous text-completions resource.

    No endpoint methods are implemented here yet; construction is
    inherited unchanged from ``base_resource.AsyncBaseResource`` (a
    pure-delegation ``__init__`` would be redundant).
    """
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
from typing import Literal, Optional, Sequence
|
|
2
|
+
import pydantic
|
|
3
|
+
|
|
4
|
+
from ..types.chat_completion_message import ChatCompletionMessage
|
|
5
|
+
from ..types.finish_reason import FinishReason
|
|
6
|
+
from ..types.model import Model
|
|
7
|
+
from ..types.completion_usage import CompletionUsage
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class CreateChatCompletion(pydantic.BaseModel):
    """Request body for the ``/chat/completions`` endpoint.

    ``Optional[int]`` replaces the original ``int | None`` for ``top_k``:
    pydantic evaluates field annotations at class creation, and the
    ``X | Y`` syntax raises ``TypeError`` on Python 3.8/3.9, which the
    package declares it supports.
    """

    # The conversation so far.
    messages: Sequence[ChatCompletionMessage]
    # Target model identifier.
    model: Model
    frequency_penalty: float
    presence_penalty: float
    # Sequences at which generation stops.
    stop: Sequence[str]
    # Generation cap; None defers to the server default.
    max_tokens: Optional[int]
    # Whether the server should respond with an SSE stream.
    stream: bool
    temperature: float
    top_p: float
    top_k: Optional[int]
|
+
|
|
22
|
+
|
|
23
|
+
class Choice(pydantic.BaseModel):
    """One candidate completion inside a ``ChatCompletion`` response."""

    # Why generation stopped for this candidate (see FinishReason).
    finish_reason: FinishReason
    # Position of this candidate in the response's ``choices`` list.
    index: int
    # The fully generated message for this candidate.
    message: ChatCompletionMessage
+
|
|
28
|
+
|
|
29
|
+
class ChatCompletion(pydantic.BaseModel):
    """A complete (non-streaming) chat-completion response."""

    # Server-assigned identifier for this completion.
    id: str
    # One entry per generated candidate.
    choices: Sequence[Choice]
    # Creation time — presumably a Unix timestamp in seconds; confirm with API docs.
    created: int
    # Name of the model that produced the completion.
    model: str
    # Discriminator fixed by the API for this payload shape.
    object: Literal["chat.completion"]
    system_fingerprint: Optional[str] = None
    # Token accounting; may be absent from the response.
    usage: Optional[CompletionUsage] = None
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from typing import Literal, Optional, Sequence
|
|
2
|
+
|
|
3
|
+
import pydantic
|
|
4
|
+
|
|
5
|
+
from ..types.chat_completion_message import ChatCompletionMessage
|
|
6
|
+
from ..types.finish_reason import FinishReason
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class Choice(pydantic.BaseModel):
    """One candidate's incremental update within a streamed chunk."""

    # Partial message content delivered by this chunk.
    delta: ChatCompletionMessage
    # Presumably None until the candidate's final chunk — confirm with API docs.
    finish_reason: Optional[FinishReason]
    # Position of this candidate in the stream's ``choices`` list.
    index: int
+
|
|
14
|
+
|
|
15
|
+
class ChatCompletionChunk(pydantic.BaseModel):
    """A single SSE event of a streamed chat completion."""

    # Server-assigned identifier shared by all chunks of one completion.
    id: str
    # Incremental updates, one per candidate.
    choices: Sequence[Choice]
    # Creation time — presumably a Unix timestamp in seconds; confirm with API docs.
    created: int
    # Name of the model producing the stream.
    model: str
    # Discriminator fixed by the API for this payload shape.
    object: Literal["chat.completion.chunk"]
    system_fingerprint: Optional[str] = None
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import enum
|
|
2
|
+
from typing import Optional
|
|
3
|
+
import pydantic
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class Role(str, enum.Enum):
    """Author role of a chat message (str-valued for JSON serialization)."""

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
+
|
|
11
|
+
|
|
12
|
+
class ChatCompletionMessage(pydantic.BaseModel):
    """A chat message; doubles as the streaming delta, so both fields
    are optional (a delta may carry only content or only a role)."""

    # Message text; may be absent in a streamed delta.
    content: Optional[str] = None
    # Author role; may be absent in a streamed delta.
    role: Optional[Role] = None
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import enum
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class Model(str, enum.Enum):
    """Supported model identifiers (str-valued for JSON serialization)."""

    LLAMA_70B_CHAT = "meta-llama/llama-2-70b-chat-hf"
    MISTRAL_7B_INSTRUCT = "mistralai/mistral-7b-instruct-v0.1"
    ZEPHYR_7B_BETA = "huggingfaceh4/zephyr-7b-beta"
    LZLV_70B = "lizpreciatior/lzlv_70b_fp16_hf"
    FALCON_180B = "tiiuae/falcon-180b-chat"
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import enum
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class Model(str, enum.Enum):
    """Supported model identifiers (str-valued for JSON serialization).

    NOTE(review): this appears to duplicate ``types/model.py`` with an
    identical member set — confirm which module is canonical and whether
    one should re-export the other.
    """

    LLAMA_70B_CHAT = "meta-llama/llama-2-70b-chat-hf"

    MISTRAL_7B_INSTRUCT = "mistralai/mistral-7b-instruct-v0.1"

    ZEPHYR_7B_BETA = "huggingfaceh4/zephyr-7b-beta"

    LZLV_70B = "lizpreciatior/lzlv_70b_fp16_hf"

    FALCON_180B = "tiiuae/falcon-180b-chat"
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
[tool.poetry]
|
|
2
|
+
name = "decart"
|
|
3
|
+
version = "0.0.1"
|
|
4
|
+
description = ""
|
|
5
|
+
authors = ["Decart <support@decart.ai>"]
|
|
6
|
+
readme = "decart/README.md"
|
|
7
|
+
|
|
8
|
+
[tool.poetry.dependencies]
|
|
9
|
+
python = "^3.8"
|
|
10
|
+
httpx = "^0.25.2"
|
|
11
|
+
pydantic = "^2.5.2"
|
|
12
|
+
httpx-sse = "^0.3.1"
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
[tool.poetry.group.dev.dependencies]
|
|
16
|
+
ruff = "^0.1.8"
|
|
17
|
+
jinja2 = "^3.1.2"
|
|
18
|
+
pyyaml = "^6.0.1"
|
|
19
|
+
|
|
20
|
+
[build-system]
|
|
21
|
+
requires = ["poetry-core"]
|
|
22
|
+
build-backend = "poetry.core.masonry.api"
|