deepanything 0.1.2__tar.gz → 0.1.4__tar.gz
- {deepanything-0.1.2 → deepanything-0.1.4}/PKG-INFO +3 -1
- {deepanything-0.1.2 → deepanything-0.1.4}/README.md +2 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything/DeepAnythingClient.py +19 -6
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything/ReasonClient.py +12 -2
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything/ResponseClient.py +2 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything/Server/Server.py +42 -9
- deepanything-0.1.4/deepanything/Server/Types.py +22 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything/Utility.py +1 -1
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything/__main__.py +1 -1
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything.egg-info/PKG-INFO +3 -1
- {deepanything-0.1.2 → deepanything-0.1.4}/setup.py +1 -1
- {deepanything-0.1.2 → deepanything-0.1.4}/test/server.py +1 -1
- deepanything-0.1.2/deepanything/Server/Types.py +0 -15
- {deepanything-0.1.2 → deepanything-0.1.4}/LICENSE +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything/Server/__init__.py +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything/Stream.py +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything/__init__.py +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything.egg-info/SOURCES.txt +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything.egg-info/dependency_links.txt +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything.egg-info/entry_points.txt +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything.egg-info/requires.txt +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/deepanything.egg-info/top_level.txt +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/requirements.txt +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/setup.cfg +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/test/think.py +0 -0
- {deepanything-0.1.2 → deepanything-0.1.4}/test/think_async.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepanything
-Version: 0.1.2
+Version: 0.1.4
 Summary: DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).
 Author: Junity
 Author-email: 1727636624@qq.com
@@ -140,6 +140,8 @@ async def main():
 asyncio.run(main())
 ```
 
+More example can be find in [examples](examples).
+
 ### 2. Use as a Server
 
 ```bash
@@ -45,15 +45,19 @@ def _build_message(
         role="assistant",
         content=reason_prompt.format(reason_content)
     )]
-def
-    delta = chunk.choices[0].delta
-    reasoning_contents.append(delta.reasoning_content)
+def _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id):
     new_chunk = chunk.model_copy(deep=False)
     new_chunk.model = show_model
     new_chunk.created = created
     new_chunk.id = _id
+
     if new_chunk.usage is not None:
         reason_usage = new_chunk.usage
+
+    if chunk.choices:
+        delta = chunk.choices[0].delta
+        reasoning_contents.append(delta.reasoning_content)
+
     return new_chunk, reason_usage
 
 def _process_response_chunk(chunk, reason_usage,show_model, created, _id):
@@ -116,6 +120,8 @@ def chat_completion(
 
     if max_tokens is not None:
         max_tokens -= reason_chat_completion.usage.completion_tokens
+        if max_tokens <= 0:
+            return reason_chat_completion
         response_args["max_tokens"] = max_tokens
 
     response_chat_completion: ChatCompletion = response_client.chat_completions(
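The guard added above protects the token budget: `max_tokens` is a single budget shared by the reasoning and response stages, and once the reasoning stage has consumed all of it, the reasoning completion is returned directly instead of calling the response model with a zero or negative limit. A minimal sketch of the arithmetic, with illustrative numbers:

```python
# Illustrative numbers only; this mirrors the guard added in the hunk above.
max_tokens = 100             # caller's overall completion budget
reasoning_tokens_used = 120  # completion_tokens reported by the reasoning stage

max_tokens -= reasoning_tokens_used
if max_tokens <= 0:
    print("budget exhausted: return the reasoning completion as-is")
else:
    print(f"response stage may generate up to {max_tokens} tokens")
```

The same guard recurs below in `chat_completion_stream`, `chat_completion_async`, and `chat_completion_stream_async` (the streaming variants simply `return`, ending the stream after the reasoning chunks).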
@@ -166,11 +172,13 @@ def chat_completion_stream(
     reason_usage = make_usage(0, 0, 0)
 
     for chunk in reason_stream:
-        new_chunk, reason_usage =
+        new_chunk, reason_usage = _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id)
         yield new_chunk
 
     if max_tokens is not None:
         max_tokens -= reason_usage.completion_tokens
+        if max_tokens <= 0:
+            return
         response_args["max_tokens"] = max_tokens
 
     new_messages = _build_message(messages,reason_content="".join(reasoning_contents),reason_prompt=reason_prompt)
@@ -219,7 +227,8 @@ async def chat_completion_async(
         response_args=response_args,
         created=created,
         _id=_id,
-        reason_prompt=reason_prompt
+        reason_prompt=reason_prompt,
+        max_tokens=max_tokens
     )
 
     if max_tokens is not None:
@@ -233,6 +242,8 @@ async def chat_completion_async(
 
     if max_tokens is not None:
         max_tokens -= reason_chat_completion.usage.completion_tokens
+        if max_tokens <= 0:
+            return reason_chat_completion
         response_args["max_tokens"] = max_tokens
 
     response_chat_completion:ChatCompletion = await response_client.chat_completions(
@@ -283,13 +294,15 @@ async def chat_completion_stream_async(
     reason_usage = make_usage(0,0,0)
 
     async for chunk in reason_stream:
-        new_chunk,reason_usage =
+        new_chunk,reason_usage = _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id)
         yield new_chunk
 
     new_messages = _build_message(messages,reason_content="".join(reasoning_contents),reason_prompt=reason_prompt)
 
     if max_tokens is not None:
         max_tokens -= reason_usage.completion_tokens
+        if max_tokens <= 0:
+            return
         response_args["max_tokens"] = max_tokens
 
     response_stream = await response_client.chat_completions_stream(
@@ -76,11 +76,14 @@ class DeepseekReasonClient(ReasonClient):
             messages=messages,
             model=model,
             stream=True,
+            stream_options = {"include_usage": True},
             **kwargs
         )
 
         def _iter():
             for chunk in stream:
+                if len(chunk.choices) == 0:
+                    yield chunk
                 if chunk.choices[0].delta.reasoning_content is not None:
                     yield chunk
                 else:
@@ -111,11 +114,15 @@ class AsyncDeepseekReasonClient(AsyncReasonClient):
             messages=messages,
             model=model,
             stream=True,
+            stream_options = {"include_usage": True},
             **kwargs
         )
 
         async def _iter():
             async for chunk in stream:
+                if len(chunk.choices) == 0:
+                    yield chunk
+                    continue
                 if chunk.choices[0].delta.reasoning_content is not None:
                     yield chunk
                 else:
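Both hunks above pass `stream_options={"include_usage": True}` to the underlying client. With that option, OpenAI-compatible endpoints emit one final chunk whose `choices` list is empty and whose `usage` field carries the token counts; the new `len(chunk.choices) == 0` checks handle exactly that chunk (the async `_iter` also adds a `continue` so it skips the delta handling afterwards, while the synchronous one as diffed does not). A minimal sketch of consuming such a stream; the base URL, API key, and model name are placeholders:

```python
from openai import OpenAI

client = OpenAI(base_url="https://api.deepseek.com", api_key="sk-...")  # placeholders

stream = client.chat.completions.create(
    model="deepseek-reasoner",  # placeholder model name
    messages=[{"role": "user", "content": "hello"}],
    stream=True,
    stream_options={"include_usage": True},
)

for chunk in stream:
    if len(chunk.choices) == 0:
        print("usage:", chunk.usage)  # final, usage-only chunk: no delta to read
    else:
        print(chunk.choices[0].delta)
```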
@@ -130,8 +137,9 @@ class AsyncDeepseekReasonClient(AsyncReasonClient):
 def _rebuild_chunk_for_openai(
         chunk:chat_completion_chunk.ChatCompletionChunk
 ) -> chat_completion_chunk.ChatCompletionChunk:
-    chunk.choices[0].delta.reasoning_content = chunk.choices[0].delta.content
-    chunk.choices[0].delta.content = None
+    if len(chunk.choices):
+        chunk.choices[0].delta.reasoning_content = chunk.choices[0].delta.content
+        chunk.choices[0].delta.content = None
     return chunk
 
 
@@ -159,6 +167,7 @@ class OpenaiReasonClient(ReasonClient):
             messages=messages,
             model=model,
             stream=True,
+            stream_options = {"include_usage": True},
             **kwargs
         )
 
@@ -205,6 +214,7 @@ class AsyncOpenaiReasonClient(AsyncReasonClient):
             messages=messages,
             model=model,
             stream=True,
+            stream_options = {"include_usage": True},
             **kwargs
         )
 
@@ -55,6 +55,7 @@ class OpenaiResponseClient(ResponseClient):
             messages=messages,
             model=model,
             stream=True,
+            stream_options = {"include_usage": True},
             **kwargs
         )
 
@@ -83,5 +84,6 @@ class AsyncOpenaiResponseClient(AsyncResponseClient):
             messages=messages,
             model=model,
             stream=True,
+            stream_options = {"include_usage": True},
             **kwargs
         )
@@ -1,7 +1,8 @@
 from dataclasses import dataclass
 import time
 import uvicorn
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Any
+import json
 
 from openai.types.model import Model as OpenaiModel
 from fastapi import FastAPI,Depends, HTTPException, status,Header
@@ -21,8 +22,8 @@ class ModelInfo:
     reason_model : str
     response_client : str
     response_model : str
-    created : int
-    reason_prompt : str
+    created : int = int(time.time())
+    reason_prompt : str = "<Think>{}</Think>"
 
 class DeepAnythingServer:
     app : FastAPI = FastAPI()
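With these defaults, a config entry may omit `created` and `reason_prompt`. One Python caveat applies: `ModelInfo` is a dataclass, so `int(time.time())` is evaluated once at class-definition time, and every instance that does not pass `created` shares the import-time stamp. A small self-contained demonstration, with the class trimmed to the fields relevant here:

```python
import time
from dataclasses import dataclass

@dataclass
class ModelInfo:  # trimmed to the fields this hunk touches
    name: str
    created: int = int(time.time())  # evaluated once, when the class is defined
    reason_prompt: str = "<Think>{}</Think>"

a = ModelInfo(name="a")
time.sleep(2)
b = ModelInfo(name="b")
assert a.created == b.created  # both carry the import-time timestamp
```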
@@ -35,9 +36,12 @@ class DeepAnythingServer:
     api_keys : List[str] = []
     security = HTTPBearer()
 
-    def __init__(self, host:str = None, port:int = None,
-        if
-
+    def __init__(self, host:str = None, port:int = None, config : Any or str = None):
+        if config is not None:
+            if isinstance(config,str):
+                with open(config) as f:
+                    config = json.load(f)
+            self.load_config(config)
 
         if host:
             self.host = host
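After this change the server accepts either an already-parsed config dict or a path to a JSON file. A usage sketch; of the config keys, only `host`, `port`, and `api_keys` are visible in this diff:

```python
from deepanything.Server.Server import DeepAnythingServer

# From a file path: the constructor opens and json.load()s it itself.
server = DeepAnythingServer(config="config.json")

# Or from an already-parsed dict.
server = DeepAnythingServer(config={"host": "0.0.0.0", "port": 8000, "api_keys": []})
```

(As an aside, the `Any or str` annotation evaluates to just `Any`, since `or` between objects returns the first truthy operand; `Union[dict, str]` would express the intent.)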
@@ -50,6 +54,13 @@ class DeepAnythingServer:
     def run(self):
         uvicorn.run(self.app,host=self.host,port=self.port,log_level="trace")
 
+    @staticmethod
+    def _extract_args(query : Types.ChatCompletionQuery) -> dict:
+        args = query.dict()
+        for key in ["messages","model","stream"]:
+            args.pop(key)
+        return args
+
     def load_config(self,config_object : Dict) -> None:
         self.host = config_object.get("host","0.0.0.0")
         self.port = config_object.get("port",8000)
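`_extract_args` forwards every query field except the three it handles explicitly, so standard sampling parameters reach both the reason and response clients as extra keyword arguments. A self-contained sketch of the same idea, using a stand-in pydantic model in place of `Types.ChatCompletionQuery`:

```python
from typing import List, Optional
from pydantic import BaseModel

class Query(BaseModel):  # stand-in for Types.ChatCompletionQuery
    model: str
    messages: List[dict]
    stream: bool = False
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = None

query = Query(model="m", messages=[{"role": "user", "content": "hi"}])
args = query.dict()
for key in ["messages", "model", "stream"]:
    args.pop(key)

print(args)  # {'temperature': 0.7, 'max_tokens': None} -- forwarded onward
```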
@@ -106,8 +117,17 @@ class DeepAnythingServer:
 
         self.api_keys = config_object.get("api_keys",[])
 
+
+    def add_reason_client(self,name:str,client:AsyncReasonClient):
+        self.reason_clients[name] = client
+
+    def add_response_client(self,name:str,client:AsyncResponseClient):
+        self.response_clients[name] = client
+
+    def add_model(self,name:str,model:ModelInfo):
+        self.models[name] = model
     def verify_authorization(self, authorization:Optional[str]):
-        if self.api_keys
+        if not self.api_keys:
             return
 
         if authorization is None:
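The three `add_*` hooks let callers register clients and models in code rather than only through the JSON config. A sketch of the intended wiring; constructing the concrete `AsyncReasonClient`/`AsyncResponseClient` instances is outside this diff, and the `reason_client` field name on `ModelInfo` is assumed by symmetry with `response_client`:

```python
from deepanything.Server.Server import DeepAnythingServer, ModelInfo

server = DeepAnythingServer()

# Register clients under names that ModelInfo entries refer back to.
# (Client construction elided; their constructors are not part of this diff.)
# server.add_reason_client("deepseek", reason_client)
# server.add_response_client("qwen", response_client)

server.add_model("deepseek-r1-qwen", ModelInfo(
    name="deepseek-r1-qwen",           # shown to callers of the models endpoint
    reason_client="deepseek",          # assumed field; mirrors response_client
    reason_model="deepseek-reasoner",  # illustrative model names
    response_client="qwen",
    response_model="qwen-max",
))
```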
@@ -140,6 +160,13 @@ class DeepAnythingServer:
             yield f"data: {chunk.model_dump_json(indent=None)}\n\n"
             yield "data: [DONE]"
 
+        args = DeepAnythingServer._extract_args(query)
+
+        max_tokens = None
+        if "max_tokens" in args:
+            max_tokens = args["max_tokens"]
+            args.pop("max_tokens")
+
         if query.stream:
             res = sse(
                 await chat_completion_stream_async(
@@ -150,6 +177,9 @@ class DeepAnythingServer:
                     response_model=model.response_model,
                     show_model=model.name,
                     reason_prompt=model.reason_prompt,
+                    response_args=args,
+                    reason_args=args,
+                    max_tokens=max_tokens
                 )
             )
             return StreamingResponse(
@@ -164,11 +194,14 @@ class DeepAnythingServer:
                 response_client=self.response_clients[model.response_client],
                 response_model=model.response_model,
                 show_model=model.name,
-                reason_prompt=model.reason_prompt
+                reason_prompt=model.reason_prompt,
+                response_args=args,
+                reason_args=args,
+                max_tokens=max_tokens
             )
             return res.model_dump_json()
 
-
+    def get_models(self) -> Types.ModelsListResponse:
         return Types.ModelsListResponse(
             data = [OpenaiModel(
                 id = model_info.name,
@@ -0,0 +1,22 @@
+from pydantic import BaseModel
+from openai.types.model import Model as OpenaiModel
+from typing import Dict, List, Optional
+
+
+class ModelsListResponse(BaseModel):
+    data : List[OpenaiModel]
+
+class ChatCompletionMessage(BaseModel):
+    role : str
+    content : str
+
+class ChatCompletionQuery(BaseModel):
+    model : str
+    messages : List[ChatCompletionMessage]
+    stream : bool = False
+    temperature : Optional[float] = 0.7
+    top_p : Optional[float] = 1.0
+    frequency_penalty : Optional[float] = 0.0
+    presence_penalty : Optional[float] = 0.0
+    max_tokens : Optional[int] = None
+    stop : Optional[List[str]] = None
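The widened `ChatCompletionQuery` means an OpenAI-style request body carrying the usual sampling knobs now validates instead of failing (the deleted 0.1.2 version at the bottom of this diff accepted only `model`, `messages`, and `stream`). A sketch of parsing one:

```python
import json
from deepanything.Server.Types import ChatCompletionQuery

body = json.loads("""
{
  "model": "deepseek-r1-qwen",
  "messages": [{"role": "user", "content": "hello"}],
  "stream": true,
  "temperature": 0.5,
  "max_tokens": 256
}
""")

query = ChatCompletionQuery(**body)
print(query.max_tokens)  # 256 -- the budget shared by the reasoning and response stages
```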
@@ -12,7 +12,7 @@ def main():
     if args.config is not None:
         with open(args.config) as f:
             config = json.load(f)
-        server = DeepAnythingServer(host=args.host,port=args.port,
+        server = DeepAnythingServer(host=args.host, port=args.port, config=config)
         server.run()
     else:
         print("No config file specified.")
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepanything
-Version: 0.1.2
+Version: 0.1.4
 Summary: DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).
 Author: Junity
 Author-email: 1727636624@qq.com
@@ -140,6 +140,8 @@ async def main():
 asyncio.run(main())
 ```
 
+More example can be find in [examples](examples).
+
 ### 2. Use as a Server
 
 ```bash
@@ -8,7 +8,7 @@ with open("requirements.txt") as f:
 
 setup(
     name="deepanything",
-    version="0.1.2",
+    version="0.1.4",
     author="Junity",
     author_email="1727636624@qq.com",
     description="DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).",
@@ -1,15 +0,0 @@
-from pydantic import BaseModel
-from openai.types.model import Model as OpenaiModel
-from typing import Dict,List
-
-class ModelsListResponse(BaseModel):
-    data : List[OpenaiModel]
-
-class ChatCompletionMessage(BaseModel):
-    role : str
-    content : str
-
-class ChatCompletionQuery(BaseModel):
-    model : str
-    messages : List[ChatCompletionMessage]
-    stream : bool = False