deepanything 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff shows the changes between publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
deepanything/DeepAnythingClient.py CHANGED
@@ -45,15 +45,19 @@ def _build_message(
          role="assistant",
          content=reason_prompt.format(reason_content)
      )]
- def _proecess_reason_chunk(chunk, reasoning_contents, reason_usage,show_model, created, _id):
-     delta = chunk.choices[0].delta
-     reasoning_contents.append(delta.reasoning_content)
+ def _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id):
      new_chunk = chunk.model_copy(deep=False)
      new_chunk.model = show_model
      new_chunk.created = created
      new_chunk.id = _id
+
      if new_chunk.usage is not None:
          reason_usage = new_chunk.usage
+
+     if chunk.choices:
+         delta = chunk.choices[0].delta
+         reasoning_contents.append(delta.reasoning_content)
+
      return new_chunk, reason_usage

  def _process_response_chunk(chunk, reason_usage,show_model, created, _id):
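Context for the guard added above: when a stream is created with `stream_options={"include_usage": True}` (see the client hunks further down), an OpenAI-compatible backend emits one final chunk whose `choices` list is empty and whose `usage` field is populated. The 0.1.2 code indexed `chunk.choices[0]` unconditionally and would raise `IndexError` on that chunk. A minimal sketch of that chunk's shape using the openai SDK's public types; the field values are illustrative:

```python
# Illustrative only: builds the kind of usage-only final chunk that
# include_usage produces, and shows why choices[0] must be guarded.
from openai.types.chat import ChatCompletionChunk
from openai.types.completion_usage import CompletionUsage

final_chunk = ChatCompletionChunk(
    id="chatcmpl-demo",                  # illustrative id
    model="deepseek-reasoner",
    object="chat.completion.chunk",
    created=0,
    choices=[],                          # empty on the usage-only chunk
    usage=CompletionUsage(
        completion_tokens=42, prompt_tokens=7, total_tokens=49,
    ),
)

assert not final_chunk.choices                     # 0.1.2: choices[0] -> IndexError
assert final_chunk.usage.completion_tokens == 42   # 0.1.4 reads usage instead
```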
@@ -116,6 +120,8 @@ def chat_completion(

      if max_tokens is not None:
          max_tokens -= reason_chat_completion.usage.completion_tokens
+         if max_tokens <= 0:
+             return reason_chat_completion
          response_args["max_tokens"] = max_tokens

      response_chat_completion: ChatCompletion = response_client.chat_completions(
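The early return above keeps the token budget sound: if the reasoning call already consumed `max_tokens` or more, 0.1.2 would forward a zero or negative `max_tokens` to the response model, which OpenAI-compatible APIs reject. A sketch of the arithmetic with made-up numbers:

```python
# Hypothetical numbers showing when the new branch fires.
max_tokens = 100                 # caller's total budget
reason_completion_tokens = 120   # reasoning stage overspent the budget

max_tokens -= reason_completion_tokens   # now -20
assert max_tokens <= 0                   # 0.1.4 returns reason_chat_completion here;
                                         # 0.1.2 forwarded max_tokens=-20 and the
                                         # downstream API call would fail
```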
@@ -166,11 +172,13 @@ def chat_completion_stream(
      reason_usage = make_usage(0, 0, 0)

      for chunk in reason_stream:
-         new_chunk, reason_usage = _proecess_reason_chunk(chunk, reasoning_contents, reason_usage,show_model,created,_id)
+         new_chunk, reason_usage = _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id)
          yield new_chunk

      if max_tokens is not None:
          max_tokens -= reason_usage.completion_tokens
+         if max_tokens <= 0:
+             return
          response_args["max_tokens"] = max_tokens

      new_messages = _build_message(messages,reason_content="".join(reasoning_contents),reason_prompt=reason_prompt)
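The streaming variant applies the same budget check, but since `chat_completion_stream` is a generator, a bare `return` simply ends the stream after the reasoning chunks have been yielded. A toy model of that control flow:

```python
# Toy generator mirroring the control flow of chat_completion_stream.
def stream(budget: int):
    yield "reasoning-chunk"      # chunks from the reasoning model
    budget -= 120                # hypothetical reasoning spend
    if budget <= 0:
        return                   # ends iteration; no response-model call
    yield "response-chunk"

print(list(stream(100)))         # ['reasoning-chunk']
```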
@@ -219,7 +227,8 @@ async def chat_completion_async(
          response_args=response_args,
          created=created,
          _id=_id,
-         reason_prompt=reason_prompt
+         reason_prompt=reason_prompt,
+         max_tokens=max_tokens
      )

      if max_tokens is not None:
@@ -233,6 +242,8 @@ async def chat_completion_async(

      if max_tokens is not None:
          max_tokens -= reason_chat_completion.usage.completion_tokens
+         if max_tokens <= 0:
+             return reason_chat_completion
          response_args["max_tokens"] = max_tokens

      response_chat_completion:ChatCompletion = await response_client.chat_completions(
@@ -283,13 +294,15 @@ async def chat_completion_stream_async(
      reason_usage = make_usage(0,0,0)

      async for chunk in reason_stream:
-         new_chunk,reason_usage = _proecess_reason_chunk(chunk, reasoning_contents, reason_usage,show_model,created,_id)
+         new_chunk,reason_usage = _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id)
          yield new_chunk

      new_messages = _build_message(messages,reason_content="".join(reasoning_contents),reason_prompt=reason_prompt)

      if max_tokens is not None:
          max_tokens -= reason_usage.completion_tokens
+         if max_tokens <= 0:
+             return
          response_args["max_tokens"] = max_tokens

      response_stream = await response_client.chat_completions_stream(
deepanything/ReasonClient.py CHANGED
@@ -76,11 +76,14 @@ class DeepseekReasonClient(ReasonClient):
              messages=messages,
              model=model,
              stream=True,
+             stream_options = {"include_usage": True},
              **kwargs
          )

          def _iter():
              for chunk in stream:
+                 if len(chunk.choices) == 0:
+                     yield chunk
                  if chunk.choices[0].delta.reasoning_content is not None:
                      yield chunk
                  else:
@@ -111,11 +114,15 @@ class AsyncDeepseekReasonClient(AsyncReasonClient):
              messages=messages,
              model=model,
              stream=True,
+             stream_options = {"include_usage": True},
              **kwargs
          )

          async def _iter():
              async for chunk in stream:
+                 if len(chunk.choices) == 0:
+                     yield chunk
+                     continue
                  if chunk.choices[0].delta.reasoning_content is not None:
                      yield chunk
                  else:
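One asymmetry worth flagging between the two hunks above: the async `_iter` yields the choice-less chunk and then `continue`s, but the sync `_iter` in the previous hunk has no `continue`, so execution falls through to `chunk.choices[0]` and would still raise `IndexError` on a usage-only chunk. A sketch of the shape the sync loop presumably intends; the real `else` body is elided in this diff, so a bare `break` stands in for it:

```python
# Sketch of the guarded loop; `break` is a stand-in for the elided else-branch.
def _iter(stream):
    for chunk in stream:
        if len(chunk.choices) == 0:    # usage-only chunk from include_usage
            yield chunk
            continue                   # async _iter has this; sync _iter does not
        if chunk.choices[0].delta.reasoning_content is not None:
            yield chunk
        else:
            break                      # stand-in for the elided else-branch
```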
@@ -130,8 +137,9 @@ class AsyncDeepseekReasonClient(AsyncReasonClient):
  def _rebuild_chunk_for_openai(
          chunk:chat_completion_chunk.ChatCompletionChunk
  ) -> chat_completion_chunk.ChatCompletionChunk:
-     chunk.choices[0].delta.reasoning_content = chunk.choices[0].delta.content
-     chunk.choices[0].delta.content = None
+     if len(chunk.choices):
+         chunk.choices[0].delta.reasoning_content = chunk.choices[0].delta.content
+         chunk.choices[0].delta.content = None
      return chunk


@@ -159,6 +167,7 @@ class OpenaiReasonClient(ReasonClient):
              messages=messages,
              model=model,
              stream=True,
+             stream_options = {"include_usage": True},
              **kwargs
          )

@@ -205,6 +214,7 @@ class AsyncOpenaiReasonClient(AsyncReasonClient):
              messages=messages,
              model=model,
              stream=True,
+             stream_options = {"include_usage": True},
              **kwargs
          )

deepanything/ResponseClient.py CHANGED
@@ -55,6 +55,7 @@ class OpenaiResponseClient(ResponseClient):
              messages=messages,
              model=model,
              stream=True,
+             stream_options = {"include_usage": True},
              **kwargs
          )

@@ -83,5 +84,6 @@ class AsyncOpenaiResponseClient(AsyncResponseClient):
              messages=messages,
              model=model,
              stream=True,
+             stream_options = {"include_usage": True},
              **kwargs
          )
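All six `stream_options = {"include_usage": True}` additions above serve the same purpose: they ask the backend to append a final usage-carrying chunk so the budget logic in DeepAnythingClient.py can subtract real completion token counts. The option is part of the standard OpenAI streaming API; a minimal consumer looks like this (model name and key handling are illustrative):

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
stream = client.chat.completions.create(
    model="gpt-4o-mini",                       # illustrative model
    messages=[{"role": "user", "content": "hi"}],
    stream=True,
    stream_options={"include_usage": True},    # the option these hunks add
)
for chunk in stream:
    if not chunk.choices:                      # the final, usage-only chunk
        print(chunk.usage)                     # e.g. CompletionUsage(...)
```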
deepanything/Server/Server.py CHANGED
@@ -1,7 +1,8 @@
  from dataclasses import dataclass
  import time
  import uvicorn
- from typing import Dict, List, Optional
+ from typing import Dict, List, Optional, Any
+ import json

  from openai.types.model import Model as OpenaiModel
  from fastapi import FastAPI,Depends, HTTPException, status,Header
@@ -21,8 +22,8 @@ class ModelInfo:
      reason_model : str
      response_client : str
      response_model : str
-     created : int
-     reason_prompt : str
+     created : int = int(time.time())
+     reason_prompt : str = "<Think>{}</Think>"

  class DeepAnythingServer:
      app : FastAPI = FastAPI()
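A caveat on the new `created` default: a plain dataclass default is evaluated once, when the class body is executed at import time, so every `ModelInfo` relying on it shares the import timestamp rather than its own creation time. If per-instance timestamps were intended, the usual spelling is a `default_factory` (sketch below; `ModelInfoSketch` is a hypothetical name, not part of the package):

```python
import time
from dataclasses import dataclass, field

@dataclass
class ModelInfoSketch:
    name: str
    # evaluated per instance, unlike `created: int = int(time.time())`
    created: int = field(default_factory=lambda: int(time.time()))
```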
@@ -35,9 +36,12 @@ class DeepAnythingServer:
      api_keys : List[str] = []
      security = HTTPBearer()

-     def __init__(self, host:str = None, port:int = None,config_object : Dict = None):
-         if config_object is not None:
-             self.load_config(config_object)
+     def __init__(self, host:str = None, port:int = None, config : Any or str = None):
+         if config is not None:
+             if isinstance(config,str):
+                 with open(config) as f:
+                     config = json.load(f)
+             self.load_config(config)

          if host:
              self.host = host
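Note the annotation in the new signature: `Any or str` is an ordinary boolean expression evaluated at definition time, and since `typing.Any` is truthy it reduces to just `Any`; it does not declare a union. The accepted values (a config dict or a path string) would conventionally be written as a `Union`:

```python
from typing import Any, Dict, Optional, Union

assert (Any or str) is Any   # `or` returns the first truthy operand

# What `config : Any or str = None` presumably means to declare:
config: Optional[Union[Dict, str]] = None
```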
@@ -50,6 +54,13 @@ class DeepAnythingServer:
      def run(self):
          uvicorn.run(self.app,host=self.host,port=self.port,log_level="trace")

+     @staticmethod
+     def _extract_args(query : Types.ChatCompletionQuery) -> dict:
+         args = query.dict()
+         for key in ["messages","model","stream"]:
+             args.pop(key)
+         return args
+
      def load_config(self,config_object : Dict) -> None:
          self.host = config_object.get("host","0.0.0.0")
          self.port = config_object.get("port",8000)
@@ -106,8 +117,17 @@ class DeepAnythingServer:

          self.api_keys = config_object.get("api_keys",[])

+
+     def add_reason_client(self,name:str,client:AsyncReasonClient):
+         self.reason_clients[name] = client
+
+     def add_response_client(self,name:str,client:AsyncResponseClient):
+         self.response_clients[name] = client
+
+     def add_model(self,name:str,model:ModelInfo):
+         self.models[name] = model
      def verify_authorization(self, authorization:Optional[str]):
-         if self.api_keys == []:
+         if not self.api_keys:
              return

          if authorization is None:
@@ -140,6 +160,13 @@ class DeepAnythingServer:
              yield f"data: {chunk.model_dump_json(indent=None)}\n\n"
          yield "data: [DONE]"

+         args = DeepAnythingServer._extract_args(query)
+
+         max_tokens = None
+         if "max_tokens" in args:
+             max_tokens = args["max_tokens"]
+             args.pop("max_tokens")
+
          if query.stream:
              res = sse(
                  await chat_completion_stream_async(
@@ -150,6 +177,9 @@ class DeepAnythingServer:
                      response_model=model.response_model,
                      show_model=model.name,
                      reason_prompt=model.reason_prompt,
+                     response_args=args,
+                     reason_args=args,
+                     max_tokens=max_tokens
                  )
              )
              return StreamingResponse(
@@ -164,11 +194,14 @@ class DeepAnythingServer:
              response_client=self.response_clients[model.response_client],
              response_model=model.response_model,
              show_model=model.name,
-             reason_prompt=model.reason_prompt
+             reason_prompt=model.reason_prompt,
+             response_args=args,
+             reason_args=args,
+             max_tokens=max_tokens
          )
          return res.model_dump_json()

-     async def get_models(self) -> Types.ModelsListResponse:
+     def get_models(self) -> Types.ModelsListResponse:
          return Types.ModelsListResponse(
              data = [OpenaiModel(
                  id = model_info.name,
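In both the stream and non-stream branches above, `response_args` and `reason_args` receive the same dict object returned by `_extract_args`. Any in-place write, such as the `response_args["max_tokens"] = max_tokens` seen in the DeepAnythingClient hunks, is therefore visible through `reason_args` too, which may or may not be intended:

```python
args = {"temperature": 0.7}
reason_args = response_args = args       # one dict, two names

response_args["max_tokens"] = 64
assert reason_args["max_tokens"] == 64   # the mutation is shared
```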
deepanything/Server/Types.py CHANGED
@@ -1,6 +1,7 @@
  from pydantic import BaseModel
  from openai.types.model import Model as OpenaiModel
- from typing import Dict,List
+ from typing import Dict, List, Optional
+

  class ModelsListResponse(BaseModel):
      data : List[OpenaiModel]
@@ -12,4 +13,10 @@ class ChatCompletionMessage(BaseModel):
  class ChatCompletionQuery(BaseModel):
      model : str
      messages : List[ChatCompletionMessage]
-     stream : bool = False
+     stream : bool = False
+     temperature : Optional[float] = 0.7
+     top_p : Optional[float] = 1.0
+     frequency_penalty : Optional[float] = 0.0
+     presence_penalty : Optional[float] = 0.0
+     max_tokens : Optional[int] = None
+     stop : Optional[List[str]] = None
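With the widened `ChatCompletionQuery`, the server's new `_extract_args` helper (added in Server.py above) can forward standard sampling parameters. A sketch of a query and the argument dict it reduces to, assuming `ChatCompletionMessage` has the usual `role`/`content` fields (its body is elided in this diff):

```python
query = ChatCompletionQuery(
    model="deepseek-r1-plus-gpt",            # illustrative model name
    messages=[ChatCompletionMessage(role="user", content="hi")],
    temperature=0.5,
)
args = query.dict()                          # pydantic v1 API, as used by the server
for key in ("messages", "model", "stream"):  # the keys _extract_args pops
    args.pop(key)
# args -> {"temperature": 0.5, "top_p": 1.0, "frequency_penalty": 0.0,
#          "presence_penalty": 0.0, "max_tokens": None, "stop": None}
```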
deepanything/Utility.py CHANGED
@@ -168,7 +168,7 @@ def merge_chunk(
          model: str,
          created: int = int(time.time())
  ) -> chat_completion.ChatCompletion:
-     """同步合并 chunk 流"""
+
      reasoning_content = ""
      content = ""
      comp_tokens = prompt_tokens = total_tokens = 0

(The removed docstring reads "synchronously merge the chunk stream.")
deepanything/__main__.py CHANGED
@@ -12,7 +12,7 @@ def main():
      if args.config is not None:
          with open(args.config) as f:
              config = json.load(f)
-         server = DeepAnythingServer(host=args.host,port=args.port,config_object=config)
+         server = DeepAnythingServer(host=args.host, port=args.port, config=config)
          server.run()
      else:
          print("No config file specified.")
deepanything-0.1.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: deepanything
- Version: 0.1.2
+ Version: 0.1.4
  Summary: DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).
  Author: Junity
  Author-email: 1727636624@qq.com
@@ -140,6 +140,8 @@ async def main():
  asyncio.run(main())
  ```

+ More example can be find in [examples](examples).
+
  ### 2. Use as a Server

  ```bash
deepanything-0.1.4.dist-info/RECORD ADDED
@@ -0,0 +1,16 @@
+ deepanything/DeepAnythingClient.py,sha256=B6m67keuGqUsTBABjSaMpyQpI2e2USbos5Fsmma73HU,15377
+ deepanything/ReasonClient.py,sha256=4IAotSKIzxR-jXHfU9YJH1KHdWvBfbTKMVlXvEEHq9E,7558
+ deepanything/ResponseClient.py,sha256=NbXjlU_0qTKBNjZy8B9J9emuABQYvx3NZsWuja9OnMI,2989
+ deepanything/Stream.py,sha256=8ESR8ttjyPZ-uXPDENsVWUzaL34_GT2OZBJ0PWu7vsA,1578
+ deepanything/Utility.py,sha256=TFH-4NS-gQpzzEb1Aba7WpDEqBqNRF1STWFzfRqQLcg,6645
+ deepanything/__init__.py,sha256=_2RolcKcpxmW0dmtiQpXlvgxe5dvqx90Yg_Q_oVLVZQ,175
+ deepanything/__main__.py,sha256=A6SQ1hkW2LAiHoRxQJdCLrSGUn3NP-eQY8O2oTv9nYA,936
+ deepanything/Server/Server.py,sha256=90MMxWXppLuUy8o9lEyJC_yXJeSSanBDBOjCcsG0yr8,8359
+ deepanything/Server/Types.py,sha256=b7aMaRBgODEKdyYe0FeraUfrygJuye3b5lfQTOWASXA,650
+ deepanything/Server/__init__.py,sha256=eIpn6NbNvEg4ST8CuuIuzPT3m_fTlmPC3sikPoPFsYo,92
+ deepanything-0.1.4.dist-info/LICENSE,sha256=JWYd2E-mcNcSYjT5nk4ayM5kkkDq6ZlOxVcYsyqCIwU,1059
+ deepanything-0.1.4.dist-info/METADATA,sha256=7MKrxbIcsqBLCpb6CBppWi7ftEMHnNmC829IBAMKCUg,5893
+ deepanything-0.1.4.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+ deepanything-0.1.4.dist-info/entry_points.txt,sha256=UT4gNGx6dJsKBjZIl3VkMekh385O5WMbMidAAla6UB4,60
+ deepanything-0.1.4.dist-info/top_level.txt,sha256=wGeRb__4jEJTclCUl0cxhgubD_Bq-QT38VIH6C4KpzY,13
+ deepanything-0.1.4.dist-info/RECORD,,
deepanything-0.1.2.dist-info/RECORD DELETED
@@ -1,16 +0,0 @@
- deepanything/DeepAnythingClient.py,sha256=7yf4iteGjcDWcLKd3GD0OWv-A_d54QKVAuSHMl7-T54,15042
- deepanything/ReasonClient.py,sha256=TxyXf7q3f_xASFA-blrKNHhaV38FIcFKmTnGBqU_d0U,7116
- deepanything/ResponseClient.py,sha256=oWPIQXknm7QEkG5Ysx9ejKUyISd0cHZF-HVG0fersOQ,2871
- deepanything/Stream.py,sha256=8ESR8ttjyPZ-uXPDENsVWUzaL34_GT2OZBJ0PWu7vsA,1578
- deepanything/Utility.py,sha256=HmiXU5X1eQO9iL292lfFA3dbGVjEPIM4Ayvw8Z8y2lk,6677
- deepanything/__init__.py,sha256=_2RolcKcpxmW0dmtiQpXlvgxe5dvqx90Yg_Q_oVLVZQ,175
- deepanything/__main__.py,sha256=x9LQGBlVnyML4yJ6Y3Rrt1pZ4xkLu0uZ569wKqwmpJQ,941
- deepanything/Server/Server.py,sha256=rrTVahuDe0U1U6V6IfWJ7p5c-LbOQ15T42lymbwzna4,7211
- deepanything/Server/Types.py,sha256=iN3X2QDnOyKwpuifUymeu1qg4OZ8uu4QG9ThttZFSDQ,390
- deepanything/Server/__init__.py,sha256=eIpn6NbNvEg4ST8CuuIuzPT3m_fTlmPC3sikPoPFsYo,92
- deepanything-0.1.2.dist-info/LICENSE,sha256=JWYd2E-mcNcSYjT5nk4ayM5kkkDq6ZlOxVcYsyqCIwU,1059
- deepanything-0.1.2.dist-info/METADATA,sha256=06fgJbN8qXxxsFKbwkUaBs3hJm4VDe5JO3lRAlrk0Ko,5840
- deepanything-0.1.2.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
- deepanything-0.1.2.dist-info/entry_points.txt,sha256=UT4gNGx6dJsKBjZIl3VkMekh385O5WMbMidAAla6UB4,60
- deepanything-0.1.2.dist-info/top_level.txt,sha256=wGeRb__4jEJTclCUl0cxhgubD_Bq-QT38VIH6C4KpzY,13
- deepanything-0.1.2.dist-info/RECORD,,