deepanything 0.1.5.tar.gz → 0.1.7.tar.gz

Files changed (26)
  1. {deepanything-0.1.5 → deepanything-0.1.7}/PKG-INFO +1 -1
  2. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/DeepAnythingClient.py +27 -10
  3. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/ReasonClient.py +26 -0
  4. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/Server/Server.py +74 -28
  5. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/Utility.py +14 -1
  6. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/__main__.py +4 -2
  7. deepanything-0.1.7/deepanything/metadatas.py +4 -0
  8. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything.egg-info/PKG-INFO +1 -1
  9. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything.egg-info/SOURCES.txt +1 -0
  10. {deepanything-0.1.5 → deepanything-0.1.7}/setup.py +2 -1
  11. {deepanything-0.1.5 → deepanything-0.1.7}/test/server.py +1 -1
  12. {deepanything-0.1.5 → deepanything-0.1.7}/LICENSE +0 -0
  13. {deepanything-0.1.5 → deepanything-0.1.7}/README.md +0 -0
  14. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/ResponseClient.py +0 -0
  15. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/Server/Types.py +0 -0
  16. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/Server/__init__.py +0 -0
  17. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/Stream.py +0 -0
  18. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything/__init__.py +0 -0
  19. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything.egg-info/dependency_links.txt +0 -0
  20. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything.egg-info/entry_points.txt +0 -0
  21. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything.egg-info/requires.txt +0 -0
  22. {deepanything-0.1.5 → deepanything-0.1.7}/deepanything.egg-info/top_level.txt +0 -0
  23. {deepanything-0.1.5 → deepanything-0.1.7}/requirements.txt +0 -0
  24. {deepanything-0.1.5 → deepanything-0.1.7}/setup.cfg +0 -0
  25. {deepanything-0.1.5 → deepanything-0.1.7}/test/think.py +0 -0
  26. {deepanything-0.1.5 → deepanything-0.1.7}/test/think_async.py +0 -0

{deepanything-0.1.5 → deepanything-0.1.7}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepanything
-Version: 0.1.5
+Version: 0.1.7
 Summary: DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).
 Author: Junity
 Author-email: 1727636624@qq.com

{deepanything-0.1.5 → deepanything-0.1.7}/deepanything/DeepAnythingClient.py

@@ -4,7 +4,8 @@ from openai.types.chat.chat_completion import ChatCompletion
 
 from deepanything.Stream import Stream,AsyncStream
 from deepanything.Utility import make_usage, make_chat_completion_message, merge_chunk, async_merge_chunk, \
-    make_chat_completion_chunk, make_chat_completion, make_chat_completion_choice, merge_usage, make_id_by_timestamp
+    make_chat_completion_chunk, make_chat_completion, make_chat_completion_choice, merge_usage, make_id_by_timestamp, \
+    attend_message
 from deepanything.ResponseClient import ResponseClient,AsyncResponseClient
 from deepanything.ReasonClient import ReasonClient,AsyncReasonClient
 

@@ -41,10 +42,7 @@ def _build_message(
         reason_content : str,
         reason_prompt : str
 ) -> List:
-    return messages + [make_chat_completion_message(
-        role="assistant",
-        content=reason_prompt.format(reason_content)
-    )]
+    return attend_message(messages,role="assistant",content=reason_prompt.format(reason_content))
 def _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id):
     new_chunk = chunk.model_copy(deep=False)
     new_chunk.model = show_model

@@ -83,6 +81,7 @@ def chat_completion(
         reason_args=None,
         response_args=None,
         reason_prompt: str = "<Think>{}</Think>",
+        reason_system_prompt: Optional[str] = None,
         created: int = int(time.time()),
         stream = False,
         _id: str = make_id_by_timestamp(),

@@ -105,7 +104,8 @@ def chat_completion(
             created=created,
             _id=_id,
             reason_prompt=reason_prompt,
-            max_tokens=max_tokens
+            reason_system_prompt=reason_system_prompt,
+            max_tokens=max_tokens,
         )
 
     if max_tokens is not None:

@@ -145,6 +145,7 @@ def chat_completion_stream(
         reason_args=None,
         response_args=None,
         reason_prompt: str = "<Think>{}</Think>",
+        reason_system_prompt: Optional[str] = None,
         created: int = int(time.time()),
         _id: str = make_id_by_timestamp(),
         max_tokens : Optional[int] = None

@@ -166,6 +167,7 @@ def chat_completion_stream(
     reason_stream = reason_client.reason_stream(
         messages,
         reason_model,
+        reason_system_prompt,
         **reason_args
     )
     stream = reason_stream

@@ -206,6 +208,7 @@ async def chat_completion_async(
         reason_args=None,
         response_args=None,
         reason_prompt: str = "<Think>{}</Think>",
+        reason_system_prompt: Optional[str] = None,
         created: int = int(time.time()),
         _id: str = make_id_by_timestamp(),
         stream=False,

@@ -228,6 +231,7 @@ async def chat_completion_async(
             created=created,
             _id=_id,
             reason_prompt=reason_prompt,
+            reason_system_prompt=reason_system_prompt,
             max_tokens=max_tokens
         )
 

@@ -246,11 +250,14 @@ async def chat_completion_async(
         return reason_chat_completion
     response_args["max_tokens"] = max_tokens
 
+    messages = _build_message(
+        messages,
+        reason_chat_completion.choices[0].message.reasoning_content,
+        reason_prompt
+    )
+
     response_chat_completion:ChatCompletion = await response_client.chat_completions(
-        messages=messages + [make_chat_completion_message(
-            role="assistant",
-            content=reason_prompt.format(reason_chat_completion.choices[0].message.reasoning_content)
-        )],
+        messages=messages,
         model=response_model,
         **response_args
     )

@@ -267,6 +274,7 @@ async def chat_completion_stream_async(
         reason_args=None,
         response_args=None,
         reason_prompt: str = "<Think>{}</Think>",
+        reason_system_prompt: Optional[str] = None,
         created: int = int(time.time()),
         _id: str = make_id_by_timestamp(),
         max_tokens : Optional[int] = None

@@ -287,6 +295,7 @@ async def chat_completion_stream_async(
     reason_stream = await reason_client.reason_stream(
         messages,
         reason_model,
+        reason_system_prompt,
         **reason_args
     )
 

@@ -342,6 +351,7 @@ class DeepAnythingClient:
            show_model : str,
            reason_args=None,
            response_args=None,
+            reason_system_prompt: Optional[str] = None,
            created : int = int(time.time()),
            _id : str = make_id_by_timestamp(),
            stream = False

@@ -355,6 +365,7 @@ class DeepAnythingClient:
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
+            reason_system_prompt=reason_system_prompt,
            created=created,
            _id=_id,
            stream=stream,

@@ -369,6 +380,7 @@ class DeepAnythingClient:
            show_model : str,
            reason_args=None,
            response_args=None,
+            reason_system_prompt: Optional[str] = None,
            created : int = int(time.time()),
            _id : str = make_id_by_timestamp()
    ) -> Stream:

@@ -381,6 +393,7 @@ class DeepAnythingClient:
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
+            reason_system_prompt=reason_system_prompt,
            created=created,
            _id=_id,
            reason_prompt=self.reason_prompt

@@ -411,6 +424,7 @@ class AsyncDeepAnythingClient:
            show_model: str,
            reason_args=None,
            response_args=None,
+            reason_system_prompt: Optional[str] = None,
            created: int = int(time.time()),
            _id: str = make_id_by_timestamp(),
            stream=False

@@ -424,6 +438,7 @@ class AsyncDeepAnythingClient:
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
+            reason_system_prompt=reason_system_prompt,
            created=created,
            _id=_id,
            stream=stream,

@@ -438,6 +453,7 @@ class AsyncDeepAnythingClient:
            show_model : str,
            reason_args=None,
            response_args=None,
+            reason_system_prompt: Optional[str] = None,
            created : int = int(time.time()),
            _id : str = make_id_by_timestamp()
    ) -> AsyncStream:

@@ -450,6 +466,7 @@ class AsyncDeepAnythingClient:
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
+            reason_system_prompt=reason_system_prompt,
            created=created,
            _id=_id,
            reason_prompt=self.reason_prompt
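
Taken together, these hunks thread one new keyword, reason_system_prompt, from the public entry points down to the reason clients. A minimal caller-side sketch follows; the positional parameter names (messages, reason_client, reason_model, response_client, response_model) and the synchronous OpenaiReasonClient/OpenaiResponseClient constructors are assumptions inferred from this diff, not confirmed signatures, and all URLs, keys, and model names are placeholders:

    # Sketch under the assumptions noted above.
    from deepanything.DeepAnythingClient import chat_completion
    from deepanything.ReasonClient import OpenaiReasonClient
    from deepanything.ResponseClient import OpenaiResponseClient

    reason_client = OpenaiReasonClient("https://reason.example.com/v1", "sk-reason")
    response_client = OpenaiResponseClient("https://respond.example.com/v1", "sk-respond")

    completion = chat_completion(
        messages=[{"role": "user", "content": "Why is the sky blue?"}],
        reason_client=reason_client,
        reason_model="qwq-32b-preview",              # placeholder reasoning model
        response_client=response_client,
        response_model="gpt-4o-mini",                # placeholder response model
        show_model="my-deep-model",
        reason_system_prompt="Think step by step.",  # new in 0.1.7
    )
    print(completion.choices[0].message.content)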

{deepanything-0.1.5 → deepanything-0.1.7}/deepanything/ReasonClient.py

@@ -1,4 +1,7 @@
+from typing import Optional
+
 import openai
+from openai import OpenAI
 from openai.types.chat import chat_completion, chat_completion_chunk
 from deepanything.Stream import Stream,AsyncStream
 from deepanything import Utility

@@ -12,6 +15,7 @@ class ReasonClient:
             self,
             messages:list[dict],
             model:str,
+            reason_system_prompt:Optional[str] = None,
             stream = False,
             **kwargs
     ) -> Stream or chat_completion.ChatCompletion:

@@ -26,6 +30,7 @@ class ReasonClient:
     def reason_stream(self,
                       messages:list[dict],
                       model:str,
+                      reason_system_prompt:Optional[str] = None,
                       **kwargs
                       ) -> Stream:
         raise NotImplementedError

@@ -38,6 +43,7 @@ class AsyncReasonClient:
             self,
             messages:list[dict],
             model:str,
+            reason_system_prompt:Optional[str] = None,
             stream = False,
             **kwargs
     ) -> AsyncStream or chat_completion.ChatCompletion:

@@ -52,6 +58,7 @@ class AsyncReasonClient:
     async def reason_stream(self,
                             messages:list[dict],
                             model:str,
+                            reason_system_prompt:Optional[str] = None,
                             **kwargs
                             ) -> AsyncStream:
         raise NotImplementedError

@@ -70,6 +77,7 @@ class DeepseekReasonClient(ReasonClient):
     def reason_stream(self,
                       messages: list[dict],
                       model: str,
+                      reason_system_prompt:Optional[str] = None, # not used
                       **kwargs
                       ) -> Stream:
         stream = self.client.chat.completions.create(

@@ -108,6 +116,7 @@ class AsyncDeepseekReasonClient(AsyncReasonClient):
     async def reason_stream(self,
                             messages: list[dict],
                             model: str,
+                            reason_system_prompt:Optional[str] = None,
                             **kwargs
                             ) -> AsyncStream:
         stream = await self.client.chat.completions.create(

@@ -161,8 +170,12 @@ class OpenaiReasonClient(ReasonClient):
     def reason_stream(self,
                       messages: list[dict],
                       model: str,
+                      reason_system_prompt:Optional[str] = None,
                       **kwargs
                       ) -> Stream:
+        if reason_system_prompt is not None:
+            messages = Utility.attend_message(messages,role="system",content=reason_system_prompt)
+
         stream = self.client.chat.completions.create(
             messages=messages,
             model=model,

@@ -177,11 +190,16 @@ class OpenaiReasonClient(ReasonClient):
             self,
             messages:list[dict],
             model:str,
+            reason_system_prompt:Optional[str] = None,
             stream = False,
             **kwargs
     ) -> Stream or chat_completion.ChatCompletion:
         if stream:
             return self.reason_stream(messages, model, **kwargs)
+
+        if reason_system_prompt is not None:
+            messages = Utility.attend_message(messages,role="system",content=reason_system_prompt)
+
         completion = self.client.chat.completions.create(
             messages=messages,
             model=model,

@@ -207,9 +225,13 @@ class AsyncOpenaiReasonClient(AsyncReasonClient):
     async def reason_stream(self,
                             messages: list[dict],
                             model: str,
+                            reason_system_prompt:Optional[str] = None,
                             **kwargs
                             ) -> AsyncStream:
 
+        if reason_system_prompt is not None:
+            messages = Utility.attend_message(messages,role="system",content=reason_system_prompt)
+
         stream = await self.client.chat.completions.create(
             messages=messages,
             model=model,

@@ -226,12 +248,16 @@ class AsyncOpenaiReasonClient(AsyncReasonClient):
     async def reason(self,
                      messages: list[dict],
                      model: str,
+                     reason_system_prompt:Optional[str] = None,
                      stream = False,
                      **kwargs
                      ) -> AsyncStream or chat_completion.ChatCompletion:
         if stream:
             return await self.reason_stream(messages, model, **kwargs)
 
+        if reason_system_prompt is not None:
+            messages = Utility.attend_message(messages,role="system",content=reason_system_prompt)
+
         completion = await self.client.chat.completions.create(
             messages=messages,
             model=model,
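
One detail worth noting: Utility.attend_message appends the new message to the end of the list (messages + [...]), so the OpenAI-style reason clients place the system prompt after the user turns rather than before them. A self-contained illustration of that injection pattern; the helper below is a simplified stand-in for deepanything.Utility.attend_message, which builds an OpenAI message object rather than a plain dict:

    from typing import List, Optional

    def attend_message(messages: List, role: str, content: Optional[str] = None) -> List:
        # Simplified stand-in: returns a new list, original is untouched.
        return messages + [{"role": role, "content": content}]

    messages = [{"role": "user", "content": "Prove sqrt(2) is irrational."}]
    reason_system_prompt = "Reason carefully and show intermediate steps."

    # Same guard the reason clients use: inject only when a prompt is configured.
    if reason_system_prompt is not None:
        messages = attend_message(messages, role="system", content=reason_system_prompt)

    print([m["role"] for m in messages])  # ['user', 'system'] — appended, not prepended

Callers that expect a leading system message should account for this ordering.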

{deepanything-0.1.5 → deepanything-0.1.7}/deepanything/Server/Server.py

@@ -1,3 +1,4 @@
+from chunk import Chunk
 from dataclasses import dataclass
 import time
 import uvicorn

@@ -6,15 +7,19 @@ import json
 
 from openai.types.model import Model as OpenaiModel
 from fastapi import FastAPI,Depends, HTTPException, status,Header,Request
-from fastapi.responses import StreamingResponse
+from fastapi.responses import StreamingResponse,Response
 from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
 from uvicorn.config import LOGGING_CONFIG
+import logging
+import logging.config
 
 from deepanything.DeepAnythingClient import chat_completion_stream_async, chat_completion_async
 from deepanything.ResponseClient import AsyncOpenaiResponseClient,AsyncResponseClient
 from deepanything.Stream import AsyncStream
 from deepanything.ReasonClient import AsyncDeepseekReasonClient,AsyncOpenaiReasonClient,AsyncReasonClient
 from deepanything.Server import Types
+from deepanything.metadatas import VERSION,PYTHON_RUNTIME
+
 
 @dataclass
 class ModelInfo:

@@ -24,12 +29,14 @@ class ModelInfo:
     response_client : str
     response_model : str
     created : int = int(time.time())
-    reason_prompt : str = "<Think>{}</Think>"
+    reason_prompt : str = "<Think>{}</Think>",
+    reason_system_prompt : Optional[str] = None
 
 class DeepAnythingServer:
     app : FastAPI = FastAPI()
     host : str
     port : int
+    logger : logging.Logger
     reason_clients : Dict[str,AsyncReasonClient] = {}
     response_clients : Dict[str,AsyncResponseClient] = {}
     models : Dict[str,ModelInfo] = {}

@@ -39,6 +46,7 @@ class DeepAnythingServer:
     log_config : Dict[str,Any] = LOGGING_CONFIG
 
     def __init__(self, host:str = None, port:int = None, config : Any or str = None):
+        print(f"DeepAnything Server {VERSION} on {PYTHON_RUNTIME}")
         if config is not None:
             if isinstance(config,str):
                 with open(config) as f:

@@ -53,7 +61,9 @@ class DeepAnythingServer:
         self.app.add_api_route("/v1/models",self.get_models,methods=["GET"],response_model=Types.ModelsListResponse)
         self.app.add_api_route("/v1/chat/completions",self.chat_completions,methods=["POST"])
 
+
     def run(self):
+        self.logger.info(f"DeepAnything server is now running at http://{self.host}:{self.port}")
         uvicorn.run(self.app,host=self.host,port=self.port,log_config=self.log_config)
 
     @staticmethod

@@ -64,21 +74,26 @@ class DeepAnythingServer:
         return args
 
     def load_config(self,config_object : Dict) -> None:
+        print("Loading config")
         self.host = config_object.get("host","0.0.0.0")
         self.port = config_object.get("port",8000)
         self.model_owner = config_object.get("model_owner","deepanything")
 
+        self.log_config = config_object.get("log",LOGGING_CONFIG)
+        if self.log_config == {}:
+            self.log_config = LOGGING_CONFIG
+        logging.config.dictConfig(self.log_config)
+        self.logger = logging.getLogger("deepanything")
+
         self._load_reason_clients(config_object)
         self._load_response_clients(config_object)
         self._load_models(config_object)
 
         self.api_keys = config_object.get("api_keys",[])
-        self.log_config = config_object.get("log",LOGGING_CONFIG)
-        if self.log_config == {}:
-            self.log_config = LOGGING_CONFIG
 
 
     def _load_models(self, config_object):
+        self.logger.info("Loading models")
         models: List[Dict] = config_object.get("models", [])
         for _model in models:
             name = _model["name"]

@@ -88,12 +103,18 @@ class DeepAnythingServer:
             response_model = _model["response_model"]
             created = _model.get("created", int(time.time()))
             reason_prompt = _model.get("reason_prompt", "<Think>{}</Think>")
+            reason_system_prompt = _model.get("reason_system_prompt", None)
+
+            if name in self.models:
+                self.logger.error(f"Detected duplicate model : {name}")
 
             if reason_client not in self.reason_clients:
-                raise ValueError(f"Reason client '{reason_model}' for '{name}' not found")
+                self.logger.error(f"Reason client '{reason_model}' for '{name}' not found")
+                exit(0)
 
             if response_client not in self.response_clients:
-                raise ValueError(f"Response client '{response_model}' for '{name}' not found")
+                self.logger.error(f"Response client '{response_model}' for '{name}' not found")
+                exit(0)
 
             self.models[name] = ModelInfo(
                 name=name,

@@ -102,10 +123,14 @@ class DeepAnythingServer:
                 response_client=response_client,
                 response_model=response_model,
                 created=created,
-                reason_prompt=reason_prompt
+                reason_prompt=reason_prompt,
+                reason_system_prompt=reason_system_prompt
             )
 
+            self.logger.info(f"Loaded model : {name}")
+
     def _load_response_clients(self, config_object):
+        self.logger.info("Loading response clients")
         response_clients: List[Dict] = config_object.get("response_clients", [])
         for client in response_clients:
             name = client["name"]

@@ -113,12 +138,20 @@ class DeepAnythingServer:
             api_key = client.get("api_key", "")
             extract_args = client.get("extract_args", {})
 
+            if name in self.response_clients:
+                self.logger.error(f"Detected duplicate response clients : {name}")
+                exit(0)
+
             if client["type"] == 'openai':
                 self.response_clients[name] = AsyncOpenaiResponseClient(base_url, api_key, **extract_args)
             else:
-                raise ValueError(f"Unsupported response client type '{client['type']}'")
+                self.logger.error(f"Unsupported response client type '{client['type']}'")
+                exit(0)
+
+            self.logger.info(f"Loaded response client : {name}")
 
     def _load_reason_clients(self, config_object):
+        self.logger.info("Loading reason clients")
         reason_clients: List[Dict] = config_object.get("reason_clients", [])
         for client in reason_clients:
             name = client["name"]

@@ -126,12 +159,20 @@ class DeepAnythingServer:
             api_key = client.get("api_key", "")
             extract_args = client.get("extract_args", {})
 
+
+            if name in self.response_clients:
+                self.logger.error(f"Detected duplicate response clients : {name}")
+                exit(0)
+
             if client["type"] == 'deepseek':
                 self.reason_clients[name] = AsyncDeepseekReasonClient(base_url, api_key, **extract_args)
             elif client["type"] == 'openai':
                 self.reason_clients[name] = AsyncOpenaiReasonClient(base_url, api_key, **extract_args)
             else:
-                raise Exception("unknown reason client type")
+                self.logger.error(f"Unsupported reason client type '{client['type']}'")
+                exit(0)
+
+            self.logger.info(f"Loaded reason client : {name}")
 
     def add_reason_client(self,name:str,client:AsyncReasonClient):
         self.reason_clients[name] = client

@@ -142,31 +183,31 @@ class DeepAnythingServer:
     def add_model(self,name:str,model:ModelInfo):
         self.models[name] = model
 
+    @staticmethod
+    def _extract_token(authorization:str):
+        if (authorization is None) or (not authorization.startswith("Bearer ")):
+            return None
+        return authorization[7:]
     def _verify_authorization(self, authorization:Optional[str]):
+        token = DeepAnythingServer._extract_token(authorization)
+
         if not self.api_keys:
-            return
+            return DeepAnythingServer._extract_token(authorization)
 
-        if authorization is None:
+        if authorization is None or token is None:
             raise HTTPException(
                 status_code=status.HTTP_401_UNAUTHORIZED,
                 detail="Expect token",
                 headers={"WWW-Authenticate": "Bearer"},
             )
 
-        if not authorization.startswith("Bearer "):
-            raise HTTPException(
-                status_code=status.HTTP_401_UNAUTHORIZED,
-                detail="Invalid or expired token",
-                headers={"WWW-Authenticate": "Bearer"},
-            )
-
-        token =authorization[7:]
         if token not in self.api_keys:
             raise HTTPException(
                 status_code=status.HTTP_401_UNAUTHORIZED,
                 detail="Invalid or expired token",
                 headers={"WWW-Authenticate": "Bearer"},
             )
+        return token
 
     async def chat_completions(
             self,

@@ -174,7 +215,9 @@ class DeepAnythingServer:
             query: Types.ChatCompletionQuery,
             authorization: Optional[str] = Header(None)
     ):
-        self._verify_authorization(authorization)
+        token = self._verify_authorization(authorization)
+
+        self.logger.info(f"ChatCompletions : {token} -> {query.model}")
 
         if query.model not in self.models:
             raise HTTPException(

@@ -184,14 +227,13 @@ class DeepAnythingServer:
 
         model = self.models[query.model]
 
-        # Change 1: pass the request through to the _sse_warp generator
         async def _sse_warp(it: AsyncStream, req: Request):
             async for chunk in it:
                 if await req.is_disconnected():
                     await it.close()
                     break
-                yield f"data: {chunk.model_dump_json(indent=None)}\n\n"
-            yield "data: [DONE]"
+                yield f"data: {chunk.model_dump_json(indent=None)}\n\n".encode("utf-8")
+            yield "data: [DONE]".encode('utf-8')
 
         args = DeepAnythingServer._extract_args(query)
 

@@ -212,13 +254,14 @@ class DeepAnythingServer:
                     reason_prompt=model.reason_prompt,
                     response_args=args,
                     reason_args=args,
-                    max_tokens=max_tokens
+                    max_tokens=max_tokens,
+
                 ),
-                request  # pass in the request object
+                request
             )
             return StreamingResponse(
                 res,
-                media_type="text/event-stream"
+                media_type="text/event-stream",
             )
         else:
             res = await chat_completion_async(

@@ -233,7 +276,10 @@ class DeepAnythingServer:
                 reason_args=args,
                 max_tokens=max_tokens
             )
-            return res.model_dump_json()
+            return Response(
+                content=res.model_dump_json(indent=None),
+                media_type="application/json"
+            )
 
     def get_models(self) -> Types.ModelsListResponse:
         return Types.ModelsListResponse(
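
For reference, here is a configuration that exercises the new 0.1.7 keys read above (the top-level "log" dictConfig mapping and the per-model "reason_system_prompt"). The key names follow the config_object.get(...) calls in this diff; "base_url" is assumed from the client constructors, and all endpoints, keys, and model names are placeholders:

    from deepanything.Server.Server import DeepAnythingServer

    conf = {
        "host": "0.0.0.0",
        "port": 8000,
        "api_keys": ["sk-demo-token"],
        "log": {},   # empty mapping falls back to uvicorn's LOGGING_CONFIG
        "reason_clients": [
            {"name": "ds", "type": "deepseek",
             "base_url": "https://api.deepseek.example", "api_key": "sk-reason"}
        ],
        "response_clients": [
            {"name": "oa", "type": "openai",
             "base_url": "https://api.openai.example/v1", "api_key": "sk-respond"}
        ],
        "models": [
            {"name": "r1-plus-chat",
             "reason_client": "ds", "reason_model": "deepseek-reasoner",
             "response_client": "oa", "response_model": "gpt-4o-mini",
             "reason_system_prompt": "Think in English."}   # new in 0.1.7
        ],
    }

    DeepAnythingServer(config=conf).run()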

{deepanything-0.1.5 → deepanything-0.1.7}/deepanything/Utility.py

@@ -6,6 +6,7 @@ import uuid
 from openai.types.chat import chat_completion_chunk,chat_completion,chat_completion_message
 from openai.types import completion_usage,completion_choice
 
+
 def make_usage(
         completion_tokens,
         prompt_tokens,

@@ -207,4 +208,16 @@ def merge_usage(
     )
 
 def make_id_by_timestamp():
-    return "chatcmpl-" + str(uuid.uuid4())
+    return "chatcmpl-" + str(uuid.uuid4())
+
+def attend_message(
+        messages : List,
+        role : Literal["developer", "system", "user", "assistant", "tool"],
+        content : Optional[str] = None,
+        reason_content : Optional[str] = None,
+) -> List:
+    return messages + [make_chat_completion_message(
+        role=role,
+        content=content,
+        reasoning_content=reason_content
+    )]

{deepanything-0.1.5 → deepanything-0.1.7}/deepanything/__main__.py

@@ -1,16 +1,18 @@
 from deepanything.Server.Server import DeepAnythingServer
 import argparse
 import json
+from .metadatas import VERSION
 def main():
-    parser = argparse.ArgumentParser(prog="deepanything",description="Run a DeepAnything Server.")
+    parser = argparse.ArgumentParser(prog=f"deepanything {VERSION}",description="Run a DeepAnything Server.")
     parser.add_argument('--host', type=str, required=False, help='Specific the host to listen.If specified,the host will be overwritten by this.')
     parser.add_argument('--port', type=int, required=False, help='Specific the port to listen.If specified,the port will be overwritten by this.')
    parser.add_argument('--config', type=str, required=True, help='Specific the confi path.')
+    parser.add_argument('--version', action='version', version=f'%(prog)s {VERSION}')
 
     args = parser.parse_args()
 
     if args.config is not None:
-        with open(args.config) as f:
+        with open(args.config, encoding='utf-8') as f:
             config = json.load(f)
         server = DeepAnythingServer(host=args.host, port=args.port, config=config)
         server.run()
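
With these changes the CLI reads its config file as UTF-8 and gains a version flag. Assuming the console script installed via the package's entry_points, typical invocations look something like:

    deepanything --config config.json --host 0.0.0.0 --port 8000
    deepanything --version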

deepanything-0.1.7/deepanything/metadatas.py (new file)

@@ -0,0 +1,4 @@
+from sys import version
+
+VERSION = "v0.1.7"
+PYTHON_RUNTIME = f"python{version}"

{deepanything-0.1.5 → deepanything-0.1.7}/deepanything.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepanything
-Version: 0.1.5
+Version: 0.1.7
 Summary: DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).
 Author: Junity
 Author-email: 1727636624@qq.com

{deepanything-0.1.5 → deepanything-0.1.7}/deepanything.egg-info/SOURCES.txt

@@ -9,6 +9,7 @@ deepanything/Stream.py
 deepanything/Utility.py
 deepanything/__init__.py
 deepanything/__main__.py
+deepanything/metadatas.py
 deepanything.egg-info/PKG-INFO
 deepanything.egg-info/SOURCES.txt
 deepanything.egg-info/dependency_links.txt

{deepanything-0.1.5 → deepanything-0.1.7}/setup.py

@@ -1,4 +1,5 @@
 from setuptools import setup, find_packages
+from deepanything.metadatas import VERSION
 
 with open("README.md",encoding='utf-8') as f:
     long_description = f.read()

@@ -8,7 +9,7 @@ with open("requirements.txt") as f:
 
 setup(
     name="deepanything",
-    version="0.1.5",
+    version=VERSION,
     author="Junity",
     author_email="1727636624@qq.com",
     description="DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).",
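
Note: metadatas.VERSION is the string "v0.1.7", while the PKG-INFO above reads 0.1.7; setuptools normalizes a leading "v" per PEP 440, so version=VERSION still publishes the release as 0.1.7.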

{deepanything-0.1.5 → deepanything-0.1.7}/test/server.py

@@ -1,7 +1,7 @@
 from deepanything.Server.Server import DeepAnythingServer
 import json
 
-with open("test/config.json") as f:
+with open("test/config.json", encoding="utf-8") as f:
     conf = json.load(f)
 
 server = DeepAnythingServer(config=conf)