deepanything 0.1.4__tar.gz → 0.1.5__tar.gz

Sign up to get free protection for your applications and to get access to all the features.
Files changed (25) hide show
  1. {deepanything-0.1.4 → deepanything-0.1.5}/PKG-INFO +56 -3
  2. {deepanything-0.1.4 → deepanything-0.1.5}/README.md +55 -2
  3. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/Server/Server.py +88 -54
  4. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything.egg-info/PKG-INFO +56 -3
  5. {deepanything-0.1.4 → deepanything-0.1.5}/setup.py +1 -1
  6. {deepanything-0.1.4 → deepanything-0.1.5}/test/think.py +16 -4
  7. {deepanything-0.1.4 → deepanything-0.1.5}/LICENSE +0 -0
  8. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/DeepAnythingClient.py +0 -0
  9. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/ReasonClient.py +0 -0
  10. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/ResponseClient.py +0 -0
  11. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/Server/Types.py +0 -0
  12. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/Server/__init__.py +0 -0
  13. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/Stream.py +0 -0
  14. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/Utility.py +0 -0
  15. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/__init__.py +0 -0
  16. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything/__main__.py +0 -0
  17. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything.egg-info/SOURCES.txt +0 -0
  18. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything.egg-info/dependency_links.txt +0 -0
  19. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything.egg-info/entry_points.txt +0 -0
  20. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything.egg-info/requires.txt +0 -0
  21. {deepanything-0.1.4 → deepanything-0.1.5}/deepanything.egg-info/top_level.txt +0 -0
  22. {deepanything-0.1.4 → deepanything-0.1.5}/requirements.txt +0 -0
  23. {deepanything-0.1.4 → deepanything-0.1.5}/setup.cfg +0 -0
  24. {deepanything-0.1.4 → deepanything-0.1.5}/test/server.py +0 -0
  25. {deepanything-0.1.4 → deepanything-0.1.5}/test/think_async.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: deepanything
3
- Version: 0.1.4
3
+ Version: 0.1.5
4
4
  Summary: DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).
5
5
  Author: Junity
6
6
  Author-email: 1727636624@qq.com
@@ -159,6 +159,7 @@ More examples can be found in [examples](examples).
159
159
  Below is an example of a configuration file:
160
160
 
161
161
  ```json
162
+ // Using R1 with Qwen-Max-Latest
162
163
  {
163
164
  "host" : "0.0.0.0",
164
165
  "port" : 8080,
@@ -187,9 +188,41 @@ Below is an example of a configuration file:
187
188
  "response_model" : "qwen-max-latest"
188
189
  }
189
190
  ],
190
- "api_keys": [
191
+ "api_keys" : [
191
192
  "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
192
- ]
193
+ ],
194
+ "log": {
195
+ "version": 1,
196
+ "disable_existing_loggers": false,
197
+ "formatters": {
198
+ "default": {
199
+ "()": "uvicorn.logging.DefaultFormatter",
200
+ "fmt": "%(levelprefix)s %(message)s",
201
+ "use_colors": null
202
+ },
203
+ "access": {
204
+ "()": "uvicorn.logging.AccessFormatter",
205
+ "fmt": "%(levelprefix)s %(client_addr)s - \"%(request_line)s\" %(status_code)s"
206
+ }
207
+ },
208
+ "handlers": {
209
+ "default": {
210
+ "formatter": "default",
211
+ "class": "logging.StreamHandler",
212
+ "stream": "ext://sys.stderr"
213
+ },
214
+ "access": {
215
+ "formatter": "access",
216
+ "class": "logging.StreamHandler",
217
+ "stream": "ext://sys.stdout"
218
+ }
219
+ },
220
+ "loggers": {
221
+ "uvicorn": {"handlers": ["default"], "level": "INFO", "propagate": false},
222
+ "uvicorn.error": {"level": "INFO"},
223
+ "uvicorn.access": {"handlers": ["access"], "level": "INFO", "propagate": false}
224
+ }
225
+ }
193
226
  }
194
227
  ```
195
228
 
@@ -198,6 +231,26 @@ Below is an example of a configuration file:
198
231
  - reason_clients: Configuration for thinking models, currently supports deepseek and openai types. When the type is openai, deepanything directly uses the model's output as the thinking content, and it is recommended to use qwq-32b in this case.
199
232
  - response_clients: Configuration for response models, currently only supports the openai type.
200
233
  - api_keys: API keys for user authentication. When left blank or an empty list, the server does not use API keys for authentication.
234
+ - log: Log configuration. If this item is not filled in, the default logging configuration of uvicorn will be used. For details, please refer to [uvicorn logging configuration](https://www.uvicorn.org/settings/#logging).
235
+
236
+ ## Deploying with Docker
237
+ ### 1. Pull the Image
238
+ ```bash
239
+ docker pull junity233/deepanything:latest
240
+ ```
241
+
242
+ ### 2. Create config.json
243
+ First, create a folder in your desired directory, and then create a file named `config.json` inside it. This folder will be mounted into the container:
244
+ ```bash
245
+ mkdir deepanything-data # You can replace this with another name
246
+ vim deepanything-data/config.json # Edit the configuration file, you can refer to examples/config.json
247
+ ```
248
+
249
+ ### 3. Run the Container
250
+ ```bash
251
+ # Remember to modify the port mapping
252
+ docker run -v ./deepanything-data:/data -p 8080:8080 junity233/deepanything:latest
253
+ ```
201
254
 
202
255
  ## License
203
256
 
@@ -141,6 +141,7 @@ More examples can be found in [examples](examples).
141
141
  Below is an example of a configuration file:
142
142
 
143
143
  ```json
144
+ // Using R1 with Qwen-Max-Latest
144
145
  {
145
146
  "host" : "0.0.0.0",
146
147
  "port" : 8080,
@@ -169,9 +170,41 @@ Below is an example of a configuration file:
169
170
  "response_model" : "qwen-max-latest"
170
171
  }
171
172
  ],
172
- "api_keys": [
173
+ "api_keys" : [
173
174
  "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
174
- ]
175
+ ],
176
+ "log": {
177
+ "version": 1,
178
+ "disable_existing_loggers": false,
179
+ "formatters": {
180
+ "default": {
181
+ "()": "uvicorn.logging.DefaultFormatter",
182
+ "fmt": "%(levelprefix)s %(message)s",
183
+ "use_colors": null
184
+ },
185
+ "access": {
186
+ "()": "uvicorn.logging.AccessFormatter",
187
+ "fmt": "%(levelprefix)s %(client_addr)s - \"%(request_line)s\" %(status_code)s"
188
+ }
189
+ },
190
+ "handlers": {
191
+ "default": {
192
+ "formatter": "default",
193
+ "class": "logging.StreamHandler",
194
+ "stream": "ext://sys.stderr"
195
+ },
196
+ "access": {
197
+ "formatter": "access",
198
+ "class": "logging.StreamHandler",
199
+ "stream": "ext://sys.stdout"
200
+ }
201
+ },
202
+ "loggers": {
203
+ "uvicorn": {"handlers": ["default"], "level": "INFO", "propagate": false},
204
+ "uvicorn.error": {"level": "INFO"},
205
+ "uvicorn.access": {"handlers": ["access"], "level": "INFO", "propagate": false}
206
+ }
207
+ }
175
208
  }
176
209
  ```
177
210
 
@@ -180,6 +213,26 @@ Below is an example of a configuration file:
180
213
  - reason_clients: Configuration for thinking models, currently supports deepseek and openai types. When the type is openai, deepanything directly uses the model's output as the thinking content, and it is recommended to use qwq-32b in this case.
181
214
  - response_clients: Configuration for response models, currently only supports the openai type.
182
215
  - api_keys: API keys for user authentication. When left blank or an empty list, the server does not use API keys for authentication.
216
+ - log: Log configuration. If this item is not filled in, the default logging configuration of uvicorn will be used. For details, please refer to [uvicorn logging configuration](https://www.uvicorn.org/settings/#logging).
217
+
218
+ ## Deploying with Docker
219
+ ### 1. Pull the Image
220
+ ```bash
221
+ docker pull junity233/deepanything:latest
222
+ ```
223
+
224
+ ### 2. Create config.json
225
+ First, create a folder in your desired directory, and then create a file named `config.json` inside it. This folder will be mounted into the container:
226
+ ```bash
227
+ mkdir deepanything-data # You can replace this with another name
228
+ vim deepanything-data/config.json # Edit the configuration file, you can refer to examples/config.json
229
+ ```
230
+
231
+ ### 3. Run the Container
232
+ ```bash
233
+ # Remember to modify the port mapping
234
+ docker run -v ./deepanything-data:/data -p 8080:8080 junity233/deepanything:latest
235
+ ```
183
236
 
184
237
  ## License
185
238
 
@@ -5,9 +5,10 @@ from typing import Dict, List, Optional, Any
5
5
  import json
6
6
 
7
7
  from openai.types.model import Model as OpenaiModel
8
- from fastapi import FastAPI,Depends, HTTPException, status,Header
8
+ from fastapi import FastAPI,Depends, HTTPException, status,Header,Request
9
9
  from fastapi.responses import StreamingResponse
10
10
  from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
11
+ from uvicorn.config import LOGGING_CONFIG
11
12
 
12
13
  from deepanything.DeepAnythingClient import chat_completion_stream_async, chat_completion_async
13
14
  from deepanything.ResponseClient import AsyncOpenaiResponseClient,AsyncResponseClient
@@ -35,6 +36,7 @@ class DeepAnythingServer:
35
36
  model_owner : str = "deepanything"
36
37
  api_keys : List[str] = []
37
38
  security = HTTPBearer()
39
+ log_config : Dict[str,Any] = LOGGING_CONFIG
38
40
 
39
41
  def __init__(self, host:str = None, port:int = None, config : Any or str = None):
40
42
  if config is not None:
@@ -52,7 +54,7 @@ class DeepAnythingServer:
52
54
  self.app.add_api_route("/v1/chat/completions",self.chat_completions,methods=["POST"])
53
55
 
54
56
  def run(self):
55
- uvicorn.run(self.app,host=self.host,port=self.port,log_level="trace")
57
+ uvicorn.run(self.app,host=self.host,port=self.port,log_config=self.log_config)
56
58
 
57
59
  @staticmethod
58
60
  def _extract_args(query : Types.ChatCompletionQuery) -> dict:
@@ -66,36 +68,18 @@ class DeepAnythingServer:
66
68
  self.port = config_object.get("port",8000)
67
69
  self.model_owner = config_object.get("model_owner","deepanything")
68
70
 
69
- reason_clients:List[Dict] = config_object.get("reason_clients",[])
71
+ self._load_reason_clients(config_object)
72
+ self._load_response_clients(config_object)
73
+ self._load_models(config_object)
70
74
 
71
- for client in reason_clients:
72
- name = client["name"]
73
- base_url = client["base_url"]
74
- api_key = client.get("api_key","")
75
- extract_args = client.get("extract_args",{})
76
-
77
- if client["type"] == 'deepseek':
78
- self.reason_clients[name] = AsyncDeepseekReasonClient(base_url, api_key, **extract_args)
79
- elif client["type"] == 'openai':
80
- self.reason_clients[name] = AsyncOpenaiReasonClient(base_url, api_key, **extract_args)
81
- else:
82
- raise Exception("unknown reason client type")
83
-
84
- response_clients : List[Dict] = config_object.get("response_clients",[])
85
-
86
- for client in response_clients:
87
- name = client["name"]
88
- base_url = client["base_url"]
89
- api_key = client.get("api_key","")
90
- extract_args = client.get("extract_args",{})
91
-
92
- if client["type"] == 'openai':
93
- self.response_clients[name] = AsyncOpenaiResponseClient(base_url,api_key,**extract_args)
94
- else:
95
- raise Exception("unknown response client type")
75
+ self.api_keys = config_object.get("api_keys",[])
76
+ self.log_config = config_object.get("log",LOGGING_CONFIG)
77
+ if self.log_config == {}:
78
+ self.log_config = LOGGING_CONFIG
96
79
 
97
- models : List[Dict] = config_object.get("models",[])
98
80
 
81
+ def _load_models(self, config_object):
82
+ models: List[Dict] = config_object.get("models", [])
99
83
  for _model in models:
100
84
  name = _model["name"]
101
85
  reason_client = _model["reason_client"]
@@ -103,20 +87,51 @@ class DeepAnythingServer:
103
87
  response_client = _model["response_client"]
104
88
  response_model = _model["response_model"]
105
89
  created = _model.get("created", int(time.time()))
106
- reason_prompt = _model.get("reason_prompt","<Think>{}</Think>")
90
+ reason_prompt = _model.get("reason_prompt", "<Think>{}</Think>")
91
+
92
+ if reason_client not in self.reason_clients:
93
+ raise ValueError(f"Reason client '{reason_model}' for '{name}' not found")
94
+
95
+ if response_client not in self.response_clients:
96
+ raise ValueError(f"Response client '{response_model}' for '{name}' not found")
107
97
 
108
98
  self.models[name] = ModelInfo(
109
- name = name,
110
- reason_client = reason_client,
111
- reason_model = reason_model,
112
- response_client = response_client,
113
- response_model = response_model,
114
- created = created,
115
- reason_prompt = reason_prompt
99
+ name=name,
100
+ reason_client=reason_client,
101
+ reason_model=reason_model,
102
+ response_client=response_client,
103
+ response_model=response_model,
104
+ created=created,
105
+ reason_prompt=reason_prompt
116
106
  )
117
107
 
118
- self.api_keys = config_object.get("api_keys",[])
108
+ def _load_response_clients(self, config_object):
109
+ response_clients: List[Dict] = config_object.get("response_clients", [])
110
+ for client in response_clients:
111
+ name = client["name"]
112
+ base_url = client["base_url"]
113
+ api_key = client.get("api_key", "")
114
+ extract_args = client.get("extract_args", {})
119
115
 
116
+ if client["type"] == 'openai':
117
+ self.response_clients[name] = AsyncOpenaiResponseClient(base_url, api_key, **extract_args)
118
+ else:
119
+ raise ValueError(f"Unsupported response client type '{client['type']}'")
120
+
121
+ def _load_reason_clients(self, config_object):
122
+ reason_clients: List[Dict] = config_object.get("reason_clients", [])
123
+ for client in reason_clients:
124
+ name = client["name"]
125
+ base_url = client["base_url"]
126
+ api_key = client.get("api_key", "")
127
+ extract_args = client.get("extract_args", {})
128
+
129
+ if client["type"] == 'deepseek':
130
+ self.reason_clients[name] = AsyncDeepseekReasonClient(base_url, api_key, **extract_args)
131
+ elif client["type"] == 'openai':
132
+ self.reason_clients[name] = AsyncOpenaiReasonClient(base_url, api_key, **extract_args)
133
+ else:
134
+ raise Exception("unknown reason client type")
120
135
 
121
136
  def add_reason_client(self,name:str,client:AsyncReasonClient):
122
137
  self.reason_clients[name] = client
@@ -126,7 +141,8 @@ class DeepAnythingServer:
126
141
 
127
142
  def add_model(self,name:str,model:ModelInfo):
128
143
  self.models[name] = model
129
- def verify_authorization(self, authorization:Optional[str]):
144
+
145
+ def _verify_authorization(self, authorization:Optional[str]):
130
146
  if not self.api_keys:
131
147
  return
132
148
 
@@ -152,11 +168,28 @@ class DeepAnythingServer:
152
168
  headers={"WWW-Authenticate": "Bearer"},
153
169
  )
154
170
 
155
- async def chat_completions(self, query : Types.ChatCompletionQuery, authorization:Optional[str] = Header(None)):
156
- self.verify_authorization(authorization)
171
+ async def chat_completions(
172
+ self,
173
+ request: Request, # 新增加Request参数
174
+ query: Types.ChatCompletionQuery,
175
+ authorization: Optional[str] = Header(None)
176
+ ):
177
+ self._verify_authorization(authorization)
178
+
179
+ if query.model not in self.models:
180
+ raise HTTPException(
181
+ status_code=status.HTTP_400_BAD_REQUEST,
182
+ detail="Model not found",
183
+ )
184
+
157
185
  model = self.models[query.model]
158
- async def sse(it: AsyncStream):
186
+
187
+ # 修改点1:将request传递给_sse_warp生成器
188
+ async def _sse_warp(it: AsyncStream, req: Request):
159
189
  async for chunk in it:
190
+ if await req.is_disconnected():
191
+ await it.close()
192
+ break
160
193
  yield f"data: {chunk.model_dump_json(indent=None)}\n\n"
161
194
  yield "data: [DONE]"
162
195
 
@@ -168,7 +201,7 @@ class DeepAnythingServer:
168
201
  args.pop("max_tokens")
169
202
 
170
203
  if query.stream:
171
- res = sse(
204
+ res = _sse_warp(
172
205
  await chat_completion_stream_async(
173
206
  messages=query.messages,
174
207
  reason_client=self.reason_clients[model.reason_client],
@@ -180,7 +213,8 @@ class DeepAnythingServer:
180
213
  response_args=args,
181
214
  reason_args=args,
182
215
  max_tokens=max_tokens
183
- )
216
+ ),
217
+ request # 传入request对象
184
218
  )
185
219
  return StreamingResponse(
186
220
  res,
@@ -188,16 +222,16 @@ class DeepAnythingServer:
188
222
  )
189
223
  else:
190
224
  res = await chat_completion_async(
191
- messages=query.messages,
192
- reason_client=self.reason_clients[model.reason_client],
193
- reason_model=model.reason_model,
194
- response_client=self.response_clients[model.response_client],
195
- response_model=model.response_model,
196
- show_model=model.name,
197
- reason_prompt=model.reason_prompt,
198
- response_args=args,
199
- reason_args=args,
200
- max_tokens=max_tokens
225
+ messages=query.messages,
226
+ reason_client=self.reason_clients[model.reason_client],
227
+ reason_model=model.reason_model,
228
+ response_client=self.response_clients[model.response_client],
229
+ response_model=model.response_model,
230
+ show_model=model.name,
231
+ reason_prompt=model.reason_prompt,
232
+ response_args=args,
233
+ reason_args=args,
234
+ max_tokens=max_tokens
201
235
  )
202
236
  return res.model_dump_json()
203
237
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: deepanything
3
- Version: 0.1.4
3
+ Version: 0.1.5
4
4
  Summary: DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).
5
5
  Author: Junity
6
6
  Author-email: 1727636624@qq.com
@@ -159,6 +159,7 @@ More examples can be found in [examples](examples).
159
159
  Below is an example of a configuration file:
160
160
 
161
161
  ```json
162
+ // Using R1 with Qwen-Max-Latest
162
163
  {
163
164
  "host" : "0.0.0.0",
164
165
  "port" : 8080,
@@ -187,9 +188,41 @@ Below is an example of a configuration file:
187
188
  "response_model" : "qwen-max-latest"
188
189
  }
189
190
  ],
190
- "api_keys": [
191
+ "api_keys" : [
191
192
  "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
192
- ]
193
+ ],
194
+ "log": {
195
+ "version": 1,
196
+ "disable_existing_loggers": false,
197
+ "formatters": {
198
+ "default": {
199
+ "()": "uvicorn.logging.DefaultFormatter",
200
+ "fmt": "%(levelprefix)s %(message)s",
201
+ "use_colors": null
202
+ },
203
+ "access": {
204
+ "()": "uvicorn.logging.AccessFormatter",
205
+ "fmt": "%(levelprefix)s %(client_addr)s - \"%(request_line)s\" %(status_code)s"
206
+ }
207
+ },
208
+ "handlers": {
209
+ "default": {
210
+ "formatter": "default",
211
+ "class": "logging.StreamHandler",
212
+ "stream": "ext://sys.stderr"
213
+ },
214
+ "access": {
215
+ "formatter": "access",
216
+ "class": "logging.StreamHandler",
217
+ "stream": "ext://sys.stdout"
218
+ }
219
+ },
220
+ "loggers": {
221
+ "uvicorn": {"handlers": ["default"], "level": "INFO", "propagate": false},
222
+ "uvicorn.error": {"level": "INFO"},
223
+ "uvicorn.access": {"handlers": ["access"], "level": "INFO", "propagate": false}
224
+ }
225
+ }
193
226
  }
194
227
  ```
195
228
 
@@ -198,6 +231,26 @@ Below is an example of a configuration file:
198
231
  - reason_clients: Configuration for thinking models, currently supports deepseek and openai types. When the type is openai, deepanything directly uses the model's output as the thinking content, and it is recommended to use qwq-32b in this case.
199
232
  - response_clients: Configuration for response models, currently only supports the openai type.
200
233
  - api_keys: API keys for user authentication. When left blank or an empty list, the server does not use API keys for authentication.
234
+ - log: Log configuration. If this item is not filled in, the default logging configuration of uvicorn will be used. For details, please refer to [uvicorn logging configuration](https://www.uvicorn.org/settings/#logging).
235
+
236
+ ## Deploying with Docker
237
+ ### 1. Pull the Image
238
+ ```bash
239
+ docker pull junity233/deepanything:latest
240
+ ```
241
+
242
+ ### 2. Create config.json
243
+ First, create a folder in your desired directory, and then create a file named `config.json` inside it. This folder will be mounted into the container:
244
+ ```bash
245
+ mkdir deepanything-data # You can replace this with another name
246
+ vim deepanything-data/config.json # Edit the configuration file, you can refer to examples/config.json
247
+ ```
248
+
249
+ ### 3. Run the Container
250
+ ```bash
251
+ # Remember to modify the port mapping
252
+ docker run -v ./deepanything-data:/data -p 8080:8080 junity233/deepanything:latest
253
+ ```
201
254
 
202
255
  ## License
203
256
 
@@ -8,7 +8,7 @@ with open("requirements.txt") as f:
8
8
 
9
9
  setup(
10
10
  name="deepanything",
11
- version="0.1.4",
11
+ version="0.1.5",
12
12
  author="Junity",
13
13
  author_email="1727636624@qq.com",
14
14
  description="DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).",
@@ -20,7 +20,21 @@ da_client = DeepAnythingClient(
20
20
  reason_prompt="<Think>{}</Think>"
21
21
  )
22
22
  def main():
23
- stream = da_client.chat_completion_stream(
23
+ # stream = da_client.chat_completion_stream(
24
+ # messages=[
25
+ # {
26
+ # "role": "user",
27
+ # "content": "你好"
28
+ # }
29
+ # ],
30
+ # reason_model="Pro/deepseek-ai/DeepSeek-R1",
31
+ # response_model="qwen-max-latest",
32
+ # show_model="R1-qwen-max"
33
+ # )
34
+ #
35
+ # for chunk in stream:
36
+ # print(chunk)
37
+ res = da_client.chat_completion(
24
38
  messages=[
25
39
  {
26
40
  "role": "user",
@@ -32,8 +46,6 @@ def main():
32
46
  show_model="R1-qwen-max"
33
47
  )
34
48
 
35
- for chunk in stream:
36
- print(chunk)
37
-
49
+ print(res)
38
50
 
39
51
  main()
File without changes
File without changes