deepanything 0.1.6__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepanything/ReasonClient.py CHANGED
@@ -1,3 +1,5 @@
+ from typing import Optional
+
  import openai
  from openai.types.chat import chat_completion, chat_completion_chunk
  from deepanything.Stream import Stream,AsyncStream
@@ -5,6 +7,9 @@ from deepanything import Utility


  class ReasonClient:
+     """
+     Base Class for Reason Clients
+     """
      def __init__(self) -> None:
          pass

@@ -12,9 +17,20 @@ class ReasonClient:
              self,
              messages:list[dict],
              model:str,
+             reason_system_prompt:Optional[str] = None,
              stream = False,
              **kwargs
      ) -> Stream or chat_completion.ChatCompletion:
+         """
+         Generate reason content like Deepseek R1. This function returns a value that is almost the same as the OpenAI API, but 'content' is None and 'reasoning_content' holds the reason content.
+
+         :param messages: Messages
+         :param model: Model
+         :param reason_system_prompt: Extra prompt for the thinking model. The prompt is appended to the end of the messages as a `system` role message and passed to the thinking model. If not specified, it has no effect.
+         :param stream: Whether to use streaming return
+         :param kwargs: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+         :return: A Stream if stream is True, otherwise a ChatCompletion
+         """
          if stream:
              return self.reason_stream(messages, model, **kwargs)

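The docstring added above pins down the contract of `reason()`: the return value mirrors the OpenAI API, with `content` left as None and the chain of thought exposed as `reasoning_content`. A minimal sketch of calling it through a concrete subclass; the endpoint, key, and model name are placeholders, not taken from this diff:

```python
# Hedged usage sketch for ReasonClient.reason(); values are illustrative.
from deepanything.ReasonClient import DeepseekReasonClient

client = DeepseekReasonClient(
    base_url="https://api.deepseek.com",  # placeholder endpoint
    api_key="sk-...",                     # placeholder key
)

completion = client.reason(
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
    model="deepseek-reasoner",            # placeholder model name
)
# Per the docstring: content is None, reasoning_content carries the reasoning.
print(completion.choices[0].message.reasoning_content)
```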
@@ -26,11 +42,24 @@ class ReasonClient:
      def reason_stream(self,
                        messages:list[dict],
                        model:str,
+                       reason_system_prompt:Optional[str] = None,
                        **kwargs
                        ) -> Stream:
+         """
+         Generate reason content like Deepseek R1. This function returns a value that is almost the same as the OpenAI API, but 'content' is None and 'reasoning_content' holds the reason content. This method uses streaming return.
+
+         :param messages: Messages
+         :param model: Model
+         :param reason_system_prompt: Extra prompt for the thinking model. The prompt is appended to the end of the messages as a `system` role message and passed to the thinking model. If not specified, it has no effect.
+         :param kwargs: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+         :return: A Stream
+         """
          raise NotImplementedError

  class AsyncReasonClient:
+     """
+     Base Class for Async Reason Clients
+     """
      def __init__(self) -> None:
          pass

@@ -38,9 +67,20 @@ class AsyncReasonClient:
              self,
              messages:list[dict],
              model:str,
+             reason_system_prompt:Optional[str] = None,
              stream = False,
              **kwargs
      ) -> AsyncStream or chat_completion.ChatCompletion:
+         """
+         Generate reason content like Deepseek R1. This function returns a value that is almost the same as the OpenAI API, but 'content' is None and 'reasoning_content' holds the reason content.
+
+         :param messages: Messages
+         :param model: Model
+         :param reason_system_prompt: Extra prompt for the thinking model. The prompt is appended to the end of the messages as a `system` role message and passed to the thinking model. If not specified, it has no effect.
+         :param stream: Whether to use streaming return
+         :param kwargs: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+         :return: An AsyncStream if stream is True, otherwise a ChatCompletion
+         """
          if stream:
              return await self.reason_stream(messages, model, **kwargs)

@@ -52,14 +92,32 @@ class AsyncReasonClient:
      async def reason_stream(self,
                              messages:list[dict],
                              model:str,
+                             reason_system_prompt:Optional[str] = None,
                              **kwargs
                              ) -> AsyncStream:
+         """
+         Generate reason content like Deepseek R1. This function returns a value that is almost the same as the OpenAI API, but 'content' is None and 'reasoning_content' holds the reason content. This method uses streaming return.
+
+         :param messages: Messages
+         :param model: Model
+         :param reason_system_prompt: Extra prompt for the thinking model. The prompt is appended to the end of the messages as a `system` role message and passed to the thinking model. If not specified, it has no effect.
+         :param kwargs: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+         :return: An AsyncStream
+         """
          raise NotImplementedError

  class DeepseekReasonClient(ReasonClient):
+     """
+     Deepseek Reason Client
+     """
      client : openai.OpenAI

      def __init__(self,base_url:str,api_key:str,**kwargs) -> None:
+         """
+         :param base_url: Base URL
+         :param api_key: API key
+         :param kwargs: Other parameters used to create the client
+         """
          super().__init__()
          self.client = openai.OpenAI(
              base_url=base_url,
@@ -70,6 +128,7 @@ class DeepseekReasonClient(ReasonClient):
      def reason_stream(self,
                        messages: list[dict],
                        model: str,
+                       reason_system_prompt:Optional[str] = None, # not used
                        **kwargs
                        ) -> Stream:
          stream = self.client.chat.completions.create(
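`DeepseekReasonClient.reason_stream` wraps the SDK stream in a `Stream` (see deepanything/Stream.py below), so the result is consumed like any iterator of `ChatCompletionChunk`s; note that the new `reason_system_prompt` parameter is accepted but, per the inline comment, not used here. A hedged consumption sketch, continuing the client from the previous sketch:

```python
# Sketch: draining a reasoning stream; model and prompt are illustrative.
stream = client.reason_stream(
    messages=[{"role": "user", "content": "Prove sqrt(2) is irrational."}],
    model="deepseek-reasoner",
)
for chunk in stream:  # Stream implements the iterator protocol
    delta = chunk.choices[0].delta
    # Reasoning arrives as reasoning_content; content stays None.
    print(getattr(delta, "reasoning_content", "") or "", end="")
```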
@@ -95,9 +154,18 @@ class DeepseekReasonClient(ReasonClient):
                  .on_close(lambda _: stream.close()))

  class AsyncDeepseekReasonClient(AsyncReasonClient):
+     """
+     Deepseek Reason Async Client
+     """
      client : openai.AsyncOpenAI

      def __init__(self,base_url:str,api_key:str,**kwargs) -> None:
+         """
+         :param base_url: Base URL
+         :param api_key: API key
+         :param kwargs: Other parameters used to create the client
+         """
+
          super().__init__()
          self.client = openai.AsyncOpenAI(
              base_url=base_url,
@@ -108,6 +176,7 @@ class AsyncDeepseekReasonClient(AsyncReasonClient):
      async def reason_stream(self,
                              messages: list[dict],
                              model: str,
+                             reason_system_prompt:Optional[str] = None,
                              **kwargs
                              ) -> AsyncStream:
          stream = await self.client.chat.completions.create(
@@ -144,6 +213,9 @@ def _rebuild_chunk_for_openai(


  class OpenaiReasonClient(ReasonClient):
+     """
+     OpenAI Reason Client. Used for models similar to QwQ.
+     """
      client : openai.OpenAI
      def __init__(
              self,
@@ -151,6 +223,11 @@ class OpenaiReasonClient(ReasonClient):
              api_key:str,
              **kwargs
      ) -> None:
+         """
+         :param base_url: Base URL
+         :param api_key: API key
+         :param kwargs: Other parameters used to create the client
+         """
          super().__init__()
          self.client = openai.OpenAI(
              base_url=base_url,
@@ -161,8 +238,12 @@ class OpenaiReasonClient(ReasonClient):
      def reason_stream(self,
                        messages: list[dict],
                        model: str,
+                       reason_system_prompt:Optional[str] = None,
                        **kwargs
                        ) -> Stream:
+         if reason_system_prompt is not None:
+             messages = Utility.extend_message(messages, role="system", content=reason_system_prompt)
+
          stream = self.client.chat.completions.create(
              messages=messages,
              model=model,
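`Utility.extend_message` itself is not part of this diff; from its call sites and the parameter docs ("appended to the end of the messages as a `system` role message"), it plausibly behaves like the hypothetical reconstruction below — the real implementation may differ:

```python
# Hypothetical reconstruction of Utility.extend_message, inferred from
# its call sites in this diff; not the actual package source.
def extend_message(messages: list[dict], role: str, content: str) -> list[dict]:
    # Return a new list so the caller's message list is not mutated.
    return messages + [{"role": role, "content": content}]
```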
@@ -177,11 +258,16 @@ class OpenaiReasonClient(ReasonClient):
              self,
              messages:list[dict],
              model:str,
+             reason_system_prompt:Optional[str] = None,
              stream = False,
              **kwargs
      ) -> Stream or chat_completion.ChatCompletion:
          if stream:
              return self.reason_stream(messages, model, **kwargs)
+
+         if reason_system_prompt is not None:
+             messages = Utility.extend_message(messages, role="system", content=reason_system_prompt)
+
          completion = self.client.chat.completions.create(
              messages=messages,
              model=model,
@@ -195,8 +281,16 @@ class OpenaiReasonClient(ReasonClient):
          return completion

  class AsyncOpenaiReasonClient(AsyncReasonClient):
+     """
+     OpenAI Reason Async Client. Used for models similar to QwQ.
+     """
      client : openai.AsyncOpenAI
      def __init__(self,base_url:str,api_key:str,**kwargs) -> None:
+         """
+         :param base_url: Base URL
+         :param api_key: API key
+         :param kwargs: Other parameters used to create the client
+         """
          super().__init__()
          self.client = openai.AsyncOpenAI(
              base_url=base_url,
@@ -207,9 +301,13 @@ class AsyncOpenaiReasonClient(AsyncReasonClient):
      async def reason_stream(self,
                              messages: list[dict],
                              model: str,
+                             reason_system_prompt:Optional[str] = None,
                              **kwargs
                              ) -> AsyncStream:

+         if reason_system_prompt is not None:
+             messages = Utility.extend_message(messages, role="system", content=reason_system_prompt)
+
          stream = await self.client.chat.completions.create(
              messages=messages,
              model=model,
@@ -226,12 +324,16 @@ class AsyncOpenaiReasonClient(AsyncReasonClient):
      async def reason(self,
                       messages: list[dict],
                       model: str,
+                      reason_system_prompt:Optional[str] = None,
                       stream = False,
                       **kwargs
                       ) -> AsyncStream or chat_completion.ChatCompletion:
          if stream:
              return await self.reason_stream(messages, model, **kwargs)

+         if reason_system_prompt is not None:
+             messages = Utility.extend_message(messages, role="system", content=reason_system_prompt)
+
          completion = await self.client.chat.completions.create(
              messages=messages,
              model=model,
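One quirk worth flagging in the hunks above: both `OpenaiReasonClient.reason` and `AsyncOpenaiReasonClient.reason` forward to `reason_stream` with `**kwargs` only, so a `reason_system_prompt` passed to `reason(stream=True)` is silently dropped; calling `reason_stream` directly avoids this. A hedged sketch for a QwQ-style model, with endpoint, key, and model name as placeholders:

```python
# Sketch: using the new reason_system_prompt with a QwQ-style model.
from deepanything.ReasonClient import OpenaiReasonClient

qwq = OpenaiReasonClient(base_url="https://example.com/v1", api_key="sk-...")

stream = qwq.reason_stream(
    messages=[{"role": "user", "content": "Plan a three-day trip."}],
    model="qwq-32b",  # placeholder model name
    # Appended to the messages as a trailing system-role entry:
    reason_system_prompt="Think step by step before answering.",
)
```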
deepanything/ResponseClient.py CHANGED
@@ -5,36 +5,84 @@ import openai
  from deepanything import Utility

  class ResponseClient:
+     """
+     Base Class for Response Clients
+     """
      def __init__(self):
          pass

      def chat_completions(self,messages,model,stream = False,**kwargs) -> Stream or chat_completion.ChatCompletion:
+         """
+         Make a chat completion for responding.
+
+         :param messages: Messages
+         :param model: Model
+         :param stream: Whether to use streaming return
+         :param kwargs: Additional parameters passed to the response client, such as temperature, top_k, etc.
+         :return: A Stream if stream is True, otherwise a ChatCompletion
+         """
          if stream:
              return self.chat_completions_stream(messages,model,**kwargs)

          return Utility.merge_chunk(self.chat_completions_stream(messages,model,**kwargs),model)

      def chat_completions_stream(self,messages,model,**kwargs) -> Stream:
-         pass
+         """
+         Make a chat completion for responding. This method uses streaming return.
+
+         :param messages: Messages
+         :param model: Model
+         :param kwargs: Additional parameters passed to the response client, such as temperature, top_k, etc.
+         :return: A Stream
+         """
+         raise NotImplementedError()

  class AsyncResponseClient:
+     """
+     Base Class for Async Response Clients
+     """
      def __init__(self):
          pass

      async def chat_completions(self,messages,model,stream = False,**kwargs) -> AsyncStream or chat_completion.ChatCompletion:
+         """
+         Make a chat completion for responding.
+
+         :param messages: Messages
+         :param model: Model
+         :param stream: Whether to use streaming return
+         :param kwargs: Additional parameters passed to the response client, such as temperature, top_k, etc.
+         :return: An AsyncStream if stream is True, otherwise a ChatCompletion
+         """
          if stream:
              return self.chat_completions_stream(messages,model,**kwargs)

          return await Utility.async_merge_chunk(await self.chat_completions_stream(messages,model,**kwargs),model)

      async def chat_completions_stream(self,messages,model,**kwargs) -> AsyncStream:
-         pass
+         """
+         Make a chat completion for responding. This method uses streaming return.

+         :param messages: Messages
+         :param model: Model
+         :param kwargs: Additional parameters passed to the response client, such as temperature, top_k, etc.
+         :return: An AsyncStream
+         """
+         raise NotImplementedError()

  class OpenaiResponseClient(ResponseClient):
+     """
+     OpenAI-like response client
+     """
      client : openai.OpenAI

      def __init__(self,base_url,api_key,**kwargs):
+         """
+         :param base_url: Base URL
+         :param api_key: API key
+         :param kwargs: Other parameters used to create the client
+         """
          super().__init__()
          self.client = openai.OpenAI(
              base_url=base_url,
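The non-streaming path in these classes drains the stream through `Utility.merge_chunk` / `Utility.async_merge_chunk` (not shown in this diff) to assemble a single `ChatCompletion`. A hedged sketch of both paths from the caller's side; endpoint, key, and model are placeholders:

```python
# Sketch: the two paths of ResponseClient.chat_completions.
from deepanything.ResponseClient import OpenaiResponseClient

resp = OpenaiResponseClient(base_url="https://example.com/v1", api_key="sk-...")
msgs = [{"role": "user", "content": "Summarize the plot of Hamlet."}]

full = resp.chat_completions(msgs, "gpt-4o-mini")       # merged ChatCompletion
for chunk in resp.chat_completions(msgs, "gpt-4o-mini", stream=True):
    pass                                                # one ChatCompletionChunk per step
```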
@@ -61,9 +109,17 @@ class OpenaiResponseClient(ResponseClient):


  class AsyncOpenaiResponseClient(AsyncResponseClient):
+     """
+     OpenAI-like async response client
+     """
      client : openai.AsyncOpenAI

      def __init__(self,base_url,api_key,**kwargs):
+         """
+         :param base_url: Base URL
+         :param api_key: API key
+         :param kwargs: Other parameters used to create the client
+         """
          super().__init__()
          self.client = openai.AsyncOpenAI(
              base_url=base_url,
deepanything/Server/Server.py CHANGED
@@ -1,21 +1,24 @@
- from chunk import Chunk
- from dataclasses import dataclass
+ import json
+ import logging
+ import logging.config
  import time
- import uvicorn
+ from dataclasses import dataclass
  from typing import Dict, List, Optional, Any
- import json

+ import uvicorn
+ from fastapi import FastAPI, HTTPException, status, Header, Request
+ from fastapi.responses import StreamingResponse, Response
+ from fastapi.security import HTTPBearer
  from openai.types.model import Model as OpenaiModel
- from fastapi import FastAPI,Depends, HTTPException, status,Header,Request
- from fastapi.responses import StreamingResponse,Response
- from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
  from uvicorn.config import LOGGING_CONFIG

  from deepanything.DeepAnythingClient import chat_completion_stream_async, chat_completion_async
- from deepanything.ResponseClient import AsyncOpenaiResponseClient,AsyncResponseClient
- from deepanything.Stream import AsyncStream
- from deepanything.ReasonClient import AsyncDeepseekReasonClient,AsyncOpenaiReasonClient,AsyncReasonClient
+ from deepanything.ReasonClient import AsyncDeepseekReasonClient, AsyncOpenaiReasonClient, AsyncReasonClient
+ from deepanything.ResponseClient import AsyncOpenaiResponseClient, AsyncResponseClient
  from deepanything.Server import Types
+ from deepanything.Stream import AsyncStream
+ from deepanything.metadatas import VERSION, PYTHON_RUNTIME
+

  @dataclass
  class ModelInfo:
@@ -25,12 +28,14 @@ class ModelInfo:
      response_client : str
      response_model : str
      created : int = int(time.time())
-     reason_prompt : str = "<Think>{}</Think>"
+     reason_prompt : str = "<think>{}</think>",
+     reason_system_prompt : Optional[str] = None

  class DeepAnythingServer:
      app : FastAPI = FastAPI()
      host : str
      port : int
+     logger : logging.Logger
      reason_clients : Dict[str,AsyncReasonClient] = {}
      response_clients : Dict[str,AsyncResponseClient] = {}
      models : Dict[str,ModelInfo] = {}
@@ -40,6 +45,7 @@ class DeepAnythingServer:
      log_config : Dict[str,Any] = LOGGING_CONFIG

      def __init__(self, host:str = None, port:int = None, config : Any or str = None):
+         print(f"DeepAnything Server {VERSION} on {PYTHON_RUNTIME}")
          if config is not None:
              if isinstance(config,str):
                  with open(config) as f:
@@ -54,7 +60,9 @@ class DeepAnythingServer:
          self.app.add_api_route("/v1/models",self.get_models,methods=["GET"],response_model=Types.ModelsListResponse)
          self.app.add_api_route("/v1/chat/completions",self.chat_completions,methods=["POST"])

+
      def run(self):
+         self.logger.info(f"DeepAnything server is now running at http://{self.host}:{self.port}")
          uvicorn.run(self.app,host=self.host,port=self.port,log_config=self.log_config)

      @staticmethod
@@ -65,21 +73,26 @@ class DeepAnythingServer:
          return args

      def load_config(self,config_object : Dict) -> None:
+         print("Loading config")
          self.host = config_object.get("host","0.0.0.0")
          self.port = config_object.get("port",8000)
          self.model_owner = config_object.get("model_owner","deepanything")

+         self.log_config = config_object.get("log",LOGGING_CONFIG)
+         if self.log_config == {}:
+             self.log_config = LOGGING_CONFIG
+         logging.config.dictConfig(self.log_config)
+         self.logger = logging.getLogger("deepanything")
+
          self._load_reason_clients(config_object)
          self._load_response_clients(config_object)
          self._load_models(config_object)

          self.api_keys = config_object.get("api_keys",[])
-         self.log_config = config_object.get("log",LOGGING_CONFIG)
-         if self.log_config == {}:
-             self.log_config = LOGGING_CONFIG


      def _load_models(self, config_object):
+         self.logger.info("Loading models")
          models: List[Dict] = config_object.get("models", [])
          for _model in models:
              name = _model["name"]
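0.1.8 moves log configuration to the top of `load_config` so that the client and model loaders can use `self.logger`. The `log` key is handed straight to `logging.config.dictConfig`, so any valid dictConfig dictionary works; a minimal sketch (this particular handler layout is an assumption, not taken from the diff):

```python
# Minimal dictConfig-style "log" section; any valid dictConfig dict works.
log_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "loggers": {"deepanything": {"handlers": ["console"], "level": "INFO"}},
}
```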
@@ -88,13 +101,19 @@ class DeepAnythingServer:
              response_client = _model["response_client"]
              response_model = _model["response_model"]
              created = _model.get("created", int(time.time()))
-             reason_prompt = _model.get("reason_prompt", "<Think>{}</Think>")
+             reason_prompt = _model.get("reason_prompt", "<think>{}</think>")
+             reason_system_prompt = _model.get("reason_system_prompt", None)
+
+             if name in self.models:
+                 self.logger.error(f"Detected duplicate model : {name}")

              if reason_client not in self.reason_clients:
-                 raise ValueError(f"Reason client '{reason_model}' for '{name}' not found")
+                 self.logger.error(f"Reason client '{reason_model}' for '{name}' not found")
+                 exit(0)

              if response_client not in self.response_clients:
-                 raise ValueError(f"Response client '{response_model}' for '{name}' not found")
+                 self.logger.error(f"Response client '{response_model}' for '{name}' not found")
+                 exit(0)

              self.models[name] = ModelInfo(
                  name=name,
@@ -103,10 +122,14 @@ class DeepAnythingServer:
                  response_client=response_client,
                  response_model=response_model,
                  created=created,
-                 reason_prompt=reason_prompt
+                 reason_prompt=reason_prompt,
+                 reason_system_prompt=reason_system_prompt
              )

+             self.logger.info(f"Loaded model : {name}")
+
      def _load_response_clients(self, config_object):
+         self.logger.info("Loading response clients")
          response_clients: List[Dict] = config_object.get("response_clients", [])
          for client in response_clients:
              name = client["name"]
@@ -114,12 +137,20 @@ class DeepAnythingServer:
              api_key = client.get("api_key", "")
              extract_args = client.get("extract_args", {})

+             if name in self.response_clients:
+                 self.logger.error(f"Detected duplicate response clients : {name}")
+                 exit(0)
+
              if client["type"] == 'openai':
                  self.response_clients[name] = AsyncOpenaiResponseClient(base_url, api_key, **extract_args)
              else:
-                 raise ValueError(f"Unsupported response client type '{client['type']}'")
+                 self.logger.error(f"Unsupported response client type '{client['type']}'")
+                 exit(0)
+
+             self.logger.info(f"Loaded response client : {name}")

      def _load_reason_clients(self, config_object):
+         self.logger.info("Loading reason clients")
          reason_clients: List[Dict] = config_object.get("reason_clients", [])
          for client in reason_clients:
              name = client["name"]
@@ -127,12 +158,20 @@ class DeepAnythingServer:
              api_key = client.get("api_key", "")
              extract_args = client.get("extract_args", {})

+
+             if name in self.response_clients:
+                 self.logger.error(f"Detected duplicate reason clients : {name}")
+                 exit(0)
+
              if client["type"] == 'deepseek':
                  self.reason_clients[name] = AsyncDeepseekReasonClient(base_url, api_key, **extract_args)
              elif client["type"] == 'openai':
                  self.reason_clients[name] = AsyncOpenaiReasonClient(base_url, api_key, **extract_args)
              else:
-                 raise Exception("unknown reason client type")
+                 self.logger.error(f"Unsupported reason client type '{client['type']}'")
+                 exit(0)
+
+             self.logger.info(f"Loaded reason client : {name}")

      def add_reason_client(self,name:str,client:AsyncReasonClient):
          self.reason_clients[name] = client
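Pulling together the keys read by `load_config` and the three loaders, a configuration along these lines should exercise the new `reason_system_prompt` field. Every name, URL, and key below is a placeholder, and the import path for the server module is assumed rather than shown in this diff:

```python
# Placeholder configuration exercising the keys the loaders above read.
config = {
    "host": "0.0.0.0",
    "port": 8000,
    "api_keys": ["sk-local-example"],
    "reason_clients": [
        {"name": "r1", "type": "deepseek",
         "base_url": "https://api.deepseek.com", "api_key": "sk-..."},
    ],
    "response_clients": [
        {"name": "gpt", "type": "openai",
         "base_url": "https://example.com/v1", "api_key": "sk-..."},
    ],
    "models": [
        {"name": "deepclaude",
         "reason_client": "r1", "reason_model": "deepseek-reasoner",
         "response_client": "gpt", "response_model": "gpt-4o-mini",
         "reason_prompt": "<think>{}</think>",
         "reason_system_prompt": "Reason carefully before answering."},
    ],
}

from deepanything.Server.Server import DeepAnythingServer  # assumed module path
DeepAnythingServer(config=config).run()
```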
@@ -143,31 +182,31 @@ class DeepAnythingServer:
      def add_model(self,name:str,model:ModelInfo):
          self.models[name] = model

+     @staticmethod
+     def _extract_token(authorization:str):
+         if (authorization is None) or (not authorization.startswith("Bearer ")):
+             return None
+         return authorization[7:]
      def _verify_authorization(self, authorization:Optional[str]):
+         token = DeepAnythingServer._extract_token(authorization)
+
          if not self.api_keys:
-             return
+             return DeepAnythingServer._extract_token(authorization)

-         if authorization is None:
+         if authorization is None or token is None:
              raise HTTPException(
                  status_code=status.HTTP_401_UNAUTHORIZED,
                  detail="Expect token",
                  headers={"WWW-Authenticate": "Bearer"},
              )

-         if not authorization.startswith("Bearer "):
-             raise HTTPException(
-                 status_code=status.HTTP_401_UNAUTHORIZED,
-                 detail="Invalid or expired token",
-                 headers={"WWW-Authenticate": "Bearer"},
-             )
-
-         token =authorization[7:]
          if token not in self.api_keys:
              raise HTTPException(
                  status_code=status.HTTP_401_UNAUTHORIZED,
                  detail="Invalid or expired token",
                  headers={"WWW-Authenticate": "Bearer"},
              )
+         return token

      async def chat_completions(
              self,
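`_verify_authorization` now returns the extracted Bearer token so that `chat_completions` can log it. Since the server exposes the OpenAI surface (`/v1/models`, `/v1/chat/completions`), the stock OpenAI client can talk to it; a sketch with the address and key as placeholders:

```python
# Sketch: calling a local DeepAnything server with the stock OpenAI client.
import openai

local = openai.OpenAI(
    base_url="http://127.0.0.1:8000/v1",  # host/port from the server config
    api_key="sk-local-example",           # must be listed in the server's api_keys
)
out = local.chat.completions.create(
    model="deepclaude",                   # a model name defined in the config
    messages=[{"role": "user", "content": "Hello!"}],
)
```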
@@ -175,7 +214,9 @@ class DeepAnythingServer:
          query: Types.ChatCompletionQuery,
          authorization: Optional[str] = Header(None)
      ):
-         self._verify_authorization(authorization)
+         token = self._verify_authorization(authorization)
+
+         self.logger.info(f"ChatCompletions : {token} -> {query.model}")

          if query.model not in self.models:
              raise HTTPException(
@@ -212,7 +253,8 @@ class DeepAnythingServer:
                  reason_prompt=model.reason_prompt,
                  response_args=args,
                  reason_args=args,
-                 max_tokens=max_tokens
+                 max_tokens=max_tokens,
+
              ),
              request
          )
deepanything/Server/Types.py CHANGED
@@ -1,6 +1,7 @@
- from pydantic import BaseModel
+ from typing import List, Optional
+
  from openai.types.model import Model as OpenaiModel
- from typing import Dict, List, Optional
+ from pydantic import BaseModel


  class ModelsListResponse(BaseModel):
deepanything/Stream.py CHANGED
@@ -3,9 +3,12 @@ from typing import Any, AsyncIterator, Iterator
  from openai.types.chat.chat_completion_chunk import ChatCompletionChunk

  class Stream:
+     """
+     Implementation of streaming return, built on simple callback functions.
+     """
      next_fc : Callable[[Any],ChatCompletionChunk]
      close_fc : Callable[[Any],None]
-     data : dict
+     data : Any


      def __init__(self,data):
@@ -14,10 +17,22 @@ class Stream:
          return self

      def on_next(self,fc : Callable[[Any],ChatCompletionChunk]) -> 'Stream':
+         """
+         Set the callback for `__next__()`.
+
+         :param fc: Callback
+         :return: The Stream itself.
+         """
          self.next_fc = fc
          return self

      def on_close(self,fc : Callable[[Any],None]) -> 'Stream':
+         """
+         Set the callback for `close()`.
+
+         :param fc: Callback
+         :return: The Stream itself.
+         """
          self.close_fc = fc
          return self

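The builder style documented above is exactly how the reason clients assemble streams (e.g. `.on_close(lambda _: stream.close())` earlier in this diff). A self-contained sketch wrapping a plain iterator; the source and its StopIteration behaviour are assumptions consistent with the callbacks' contract:

```python
# Sketch: wrapping an iterator in a Stream. `chunks` stands in for the
# OpenAI SDK stream the reason clients actually wrap.
from deepanything.Stream import Stream

chunks = iter([])  # placeholder iterator of ChatCompletionChunk objects

s = (Stream(chunks)
     .on_next(lambda it: next(it))   # __next__ pulls from the source
     .on_close(lambda it: None))     # close() would release the source

for chunk in s:  # ends when next_fc raises StopIteration
    pass
s.close()
```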
@@ -25,9 +40,16 @@ class Stream:
          return self.next_fc(self.data)

      def close(self) -> None:
+         """
+         Close the stream.
+         :return: None
+         """
          self.close_fc(self.data)

  class AsyncStream:
+     """
+     Implementation of streaming return, built on simple callback functions.
+     """
      next_fc: Callable[[Any], Awaitable[ChatCompletionChunk]]
      close_fc: Callable[[Any], Awaitable[None]]
      data : Any
@@ -39,10 +61,22 @@ class AsyncStream:
          return self

      def on_next(self, fc: Callable[[Any], Awaitable[ChatCompletionChunk]]) -> 'AsyncStream':
+         """
+         Set the callback for `__anext__()`.
+
+         :param fc: Callback
+         :return: The AsyncStream itself.
+         """
          self.next_fc = fc
          return self

      def on_close(self, fc: Callable[[Any], Awaitable[None]]) -> 'AsyncStream':
+         """
+         Set the callback for `close()`.
+
+         :param fc: Callback
+         :return: The AsyncStream itself.
+         """
          self.close_fc = fc
          return self

@@ -50,4 +84,8 @@ class AsyncStream:
          return await self.next_fc(self.data)

      async def close(self) -> None:
+         """
+         Close the stream.
+         :return: None
+         """
          await self.close_fc(self.data)
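`AsyncStream` is the awaitable twin: `__anext__` awaits `next_fc`, so instances are consumed with `async for`. A hedged sketch with illustrative coroutine callbacks; note the callback must raise StopAsyncIteration, not StopIteration, to end the loop:

```python
# Sketch: consuming an AsyncStream with `async for`; the callbacks are
# illustrative stand-ins for what the async reason clients install.
import asyncio

from deepanything.Stream import AsyncStream

async def main() -> None:
    source = iter([])  # placeholder for an underlying async SDK stream

    async def pull(it):
        try:
            return next(it)
        except StopIteration:
            raise StopAsyncIteration  # ends the async-for loop

    async def release(_) -> None:
        pass

    s = AsyncStream(source).on_next(pull).on_close(release)
    async for chunk in s:
        pass
    await s.close()

asyncio.run(main())
```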