deepanything-0.1.0-py3-none-any.whl

deepanything/ResponseClient.py ADDED
@@ -0,0 +1,92 @@
+ from typing import Union
+
+ import openai
+ from openai.types.chat import chat_completion
+
+ from deepanything import Utility
+ from deepanything.Stream import Stream, AsyncStream
+
+ class ResponseClient:
+     """Base class for clients that produce the final (non-reasoning) answer."""
+     def __init__(self):
+         pass
+
+     def chat_completions(self, messages, model, stream=False, **kwargs) -> Union[Stream, chat_completion.ChatCompletion]:
+         if stream:
+             return self.chat_completions_stream(messages, model, **kwargs)
+
+         # Non-streaming mode still streams internally, then collapses the chunks.
+         return Utility.merge_chunk(self.chat_completions_stream(messages, model, **kwargs), model)
+
+     def chat_completions_stream(self, messages, model, **kwargs) -> Stream:
+         raise NotImplementedError
+
+ class AsyncResponseClient:
+     """Async counterpart of ResponseClient."""
+     def __init__(self):
+         pass
+
+     async def chat_completions(self, messages, model, stream=False, **kwargs) -> Union[AsyncStream, chat_completion.ChatCompletion]:
+         if stream:
+             return await self.chat_completions_stream(messages, model, **kwargs)
+
+         return await Utility.async_merge_chunk(await self.chat_completions_stream(messages, model, **kwargs), model)
+
+     async def chat_completions_stream(self, messages, model, **kwargs) -> AsyncStream:
+         raise NotImplementedError
+
+
+ class OpenaiResponseClient(ResponseClient):
+     client: openai.OpenAI
+
+     def __init__(self, base_url, api_key, **kwargs):
+         super().__init__()
+         self.client = openai.OpenAI(
+             base_url=base_url,
+             api_key=api_key,
+             **kwargs
+         )
+
+     def chat_completions(self, messages, model, stream=False, **kwargs) -> Union[Stream, chat_completion.ChatCompletion]:
+         return self.client.chat.completions.create(
+             messages=messages,
+             model=model,
+             stream=stream,
+             **kwargs
+         )
+
+     def chat_completions_stream(self, messages, model, **kwargs) -> Stream:
+         return self.client.chat.completions.create(
+             messages=messages,
+             model=model,
+             stream=True,
+             **kwargs
+         )
+
+
+ class AsyncOpenaiResponseClient(AsyncResponseClient):
+     client: openai.AsyncOpenAI
+
+     def __init__(self, base_url, api_key, **kwargs):
+         super().__init__()
+         self.client = openai.AsyncOpenAI(
+             base_url=base_url,
+             api_key=api_key,
+             **kwargs
+         )
+
+     async def chat_completions(self, messages, model, stream=False, **kwargs) -> Union[AsyncStream, chat_completion.ChatCompletion]:
+         return await self.client.chat.completions.create(
+             messages=messages,
+             model=model,
+             stream=stream,
+             **kwargs
+         )
+
+     async def chat_completions_stream(self, messages, model, **kwargs) -> AsyncStream:
+         return await self.client.chat.completions.create(
+             messages=messages,
+             model=model,
+             stream=True,
+             **kwargs
+         )
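
A minimal usage sketch (hedged: the endpoint URL, key, and model name below are placeholders, not part of the package):

    from deepanything.ResponseClient import OpenaiResponseClient

    client = OpenaiResponseClient(
        base_url="https://api.openai.com/v1",  # placeholder endpoint
        api_key="sk-...",                      # placeholder key
    )
    # stream=False returns the provider's ChatCompletion directly;
    # stream=True returns the raw chunk stream instead.
    completion = client.chat_completions(
        messages=[{"role": "user", "content": "Hello"}],
        model="gpt-4o-mini",                   # placeholder model name
    )
    print(completion.choices[0].message.content)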
deepanything/Server/Server.py ADDED
@@ -0,0 +1,194 @@
+ from dataclasses import dataclass
+ import time
+ import uvicorn
+ from typing import Dict, List, Optional
+
+ from openai.types.model import Model as OpenaiModel
+ from fastapi import FastAPI, HTTPException, status, Header
+ from fastapi.responses import StreamingResponse
+ from fastapi.security import HTTPBearer
+
+ from deepanything.DeepAnythingClient import chat_completion_stream_async, chat_completion_async
+ from deepanything.ResponseClient import AsyncOpenaiResponseClient, AsyncResponseClient
+ from deepanything.Stream import AsyncStream
+ from deepanything.ReasonClient import AsyncDeepseekReasonClient, AsyncOpenaiReasonClient, AsyncReasonClient
+ from deepanything.Server import Types
+
+ @dataclass
+ class ModelInfo:
+     name: str
+     reason_client: str
+     reason_model: str
+     response_client: str
+     response_model: str
+     created: int
+     reason_prompt: str
+
+ class DeepAnythingServer:
+     def __init__(self, host: str = None, port: int = None, config_object: Dict = None):
+         # Instance attributes (not class attributes), so separate servers do not share state.
+         self.app: FastAPI = FastAPI()
+         self.host: str = "0.0.0.0"
+         self.port: int = 8000
+         self.reason_clients: Dict[str, AsyncReasonClient] = {}
+         self.response_clients: Dict[str, AsyncResponseClient] = {}
+         self.models: Dict[str, ModelInfo] = {}
+         self.model_owner: str = "deepanything"
+         self.api_keys: List[str] = []
+         self.security = HTTPBearer()
+
+         if config_object is not None:
+             self.load_config(config_object)
+
+         # Explicit host/port arguments override values from the config.
+         if host:
+             self.host = host
+         if port:
+             self.port = port
+
+         self.app.add_api_route("/v1/models", self.get_models, methods=["GET"], response_model=Types.ModelsListResponse)
+         self.app.add_api_route("/v1/chat/completions", self.chat_completions, methods=["POST"])
+
+     def run(self):
+         uvicorn.run(self.app, host=self.host, port=self.port, log_level="trace")
+
+     def load_config(self, config_object: Dict) -> None:
+         self.host = config_object.get("host", "0.0.0.0")
+         self.port = config_object.get("port", 8000)
+         self.model_owner = config_object.get("model_owner", "deepanything")
+
+         reason_clients: List[Dict] = config_object.get("reason_clients", [])
+
+         for client in reason_clients:
+             name = client["name"]
+             base_url = client["base_url"]
+             api_key = client.get("api_key", "")
+             extract_args = client.get("extract_args", {})
+
+             if client["type"] == 'deepseek':
+                 self.reason_clients[name] = AsyncDeepseekReasonClient(base_url, api_key, **extract_args)
+             elif client["type"] == 'openai':
+                 self.reason_clients[name] = AsyncOpenaiReasonClient(base_url, api_key, **extract_args)
+             else:
+                 raise ValueError(f"unknown reason client type: {client['type']}")
+
+         response_clients: List[Dict] = config_object.get("response_clients", [])
+
+         for client in response_clients:
+             name = client["name"]
+             base_url = client["base_url"]
+             api_key = client.get("api_key", "")
+             extract_args = client.get("extract_args", {})
+
+             if client["type"] == 'openai':
+                 self.response_clients[name] = AsyncOpenaiResponseClient(base_url, api_key, **extract_args)
+             else:
+                 raise ValueError(f"unknown response client type: {client['type']}")
+
+         models: List[Dict] = config_object.get("models", [])
+
+         for _model in models:
+             name = _model["name"]
+             reason_client = _model["reason_client"]
+             reason_model = _model["reason_model"]
+             response_client = _model["response_client"]
+             response_model = _model["response_model"]
+             created = _model.get("created", int(time.time()))
+             reason_prompt = _model.get("reason_prompt", "<Think>{}</Think>")
+
+             self.models[name] = ModelInfo(
+                 name=name,
+                 reason_client=reason_client,
+                 reason_model=reason_model,
+                 response_client=response_client,
+                 response_model=response_model,
+                 created=created,
+                 reason_prompt=reason_prompt
+             )
+
+         self.api_keys = config_object.get("api_keys", [])
+
+     def verify_authorization(self, authorization: Optional[str]):
+         # If no API keys are configured, authentication is disabled.
+         if not self.api_keys:
+             return
+
+         if authorization is None:
+             raise HTTPException(
+                 status_code=status.HTTP_401_UNAUTHORIZED,
+                 detail="Expected a bearer token",
+                 headers={"WWW-Authenticate": "Bearer"},
+             )
+
+         if not authorization.startswith("Bearer "):
+             raise HTTPException(
+                 status_code=status.HTTP_401_UNAUTHORIZED,
+                 detail="Invalid or expired token",
+                 headers={"WWW-Authenticate": "Bearer"},
+             )
+
+         token = authorization[len("Bearer "):]
+         if token not in self.api_keys:
+             raise HTTPException(
+                 status_code=status.HTTP_401_UNAUTHORIZED,
+                 detail="Invalid or expired token",
+                 headers={"WWW-Authenticate": "Bearer"},
+             )
+
+     async def chat_completions(self, query: Types.ChatCompletionQuery, authorization: Optional[str] = Header(None)):
+         self.verify_authorization(authorization)
+
+         if query.model not in self.models:
+             raise HTTPException(
+                 status_code=status.HTTP_404_NOT_FOUND,
+                 detail=f"Model '{query.model}' does not exist",
+             )
+         model = self.models[query.model]
+
+         async def sse(it: AsyncStream):
+             async for chunk in it:
+                 yield f"data: {chunk.model_dump_json()}\n\n"
+             # Terminate the SSE stream the way the OpenAI API does.
+             yield "data: [DONE]\n\n"
+
+         if query.stream:
+             res = sse(
+                 await chat_completion_stream_async(
+                     messages=query.messages,
+                     reason_client=self.reason_clients[model.reason_client],
+                     reason_model=model.reason_model,
+                     response_client=self.response_clients[model.response_client],
+                     response_model=model.response_model,
+                     show_model=model.name,
+                     reason_prompt=model.reason_prompt,
+                 )
+             )
+             return StreamingResponse(
+                 res,
+                 media_type="text/event-stream"
+             )
+         else:
+             res = await chat_completion_async(
+                 messages=query.messages,
+                 reason_client=self.reason_clients[model.reason_client],
+                 reason_model=model.reason_model,
+                 response_client=self.response_clients[model.response_client],
+                 response_model=model.response_model,
+                 show_model=model.name,
+                 reason_prompt=model.reason_prompt
+             )
+             # Return the model itself; FastAPI serializes it to JSON.
+             return res
+
+     async def get_models(self) -> Types.ModelsListResponse:
+         return Types.ModelsListResponse(
+             data=[
+                 OpenaiModel(
+                     id=model_info.name,
+                     owned_by=self.model_owner,
+                     created=model_info.created,
+                     object="model"
+                 )
+                 for model_info in self.models.values()
+             ]
+         )
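
For reference, a hedged sketch of the config dict that load_config consumes; every URL, key, and model name below is a placeholder, and extract_args (not shown) is forwarded verbatim to the underlying client constructor:

    config = {
        "host": "0.0.0.0",
        "port": 8000,
        "reason_clients": [
            {"name": "deepseek", "type": "deepseek",
             "base_url": "https://api.deepseek.com", "api_key": "sk-..."}
        ],
        "response_clients": [
            {"name": "openai", "type": "openai",
             "base_url": "https://api.openai.com/v1", "api_key": "sk-..."}
        ],
        "models": [
            {"name": "deepseek-r1-gpt",
             "reason_client": "deepseek", "reason_model": "deepseek-reasoner",
             "response_client": "openai", "response_model": "gpt-4o"}
        ],
        "api_keys": ["my-server-key"]   # omit or leave empty to disable auth
    }

    DeepAnythingServer(config_object=config).run()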
deepanything/Server/Types.py ADDED
@@ -0,0 +1,15 @@
+ from pydantic import BaseModel
+ from openai.types.model import Model as OpenaiModel
+ from typing import List
+
+ class ModelsListResponse(BaseModel):
+     data: List[OpenaiModel]
+
+ class ChatCompletionMessage(BaseModel):
+     role: str
+     content: str
+
+ class ChatCompletionQuery(BaseModel):
+     model: str
+     messages: List[ChatCompletionMessage]
+     stream: bool = False
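
A request body that validates against ChatCompletionQuery looks like this (sketch; the model name must match an entry in the server's models config):

    query = {
        "model": "deepseek-r1-gpt",   # placeholder, must exist in the config
        "messages": [{"role": "user", "content": "Why is the sky blue?"}],
        "stream": True,
    }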
deepanything/Server/__init__.py ADDED
@@ -0,0 +1 @@
+ __all__ = ["Server", "Types"]
deepanything/Stream.py ADDED
@@ -0,0 +1,57 @@
+ from collections.abc import Callable, Awaitable
+ from typing import Any, AsyncIterator, Iterator
+ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+
+ class Stream:
+     """A chunk stream driven by user-supplied callbacks."""
+     next_fc: Callable[[Any], ChatCompletionChunk]
+     close_fc: Callable[[Any], None]
+     data: Any
+
+     def __init__(self, data):
+         self.data = data
+
+     def __iter__(self) -> Iterator[ChatCompletionChunk]:
+         return self
+
+     def on_next(self, fc: Callable[[Any], ChatCompletionChunk]) -> 'Stream':
+         self.next_fc = fc
+         return self
+
+     def on_close(self, fc: Callable[[Any], None]) -> 'Stream':
+         self.close_fc = fc
+         return self
+
+     def __next__(self) -> ChatCompletionChunk:
+         # next_fc is expected to raise StopIteration once the stream is exhausted.
+         return self.next_fc(self.data)
+
+     def close(self) -> None:
+         self.close_fc(self.data)
+
+ class AsyncStream:
+     """Async counterpart of Stream; callbacks return awaitables."""
+     next_fc: Callable[[Any], Awaitable[ChatCompletionChunk]]
+     close_fc: Callable[[Any], Awaitable[None]]
+     data: Any
+
+     def __init__(self, data):
+         self.data = data
+
+     def __aiter__(self) -> AsyncIterator[ChatCompletionChunk]:
+         return self
+
+     def on_next(self, fc: Callable[[Any], Awaitable[ChatCompletionChunk]]) -> 'AsyncStream':
+         self.next_fc = fc
+         return self
+
+     def on_close(self, fc: Callable[[Any], Awaitable[None]]) -> 'AsyncStream':
+         self.close_fc = fc
+         return self
+
+     async def __anext__(self) -> ChatCompletionChunk:
+         # next_fc is expected to raise StopAsyncIteration once exhausted.
+         return await self.next_fc(self.data)
+
+     async def close(self) -> None:
+         await self.close_fc(self.data)
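
A sketch of how the callback wiring is intended to be used (the chunk source here is hypothetical):

    chunks = iter(some_chunk_source)   # any iterator of ChatCompletionChunk

    stream = (Stream(chunks)
              .on_next(lambda it: next(it))   # propagating StopIteration ends iteration
              .on_close(lambda it: None))     # nothing to release in this sketch

    for chunk in stream:
        print(chunk.choices[0].delta.content)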
deepanything/Utility.py ADDED
@@ -0,0 +1,228 @@
+ import time
+ import uuid
+ from collections.abc import Iterable
+ from typing import Optional, List, Literal, AsyncIterable
+
+ from openai.types.chat import chat_completion_chunk, chat_completion, chat_completion_message
+ from openai.types import completion_usage
+
+ def make_usage(
+         completion_tokens,
+         prompt_tokens,
+         total_tokens
+ ) -> completion_usage.CompletionUsage:
+     usage = completion_usage.CompletionUsage(
+         completion_tokens=completion_tokens,
+         prompt_tokens=prompt_tokens,
+         total_tokens=total_tokens
+     )
+
+     return usage
+
+ def make_chat_completion_chunk_delta(
+         role: Literal["developer", "system", "user", "assistant", "tool"],
+         content: Optional[str] = None,
+         reasoning_content: Optional[str] = None
+ ) -> chat_completion_chunk.ChoiceDelta:
+     delta = chat_completion_chunk.ChoiceDelta(
+         role=role,
+         content=content
+     )
+
+     # reasoning_content is not part of the official schema; openai models allow extra fields.
+     delta.reasoning_content = reasoning_content
+
+     return delta
+
+ def make_chat_completion_chunk_choice(
+         delta: chat_completion_chunk.ChoiceDelta,
+         index: int = 0,
+         finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter", "function_call"]] = None,
+ ) -> chat_completion_chunk.Choice:
+     choice = chat_completion_chunk.Choice(
+         delta=delta,
+         index=index,
+         finish_reason=finish_reason
+     )
+
+     return choice
+
+
+ def make_chat_completion_chunk(
+         _id: str,
+         usage: Optional[completion_usage.CompletionUsage],
+         model: str,
+         choices: List[chat_completion_chunk.Choice],
+         created: Optional[int] = None,
+ ) -> chat_completion_chunk.ChatCompletionChunk:
+     # Computed per call; a default of int(time.time()) would be frozen at import time.
+     if created is None:
+         created = int(time.time())
+
+     chunk = chat_completion_chunk.ChatCompletionChunk(
+         id=_id,
+         created=created,
+         model=model,
+         choices=choices,
+         object="chat.completion.chunk",
+         usage=usage
+     )
+
+     return chunk
+
+
+ def make_chat_completion_message(
+         role: Literal["developer", "system", "user", "assistant", "tool"],
+         content: Optional[str] = None,
+         reasoning_content: Optional[str] = None
+ ) -> chat_completion_message.ChatCompletionMessage:
+     # content is a required field, so it must be passed to the constructor.
+     message = chat_completion_message.ChatCompletionMessage(
+         role=role,
+         content=content
+     )
+
+     message.reasoning_content = reasoning_content
+
+     return message
+
+ def make_chat_completion_choice(
+         finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"],
+         message: chat_completion_message.ChatCompletionMessage,
+         index: int = 0
+ ) -> chat_completion.Choice:
+     choice = chat_completion.Choice(
+         message=message,
+         finish_reason=finish_reason,
+         index=index,
+         logprobs=None
+     )
+     return choice
+
+
+ def make_chat_completion(
+         _id: str,
+         choices: List[chat_completion.Choice],
+         model: str,
+         usage: Optional[completion_usage.CompletionUsage],
+         created: Optional[int] = None,
+ ) -> chat_completion.ChatCompletion:
+     if created is None:
+         created = int(time.time())
+
+     res = chat_completion.ChatCompletion(
+         object="chat.completion",
+         id=_id,
+         choices=choices,
+         model=model,
+         usage=usage,
+         created=created,
+         system_fingerprint=None
+     )
+
+     return res
+
+
+ def _update_accumulators(
+         reasoning: str,
+         content: str,
+         comp_tokens: int,
+         prompt_tokens: int,
+         total_tokens: int,
+         chunk: chat_completion_chunk.ChatCompletionChunk
+ ) -> tuple[str, str, int, int, int]:
+     # Some providers send a final usage-only chunk with an empty choices list.
+     if chunk.choices:
+         delta = chunk.choices[0].delta
+
+         # Deltas without content carry reasoning; everything else is the answer.
+         if delta.content is None:
+             reasoning += getattr(delta, "reasoning_content", None) or ""
+         else:
+             content += delta.content or ""
+
+     if chunk.usage is not None:
+         comp_tokens, prompt_tokens, total_tokens = chunk.usage.completion_tokens, chunk.usage.prompt_tokens, chunk.usage.total_tokens
+
+     return reasoning, content, comp_tokens, prompt_tokens, total_tokens
+
+
+ def _build_final_completion(
+         model: str,
+         created: int,
+         reasoning_content: str,
+         content: str,
+         comp_tokens: int,
+         prompt_tokens: int,
+         total_tokens: int
+ ) -> chat_completion.ChatCompletion:
+     usage = make_usage(comp_tokens, prompt_tokens, total_tokens)
+
+     return make_chat_completion(
+         _id=make_id_by_timestamp(),
+         choices=[make_chat_completion_choice(
+             finish_reason="stop",
+             message=make_chat_completion_message(
+                 role="assistant",
+                 reasoning_content=reasoning_content if reasoning_content else None,
+                 content=content if content else None
+             )
+         )],
+         model=model,
+         usage=usage,
+         created=created
+     )
+
+
+ def merge_chunk(
+         chunks: Iterable[chat_completion_chunk.ChatCompletionChunk],
+         model: str,
+         created: Optional[int] = None
+ ) -> chat_completion.ChatCompletion:
+     """Synchronously merge a stream of chunks into a single ChatCompletion."""
+     if created is None:
+         created = int(time.time())
+
+     reasoning_content = ""
+     content = ""
+     comp_tokens = prompt_tokens = total_tokens = 0
+
+     for chunk in chunks:
+         reasoning_content, content, comp_tokens, prompt_tokens, total_tokens = \
+             _update_accumulators(reasoning_content, content, comp_tokens, prompt_tokens, total_tokens, chunk)
+
+     return _build_final_completion(model, created, reasoning_content, content, comp_tokens, prompt_tokens, total_tokens)
+
+
+ async def async_merge_chunk(
+         chunks: AsyncIterable[chat_completion_chunk.ChatCompletionChunk],
+         model: str,
+         created: Optional[int] = None
+ ) -> chat_completion.ChatCompletion:
+     """Asynchronously merge a stream of chunks into a single ChatCompletion."""
+     if created is None:
+         created = int(time.time())
+
+     reasoning_content = ""
+     content = ""
+     comp_tokens = prompt_tokens = total_tokens = 0
+
+     async for chunk in chunks:
+         reasoning_content, content, comp_tokens, prompt_tokens, total_tokens = \
+             _update_accumulators(reasoning_content, content, comp_tokens, prompt_tokens, total_tokens, chunk)
+
+     return _build_final_completion(model, created, reasoning_content, content, comp_tokens, prompt_tokens, total_tokens)
+
+ def merge_usage(
+         a: completion_usage.CompletionUsage,
+         b: completion_usage.CompletionUsage
+ ) -> completion_usage.CompletionUsage:
+     return make_usage(
+         completion_tokens=a.completion_tokens + b.completion_tokens,
+         prompt_tokens=a.prompt_tokens + b.prompt_tokens,
+         total_tokens=a.total_tokens + b.total_tokens
+     )
+
+ def make_id_by_timestamp():
+     # Despite the name, ids are UUID-based, in the OpenAI "chatcmpl-" format.
+     return "chatcmpl-" + str(uuid.uuid4())
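
For instance, collapsing a finished chunk stream back into one completion (sketch; chunk_stream is any iterable of ChatCompletionChunk, such as a Stream from Stream.py):

    completion = merge_chunk(chunk_stream, model="my-model")
    message = completion.choices[0].message
    print(message.reasoning_content)  # accumulated deltas that had no content
    print(message.content)            # accumulated answer deltas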
deepanything/__init__.py ADDED
@@ -0,0 +1 @@
+ __all__ = ['DeepAnythingClient', 'ReasonClient', 'Utility', 'Stream', 'ResponseClient', 'Server']
deepanything/__main__.py ADDED
@@ -0,0 +1,22 @@
+ import argparse
+ import json
+
+ from deepanything.Server.Server import DeepAnythingServer
+
+ def main():
+     parser = argparse.ArgumentParser(prog="deepanything", description="Run a DeepAnything server.")
+     parser.add_argument('--host', type=str, required=False, help='Host to listen on. Overrides the host from the config file.')
+     parser.add_argument('--port', type=int, required=False, help='Port to listen on. Overrides the port from the config file.')
+     parser.add_argument('--config', type=str, required=True, help='Path to the config file.')
+
+     args = parser.parse_args()
+
+     # --config is required, so argparse exits with an error if it is missing.
+     with open(args.config) as f:
+         config = json.load(f)
+
+     server = DeepAnythingServer(host=args.host, port=args.port, config_object=config)
+     server.run()
+
+ if __name__ == "__main__":
+     main()
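
With this entry point, the server is started as python -m deepanything --config config.json (the path is a placeholder); --host and --port, when given, override the values in the config file.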
LICENSE ADDED
@@ -0,0 +1,7 @@
+ Copyright 2025 Junity
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.