deepanything 0.1.0__py3-none-any.whl

@@ -0,0 +1,443 @@
import time
from typing import List, Optional, Union

from openai.types.chat.chat_completion import ChatCompletion

from deepanything.Stream import Stream, AsyncStream
from deepanything.Utility import make_usage, make_chat_completion_message, merge_chunk, async_merge_chunk, \
    make_chat_completion_chunk, make_chat_completion, make_chat_completion_choice, merge_usage, make_id_by_timestamp
from deepanything.ResponseClient import ResponseClient, AsyncResponseClient
from deepanything.ReasonClient import ReasonClient, AsyncReasonClient


def _merge_chat_completion(
        reason: ChatCompletion,
        response: ChatCompletion,
        show_model: str,
        created: int,
        _id: str
) -> ChatCompletion:
    # Fold the reasoning completion and the final response into a single
    # ChatCompletion: the response supplies the visible content, the reasoning
    # run supplies reasoning_content, and token usage is summed.
    return make_chat_completion(
        _id=_id,
        choices=[
            make_chat_completion_choice(
                finish_reason=response.choices[0].finish_reason,
                message=make_chat_completion_message(
                    role="assistant",
                    content=response.choices[0].message.content,
                    reasoning_content=reason.choices[0].message.reasoning_content
                )
            )
        ],
        model=show_model,
        usage=merge_usage(reason.usage, response.usage),
        created=created
    )

def _build_message(
        messages: List,
        reason_content: str,
        reason_prompt: str
) -> List:
    # Append the reasoning text to the conversation, wrapped in the
    # reason_prompt template, so the response model can see it.
    return messages + [make_chat_completion_message(
        role="assistant",
        content=reason_prompt.format(reason_content)
    )]


def _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id):
    # Collect the reasoning text and re-label the chunk with the exposed
    # model name, creation time, and id.
    delta = chunk.choices[0].delta
    if delta.reasoning_content is not None:
        reasoning_contents.append(delta.reasoning_content)
    new_chunk = chunk.model_copy(deep=False)
    new_chunk.model = show_model
    new_chunk.created = created
    new_chunk.id = _id
    if new_chunk.usage is not None:
        reason_usage = new_chunk.usage
    return new_chunk, reason_usage


def _process_response_chunk(chunk, reason_usage, show_model, created, _id):
    # Re-label the response chunk and, when usage is reported, add the tokens
    # consumed during the reasoning phase.
    new_chunk = chunk.model_copy(deep=False)
    new_chunk.model = show_model
    new_chunk.created = created
    new_chunk.id = _id
    if new_chunk.usage is not None:
        new_usage = new_chunk.usage.model_copy()
        new_usage.completion_tokens += reason_usage.completion_tokens
        new_usage.prompt_tokens += reason_usage.prompt_tokens
        new_usage.total_tokens += reason_usage.total_tokens

        new_chunk.usage = new_usage
    return new_chunk

def chat_completion(
        messages: list,
        reason_client: ReasonClient,
        reason_model: str,
        response_client: ResponseClient,
        response_model: str,
        show_model: str,
        reason_args=None,
        response_args=None,
        reason_prompt: str = "<Think>{}</Think>",
        created: Optional[int] = None,
        stream=False,
        _id: Optional[str] = None,
        max_tokens: Optional[int] = None
) -> Union[Stream, ChatCompletion]:
    # created/_id default to None so they are resolved per call rather than
    # once at import time.
    if created is None:
        created = int(time.time())
    if _id is None:
        _id = make_id_by_timestamp()
    if response_args is None:
        response_args = {}
    if reason_args is None:
        reason_args = {}
    if stream:
        return chat_completion_stream(
            messages=messages,
            reason_model=reason_model,
            reason_client=reason_client,
            response_model=response_model,
            response_client=response_client,
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
            created=created,
            _id=_id,
            reason_prompt=reason_prompt,
            max_tokens=max_tokens
        )

    if max_tokens is not None:
        reason_args["max_tokens"] = max_tokens

    # max_tokens travels via reason_args only; passing it as an explicit
    # keyword as well would raise "got multiple values for 'max_tokens'".
    reason_chat_completion: ChatCompletion = reason_client.reason(
        messages=messages,
        model=reason_model,
        **reason_args
    )

    if max_tokens is not None:
        # Leave the remaining token budget to the response model.
        max_tokens -= reason_chat_completion.usage.completion_tokens
        response_args["max_tokens"] = max_tokens

    response_chat_completion: ChatCompletion = response_client.chat_completions(
        messages=_build_message(
            messages,
            reason_content=reason_chat_completion.choices[0].message.reasoning_content,
            reason_prompt=reason_prompt
        ),
        model=response_model,
        **response_args
    )

    return _merge_chat_completion(reason_chat_completion, response_chat_completion, show_model, created, _id)

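# Usage sketch (illustrative, not part of the package): given a configured
# ReasonClient and ResponseClient (e.g. the DeepseekReasonClient defined in
# ReasonClient.py plus an OpenAI-compatible response client), a blocking call
# looks like the following; the model names are placeholders.
#
#   completion = chat_completion(
#       messages=[{"role": "user", "content": "Why is the sky blue?"}],
#       reason_client=reason_client,
#       reason_model="deepseek-reasoner",
#       response_client=response_client,
#       response_model="gpt-4o-mini",
#       show_model="deepanything-demo",
#   )
#   print(completion.choices[0].message.reasoning_content)  # merged reasoning
#   print(completion.choices[0].message.content)            # final answer
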
def chat_completion_stream(
        messages: list,
        reason_client: ReasonClient,
        reason_model: str,
        response_client: ResponseClient,
        response_model: str,
        show_model: str,
        reason_args=None,
        response_args=None,
        reason_prompt: str = "<Think>{}</Think>",
        created: Optional[int] = None,
        _id: Optional[str] = None,
        max_tokens: Optional[int] = None
) -> Stream:
    if created is None:
        created = int(time.time())
    if _id is None:
        _id = make_id_by_timestamp()
    if response_args is None:
        response_args = {}
    if reason_args is None:
        reason_args = {}
    stream: Optional[Stream] = None

    def _iter():
        nonlocal stream, max_tokens

        # Phase 1: stream reasoning chunks, accumulating the reasoning text.
        reasoning_contents = []
        if max_tokens is not None:
            reason_args["max_tokens"] = max_tokens

        reason_stream = reason_client.reason_stream(
            messages,
            reason_model,
            **reason_args
        )
        stream = reason_stream
        reason_usage = make_usage(0, 0, 0)

        for chunk in reason_stream:
            new_chunk, reason_usage = _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id)
            yield new_chunk

        if max_tokens is not None:
            max_tokens -= reason_usage.completion_tokens
            response_args["max_tokens"] = max_tokens

        # Phase 2: feed the reasoning back to the response model and stream
        # its answer.
        new_messages = _build_message(messages, reason_content="".join(reasoning_contents), reason_prompt=reason_prompt)

        response_stream = response_client.chat_completions_stream(
            new_messages,
            response_model,
            **response_args
        )

        stream = response_stream

        for chunk in response_stream:
            yield _process_response_chunk(chunk, reason_usage, show_model, created, _id)

    return Stream(_iter()).on_next(lambda it: it.__next__()).on_close(lambda _: stream.close())

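# Consumption sketch (illustrative): the returned Stream yields OpenAI-style
# chunks, reasoning chunks first and answer chunks second, so a caller can
# render both phases incrementally.
#
#   for chunk in chat_completion_stream(
#       messages=[{"role": "user", "content": "Hi"}],
#       reason_client=reason_client, reason_model="deepseek-reasoner",
#       response_client=response_client, response_model="gpt-4o-mini",
#       show_model="deepanything-demo",
#   ):
#       delta = chunk.choices[0].delta
#       print(getattr(delta, "reasoning_content", None) or delta.content or "", end="")
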
async def chat_completion_async(
        messages: list,
        reason_client: AsyncReasonClient,
        reason_model: str,
        response_client: AsyncResponseClient,
        response_model: str,
        show_model: str,
        reason_args=None,
        response_args=None,
        reason_prompt: str = "<Think>{}</Think>",
        created: Optional[int] = None,
        _id: Optional[str] = None,
        stream=False,
        max_tokens: Optional[int] = None
) -> Union[AsyncStream, ChatCompletion]:
    if created is None:
        created = int(time.time())
    if _id is None:
        _id = make_id_by_timestamp()
    if response_args is None:
        response_args = {}
    if reason_args is None:
        reason_args = {}
    if stream:
        # Forward max_tokens as well, matching the synchronous dispatch.
        return await chat_completion_stream_async(
            messages=messages,
            reason_model=reason_model,
            reason_client=reason_client,
            response_model=response_model,
            response_client=response_client,
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
            created=created,
            _id=_id,
            reason_prompt=reason_prompt,
            max_tokens=max_tokens
        )

    if max_tokens is not None:
        reason_args["max_tokens"] = max_tokens

    reason_chat_completion: ChatCompletion = await reason_client.reason(
        messages=messages,
        model=reason_model,
        **reason_args
    )

    if max_tokens is not None:
        max_tokens -= reason_chat_completion.usage.completion_tokens
        response_args["max_tokens"] = max_tokens

    response_chat_completion: ChatCompletion = await response_client.chat_completions(
        messages=_build_message(
            messages,
            reason_content=reason_chat_completion.choices[0].message.reasoning_content,
            reason_prompt=reason_prompt
        ),
        model=response_model,
        **response_args
    )

    return _merge_chat_completion(reason_chat_completion, response_chat_completion, show_model, created, _id)

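# Usage sketch (illustrative): the async variant mirrors the blocking API and
# is awaited from an event loop; the client objects are assumed to be
# AsyncReasonClient / AsyncResponseClient implementations configured elsewhere.
#
#   import asyncio
#
#   async def main():
#       completion = await chat_completion_async(
#           messages=[{"role": "user", "content": "Hi"}],
#           reason_client=async_reason_client, reason_model="deepseek-reasoner",
#           response_client=async_response_client, response_model="gpt-4o-mini",
#           show_model="deepanything-demo",
#       )
#       print(completion.choices[0].message.content)
#
#   asyncio.run(main())
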
async def chat_completion_stream_async(
        messages: list,
        reason_client: AsyncReasonClient,
        reason_model: str,
        response_client: AsyncResponseClient,
        response_model: str,
        show_model: str,
        reason_args=None,
        response_args=None,
        reason_prompt: str = "<Think>{}</Think>",
        created: Optional[int] = None,
        _id: Optional[str] = None,
        max_tokens: Optional[int] = None
) -> AsyncStream:
    if created is None:
        created = int(time.time())
    if _id is None:
        _id = make_id_by_timestamp()
    if response_args is None:
        response_args = {}
    if reason_args is None:
        reason_args = {}
    stream: Optional[AsyncStream] = None

    async def _iter():
        nonlocal stream, max_tokens

        # Phase 1: stream reasoning chunks, accumulating the reasoning text.
        reasoning_contents = []
        if max_tokens is not None:
            reason_args["max_tokens"] = max_tokens

        reason_stream = await reason_client.reason_stream(
            messages,
            reason_model,
            **reason_args
        )

        stream = reason_stream
        reason_usage = make_usage(0, 0, 0)

        async for chunk in reason_stream:
            new_chunk, reason_usage = _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id)
            yield new_chunk

        if max_tokens is not None:
            max_tokens -= reason_usage.completion_tokens
            response_args["max_tokens"] = max_tokens

        # Phase 2: feed the reasoning back to the response model and stream
        # its answer.
        new_messages = _build_message(messages, reason_content="".join(reasoning_contents), reason_prompt=reason_prompt)

        response_stream = await response_client.chat_completions_stream(
            new_messages,
            response_model,
            **response_args
        )

        stream = response_stream

        async for chunk in response_stream:
            yield _process_response_chunk(chunk, reason_usage, show_model, created, _id)

    return AsyncStream(_iter()).on_next(lambda it: it.__anext__()).on_close(lambda _: stream.close())

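# Consumption sketch (illustrative): AsyncStream is driven with `async for`
# inside a coroutine, again yielding reasoning chunks before answer chunks.
#
#   stream = await chat_completion_stream_async(...)  # same arguments as above
#   async for chunk in stream:
#       ...
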
class DeepAnythingClient:
    reason_client: ReasonClient
    response_client: ResponseClient

    reason_prompt: str

    def __init__(
            self,
            reason_client: ReasonClient,
            response_client: ResponseClient,
            reason_prompt: str = "<Think>{}</Think>"
    ):
        self.reason_client = reason_client
        self.response_client = response_client
        self.reason_prompt = reason_prompt

    def chat_completion(
            self,
            messages: list,
            reason_model: str,
            response_model: str,
            show_model: str,
            reason_args=None,
            response_args=None,
            created: Optional[int] = None,
            _id: Optional[str] = None,
            stream=False
    ) -> Union[Stream, ChatCompletion]:
        return chat_completion(
            messages=messages,
            reason_model=reason_model,
            reason_client=self.reason_client,
            response_model=response_model,
            response_client=self.response_client,
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
            created=created,
            _id=_id,
            stream=stream,
            reason_prompt=self.reason_prompt
        )

    def chat_completion_stream(
            self,
            messages: list,
            reason_model: str,
            response_model: str,
            show_model: str,
            reason_args=None,
            response_args=None,
            created: Optional[int] = None,
            _id: Optional[str] = None
    ) -> Stream:
        return chat_completion_stream(
            messages=messages,
            reason_model=reason_model,
            reason_client=self.reason_client,
            response_model=response_model,
            response_client=self.response_client,
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
            created=created,
            _id=_id,
            reason_prompt=self.reason_prompt
        )

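# Usage sketch (illustrative): the client bundles the two backends and the
# reason_prompt once, so call sites only pass messages and model names.
#
#   client = DeepAnythingClient(reason_client, response_client)
#   completion = client.chat_completion(
#       messages=[{"role": "user", "content": "Hi"}],
#       reason_model="deepseek-reasoner",
#       response_model="gpt-4o-mini",
#       show_model="deepanything-demo",
#   )
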
class AsyncDeepAnythingClient:
    reason_client: AsyncReasonClient
    response_client: AsyncResponseClient

    reason_prompt: str

    def __init__(
            self,
            reason_client: AsyncReasonClient,
            response_client: AsyncResponseClient,
            reason_prompt: str = "<Think>{}</Think>"
    ):
        self.reason_client = reason_client
        self.response_client = response_client
        self.reason_prompt = reason_prompt

    async def chat_completion(
            self,
            messages: list,
            reason_model: str,
            response_model: str,
            show_model: str,
            reason_args=None,
            response_args=None,
            created: Optional[int] = None,
            _id: Optional[str] = None,
            stream=False
    ) -> Union[AsyncStream, ChatCompletion]:
        return await chat_completion_async(
            messages=messages,
            reason_model=reason_model,
            reason_client=self.reason_client,
            response_model=response_model,
            response_client=self.response_client,
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
            created=created,
            _id=_id,
            stream=stream,
            reason_prompt=self.reason_prompt
        )

    async def chat_completion_stream(
            self,
            messages: list,
            reason_model: str,
            response_model: str,
            show_model: str,
            reason_args=None,
            response_args=None,
            created: Optional[int] = None,
            _id: Optional[str] = None
    ) -> AsyncStream:
        return await chat_completion_stream_async(
            messages=messages,
            reason_model=reason_model,
            reason_client=self.reason_client,
            response_model=response_model,
            response_client=self.response_client,
            show_model=show_model,
            reason_args=reason_args,
            response_args=response_args,
            created=created,
            _id=_id,
            reason_prompt=self.reason_prompt
        )
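# Usage sketch (illustrative): the async client mirrors DeepAnythingClient
# and is used from a coroutine.
#
#   client = AsyncDeepAnythingClient(async_reason_client, async_response_client)
#   completion = await client.chat_completion(
#       messages=[{"role": "user", "content": "Hi"}],
#       reason_model="deepseek-reasoner",
#       response_model="gpt-4o-mini",
#       show_model="deepanything-demo",
#   )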
@@ -0,0 +1,203 @@
from typing import Union

import openai
from openai.types.chat import chat_completion

from deepanything.Stream import Stream, AsyncStream
from deepanything import Utility


class ReasonClient:
    def __init__(self) -> None:
        pass

    def reason(
            self,
            messages: list[dict],
            model: str,
            stream=False,
            **kwargs
    ) -> Union[Stream, chat_completion.ChatCompletion]:
        # Default implementation: run the streaming variant and, for
        # non-streaming calls, merge the chunks into a single completion.
        if stream:
            return self.reason_stream(messages, model, **kwargs)

        return Utility.merge_chunk(
            self.reason_stream(messages, model, **kwargs),
            model
        )

    def reason_stream(self,
                      messages: list[dict],
                      model: str,
                      **kwargs
                      ) -> Stream:
        raise NotImplementedError

class AsyncReasonClient:
    def __init__(self) -> None:
        pass

    async def reason(
            self,
            messages: list[dict],
            model: str,
            stream=False,
            **kwargs
    ) -> Union[AsyncStream, chat_completion.ChatCompletion]:
        # Async counterpart of ReasonClient.reason.
        if stream:
            return await self.reason_stream(messages, model, **kwargs)

        return await Utility.async_merge_chunk(
            await self.reason_stream(messages, model, **kwargs),
            model
        )

    async def reason_stream(self,
                            messages: list[dict],
                            model: str,
                            **kwargs
                            ) -> AsyncStream:
        raise NotImplementedError

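# Subclassing sketch (illustrative): a backend only needs to implement
# reason_stream; the base class derives the non-streaming reason() from it
# by merging the streamed chunks.
#
#   class MyReasonClient(ReasonClient):
#       def reason_stream(self, messages, model, **kwargs) -> Stream:
#           ...  # yield chunks whose delta carries reasoning_content
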
class DeepseekReasonClient(ReasonClient):
    client: openai.OpenAI

    def __init__(self, base_url: str, api_key: str, **kwargs) -> None:
        super().__init__()
        self.client = openai.OpenAI(
            base_url=base_url,
            api_key=api_key,
            **kwargs
        )

    def reason_stream(self,
                      messages: list[dict],
                      model: str,
                      **kwargs
                      ) -> Stream:
        stream = self.client.chat.completions.create(
            messages=messages,
            model=model,
            stream=True,
            **kwargs
        )

        def _iter():
            # DeepSeek emits reasoning_content first; once a chunk arrives
            # without it, the reasoning phase is over, so stop iterating.
            for chunk in stream:
                if chunk.choices[0].delta.reasoning_content is not None:
                    yield chunk
                else:
                    return

        return (Stream(_iter())
                .on_next(lambda it: it.__next__())
                .on_close(lambda _: stream.close()))

class AsyncDeepseekReasonClient(AsyncReasonClient):
    client: openai.AsyncOpenAI

    def __init__(self, base_url: str, api_key: str, **kwargs) -> None:
        super().__init__()
        self.client = openai.AsyncOpenAI(
            base_url=base_url,
            api_key=api_key,
            **kwargs
        )

    async def reason_stream(self,
                            messages: list[dict],
                            model: str,
                            **kwargs
                            ) -> AsyncStream:
        stream = await self.client.chat.completions.create(
            messages=messages,
            model=model,
            stream=True,
            **kwargs
        )

        async def _iter():
            # Same cut-off rule as the synchronous client: stop once the
            # chunks no longer carry reasoning_content.
            async for chunk in stream:
                if chunk.choices[0].delta.reasoning_content is not None:
                    yield chunk
                else:
                    return

        return (AsyncStream(_iter())
                .on_next(lambda it: it.__anext__())
                .on_close(lambda _: stream.close()))

class OpenaiReasonClient(ReasonClient):
    client: openai.OpenAI

    def __init__(
            self,
            base_url: str,
            api_key: str,
            **kwargs
    ) -> None:
        super().__init__()
        self.client = openai.OpenAI(
            base_url=base_url,
            api_key=api_key,
            **kwargs
        )

    def reason_stream(self,
                      messages: list[dict],
                      model: str,
                      **kwargs
                      ) -> Stream:
        # For OpenAI-style reasoners the raw stream is forwarded unchanged
        # rather than filtered on reasoning_content.
        return self.client.chat.completions.create(
            messages=messages,
            model=model,
            stream=True,
            **kwargs
        )

    def reason(
            self,
            messages: list[dict],
            model: str,
            stream=False,
            **kwargs
    ) -> Union[Stream, chat_completion.ChatCompletion]:
        return self.client.chat.completions.create(
            messages=messages,
            model=model,
            stream=stream,
            **kwargs
        )

class AsyncOpenaiReasonClient(AsyncReasonClient):
    client: openai.AsyncOpenAI

    def __init__(self, base_url: str, api_key: str, **kwargs) -> None:
        super().__init__()
        self.client = openai.AsyncOpenAI(
            base_url=base_url,
            api_key=api_key,
            **kwargs
        )

    async def reason_stream(self,
                            messages: list[dict],
                            model: str,
                            **kwargs
                            ) -> AsyncStream:
        return await self.client.chat.completions.create(
            messages=messages,
            model=model,
            stream=True,
            **kwargs
        )

    async def reason(self,
                     messages: list[dict],
                     model: str,
                     stream=False,
                     **kwargs
                     ) -> Union[AsyncStream, chat_completion.ChatCompletion]:
        return await self.client.chat.completions.create(
            messages=messages,
            model=model,
            stream=stream,
            **kwargs
        )
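# Usage sketch (illustrative): endpoint, key, and model name are placeholders.
#
#   client = DeepseekReasonClient(base_url="https://api.deepseek.com", api_key="sk-...")
#   completion = client.reason(
#       messages=[{"role": "user", "content": "Hi"}],
#       model="deepseek-reasoner",
#   )
#   print(completion.choices[0].message.reasoning_content)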