deepanything 0.1.7__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepanything/DeepAnythingClient.py CHANGED
@@ -1,13 +1,14 @@
  import time
  from typing import Optional, List
+
  from openai.types.chat.chat_completion import ChatCompletion

- from deepanything.Stream import Stream,AsyncStream
- from deepanything.Utility import make_usage, make_chat_completion_message, merge_chunk, async_merge_chunk, \
- make_chat_completion_chunk, make_chat_completion, make_chat_completion_choice, merge_usage, make_id_by_timestamp, \
- attend_message
- from deepanything.ResponseClient import ResponseClient,AsyncResponseClient
- from deepanything.ReasonClient import ReasonClient,AsyncReasonClient
+ from deepanything.ReasonClient import ReasonClient, AsyncReasonClient
+ from deepanything.ResponseClient import ResponseClient, AsyncResponseClient
+ from deepanything.Stream import Stream, AsyncStream
+ from deepanything.Utility import make_usage, make_chat_completion_message, make_chat_completion, \
+ make_chat_completion_choice, merge_usage, make_id_by_timestamp, \
+ extend_message


  def _merge_chat_completion(
@@ -42,7 +43,7 @@ def _build_message(
  reason_content : str,
  reason_prompt : str
  ) -> List:
- return attend_message(messages,role="assistant",content=reason_prompt.format(reason_content))
+ return extend_message(messages, role="assistant", content=reason_prompt.format(reason_content))
  def _process_reason_chunk(chunk, reasoning_contents, reason_usage, show_model, created, _id):
  new_chunk = chunk.model_copy(deep=False)
  new_chunk.model = show_model
@@ -80,13 +81,34 @@ def chat_completion(
  show_model: str,
  reason_args=None,
  response_args=None,
- reason_prompt: str = "<Think>{}</Think>",
+ reason_prompt: str = "<think>{}</think>",
  reason_system_prompt: Optional[str] = None,
  created: int = int(time.time()),
  stream = False,
  _id: str = make_id_by_timestamp(),
  max_tokens : Optional[int] = None
  ) -> Stream or ChatCompletion:
+
+ """
+ Make a chat completion synchronously.
+
+ :param messages: Messages
+ :param reason_client: Reason client
+ :param reason_model: Reason model
+ :param response_client: Response client
+ :param response_model: Response model
+ :param show_model: Specify the model name in the return value
+ :param reason_args: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :param response_args: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :param reason_prompt: Specifies how the thinking content should be embedded into the conversation. DeepAnything will use `reason_prompt` to format the thinking content. The default is `<think>{}</think>`.
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param created: The timestamp indicating the time when the chat completion was created
+ :param stream: Whether to stream the response
+ :param _id: Specify the `id` in the return value
+ :param max_tokens: Maximum number of tokens to generate
+ :return: A Stream if stream is True, otherwise a ChatCompletion
+ """
+
  if response_args is None:
  response_args = {}
  if reason_args is None:
@@ -144,12 +166,32 @@ def chat_completion_stream(
  show_model: str,
  reason_args=None,
  response_args=None,
- reason_prompt: str = "<Think>{}</Think>",
+ reason_prompt: str = "<think>{}</think>",
  reason_system_prompt: Optional[str] = None,
  created: int = int(time.time()),
  _id: str = make_id_by_timestamp(),
  max_tokens : Optional[int] = None
  ) -> Stream:
+
+ """
+ Make a chat completion synchronously. This method streams the result.
+
+ :param messages: Messages
+ :param reason_client: Reason client
+ :param reason_model: Reason model
+ :param response_client: Response client
+ :param response_model: Response model
+ :param show_model: Specify the model name in the return value
+ :param reason_args: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :param response_args: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :param reason_prompt: Specifies how the thinking content should be embedded into the conversation. DeepAnything will use `reason_prompt` to format the thinking content. The default is `<think>{}</think>`.
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param created: The timestamp indicating the time when the chat completion was created
+ :param _id: Specify the `id` in the return value
+ :param max_tokens: Maximum number of tokens to generate
+ :return: A Stream
+ """
+
  if response_args is None:
  response_args = {}
  if reason_args is None:
@@ -207,13 +249,33 @@ async def chat_completion_async(
  show_model: str,
  reason_args=None,
  response_args=None,
- reason_prompt: str = "<Think>{}</Think>",
+ reason_prompt: str = "<think>{}</think>",
  reason_system_prompt: Optional[str] = None,
  created: int = int(time.time()),
  _id: str = make_id_by_timestamp(),
  stream=False,
  max_tokens : Optional[int] = None
  ):
+ """
+ Make a chat completion asynchronously.
+
+ :param messages: Messages
+ :param reason_client: Reason client
+ :param reason_model: Reason model
+ :param response_client: Response client
+ :param response_model: Response model
+ :param show_model: Specify the model name in the return value
+ :param reason_args: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :param response_args: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :param reason_prompt: Specifies how the thinking content should be embedded into the conversation. DeepAnything will use `reason_prompt` to format the thinking content. The default is `<think>{}</think>`.
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param created: The timestamp indicating the time when the chat completion was created
+ :param stream: Whether to stream the response
+ :param _id: Specify the `id` in the return value
+ :param max_tokens: Maximum number of tokens to generate
+ :return: An AsyncStream if stream is True, otherwise a ChatCompletion
+ """
+
  if response_args is None:
  response_args = {}
  if reason_args is None:
@@ -273,12 +335,31 @@ async def chat_completion_stream_async(
  show_model: str,
  reason_args=None,
  response_args=None,
- reason_prompt: str = "<Think>{}</Think>",
+ reason_prompt: str = "<think>{}</think>",
  reason_system_prompt: Optional[str] = None,
  created: int = int(time.time()),
  _id: str = make_id_by_timestamp(),
  max_tokens : Optional[int] = None
  ) -> AsyncStream:
+ """
+ Make a chat completion asynchronously. This method streams the result.
+
+ :param messages: Messages
+ :param reason_client: Reason client
+ :param reason_model: Reason model
+ :param response_client: Response client
+ :param response_model: Response model
+ :param show_model: Specify the model name in the return value
+ :param reason_args: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :param response_args: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :param reason_prompt: Specifies how the thinking content should be embedded into the conversation. DeepAnything will use `reason_prompt` to format the thinking content. The default is `<think>{}</think>`.
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param created: The timestamp indicating the time when the chat completion was created
+ :param _id: Specify the `id` in the return value
+ :param max_tokens: Maximum number of tokens to generate
+ :return: An AsyncStream
+ """
+
  if response_args is None:
  response_args = {}
  if reason_args is None:
@@ -328,20 +409,32 @@ async def chat_completion_stream_async(
  return AsyncStream(_iter()).on_next(lambda it:it.__anext__()).on_close(lambda _:stream.close())

  class DeepAnythingClient:
+ """
+ DeepAnything Client
+ """
+
  reason_client : ReasonClient
  response_client : ResponseClient
-
- reason_prompt : str
+ reason_prompt : Optional[str]
+ reason_system_prompt : Optional[str]

  def __init__(
  self,
  reason_client: ReasonClient,
  response_client: ResponseClient,
- reason_prompt : str = "<Think>{}</Think>"
+ reason_prompt : Optional[str] = None,
+ reason_system_prompt: Optional[str] = None
  ):
+ """
+ :param reason_client: Reason client
+ :param response_client: Response client
+ :param reason_prompt: Default value for reason_prompt
+ :param reason_system_prompt: Default value for reason_system_prompt
+ """
  self.reason_client = reason_client
  self.response_client = response_client
  self.reason_prompt = reason_prompt
+ self.reason_system_prompt = reason_system_prompt

  def chat_completion(
  self,
@@ -351,11 +444,35 @@ class DeepAnythingClient:
  show_model : str,
  reason_args=None,
  response_args=None,
+ reason_prompt: Optional[str] = None,
  reason_system_prompt: Optional[str] = None,
  created : int = int(time.time()),
  _id : str = make_id_by_timestamp(),
- stream = False
+ stream = False,
+ max_tokens : Optional[int] = None
  ) -> Stream or ChatCompletion:
+ """
+ Make a chat completion synchronously.
+
+ :param messages: Messages
+ :param reason_model: Reason model
+ :param response_model: Response model
+ :param show_model: Specify the model name in the return value
+ :param reason_args: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :param response_args: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :param reason_prompt: Specifies how the thinking content should be embedded into the conversation. DeepAnything will use `reason_prompt` to format the thinking content. The default is `<think>{}</think>`.
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param created: The timestamp indicating the time when the chat completion was created
+ :param stream: Whether to stream the response
+ :param _id: Specify the `id` in the return value
+ :param max_tokens: Maximum number of tokens to generate
+ :return: A Stream if stream is True, otherwise a ChatCompletion
+ """
+ if reason_prompt is None:
+ reason_prompt = self.reason_prompt
+ if reason_system_prompt is None:
+ reason_system_prompt = self.reason_system_prompt
+
  return chat_completion(
  messages=messages,
  reason_model=reason_model,
@@ -365,11 +482,12 @@ class DeepAnythingClient:
  show_model=show_model,
  reason_args=reason_args,
  response_args=response_args,
+ reason_prompt=reason_prompt,
  reason_system_prompt=reason_system_prompt,
  created=created,
  _id=_id,
  stream=stream,
- reason_prompt=self.reason_prompt
+ max_tokens=max_tokens
  )

  def chat_completion_stream(
@@ -380,10 +498,33 @@ class DeepAnythingClient:
  show_model : str,
  reason_args=None,
  response_args=None,
+ reason_prompt: Optional[str] = None,
  reason_system_prompt: Optional[str] = None,
  created : int = int(time.time()),
- _id : str = make_id_by_timestamp()
+ _id : str = make_id_by_timestamp(),
+ max_tokens : Optional[int] = None
  ) -> Stream:
+ """
+ Make a chat completion synchronously. This method streams the result.
+
+ :param messages: Messages
+ :param reason_model: Reason model
+ :param response_model: Response model
+ :param show_model: Specify the model name in the return value
+ :param reason_args: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :param response_args: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :param reason_prompt: Specifies how the thinking content should be embedded into the conversation. DeepAnything will use `reason_prompt` to format the thinking content. The default is `<think>{}</think>`.
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param created: The timestamp indicating the time when the chat completion was created
+ :param _id: Specify the `id` in the return value
+ :param max_tokens: Maximum number of tokens to generate
+ :return: A Stream
+ """
+
+ if reason_prompt is None:
+ reason_prompt = self.reason_prompt
+ if reason_system_prompt is None:
+ reason_system_prompt = self.reason_system_prompt
  return chat_completion_stream(
  messages=messages,
  reason_model=reason_model,
@@ -393,28 +534,40 @@ class DeepAnythingClient:
  show_model=show_model,
  reason_args=reason_args,
  response_args=response_args,
+ reason_prompt=reason_prompt,
  reason_system_prompt=reason_system_prompt,
  created=created,
  _id=_id,
- reason_prompt=self.reason_prompt
+ max_tokens=max_tokens
  )


  class AsyncDeepAnythingClient:
+ """
+ DeepAnything Async Client
+ """
  reason_client : AsyncReasonClient
  response_client : AsyncResponseClient
-
- reason_prompt : str
+ reason_prompt : Optional[str]
+ reason_system_prompt : Optional[str]

  def __init__(
  self,
  reason_client: AsyncReasonClient,
  response_client: AsyncResponseClient,
- reason_prompt : str = "<Think>{}</Think>"
+ reason_prompt : Optional[str] = None,
+ reason_system_prompt: Optional[str] = None
  ):
+ """
+ :param reason_client: Reason client
+ :param response_client: Response client
+ :param reason_prompt: Default value for reason_prompt
+ :param reason_system_prompt: Default value for reason_system_prompt
+ """
  self.reason_client = reason_client
  self.response_client = response_client
  self.reason_prompt = reason_prompt
+ self.reason_system_prompt = reason_system_prompt

  async def chat_completion(
  self,
@@ -424,11 +577,36 @@ class AsyncDeepAnythingClient:
  show_model: str,
  reason_args=None,
  response_args=None,
+ reason_prompt: Optional[str] = None,
  reason_system_prompt: Optional[str] = None,
  created: int = int(time.time()),
  _id: str = make_id_by_timestamp(),
- stream=False
+ stream=False,
+ max_tokens : Optional[int] = None
  ):
+ """
+ Make a chat completion asynchronously.
+
+ :param stream: Whether to stream the response
+ :param messages: Messages
+ :param reason_model: Reason model
+ :param response_model: Response model
+ :param show_model: Specify the model name in the return value
+ :param reason_args: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :param response_args: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :param reason_prompt: Specifies how the thinking content should be embedded into the conversation. DeepAnything will use `reason_prompt` to format the thinking content. The default is `<think>{}</think>`.
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param created: The timestamp indicating the time when the chat completion was created
+ :param _id: Specify the `id` in the return value
+ :param max_tokens: Maximum number of tokens to generate
+ :return: An AsyncStream if stream is True, otherwise a ChatCompletion
+ """
+
+ if reason_prompt is None:
+ reason_prompt = self.reason_prompt
+ if reason_system_prompt is None:
+ reason_system_prompt = self.reason_system_prompt
+
  return await chat_completion_async(
  messages=messages,
  reason_model=reason_model,
@@ -438,11 +616,12 @@ class AsyncDeepAnythingClient:
  show_model=show_model,
  reason_args=reason_args,
  response_args=response_args,
+ reason_prompt=reason_prompt,
  reason_system_prompt=reason_system_prompt,
  created=created,
  _id=_id,
  stream=stream,
- reason_prompt=self.reason_prompt
+ max_tokens=max_tokens
  )

  async def chat_completion_stream(
@@ -453,10 +632,34 @@ class AsyncDeepAnythingClient:
  show_model : str,
  reason_args=None,
  response_args=None,
+ reason_prompt: Optional[str] = None,
  reason_system_prompt: Optional[str] = None,
  created : int = int(time.time()),
- _id : str = make_id_by_timestamp()
+ _id : str = make_id_by_timestamp(),
+ max_tokens : Optional[int] = None
  ) -> AsyncStream:
+ """
+ Make a chat completion asynchronously. This method streams the result.
+
+ :param messages: Messages
+ :param reason_model: Reason model
+ :param response_model: Response model
+ :param show_model: Specify the model name in the return value
+ :param reason_args: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :param response_args: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :param reason_prompt: Specifies how the thinking content should be embedded into the conversation. DeepAnything will use `reason_prompt` to format the thinking content. The default is `<think>{}</think>`.
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param created: The timestamp indicating the time when the chat completion was created
+ :param _id: Specify the `id` in the return value
+ :param max_tokens: Maximum number of tokens to generate
+ :return: An AsyncStream
+ """
+
+ if reason_prompt is None:
+ reason_prompt = self.reason_prompt
+ if reason_system_prompt is None:
+ reason_system_prompt = self.reason_system_prompt
+
  return await chat_completion_stream_async(
  messages=messages,
  reason_model=reason_model,
@@ -466,8 +669,9 @@ class AsyncDeepAnythingClient:
  show_model=show_model,
  reason_args=reason_args,
  response_args=response_args,
+ reason_prompt=reason_prompt,
  reason_system_prompt=reason_system_prompt,
  created=created,
  _id=_id,
- reason_prompt=self.reason_prompt
+ max_tokens=max_tokens
  )
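
The net effect of the changes above: `reason_prompt` and `reason_system_prompt` became per-call overrides with client-level defaults, and `max_tokens` is now forwarded by the client methods. A minimal usage sketch of the 0.1.8 surface (endpoints, keys, and model names below are placeholders, not part of this diff):

```python
from deepanything.DeepAnythingClient import DeepAnythingClient
from deepanything.ReasonClient import DeepseekReasonClient
from deepanything.ResponseClient import OpenaiResponseClient

# Placeholder endpoints and keys; substitute your own providers.
reason_client = DeepseekReasonClient(base_url="https://api.deepseek.com/v1", api_key="sk-...")
response_client = OpenaiResponseClient(base_url="https://api.openai.com/v1", api_key="sk-...")

client = DeepAnythingClient(reason_client, response_client)

# reason_prompt and max_tokens can now be set per call (new in 0.1.8);
# unset values fall back to the defaults given in __init__.
completion = client.chat_completion(
    messages=[{"role": "user", "content": "How many r's are in 'strawberry'?"}],
    reason_model="deepseek-reasoner",   # placeholder model name
    response_model="gpt-4o-mini",       # placeholder model name
    show_model="deepanything-demo",
    reason_prompt="<think>{}</think>",
    max_tokens=512,
)
print(completion.choices[0].message.content)
```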
deepanything/ReasonClient.py CHANGED
@@ -1,13 +1,15 @@
  from typing import Optional

  import openai
- from openai import OpenAI
  from openai.types.chat import chat_completion, chat_completion_chunk
  from deepanything.Stream import Stream,AsyncStream
  from deepanything import Utility


  class ReasonClient:
+ """
+ Base Class for Reason Clients
+ """
  def __init__(self) -> None:
  pass

@@ -19,6 +21,16 @@ class ReasonClient:
  stream = False,
  **kwargs
  ) -> Stream or chat_completion.ChatCompletion:
+ """
+ Generate reasoning content like DeepSeek R1. The return value is almost the same as the OpenAI API's, but 'content' is None and 'reasoning_content' holds the reasoning content.
+
+ :param messages: Messages
+ :param model: Model
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param stream: Whether to stream the response
+ :param kwargs: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :return: A Stream if stream is True, otherwise a ChatCompletion
+ """
  if stream:
  return self.reason_stream(messages, model, **kwargs)

@@ -33,9 +45,21 @@ class ReasonClient:
  reason_system_prompt:Optional[str] = None,
  **kwargs
  ) -> Stream:
+ """
+ Generate reasoning content like DeepSeek R1. The return value is almost the same as the OpenAI API's, but 'content' is None and 'reasoning_content' holds the reasoning content. This method streams the result.
+
+ :param messages: Messages
+ :param model: Model
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param kwargs: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :return: A Stream
+ """
  raise NotImplementedError

  class AsyncReasonClient:
+ """
+ Base Class for Async Reason Clients
+ """
  def __init__(self) -> None:
  pass

@@ -47,6 +71,16 @@ class AsyncReasonClient:
  stream = False,
  **kwargs
  ) -> AsyncStream or chat_completion.ChatCompletion:
+ """
+ Generate reasoning content like DeepSeek R1. The return value is almost the same as the OpenAI API's, but 'content' is None and 'reasoning_content' holds the reasoning content.
+
+ :param messages: Messages
+ :param model: Model
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param stream: Whether to stream the response
+ :param kwargs: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :return: An AsyncStream if stream is True, otherwise a ChatCompletion
+ """
  if stream:
  return await self.reason_stream(messages, model, **kwargs)

@@ -61,12 +95,29 @@ class AsyncReasonClient:
  reason_system_prompt:Optional[str] = None,
  **kwargs
  ) -> AsyncStream:
+ """
+ Generate reasoning content like DeepSeek R1. The return value is almost the same as the OpenAI API's, but 'content' is None and 'reasoning_content' holds the reasoning content. This method streams the result.
+
+ :param messages: Messages
+ :param model: Model
+ :param reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ :param kwargs: Additional parameters passed to the reason client, such as temperature, top_k, etc.
+ :return: An AsyncStream
+ """
  raise NotImplementedError

  class DeepseekReasonClient(ReasonClient):
+ """
+ DeepSeek Reason Client
+ """
  client : openai.OpenAI

  def __init__(self,base_url:str,api_key:str,**kwargs) -> None:
+ """
+ :param base_url: Base URL
+ :param api_key: API key
+ :param kwargs: Other parameters used to create the client
+ """
  super().__init__()
  self.client = openai.OpenAI(
  base_url=base_url,
@@ -103,9 +154,18 @@ class DeepseekReasonClient(ReasonClient):
  .on_close(lambda _: stream.close()))

  class AsyncDeepseekReasonClient(AsyncReasonClient):
+ """
+ DeepSeek Reason Async Client
+ """
  client : openai.AsyncOpenAI

  def __init__(self,base_url:str,api_key:str,**kwargs) -> None:
+ """
+ :param base_url: Base URL
+ :param api_key: API key
+ :param kwargs: Other parameters used to create the client
+ """
+
  super().__init__()
  self.client = openai.AsyncOpenAI(
  base_url=base_url,
@@ -153,6 +213,9 @@ def _rebuild_chunk_for_openai(


  class OpenaiReasonClient(ReasonClient):
+ """
+ OpenAI Reason Client. Used with models similar to QwQ.
+ """
  client : openai.OpenAI
  def __init__(
  self,
@@ -160,6 +223,11 @@ class OpenaiReasonClient(ReasonClient):
  api_key:str,
  **kwargs
  ) -> None:
+ """
+ :param base_url: Base URL
+ :param api_key: API key
+ :param kwargs: Other parameters used to create the client
+ """
  super().__init__()
  self.client = openai.OpenAI(
  base_url=base_url,
@@ -174,7 +242,7 @@ class OpenaiReasonClient(ReasonClient):
  **kwargs
  ) -> Stream:
  if reason_system_prompt is not None:
- messages = Utility.attend_message(messages,role="system",content=reason_system_prompt)
+ messages = Utility.extend_message(messages, role="system", content=reason_system_prompt)

  stream = self.client.chat.completions.create(
  messages=messages,
@@ -198,7 +266,7 @@ class OpenaiReasonClient(ReasonClient):
  return self.reason_stream(messages, model, **kwargs)

  if reason_system_prompt is not None:
- messages = Utility.attend_message(messages,role="system",content=reason_system_prompt)
+ messages = Utility.extend_message(messages, role="system", content=reason_system_prompt)

  completion = self.client.chat.completions.create(
  messages=messages,
@@ -213,8 +281,16 @@ class OpenaiReasonClient(ReasonClient):
  return completion

  class AsyncOpenaiReasonClient(AsyncReasonClient):
+ """
+ OpenAI Reason Async Client. Used with models similar to QwQ.
+ """
  client : openai.AsyncOpenAI
  def __init__(self,base_url:str,api_key:str,**kwargs) -> None:
+ """
+ :param base_url: Base URL
+ :param api_key: API key
+ :param kwargs: Other parameters used to create the client
+ """
  super().__init__()
  self.client = openai.AsyncOpenAI(
  base_url=base_url,
@@ -230,7 +306,7 @@ class AsyncOpenaiReasonClient(AsyncReasonClient):
  ) -> AsyncStream:

  if reason_system_prompt is not None:
- messages = Utility.attend_message(messages,role="system",content=reason_system_prompt)
+ messages = Utility.extend_message(messages, role="system", content=reason_system_prompt)

  stream = await self.client.chat.completions.create(
  messages=messages,
@@ -256,7 +332,7 @@ class AsyncOpenaiReasonClient(AsyncReasonClient):
  return await self.reason_stream(messages, model, **kwargs)

  if reason_system_prompt is not None:
- messages = Utility.attend_message(messages,role="system",content=reason_system_prompt)
+ messages = Utility.extend_message(messages, role="system", content=reason_system_prompt)

  completion = await self.client.chat.completions.create(
  messages=messages,
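
For orientation, a sketch of how a reason client is consumed on its own (model name and credentials are placeholders): the stream yields ChatCompletionChunk objects whose delta carries `reasoning_content` rather than `content`.

```python
from deepanything.ReasonClient import DeepseekReasonClient

client = DeepseekReasonClient(base_url="https://api.deepseek.com/v1", api_key="sk-...")

stream = client.reason_stream(
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
    model="deepseek-reasoner",  # placeholder model name
)
try:
    while True:
        delta = next(stream).choices[0].delta
        # Reason clients populate reasoning_content instead of content.
        print(getattr(delta, "reasoning_content", "") or "", end="")
except StopIteration:
    pass
finally:
    stream.close()
```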
deepanything/ResponseClient.py CHANGED
@@ -5,36 +5,84 @@ import openai
  from deepanything import Utility

  class ResponseClient:
+ """
+ Base Class for Response Clients
+ """
  def __init__(self):
  pass

  def chat_completions(self,messages,model,stream = False,**kwargs) -> Stream or chat_completion.ChatCompletion:
+ """
+ Make a chat completion for the response.
+
+ :param messages: Messages
+ :param model: Model
+ :param stream: Whether to stream the response
+ :param kwargs: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :return: A Stream if stream is True, otherwise a ChatCompletion
+ """
  if stream:
  return self.chat_completions_stream(messages,model,**kwargs)

  return Utility.merge_chunk(self.chat_completions_stream(messages,model,**kwargs),model)

  def chat_completions_stream(self,messages,model,**kwargs) -> Stream:
- pass
+ """
+ Make a chat completion for the response. This method streams the result.
+
+ :param messages: Messages
+ :param model: Model
+ :param kwargs: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :return: A Stream
+ """
+ raise NotImplementedError()

  class AsyncResponseClient:
+ """
+ Base Class for Async Response Clients
+ """
  def __init__(self):
  pass

  async def chat_completions(self,messages,model,stream = False,**kwargs) -> AsyncStream or chat_completion.ChatCompletion:
+ """
+ Make a chat completion for the response.
+
+ :param messages: Messages
+ :param model: Model
+ :param stream: Whether to stream the response
+ :param kwargs: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :return: An AsyncStream if stream is True, otherwise a ChatCompletion
+ """
  if stream:
  return self.chat_completions_stream(messages,model,**kwargs)

  return await Utility.async_merge_chunk(await self.chat_completions_stream(messages,model,**kwargs),model)

  async def chat_completions_stream(self,messages,model,**kwargs) -> AsyncStream:
- pass
+ """
+ Make a chat completion for the response. This method streams the result.

+ :param messages: Messages
+ :param model: Model
+ :param kwargs: Additional parameters passed to the response client, such as temperature, top_k, etc.
+ :return: An AsyncStream
+ """
+ raise NotImplementedError()

  class OpenaiResponseClient(ResponseClient):
+ """
+ OpenAI-like response client
+ """
  client : openai.OpenAI

  def __init__(self,base_url,api_key,**kwargs):
+ """
+
+ :param base_url: Base URL
+ :param api_key: API key
+ :param kwargs: Other parameters used to create the client
+ """
  super().__init__()
  self.client = openai.OpenAI(
  base_url=base_url,
@@ -61,9 +109,17 @@ class OpenaiResponseClient(ResponseClient):


  class AsyncOpenaiResponseClient(AsyncResponseClient):
+ """
+ OpenAI-like async response client
+ """
  client : openai.AsyncOpenAI

  def __init__(self,base_url,api_key,**kwargs):
+ """
+ :param base_url: Base URL
+ :param api_key: API key
+ :param kwargs: Other parameters used to create the client
+ """
  super().__init__()
  self.client = openai.AsyncOpenAI(
  base_url=base_url,
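
With the base classes now raising NotImplementedError instead of silently returning None, the contract for a custom backend is explicit: implement `chat_completions_stream` and inherit the non-streaming path, which merges the chunks via `Utility.merge_chunk`. A sketch of a hypothetical subclass follows; `_call_backend` is an assumed helper, not part of the package:

```python
from deepanything.ResponseClient import ResponseClient
from deepanything.Stream import Stream

class MyResponseClient(ResponseClient):
    """Hypothetical backend wired in through the Stream callback pattern."""

    def chat_completions_stream(self, messages, model, **kwargs) -> Stream:
        # _call_backend is assumed to return an iterator of
        # ChatCompletionChunk objects that also has a close() method.
        upstream = self._call_backend(messages, model, **kwargs)
        # Same on_next/on_close pattern the package itself uses above.
        return (Stream(upstream)
                .on_next(lambda it: it.__next__())
                .on_close(lambda it: it.close()))
```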
deepanything/Server/Server.py CHANGED
@@ -1,24 +1,23 @@
- from chunk import Chunk
- from dataclasses import dataclass
+ import json
+ import logging
+ import logging.config
  import time
- import uvicorn
+ from dataclasses import dataclass
  from typing import Dict, List, Optional, Any
- import json

+ import uvicorn
+ from fastapi import FastAPI, HTTPException, status, Header, Request
+ from fastapi.responses import StreamingResponse, Response
+ from fastapi.security import HTTPBearer
  from openai.types.model import Model as OpenaiModel
- from fastapi import FastAPI,Depends, HTTPException, status,Header,Request
- from fastapi.responses import StreamingResponse,Response
- from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
  from uvicorn.config import LOGGING_CONFIG
- import logging
- import logging.config

  from deepanything.DeepAnythingClient import chat_completion_stream_async, chat_completion_async
- from deepanything.ResponseClient import AsyncOpenaiResponseClient,AsyncResponseClient
- from deepanything.Stream import AsyncStream
- from deepanything.ReasonClient import AsyncDeepseekReasonClient,AsyncOpenaiReasonClient,AsyncReasonClient
+ from deepanything.ReasonClient import AsyncDeepseekReasonClient, AsyncOpenaiReasonClient, AsyncReasonClient
+ from deepanything.ResponseClient import AsyncOpenaiResponseClient, AsyncResponseClient
  from deepanything.Server import Types
- from deepanything.metadatas import VERSION,PYTHON_RUNTIME
+ from deepanything.Stream import AsyncStream
+ from deepanything.metadatas import VERSION, PYTHON_RUNTIME


  @dataclass
@@ -29,7 +28,7 @@ class ModelInfo:
  response_client : str
  response_model : str
  created : int = int(time.time())
- reason_prompt : str = "<Think>{}</Think>",
+ reason_prompt : str = "<think>{}</think>"
  reason_system_prompt : Optional[str] = None

  class DeepAnythingServer:
@@ -102,7 +101,7 @@ class DeepAnythingServer:
  response_client = _model["response_client"]
  response_model = _model["response_model"]
  created = _model.get("created", int(time.time()))
- reason_prompt = _model.get("reason_prompt", "<Think>{}</Think>")
+ reason_prompt = _model.get("reason_prompt", "<think>{}</think>")
  reason_system_prompt = _model.get("reason_system_prompt", None)

  if name in self.models:
@@ -161,7 +160,7 @@ class DeepAnythingServer:


  if name in self.response_clients:
- self.logger.error(f"Detected duplicate response clients : {name}")
+ self.logger.error(f"Detected duplicate reason clients : {name}")
  exit(0)

  if client["type"] == 'deepseek':
deepanything/Server/Types.py CHANGED
@@ -1,6 +1,7 @@
- from pydantic import BaseModel
+ from typing import List, Optional
+
  from openai.types.model import Model as OpenaiModel
- from typing import Dict, List, Optional
+ from pydantic import BaseModel


  class ModelsListResponse(BaseModel):
deepanything/Stream.py CHANGED
@@ -3,9 +3,12 @@ from typing import Any, AsyncIterator, Iterator
  from openai.types.chat.chat_completion_chunk import ChatCompletionChunk

  class Stream:
+ """
+ Implementation of streaming return, based on simple callback functions.
+ """
  next_fc : Callable[[Any],ChatCompletionChunk]
  close_fc : Callable[[Any],None]
- data : dict
+ data : Any


  def __init__(self,data):
@@ -14,10 +17,22 @@ class Stream:
  return self

  def on_next(self,fc : Callable[[Any],ChatCompletionChunk]) -> 'Stream':
+ """
+ Set the callback for `__next__()`
+
+ :param fc: Callback
+ :return: The Stream itself.
+ """
  self.next_fc = fc
  return self

  def on_close(self,fc : Callable[[Any],None]) -> 'Stream':
+ """
+ Set the callback for `close()`
+
+ :param fc: Callback
+ :return: The Stream itself.
+ """
  self.close_fc = fc
  return self

@@ -25,9 +40,16 @@ class Stream:
  return self.next_fc(self.data)

  def close(self) -> None:
+ """
+ Close the stream
+ :return: None
+ """
  self.close_fc(self.data)

  class AsyncStream:
+ """
+ Implementation of streaming return, based on simple callback functions.
+ """
  next_fc: Callable[[Any], Awaitable[ChatCompletionChunk]]
  close_fc: Callable[[Any], Awaitable[None]]
  data : Any
@@ -39,10 +61,22 @@ class AsyncStream:
  return self

  def on_next(self, fc: Callable[[Any], Awaitable[ChatCompletionChunk]]) -> 'AsyncStream':
+ """
+ Set the callback for `__anext__()`
+
+ :param fc: Callback
+ :return: The AsyncStream itself.
+ """
  self.next_fc = fc
  return self

  def on_close(self, fc: Callable[[Any], Awaitable[None]]) -> 'AsyncStream':
+ """
+ Set the callback for `close()`
+
+ :param fc: Callback
+ :return: The AsyncStream itself.
+ """
  self.close_fc = fc
  return self

@@ -50,4 +84,8 @@ class AsyncStream:
  return await self.next_fc(self.data)

  async def close(self) -> None:
+ """
+ Close the stream
+ :return: None
+ """
  await self.close_fc(self.data)
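
Stream and AsyncStream are thin callback holders: `__next__()` delegates to `next_fc(data)` and `close()` to `close_fc(data)`. A toy sketch wrapping an ordinary iterator (strings stand in for ChatCompletionChunk objects):

```python
from deepanything.Stream import Stream

source = iter(["chunk-1", "chunk-2", "chunk-3"])  # toy payloads

s = (Stream(source)
     .on_next(lambda data: next(data))   # how to pull one item
     .on_close(lambda data: None))       # cleanup hook

try:
    while True:
        print(next(s))   # delegates to next_fc(self.data)
except StopIteration:
    pass
finally:
    s.close()            # delegates to close_fc(self.data)
```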
deepanything/Utility.py CHANGED
@@ -210,7 +210,7 @@ def merge_usage(
  def make_id_by_timestamp():
  return "chatcmpl-" + str(uuid.uuid4())

- def attend_message(
+ def extend_message(
  messages : List,
  role : Literal["developer", "system", "user", "assistant", "tool"],
  content : Optional[str] = None,
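
The `attend_message` to `extend_message` rename is the breaking change that ripples through the other files in this diff. A sketch of its call shape as used by `_build_message` and the reason clients above, limited to the parameters visible in this diff:

```python
from deepanything.Utility import extend_message

messages = [{"role": "user", "content": "Hi"}]

# Appends one message with the given role and content and returns the
# resulting list, e.g. to embed reasoning content for the response model.
messages = extend_message(messages, role="assistant",
                          content="<think>step-by-step reasoning...</think>")
```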
deepanything/metadatas.py CHANGED
@@ -1,4 +1,4 @@
  from sys import version

- VERSION = "v0.1.7"
+ VERSION = "v0.1.8"
  PYTHON_RUNTIME = f"python{version}"
deepanything-0.1.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: deepanything
- Version: 0.1.7
+ Version: 0.1.8
  Summary: DeepAnything is a project that provides DeepSeek R1's deep thinking capabilities for various large language models (LLMs).
  Author: Junity
  Author-email: 1727636624@qq.com
@@ -169,6 +169,12 @@ Below is an example of a configuration file:
  "type" : "deepseek",
  "base_url" : "https://api.siliconflow.cn/v1",
  "api_key" : "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+ },
+ {
+ "name" : "qwen",
+ "type" : "openai",
+ "base_url" : "https://dashscope.aliyuncs.com/compatible-mode/v1",
+ "api_key" : "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
  }
  ],
  "response_clients": [
@@ -185,7 +191,17 @@ Below is an example of a configuration file:
  "reason_client" : "siliconflow",
  "response_client" : "qwen",
  "reason_model": "Pro/deepseek-ai/DeepSeek-R1",
- "response_model" : "qwen-max-latest"
+ "response_model" : "qwen-max-latest",
+ "reason_prompt" : "<think>{}</think>"
+ },
+ {
+ "name": "QWQ-Qwen-max",
+ "reason_client" : "qwen",
+ "response_client" : "qwen",
+ "reason_model": "qwq-32b-preview",
+ "response_model" : "qwen-max-latest",
+ "reason_prompt" : "<think>{}</think>",
+ "reason_system_prompt" : "You are a model designed to contemplate questions before providing answers. Your thought process and responses are not directly visible to the user but are instead passed as prompts to the next model. For any question posed by the user, you should carefully consider it and provide as detailed a thought process as possible."
  }
  ],
  "api_keys" : [
@@ -220,7 +236,8 @@ Below is an example of a configuration file:
  "loggers": {
  "uvicorn": {"handlers": ["default"], "level": "INFO", "propagate": false},
  "uvicorn.error": {"level": "INFO"},
- "uvicorn.access": {"handlers": ["access"], "level": "INFO", "propagate": false}
+ "uvicorn.access": {"handlers": ["access"], "level": "INFO", "propagate": false},
+ "deepanything": {"handlers": ["default"], "level": "INFO", "propagate": false}
  }
  }
  }
@@ -228,13 +245,17 @@ Below is an example of a configuration file:

  #### **Detailed Explanation**

- - reason_clients: Configuration for thinking models, currently supports deepseek and openai types. When the type is openai, deepanything directly uses the model's output as the thinking content, and it is recommended to use qwq-32b in this case.
- - response_clients: Configuration for response models, currently only supports the openai type.
- - api_keys: API keys for user authentication. When left blank or an empty list, the server does not use API keys for authentication.
- - log: Log configuration. If this item is not filled in, the default logging configuration of uvicorn will be used. For details, please refer to [uvicorn logging configuration](https://www.uvicorn.org/settings/#logging).
+ - reason_clients: Configuration for the thinking model, currently supporting two types: deepseek and openai. When the type is openai, DeepAnything directly uses the model's output as the thinking content; it is recommended to use qwq-32b in this case.
+ - response_clients: Configuration for the response model, currently supporting only one type: openai.
+ - models: Model configuration, including model name, thinking model, response model, parameters for the thinking model, and parameters for the response model.
+ - reason_prompt: Specifies how the thinking content should be embedded into the conversation. DeepAnything will use `reason_prompt` to format the thinking content. The default is `<think>{}</think>`.
+ - reason_system_prompt: Adds extra prompt words for the thinking model. This prompt will be placed at the end of the message list as a `system` role and passed to the thinking model. If not specified, it will not take effect.
+ - api_keys: API keys used for user identity verification. When not filled in or set to an empty list, the server does not use API keys for authentication.
+ - log: Log configuration. If this item is not filled in, the default uvicorn log configuration will be used. For more details, refer to [uvicorn logging configuration](https://www.uvicorn.org/settings/#logging) and [Python logging configuration](https://docs.python.org/3/library/logging.config.html).

  ## Deploying with Docker
  ### 1. Pull the Image
+
  ```bash
  docker pull junity233/deepanything:latest
  ```
deepanything-0.1.8.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
+ deepanything/DeepAnythingClient.py,sha256=4dJg5CFsQLlhxCWgtj7UYa9VTchogeFm_-B3YExk8Ns,28622
+ deepanything/ReasonClient.py,sha256=P1uuwfgds6qch5gPp2BKD1y1myLPZ8BKCs1PiqsZHmw,12888
+ deepanything/ResponseClient.py,sha256=BT-5qBv1xs3-5SbYfFEjHYzH0E09I4Fl8h4K1IvSJ8U,5089
+ deepanything/Stream.py,sha256=8GSsKeAhSwSt0T6JAO4l3ivTwGuJYMxp7GlqpExp8T8,2440
+ deepanything/Utility.py,sha256=dZeCg_D28rwJILLuCsE-ZtcKUkbW1prXPVlpSkYFCKA,7024
+ deepanything/__init__.py,sha256=_2RolcKcpxmW0dmtiQpXlvgxe5dvqx90Yg_Q_oVLVZQ,175
+ deepanything/__main__.py,sha256=BWGtAVWDgJg50uGTEvZNV4P4PFghYwx5cTpXlEAWRio,1084
+ deepanything/metadatas.py,sha256=hfYSJI8zYLB9fgn5puFIHPQWxuMPY_n-YXoSW7y0XZ0,82
+ deepanything/Server/Server.py,sha256=avsTMQVtX9RErhKm2JqYt_U_auUyhHiE4u-Xu1zPzOI,11460
+ deepanything/Server/Types.py,sha256=yGd1vwYMg2nXXOdFh6esHfrAjCglkg9ivqzBMA4go6Y,646
+ deepanything/Server/__init__.py,sha256=eIpn6NbNvEg4ST8CuuIuzPT3m_fTlmPC3sikPoPFsYo,92
+ deepanything-0.1.8.dist-info/LICENSE,sha256=JWYd2E-mcNcSYjT5nk4ayM5kkkDq6ZlOxVcYsyqCIwU,1059
+ deepanything-0.1.8.dist-info/METADATA,sha256=iLcSCMcTt4SpS5VweRtGlUDrU3Bp9GpCc12j9qWoWbI,9486
+ deepanything-0.1.8.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+ deepanything-0.1.8.dist-info/entry_points.txt,sha256=UT4gNGx6dJsKBjZIl3VkMekh385O5WMbMidAAla6UB4,60
+ deepanything-0.1.8.dist-info/top_level.txt,sha256=wGeRb__4jEJTclCUl0cxhgubD_Bq-QT38VIH6C4KpzY,13
+ deepanything-0.1.8.dist-info/RECORD,,
deepanything-0.1.7.dist-info/RECORD DELETED
@@ -1,17 +0,0 @@
- deepanything/DeepAnythingClient.py,sha256=uhRQMg1y9O9maHG-sw0lDUx_1-aWCZVb4cvgoGlRdAs,16177
- deepanything/ReasonClient.py,sha256=Sbv36lreZksBG_X_Q4NKiBuG4vsZ50YhhkWAxuwIu-4,8868
- deepanything/ResponseClient.py,sha256=NbXjlU_0qTKBNjZy8B9J9emuABQYvx3NZsWuja9OnMI,2989
- deepanything/Stream.py,sha256=8ESR8ttjyPZ-uXPDENsVWUzaL34_GT2OZBJ0PWu7vsA,1578
- deepanything/Utility.py,sha256=LRyawSCVpo5RjcaFFSwIqjsL3JGZpl48KM_KXn_-8ew,7024
- deepanything/__init__.py,sha256=_2RolcKcpxmW0dmtiQpXlvgxe5dvqx90Yg_Q_oVLVZQ,175
- deepanything/__main__.py,sha256=BWGtAVWDgJg50uGTEvZNV4P4PFghYwx5cTpXlEAWRio,1084
- deepanything/metadatas.py,sha256=iACXZlK9YOj5N8U2AdXEY_3EP4uobqpweN6DpVEVNj8,82
- deepanything/Server/Server.py,sha256=K9GUVl_c22LSeNW8qCFSpuw3SpUqBulVXymwsF9JwqM,11518
- deepanything/Server/Types.py,sha256=b7aMaRBgODEKdyYe0FeraUfrygJuye3b5lfQTOWASXA,650
- deepanything/Server/__init__.py,sha256=eIpn6NbNvEg4ST8CuuIuzPT3m_fTlmPC3sikPoPFsYo,92
- deepanything-0.1.7.dist-info/LICENSE,sha256=JWYd2E-mcNcSYjT5nk4ayM5kkkDq6ZlOxVcYsyqCIwU,1059
- deepanything-0.1.7.dist-info/METADATA,sha256=OAFhDpn_XllHi_Rzm25OsATxG4uh-mEiA1obS3DuH90,7851
- deepanything-0.1.7.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
- deepanything-0.1.7.dist-info/entry_points.txt,sha256=UT4gNGx6dJsKBjZIl3VkMekh385O5WMbMidAAla6UB4,60
- deepanything-0.1.7.dist-info/top_level.txt,sha256=wGeRb__4jEJTclCUl0cxhgubD_Bq-QT38VIH6C4KpzY,13
- deepanything-0.1.7.dist-info/RECORD,,