auto-coder 0.1.383__py3-none-any.whl → 0.1.385__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic.
- {auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/METADATA +1 -1
- {auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/RECORD +10 -10
- autocoder/auto_coder_runner.py +10 -0
- autocoder/rag/long_context_rag.py +57 -2
- autocoder/rag/types.py +4 -1
- autocoder/version.py +1 -1
- {auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/auto_coder.py,sha256=7602L3tG0JErNxh8vkLAmGUgv2c-DGPzPCkmWIQt9bs,69757
 autocoder/auto_coder_rag.py,sha256=tRAKfo3jIhcaQKN_3g7DZRKtDJSZXJxMRdT6Zz8W9nw,41173
-autocoder/auto_coder_runner.py,sha256=
+autocoder/auto_coder_runner.py,sha256=NhT1fVdV4NnV2SwpXl7G-tTKgduA0D-0lK_p2R6DWew,115569
 autocoder/auto_coder_server.py,sha256=bLORGEclcVdbBVfM140JCI8WtdrU0jbgqdJIVVupiEU,20578
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
 autocoder/chat_auto_coder.py,sha256=vNQwbYkdqeMl07Vx8z6x-kSPkHKn9AT3sSkYMTJiWtc,26655
@@ -11,7 +11,7 @@ autocoder/command_parser.py,sha256=fx1g9E6GaM273lGTcJqaFQ-hoksS_Ik2glBMnVltPCE,1
 autocoder/lang.py,sha256=PFtATuOhHRnfpqHQkXr6p4C893JvpsgwTMif3l-GEi0,14321
 autocoder/models.py,sha256=pD5u6gcMKRwWaLxeVin18g25k-ERyeHOFsRpOgO_Ae0,13788
 autocoder/run_context.py,sha256=IUfSO6_gp2Wt1blFWAmOpN0b0nDrTTk4LmtCYUBIoro,1643
-autocoder/version.py,sha256=
+autocoder/version.py,sha256=iaY60sgwWc9wED3HJ9j7D-Lk6OSNJoA1OS8XwdbqWtI,25
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/agentic_filter.py,sha256=zlInIRhawKIYTJjCiJBWqPCOV5UtMbh5VnvszfTy2vo,39824
 autocoder/agent/auto_demand_organizer.py,sha256=URAq0gSEiHeV_W4zwhOI_83kHz0Ryfj1gcfh5jwCv_w,6501
@@ -270,7 +270,7 @@ autocoder/rag/doc_filter.py,sha256=UduVO2mlrngwJICrefjDJTYfdmQ4GcRXrfWDQ7xXksk,1
 autocoder/rag/document_retriever.py,sha256=rFwbAuHTvEFJq16HQNlmRLyJp2ddn2RNFslw_ncU7NI,8847
 autocoder/rag/lang.py,sha256=HvcMeu6jReEJOGxyLMn4rwBoD-myFwmykS3VLceBJLs,3364
 autocoder/rag/llm_wrapper.py,sha256=LsNv8maCnvazyXjjtkO9aN3OT7Br20V1ilHV8Lt45Os,4245
-autocoder/rag/long_context_rag.py,sha256=
+autocoder/rag/long_context_rag.py,sha256=gL35Y52lCJB3Lf0JgU-kDPCHEf6cR5QgA8uUontyKbo,52822
 autocoder/rag/qa_conversation_strategy.py,sha256=4CiMK88apKbJ2YM4HHq1KGpr5jUkTh0_m_aCyt-JYgc,10568
 autocoder/rag/rag_config.py,sha256=8LwFcTd8OJWWwi1_WY4IzjqgtT6RyE2j4PjxS5cCTDE,802
 autocoder/rag/rag_entry.py,sha256=QOdUX_nd1Qak2NyOW0CYcLRDB26AZ6MeByHJaMMGgqs,2316
@@ -283,7 +283,7 @@ autocoder/rag/token_checker.py,sha256=jc76x6KWmvVxds6W8juZfQGaoErudc2HenG3sNQfSL
 autocoder/rag/token_counter.py,sha256=C-Lwc4oIjJpZDEqp9WLHGOe6hb4yhrdJpMtkrtp_1qc,2125
 autocoder/rag/token_limiter.py,sha256=3VgJF4may3ESyATmBIiOe05oc3VsidJcJTJ5EhoSvH8,18854
 autocoder/rag/token_limiter_utils.py,sha256=FATNEXBnFJy8IK3PWNt1pspIv8wuTgy3F_ACNvqoc4I,404
-autocoder/rag/types.py,sha256=
+autocoder/rag/types.py,sha256=XUg47lb9DpJoIM4tcokmRp8bk4_mac0MrNd1GY1EVdg,3383
 autocoder/rag/utils.py,sha256=FPK3Vvk9X9tUuOu4_LctZN5WnRVuEjFiffRtE-pHn0s,6318
 autocoder/rag/variable_holder.py,sha256=PFvBjFcR7-fNDD4Vcsc8CpH2Te057vcpwJMxtrfUgKI,75
 autocoder/rag/cache/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -336,9 +336,9 @@ autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=t902pKxQ5xM7zgIHiAOsTPLwxhE6VuvXAqPy751S7fg,14096
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
+auto_coder-0.1.385.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.385.dist-info/METADATA,sha256=CBTR-3ZwyAp_dQkgpXTl3XVwwwUjkQjz6zdkVJnc1WI,2796
+auto_coder-0.1.385.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.385.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.385.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.385.dist-info/RECORD,,
autocoder/auto_coder_runner.py
CHANGED

@@ -657,6 +657,16 @@ def save_memory():
     load_memory()
 
 
+def save_memory_with_new_memory(new_memory):
+    memory_path = os.path.join(base_persist_dir, "memory.json")
+    lock_path = memory_path + ".lock"
+
+    with FileLock(lock_path, timeout=30):
+        with open(memory_path, "w", encoding="utf-8") as f:
+            json.dump(new_memory, f, indent=2, ensure_ascii=False)
+    load_memory()
+
+
 def load_memory():
     global memory
     memory_path = os.path.join(base_persist_dir, "memory.json")
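For context: the new save_memory_with_new_memory helper takes a full replacement dict, writes it under an exclusive file lock, then reloads the in-process state via load_memory(). A minimal standalone sketch of the same lock-then-write pattern, assuming the third-party filelock package and a hypothetical persist directory (base_persist_dir and the surrounding module are not reproduced here):

import json
import os

from filelock import FileLock, Timeout  # third-party: pip install filelock

PERSIST_DIR = "/tmp/demo_persist"  # hypothetical; stands in for base_persist_dir


def save_state(new_state: dict) -> None:
    """Replace memory.json, serialized against other holders of the .lock file."""
    os.makedirs(PERSIST_DIR, exist_ok=True)
    memory_path = os.path.join(PERSIST_DIR, "memory.json")
    lock_path = memory_path + ".lock"
    try:
        # Same 30-second budget as the diff; concurrent writers queue on the lock.
        with FileLock(lock_path, timeout=30):
            with open(memory_path, "w", encoding="utf-8") as f:
                json.dump(new_state, f, indent=2, ensure_ascii=False)
    except Timeout:
        raise RuntimeError(f"could not acquire {lock_path} within 30 seconds")


save_state({"example": True})

Note that the lock only coordinates writers that honor the same .lock file; it does not make the write crash-atomic by itself (a write-to-temp-file-then-os.replace step would be needed for that).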
autocoder/rag/long_context_rag.py
CHANGED

@@ -539,7 +539,7 @@ class LongContextRAG:
                 generated_tokens_count=rag_stat.recall_stat.total_generated_tokens + rag_stat.chunk_stat.total_generated_tokens,
                 reasoning_content=get_message_with_format_and_newline(
                     "context_docs_names",
-                    context_docs_names="
+                    context_docs_names="*****"
                 )
             ))
 
@@ -676,6 +676,8 @@ class LongContextRAG:
     def _process_document_retrieval(self, conversations,
                                     query, rag_stat):
         """Stage 1: document recall and filtering"""
+        recall_start_time = time.time()  # record when the recall stage starts
+
         yield ("", SingleOutputMeta(
             input_tokens_count=0,
             generated_tokens_count=0,
@@ -716,6 +718,7 @@ class LongContextRAG:
         rag_stat.recall_stat.total_input_tokens += sum(doc_filter_result.input_tokens_counts)
         rag_stat.recall_stat.total_generated_tokens += sum(doc_filter_result.generated_tokens_counts)
         rag_stat.recall_stat.model_name = doc_filter_result.model_name
+        rag_stat.recall_stat.duration = time.time() - recall_start_time  # record recall stage elapsed time
 
         relevant_docs = doc_filter_result.docs
 
@@ -724,7 +727,7 @@ class LongContextRAG:
             generated_tokens_count=rag_stat.recall_stat.total_generated_tokens,
             reasoning_content=get_message_with_format_and_newline(
                 "rag_docs_filter_result",
-                filter_time=
+                filter_time=rag_stat.recall_stat.duration,  # use the measured elapsed time
                 docs_num=len(relevant_docs),
                 input_tokens=rag_stat.recall_stat.total_input_tokens,
                 output_tokens=rag_stat.recall_stat.total_generated_tokens,
@@ -743,6 +746,8 @@ class LongContextRAG:
 
     def _process_document_chunking(self, relevant_docs, conversations, rag_stat, filter_time):
         """Stage 2: document chunking and reranking"""
+        chunk_start_time = time.time()  # record when the chunking stage starts
+
         yield ("", SingleOutputMeta(
             generated_tokens_count=0,
             reasoning_content=get_message_with_format_and_newline(
@@ -785,6 +790,8 @@ class LongContextRAG:
             # without a tokenizer, just cap the number of documents
             final_relevant_docs = relevant_docs[: self.args.index_filter_file_num]
 
+        rag_stat.chunk_stat.duration = time.time() - chunk_start_time  # record chunking stage elapsed time
+
         # emit chunking result statistics
         yield ("", SingleOutputMeta(
             generated_tokens_count=rag_stat.chunk_stat.total_generated_tokens + rag_stat.recall_stat.total_generated_tokens,
@@ -819,6 +826,7 @@ class LongContextRAG:
                           llm_config={},
                           extra_request_params={}):
         """Stage 3: LLM answer generation"""
+        answer_start_time = time.time()  # record when the answer-generation stage starts
 
         # enhanced processing via LLMComputeEngine (if available)
         if LLMComputeEngine is not None and not self.args.disable_inference_enhance:
@@ -853,6 +861,7 @@ class LongContextRAG:
                     rag_stat.chunk_stat.total_generated_tokens + \
                     rag_stat.answer_stat.total_generated_tokens
                 yield chunk
+            rag_stat.answer_stat.duration = time.time() - answer_start_time  # record answer-generation elapsed time
         else:
             # regular QA processing path
             qa_strategy = get_qa_strategy(self.args)
@@ -892,6 +901,7 @@ class LongContextRAG:
                     rag_stat.chunk_stat.total_generated_tokens + \
                     rag_stat.answer_stat.total_generated_tokens
                 yield chunk
+            rag_stat.answer_stat.duration = time.time() - answer_start_time  # record answer-generation elapsed time
 
     def _print_rag_stats(self, rag_stat: RAGStat, conversations: Optional[List[Dict[str, str]]] = None) -> None:
         """Print detailed statistics for the RAG run"""
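The pattern repeated across all three stages above: sample time.time() when the stage method is entered, and assign the difference to the stage's stat object once its final counts are in. A reduced sketch of that pattern with a hypothetical StageStat stand-in (the real models are the pydantic classes in autocoder/rag/types.py):

import time
from dataclasses import dataclass
from typing import Iterator, List


@dataclass
class StageStat:  # hypothetical stand-in for RecallStat / ChunkStat / AnswerStat
    total_generated_tokens: int = 0
    duration: float = 0.0


def run_stage(stat: StageStat, items: List[str]) -> Iterator[str]:
    start = time.time()  # sample once on entry, as the diff does
    for item in items:
        stat.total_generated_tokens += 1
        yield f"processed {item}"
    # assign elapsed wall-clock seconds after the last item is yielded
    stat.duration = time.time() - start


stat = StageStat()
for chunk in run_stage(stat, ["a", "b"]):
    pass
print(f"stage took {stat.duration:.2f}s over {stat.total_generated_tokens} chunks")

Because the stages are generators, the recorded duration also includes whatever time the consumer spends between next() calls, i.e. it measures end-to-end wall-clock latency for the stage rather than pure compute time.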
@@ -906,6 +916,17 @@ class LongContextRAG:
             rag_stat.answer_stat.total_generated_tokens
         )
         total_tokens = total_input_tokens + total_generated_tokens
+
+        # compute the total elapsed time
+        total_duration = (
+            rag_stat.recall_stat.duration +
+            rag_stat.chunk_stat.duration +
+            rag_stat.answer_stat.duration
+        )
+
+        # add the durations of the other stages (if any)
+        if rag_stat.other_stats:
+            total_duration += sum(other_stat.duration for other_stat in rag_stat.other_stats)
 
         # avoid division-by-zero errors
         if total_tokens == 0:
@@ -926,6 +947,20 @@ class LongContextRAG:
                 other_stat.total_generated_tokens) / total_tokens * 100
             other_percents.append(other_percent)
 
+        # compute the elapsed-time distribution percentages
+        if total_duration == 0:
+            recall_duration_percent = chunk_duration_percent = answer_duration_percent = 0
+        else:
+            recall_duration_percent = rag_stat.recall_stat.duration / total_duration * 100
+            chunk_duration_percent = rag_stat.chunk_stat.duration / total_duration * 100
+            answer_duration_percent = rag_stat.answer_stat.duration / total_duration * 100
+
+        # compute the elapsed-time share of the other stages
+        other_duration_percents = []
+        if total_duration > 0 and rag_stat.other_stats:
+            for other_stat in rag_stat.other_stats:
+                other_duration_percents.append(other_stat.duration / total_duration * 100)
+
         # compute the cost distribution percentages
         if rag_stat.cost == 0:
             recall_cost_percent = chunk_cost_percent = answer_cost_percent = 0
@@ -957,6 +992,7 @@ class LongContextRAG:
             f" * 输入令牌总数: {total_input_tokens}\n"
             f" * 生成令牌总数: {total_generated_tokens}\n"
             f" * 总成本: {rag_stat.cost:.6f}\n"
+            f" * 总耗时: {total_duration:.2f} 秒\n"
             f"\n"
             f"阶段统计:\n"
             f" 1. 文档检索阶段:\n"
@@ -965,6 +1001,7 @@ class LongContextRAG:
             f" - 生成令牌: {rag_stat.recall_stat.total_generated_tokens}\n"
             f" - 阶段总计: {rag_stat.recall_stat.total_input_tokens + rag_stat.recall_stat.total_generated_tokens}\n"
             f" - 阶段成本: {rag_stat.recall_stat.cost:.6f}\n"
+            f" - 阶段耗时: {rag_stat.recall_stat.duration:.2f} 秒\n"
             f"\n"
             f" 2. 文档分块阶段:\n"
             f" - 模型: {rag_stat.chunk_stat.model_name}\n"
@@ -972,6 +1009,7 @@ class LongContextRAG:
             f" - 生成令牌: {rag_stat.chunk_stat.total_generated_tokens}\n"
             f" - 阶段总计: {rag_stat.chunk_stat.total_input_tokens + rag_stat.chunk_stat.total_generated_tokens}\n"
             f" - 阶段成本: {rag_stat.chunk_stat.cost:.6f}\n"
+            f" - 阶段耗时: {rag_stat.chunk_stat.duration:.2f} 秒\n"
             f"\n"
             f" 3. 答案生成阶段:\n"
             f" - 模型: {rag_stat.answer_stat.model_name}\n"
@@ -979,6 +1017,7 @@ class LongContextRAG:
             f" - 生成令牌: {rag_stat.answer_stat.total_generated_tokens}\n"
             f" - 阶段总计: {rag_stat.answer_stat.total_input_tokens + rag_stat.answer_stat.total_generated_tokens}\n"
             f" - 阶段成本: {rag_stat.answer_stat.cost:.6f}\n"
+            f" - 阶段耗时: {rag_stat.answer_stat.duration:.2f} 秒\n"
             f"\n"
         )
 
@@ -992,6 +1031,7 @@ class LongContextRAG:
             f" - 生成令牌: {other_stat.total_generated_tokens}\n"
             f" - 阶段总计: {other_stat.total_input_tokens + other_stat.total_generated_tokens}\n"
             f" - 阶段成本: {other_stat.cost:.6f}\n"
+            f" - 阶段耗时: {other_stat.duration:.2f} 秒\n"
             f"\n"
         )
 
@@ -1009,6 +1049,21 @@ class LongContextRAG:
             if other_percent > 0:
                 stats_str += f" - 其他阶段 {i+1}: {other_percent:.1f}%\n"
 
+        # append the elapsed-time distribution percentages
+        stats_str += (
+            f"\n"
+            f"耗时分布百分比:\n"
+            f" - 文档检索: {recall_duration_percent:.1f}%\n"
+            f" - 文档分块: {chunk_duration_percent:.1f}%\n"
+            f" - 答案生成: {answer_duration_percent:.1f}%\n"
+        )
+
+        # if other_stats exist, append their elapsed-time share
+        if rag_stat.other_stats:
+            for i, other_duration_percent in enumerate(other_duration_percents):
+                if other_duration_percent > 0:
+                    stats_str += f" - 其他阶段 {i+1}: {other_duration_percent:.1f}%\n"
+
         # append the cost distribution percentages
         stats_str += (
             f"\n"
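_print_rag_stats mirrors its cost-distribution logic for durations, with the same division-by-zero guard and the same treatment of other_stats. A condensed sketch of that computation with hypothetical inputs (the function name and signature are illustrative, not the package's API):

from typing import Dict, List


def duration_percents(recall: float, chunk: float, answer: float,
                      others: List[float]) -> Dict[str, object]:
    """Share of total wall-clock time per stage, tolerating an all-zero run."""
    total = recall + chunk + answer + sum(others)
    if total == 0:  # e.g. stats requested before any stage has finished
        return {"recall": 0.0, "chunk": 0.0, "answer": 0.0,
                "others": [0.0] * len(others)}
    return {
        "recall": recall / total * 100,
        "chunk": chunk / total * 100,
        "answer": answer / total * 100,
        "others": [d / total * 100 for d in others],
    }


print(duration_percents(1.2, 0.3, 4.5, [0.1]))  # answer generation dominates here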
autocoder/rag/types.py
CHANGED

@@ -1,4 +1,3 @@
-
 import os
 import json
 import time
@@ -13,6 +12,7 @@ class RecallStat(BaseModel):
     total_generated_tokens: int
     model_name: str = "unknown"
     cost:float = 0.0
+    duration: float = 0.0
 
 
 class ChunkStat(BaseModel):
@@ -20,6 +20,7 @@ class ChunkStat(BaseModel):
     total_generated_tokens: int
     model_name: str = "unknown"
     cost:float = 0.0
+    duration: float = 0.0
 
 
 class AnswerStat(BaseModel):
@@ -27,6 +28,7 @@ class AnswerStat(BaseModel):
     total_generated_tokens: int
     model_name: str = "unknown"
     cost:float = 0.0
+    duration: float = 0.0
 
 
 class OtherStat(BaseModel):
@@ -34,6 +36,7 @@ class OtherStat(BaseModel):
    total_generated_tokens: int = 0
     model_name: str = "unknown"
     cost:float = 0.0
+    duration: float = 0.0
 
 
 class RAGStat(BaseModel):
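All four stat models gain the same defaulted duration field, so existing call sites that construct them without timing data keep validating. A minimal sketch of one model, assuming pydantic is installed (only RecallStat is spelled out; per the diff the other three differ only in their int defaults):

from pydantic import BaseModel


class RecallStat(BaseModel):
    total_input_tokens: int
    total_generated_tokens: int
    model_name: str = "unknown"  # pydantic v2 warns on the "model_" prefix but accepts it
    cost: float = 0.0
    duration: float = 0.0  # new in 0.1.385: stage wall-clock seconds


# Constructing without timing data still validates, thanks to the default:
stat = RecallStat(total_input_tokens=10, total_generated_tokens=2)
assert stat.duration == 0.0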
autocoder/version.py
CHANGED

@@ -1,2 +1,2 @@
 
-__version__ = "0.1.383"
+__version__ = "0.1.385"
{auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/LICENSE
File without changes

{auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/WHEEL
File without changes

{auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/entry_points.txt
File without changes

{auto_coder-0.1.383.dist-info → auto_coder-0.1.385.dist-info}/top_level.txt
File without changes