pro-craft 0.1.34__tar.gz → 0.1.35__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pro-craft might be problematic. Click here for more details.

Files changed (28):
  1. {pro_craft-0.1.34 → pro_craft-0.1.35}/PKG-INFO +1 -1
  2. {pro_craft-0.1.34 → pro_craft-0.1.35}/pyproject.toml +1 -1
  3. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/prompt_craft/async_.py +99 -141
  4. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft.egg-info/PKG-INFO +1 -1
  5. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft.egg-info/SOURCES.txt +0 -1
  6. pro_craft-0.1.34/src/pro_craft/prompt_craft/async_ copy.py +0 -1000
  7. {pro_craft-0.1.34 → pro_craft-0.1.35}/README.md +0 -0
  8. {pro_craft-0.1.34 → pro_craft-0.1.35}/setup.cfg +0 -0
  9. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/__init__.py +0 -0
  10. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/code_helper/coder.py +0 -0
  11. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/code_helper/designer.py +0 -0
  12. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/database.py +0 -0
  13. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/file_manager.py +0 -0
  14. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/log.py +0 -0
  15. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/prompt_craft/__init__.py +0 -0
  16. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/prompt_craft/new.py +0 -0
  17. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/prompt_craft/sync.py +0 -0
  18. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/server/mcp/__init__.py +0 -0
  19. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/server/mcp/prompt.py +0 -0
  20. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/server/router/__init__.py +0 -0
  21. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/server/router/prompt.py +0 -0
  22. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft/utils.py +0 -0
  23. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft.egg-info/dependency_links.txt +0 -0
  24. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft.egg-info/requires.txt +0 -0
  25. {pro_craft-0.1.34 → pro_craft-0.1.35}/src/pro_craft.egg-info/top_level.txt +0 -0
  26. {pro_craft-0.1.34 → pro_craft-0.1.35}/tests/test22.py +0 -0
  27. {pro_craft-0.1.34 → pro_craft-0.1.35}/tests/test_coder.py +0 -0
  28. {pro_craft-0.1.34 → pro_craft-0.1.35}/tests/test_designer.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pro-craft
3
- Version: 0.1.34
3
+ Version: 0.1.35
4
4
  Summary: Add your description here
5
5
  Requires-Python: >=3.12
6
6
  Description-Content-Type: text/markdown
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "pro-craft"
3
- version = "0.1.34"
3
+ version = "0.1.35"
4
4
  description = "Add your description here"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.12"
@@ -25,7 +25,9 @@ from datetime import datetime, timedelta
25
25
  from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
26
26
  from sqlalchemy import select, and_ # 引入 select 和 and_
27
27
  from sqlalchemy.orm import class_mapper # 用于检查对象是否是持久化的
28
-
28
+ import tqdm
29
+ from tqdm.asyncio import tqdm
30
+ import pandas as pd
29
31
 
30
32
  class IntellectRemoveFormatError(Exception):
31
33
  pass
@@ -166,6 +168,8 @@ class AsyncIntel():
166
168
  self.llm = ArkAdapter(model_name = model_name)
167
169
  else:
168
170
  raise Exception("error llm name")
171
+
172
+ self.df = pd.DataFrame({"name":[],'status':[],"score":[],"total":[],"bad_case":[]})
169
173
 
170
174
  async def create_specific_database(self):
171
175
  tables_to_create_names = ["ai_prompts","ai_usecase"]
@@ -570,7 +574,7 @@ class AsyncIntel():
570
574
  use_case = input_,
571
575
  timestamp = datetime.now(),
572
576
  output = ai_result,
573
- solution = "备注/理想回复",
577
+ solution = output_format,
574
578
  faired_time = 0,
575
579
  session = session,
576
580
  )
@@ -678,140 +682,7 @@ class AsyncIntel():
678
682
  raise
679
683
 
680
684
  return ai_result
681
-
682
- async def intellect_stream_remove(self,
683
- input_data: dict | str,
684
- output_format: str,
685
- prompt_id: str,
686
- version: str = None,
687
- inference_save_case = True,
688
- push_patch = False,
689
- ):
690
- if isinstance(input_data,dict):
691
- input_ = json.dumps(input_data,ensure_ascii=False)
692
- elif isinstance(input_data,str):
693
- input_ = input_data
694
-
695
685
 
696
- # 查数据库, 获取最新提示词对象
697
- with create_session(self.engine) as session:
698
- result_obj = await self.get_prompts_from_sql(prompt_id=prompt_id,session=session)
699
-
700
- '''
701
- if result_obj is None:
702
- await self.save_prompt_increment_version(
703
- prompt_id = prompt_id,
704
- new_prompt = "做一些处理",
705
- use_case = input_,
706
- session = session
707
- )
708
- ai_result = await self.intellect_stream_remove(input_data = input_data,
709
- output_format = output_format,
710
- prompt_id = prompt_id,
711
- version = version,
712
- inference_save_case = inference_save_case
713
- )
714
- return ai_result'''
715
-
716
- prompt = result_obj.prompt
717
- if result_obj.action_type == "inference":
718
- # 直接推理即可
719
-
720
- ai_generate_result = self.llm.aproduct_stream(prompt + output_format + "\n-----input----\n" + input_)
721
- ai_result = ""
722
- async for word in ai_generate_result:
723
- ai_result += word
724
- yield word
725
- if inference_save_case:
726
- await self.save_use_case_by_sql(prompt_id,
727
- use_case = input_,
728
- timestamp = datetime.now(),
729
- output = ai_result,
730
- solution = "备注/理想回复",
731
- faired_time = 0,
732
- session = session,
733
- )
734
-
735
- elif result_obj.action_type == "train":
736
- assert result_obj.demand # 如果type = train 且 demand 是空 则报错
737
- # 则训练推广
738
-
739
- # 新版本 默人修改会 inference 状态
740
- chat_history = prompt
741
- before_input = result_obj.use_case
742
- demand = result_obj.demand
743
-
744
-
745
- assert demand
746
- # 注意, 这里的调整要求使用最初的那个输入, 最好一口气调整好
747
- chat_history = prompt
748
- if input_ == before_input: # 输入没变, 说明还是针对同一个输入进行讨论
749
- # input_prompt = chat_history + "\nuser:" + demand
750
- input_prompt = chat_history + "\nuser:" + demand + output_format
751
- else:
752
- # input_prompt = chat_history + "\nuser:" + demand + "\n-----input----\n" + input_
753
- input_prompt = chat_history + "\nuser:" + demand + output_format + "\n-----input----\n" + input_
754
-
755
- ai_generate_result = self.llm.aproduct_stream(input_prompt)
756
- ai_result = ""
757
- async for word in ai_generate_result:
758
- ai_result += word
759
- yield word
760
-
761
- chat_history = input_prompt + "\nassistant:\n" + ai_result # 用聊天记录作为完整提示词
762
- await self.save_prompt_increment_version(prompt_id, chat_history,
763
- use_case = input_,
764
- score = 60,
765
- session = session)
766
-
767
-
768
- elif result_obj.action_type == "summary":
769
-
770
- await self.summary_to_sql(prompt_id = prompt_id,
771
- prompt = prompt,
772
- session = session
773
- )
774
- input_prompt = prompt + output_format + "\n-----input----\n" + input_
775
- ai_generate_result = self.llm.aproduct_stream(input_prompt)
776
- ai_result = ""
777
- async for word in ai_generate_result:
778
- ai_result += word
779
- yield word
780
-
781
- elif result_obj.action_type == "finetune":
782
- demand = result_obj.demand
783
-
784
- assert demand
785
- await self.prompt_finetune_to_sql(prompt_id = prompt_id,
786
- demand = demand,
787
- session = session
788
- )
789
- input_prompt = prompt + output_format + "\n-----input----\n" + input_
790
- ai_generate_result = self.llm.aproduct_stream(input_prompt)
791
- ai_result = ""
792
- async for word in ai_generate_result:
793
- ai_result += word
794
- yield word
795
-
796
- elif result_obj.action_type == "patch":
797
-
798
- demand = result_obj.demand
799
- assert demand
800
-
801
- chat_history = prompt + demand
802
- ai_generate_result = self.llm.aproduct_stream(chat_history + output_format + "\n-----input----\n" + input_)
803
- ai_result = ""
804
- async for word in ai_generate_result:
805
- ai_result += word
806
- yield word
807
- if push_patch:
808
- self.save_prompt_increment_version(prompt_id, chat_history,
809
- use_case = input_,
810
- score = 60,
811
- session = session)
812
- else:
813
- raise
814
-
815
686
  async def intellect_remove_format(self,
816
687
  input_data: dict | str,
817
688
  OutputFormat: object,
@@ -861,7 +732,16 @@ class AsyncIntel():
861
732
 
862
733
  except Exception as e:
863
734
  raise Exception(f"Error {prompt_id} : {e}") from e
864
-
735
+
736
+ # finally:
737
+ # await self.save_use_case_by_sql(prompt_id,
738
+ # use_case = input_data,
739
+ # timestamp = datetime.now(),
740
+ # output = ai_result,
741
+ # solution = output_format,
742
+ # faired_time = 1,
743
+ # session = session,
744
+ # )
865
745
  return ai_result
866
746
 
867
747
  async def intellect_remove_formats(self,
@@ -928,7 +808,8 @@ class AsyncIntel():
928
808
  prompt_id: str,
929
809
  ExtraFormats: list[object] = [],
930
810
  version: str = None,
931
- MIN_SUCCESS_RATE = 80.0
811
+ MIN_SUCCESS_RATE = 80.0,
812
+ ConTent_Function = None,
932
813
  ):
933
814
 
934
815
  async with create_async_session(self.engine) as session:
@@ -958,6 +839,8 @@ class AsyncIntel():
958
839
  # TODO base_eval
959
840
  # TODO 人类评价 eval
960
841
  # TODO llm 评价 eval
842
+ if ConTent_Function:
843
+ ConTent_Function()
961
844
  result_cases.append({"type":"Successful","case":use_case.use_case,"reply":f"pass"})
962
845
  use_case.output = "Successful"
963
846
  except IntellectRemoveFormatError as e:
@@ -969,7 +852,7 @@ class AsyncIntel():
969
852
  await session.commit()
970
853
 
971
854
  tasks = []
972
- for use_case in use_cases:
855
+ for use_case in tqdm.tqdm(use_cases):
973
856
  tasks.append(
974
857
  evals_func(
975
858
  use_case = use_case,
@@ -979,7 +862,8 @@ class AsyncIntel():
979
862
  version = version
980
863
  )
981
864
  )
982
- await asyncio.gather(*tasks, return_exceptions=False)
865
+ await tqdm.gather(*tasks,total=len(tasks))
866
+ # await asyncio.gather(*tasks, return_exceptions=False)
983
867
 
984
868
 
985
869
  successful_assertions = 0
@@ -993,8 +877,82 @@ class AsyncIntel():
993
877
  success_rate = (successful_assertions / total_assertions) * 100
994
878
 
995
879
  if success_rate >= MIN_SUCCESS_RATE:
996
- return "通过", success_rate, total_assertions, json.dumps(bad_case,ensure_ascii=False),
880
+ return "通过", success_rate, str(total_assertions), json.dumps(bad_case,ensure_ascii=False),
997
881
  else:
998
- return "未通过",success_rate, total_assertions, json.dumps(bad_case,ensure_ascii=False),
882
+ return "未通过",success_rate, str(total_assertions), json.dumps(bad_case,ensure_ascii=False),
883
+
884
+
885
+
886
+ def draw_data(self):
887
+ df = self.df
888
+ # --- 可视化部分 ---
889
+ fig = go.Figure()
890
+
891
+ # 为每个条形图动态设置颜色
892
+ colors = []
893
+ for status_val in df['status']:
894
+ if status_val == '通过':
895
+ colors.append('mediumseagreen') # 通过为绿色
896
+ else: # 假设其他所有状态都视为“未通过”
897
+ colors.append('lightcoral') # 未通过为红色
898
+
899
+ fig.add_trace(go.Bar(
900
+ y=df['name'], # Y轴显示项目名称
901
+ x=df['score'], # X轴显示通过百分比 (score列现在代表通过百分比)
902
+ orientation='h', # 设置为横向
903
+ name='通过率', # 这个 name 可能会在图例中显示
904
+ marker_color=colors, # !!! 这里根据 status 动态设置颜色 !!!
905
+ text=df['score'].apply(lambda x: f'{x:.2f}%'), # 在条形图上显示百分比文本
906
+ textposition='inside',
907
+ insidetextanchor='middle',
908
+ hovertemplate="<b>prompt:</b> %{y}<br><b>状态:</b> " + df['status'] + "<br><b>总量:</b> "+ df['total'] + "<br><b>通过百分比:</b> %{x:.2f}%<extra></extra>"
909
+ ))
910
+
911
+ # 添加一个辅助的条形图作为背景,表示总的100%
912
+ fig.add_trace(go.Bar(
913
+ y=df['name'],
914
+ x=[100] * len(df), # 所有项目都填充到100%
915
+ orientation='h',
916
+ name='总计',
917
+ marker_color='lightgray', # 背景用灰色
918
+ hoverinfo='none', # 不显示hover信息
919
+ opacity=0.5, # 设置透明度
920
+ showlegend=False # 不显示图例
921
+ ))
922
+
923
+ fig.update_layout(
924
+ title='各项目/批次通过百分比及状态',
925
+ xaxis=dict(
926
+ title='通过百分比 (%)',
927
+ range=[0, 100], # X轴范围0-100
928
+ tickvals=[0, 25, 50, 75, 100],
929
+ showgrid=True,
930
+ gridcolor='lightgray'
931
+ ),
932
+ yaxis=dict(
933
+ title='项目/批次',
934
+ autorange="reversed"
935
+ ),
936
+ barmode='overlay', # 仍使用 overlay 模式,因为背景条是独立的
937
+ hovermode="y unified",
938
+ margin=dict(l=100, r=20, t=60, b=50),
939
+ height=400 + len(df) * 30
940
+ )
941
+
942
+ fig.show()
943
+ pass
944
+
945
+ async def _evals(self,prompt_id, OutputFormat, ExtraFormats_list = [],**kwargs):
946
+
947
+ status,score, total, bad_case = await self.intellect_remove_format_eval(
948
+ prompt_id=prompt_id,
949
+ OutputFormat = OutputFormat,
950
+ ExtraFormats = ExtraFormats_list,
951
+ version = None,
952
+ **kwargs
953
+ )
954
+ self.df.loc[len(self.df)] = {"name":prompt_id,
955
+ 'status':status,"score":score,
956
+ "total":total,"bad_case":bad_case}
999
957
 
1000
958
  # 整体测试d, 测试未通过d, 大模型调整再测试, 依旧不通过, 大模型裂变, 仍不通过, 互换人力
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pro-craft
3
- Version: 0.1.34
3
+ Version: 0.1.35
4
4
  Summary: Add your description here
5
5
  Requires-Python: >=3.12
6
6
  Description-Content-Type: text/markdown
@@ -13,7 +13,6 @@ src/pro_craft.egg-info/top_level.txt
13
13
  src/pro_craft/code_helper/coder.py
14
14
  src/pro_craft/code_helper/designer.py
15
15
  src/pro_craft/prompt_craft/__init__.py
16
- src/pro_craft/prompt_craft/async_ copy.py
17
16
  src/pro_craft/prompt_craft/async_.py
18
17
  src/pro_craft/prompt_craft/new.py
19
18
  src/pro_craft/prompt_craft/sync.py