pro-craft 0.1.17__py3-none-any.whl → 0.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of pro-craft might be problematic; consult the registry's advisory page for more details.

@@ -349,12 +349,7 @@ class AsyncIntel():
349
349
  score = 60,
350
350
  session = session
351
351
  )
352
- ai_result = await self.intellect_remove(input_data = input_data,
353
- output_format = output_format,
354
- prompt_id = prompt_id,
355
- version = version,
356
- inference_save_case = inference_save_case
357
- )
352
+ ai_result = "初始化完成"
358
353
  return ai_result
359
354
 
360
355
  prompt = result_obj.prompt
@@ -379,22 +374,39 @@ class AsyncIntel():
379
374
  demand = result_obj.demand
380
375
 
381
376
 
382
- assert demand
383
- # 注意, 这里的调整要求使用最初的那个输入, 最好一口气调整好
384
- chat_history = prompt
385
- if input_ == before_input: # 输入没变, 说明还是针对同一个输入进行讨论
386
- # input_prompt = chat_history + "\nuser:" + demand
387
- input_prompt = chat_history + "\nuser:" + demand + output_format
388
- else:
389
- # input_prompt = chat_history + "\nuser:" + demand + "\n-----input----\n" + input_
390
- input_prompt = chat_history + "\nuser:" + demand + output_format + "\n-----input----\n" + input_
377
+ # assert demand
378
+ # # 注意, 这里的调整要求使用最初的那个输入, 最好一口气调整好
379
+ # chat_history = prompt
380
+ # if input_ == before_input: # 输入没变, 说明还是针对同一个输入进行讨论
381
+ # # input_prompt = chat_history + "\nuser:" + demand
382
+ # input_prompt = chat_history + "\nuser:" + demand + output_format
383
+ # else:
384
+ # # input_prompt = chat_history + "\nuser:" + demand + "\n-----input----\n" + input_
385
+ # input_prompt = chat_history + "\nuser:" + demand + output_format + "\n-----input----\n" + input_
391
386
 
392
- ai_result = await self.llm.aproduct(input_prompt)
393
- chat_history = input_prompt + "\nassistant:\n" + ai_result # 用聊天记录作为完整提示词
394
- await self.save_prompt_increment_version(prompt_id, chat_history,
395
- use_case = input_,
396
- score = 60,
397
- session = session)
387
+ # ai_result = await self.llm.aproduct(input_prompt)
388
+ # chat_history = input_prompt + "\nassistant:\n" + ai_result # 用聊天记录作为完整提示词
389
+ # await self.save_prompt_increment_version(prompt_id, chat_history,
390
+ # use_case = input_,
391
+ # score = 60,
392
+ # session = session)
393
+
394
+ if input_ == before_input:
395
+ new_prompt = prompt + "\nuser:" + demand
396
+ else:
397
+ new_prompt = prompt + "\nuser:" + input_
398
+
399
+ ai_result = await self.llm.aproduct(new_prompt + output_format)
400
+
401
+ save_new_prompt = new_prompt + "\nassistant:\n" + ai_result
402
+
403
+
404
+ await self.save_prompt_increment_version(
405
+ prompt_id,
406
+ new_prompt=save_new_prompt,
407
+ use_case = input_,
408
+ score = 60,
409
+ session = session)
398
410
 
399
411
  elif result_obj.action_type == "summary":
400
412
 
@@ -514,6 +526,7 @@ class AsyncIntel():
514
526
  use_case = input_,
515
527
  score = 60,
516
528
  session = session)
529
+
517
530
 
518
531
  elif result_obj.action_type == "summary":
519
532
 
@@ -419,24 +419,35 @@ class Intel():
419
419
  # 则训练推广
420
420
 
421
421
  # 新版本 默人修改会 inference 状态
422
- chat_history = prompt
422
+ prompt = result_obj.prompt
423
423
  before_input = result_obj.use_case
424
424
  demand = result_obj.demand
425
+
426
+ # assert demand
427
+ # # 注意, 这里的调整要求使用最初的那个输入, 最好一口气调整好
428
+
429
+ # if input_ == before_input: # 输入没变, 说明还是针对同一个输入进行讨论
430
+ # # input_prompt = chat_history + "\nuser:" + demand
431
+ # input_prompt = chat_history + "\nuser:" + demand + output_format
432
+ # else:
433
+ # # input_prompt = chat_history + "\nuser:" + demand + "\n-----input----\n" + input_
434
+ # input_prompt = chat_history + "\nuser:" + demand + output_format + "\n-----input----\n" + input_
425
435
 
436
+ # ai_result = self.llm.product(input_prompt)
437
+ # chat_history = input_prompt + "\nassistant:\n" + ai_result # 用聊天记录作为完整提示词
426
438
 
427
- assert demand
428
- # 注意, 这里的调整要求使用最初的那个输入, 最好一口气调整好
429
- chat_history = prompt
430
- if input_ == before_input: # 输入没变, 说明还是针对同一个输入进行讨论
431
- # input_prompt = chat_history + "\nuser:" + demand
432
- input_prompt = chat_history + "\nuser:" + demand + output_format
439
+ if input_ == before_input:
440
+ new_prompt = prompt + "\nuser:" + demand
433
441
  else:
434
- # input_prompt = chat_history + "\nuser:" + demand + "\n-----input----\n" + input_
435
- input_prompt = chat_history + "\nuser:" + demand + output_format + "\n-----input----\n" + input_
436
-
437
- ai_result = self.llm.product(input_prompt)
438
- chat_history = input_prompt + "\nassistant:\n" + ai_result # 用聊天记录作为完整提示词
439
- self.save_prompt_increment_version(prompt_id, chat_history,
442
+ new_prompt = prompt + "\nuser:" + input_
443
+
444
+ ai_result = self.llm.product(new_prompt + output_format)
445
+
446
+ save_new_prompt = new_prompt + "\nassistant:\n" + ai_result
447
+
448
+
449
+ self.save_prompt_increment_version(prompt_id,
450
+ new_prompt=save_new_prompt,
440
451
  use_case = input_,
441
452
  score = 60,
442
453
  session = session)
@@ -66,7 +66,7 @@ def create_router(database_url: str,
66
66
  intels.save_prompt_increment_version(
67
67
  prompt_id = prompt_id,
68
68
  new_prompt = result.prompt,
69
- use_case = "roll_back",
69
+ use_case = result.use_case,
70
70
  action_type = "inference",
71
71
  demand = "",
72
72
  score = 61,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pro-craft
3
- Version: 0.1.17
3
+ Version: 0.1.19
4
4
  Summary: Add your description here
5
5
  Requires-Python: >=3.12
6
6
  Description-Content-Type: text/markdown
@@ -6,15 +6,15 @@ pro_craft/utils.py,sha256=R1DFkS4dsm5dIhg8lLTgBBvItvIYyyojROdh-ykqiYk,5250
6
6
  pro_craft/code_helper/coder.py,sha256=L6pRQr0pYRIHrMFZ4-pO_tZf1koxgGgF3L7Vl-GIyjM,24687
7
7
  pro_craft/code_helper/designer.py,sha256=3gyCqrjcw61sHzDjUPKhL1LOAE8xWLLbNT8NlK2mFLc,4739
8
8
  pro_craft/prompt_craft/__init__.py,sha256=83ruWO1Oci-DWvdVhPqcQrgdZTNfbmK72VQCkWASk7A,80
9
- pro_craft/prompt_craft/async_.py,sha256=iX1Ok2SrfU2jIR9ScgDoXdLN43lsVSdLpGArP_Hg3wc,27519
9
+ pro_craft/prompt_craft/async_.py,sha256=mMyXg3WuzjlB09GXHW2x-37hPzPirspymmHcfjH8Uu4,27799
10
10
  pro_craft/prompt_craft/evals.py,sha256=XzaaQgA-Vgo8MNPYTdN0hyFJNmgTvw2jroWmeA02pBs,1847
11
11
  pro_craft/prompt_craft/new.py,sha256=cHugfhLNUMQk_l8JQBg5ZQXPYXqspexnCOgp3YUzoD0,25894
12
- pro_craft/prompt_craft/sync.py,sha256=uuVJ3eT3Ukavh0kX_v6GXXeZEd7JhlXiEUpqtLHCfXE,25454
12
+ pro_craft/prompt_craft/sync.py,sha256=-rY1KxAP5FhxI6Vmm2hHoLvYMmtL7rbLcFQMqD9g8ZU,25841
13
13
  pro_craft/server/mcp/__init__.py,sha256=4dbl-lFcm0r2tkOP04OxqiZG2jR-rqF181qi2AfU6UA,123
14
14
  pro_craft/server/mcp/prompt.py,sha256=OZrsyUfSQMOY_KX7dWthW209adz5JfELsQ0ODfuQR44,1245
15
15
  pro_craft/server/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
16
- pro_craft/server/router/prompt.py,sha256=qbwZPL-F_zFwBo9ua46aYyfgShlxfxW5VcoHn5kgYis,2765
17
- pro_craft-0.1.17.dist-info/METADATA,sha256=9X9WeFYQzgbYCwVd9GSuilC1w46MTBZl_34HUR_QLXs,1800
18
- pro_craft-0.1.17.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
19
- pro_craft-0.1.17.dist-info/top_level.txt,sha256=yqYDHArnYMWpeCxkmGRwlL6sJtxiOUnYylLDx9EOgFg,10
20
- pro_craft-0.1.17.dist-info/RECORD,,
16
+ pro_craft/server/router/prompt.py,sha256=81Pid2-e7cxY5qyUjUbvNmtjeZFW9okTBddcqm90vgY,2769
17
+ pro_craft-0.1.19.dist-info/METADATA,sha256=pMku1LZB3Bf6RlXYD8Hh2ehWLTQSNjupZNoUYJwOtjs,1800
18
+ pro_craft-0.1.19.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
19
+ pro_craft-0.1.19.dist-info/top_level.txt,sha256=yqYDHArnYMWpeCxkmGRwlL6sJtxiOUnYylLDx9EOgFg,10
20
+ pro_craft-0.1.19.dist-info/RECORD,,