pro-craft 0.1.23.tar.gz → 0.1.25.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pro-craft might be problematic.
Files changed (28)
  1. {pro_craft-0.1.23 → pro_craft-0.1.25}/PKG-INFO +1 -1
  2. {pro_craft-0.1.23 → pro_craft-0.1.25}/pyproject.toml +1 -1
  3. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/file_manager.py +2 -2
  4. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/prompt_craft/async_.py +208 -15
  5. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/prompt_craft/sync.py +22 -17
  6. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/server/router/prompt.py +12 -9
  7. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft.egg-info/PKG-INFO +1 -1
  8. {pro_craft-0.1.23 → pro_craft-0.1.25}/README.md +0 -0
  9. {pro_craft-0.1.23 → pro_craft-0.1.25}/setup.cfg +0 -0
  10. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/__init__.py +0 -0
  11. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/code_helper/coder.py +0 -0
  12. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/code_helper/designer.py +0 -0
  13. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/database.py +0 -0
  14. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/log.py +0 -0
  15. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/prompt_craft/__init__.py +0 -0
  16. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/prompt_craft/evals.py +0 -0
  17. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/prompt_craft/new.py +0 -0
  18. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/server/mcp/__init__.py +0 -0
  19. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/server/mcp/prompt.py +0 -0
  20. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/server/router/__init__.py +0 -0
  21. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft/utils.py +0 -0
  22. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft.egg-info/SOURCES.txt +0 -0
  23. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft.egg-info/dependency_links.txt +0 -0
  24. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft.egg-info/requires.txt +0 -0
  25. {pro_craft-0.1.23 → pro_craft-0.1.25}/src/pro_craft.egg-info/top_level.txt +0 -0
  26. {pro_craft-0.1.23 → pro_craft-0.1.25}/tests/test22.py +0 -0
  27. {pro_craft-0.1.23 → pro_craft-0.1.25}/tests/test_coder.py +0 -0
  28. {pro_craft-0.1.23 → pro_craft-0.1.25}/tests/test_designer.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pro-craft
- Version: 0.1.23
+ Version: 0.1.25
  Summary: Add your description here
  Requires-Python: >=3.12
  Description-Content-Type: text/markdown
@@ -1,6 +1,6 @@
  [project]
  name = "pro-craft"
- version = "0.1.23"
+ version = "0.1.25"
  description = "Add your description here"
  readme = "README.md"
  requires-python = ">=3.12"
@@ -7,7 +7,7 @@ from pro_craft import Intel
  from enum import Enum
  import json

- from pro_craft.database import Base, Prompt
+ from pro_craft.database import Prompt
  from sqlalchemy import create_engine, Column, Integer, String
  from sqlalchemy.orm import sessionmaker, declarative_base
  from pro_craft.utils import create_session
@@ -30,7 +30,7 @@ class ContentManager():
  self.qdrant = QdrantManager(host = "localhost")
  self.neo = None

-
+
  @intel.intellect_2(IntellectType.inference,
  prompt_id = "db_help_001",
  demand="""
@@ -16,9 +16,16 @@ from datetime import datetime
  from pro_craft.utils import extract_
  import asyncio
  import re
-
+ from pydantic import BaseModel, ValidationError, field_validator
  from sqlalchemy import select, desc
  from json.decoder import JSONDecodeError
+ from pro_craft.database import SyncMetadata
+ from datetime import datetime, timedelta
+ from datetime import datetime, timedelta
+ from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
+ from sqlalchemy import select, and_  # import select and and_
+ from sqlalchemy.orm import class_mapper  # used to check whether an object is persistent
+

  class IntellectRemoveFormatError(Exception):
  pass
@@ -26,6 +33,7 @@ class IntellectRemoveFormatError(Exception):
  class IntellectRemoveError(Exception):
  pass

+ BATCH_SIZE = 100

  def slog(s, target: str = "target",logger = None):
  COLOR_GREEN = "\033[92m"
@@ -62,6 +70,64 @@ def fix_broken_json_string(broken_json_str):
  return fixed_json_str


+ # def get_last_sync_time(target_session) -> datetime:
+ #     """Fetch the last sync time from the target database."""
+ #     metadata_entry = target_session.query(SyncMetadata).filter_by(table_name="sync_metadata").first()
+ #     if metadata_entry:
+ #         return metadata_entry.last_sync_time
+ #     return datetime(1970, 1, 1)  # default to a very early time
+
+ # def update_last_sync_time(target_session, new_sync_time: datetime):
+ #     """Update the last sync time in the target database."""
+ #     metadata_entry = target_session.query(SyncMetadata).filter_by(table_name="sync_metadata").first()
+ #     if metadata_entry:
+ #         metadata_entry.last_sync_time = new_sync_time
+ #     else:
+ #         # create it if it does not exist
+ #         new_metadata = SyncMetadata(table_name="sync_metadata", last_sync_time=new_sync_time)
+ #         target_session.add(new_metadata)
+ #     target_session.commit()
+ #     print(f"Updated last sync time to: {new_sync_time}")
+
+
+ async def get_last_sync_time(target_session: AsyncSession) -> datetime:
+     """Fetch the last sync time from the target database."""
+     # Fix: use select() and execute()
+     result = await target_session.execute(
+         select(SyncMetadata).filter_by(table_name="sync_metadata")
+     )
+     metadata_entry = result.scalar_one_or_none()  # a single object or None
+
+     if metadata_entry:
+         return metadata_entry.last_sync_time
+     return datetime(1970, 1, 1)  # default to a very early time
+
+
+ # from your_module import SyncMetadata  # assumes SyncMetadata is already imported
+ # from sqlalchemy import select  # make sure select is imported
+
+ async def update_last_sync_time(target_session: AsyncSession, new_sync_time: datetime):
+     """Update the last sync time in the target database."""
+     # Fix: use select() and execute()
+     result = await target_session.execute(
+         select(SyncMetadata).filter_by(table_name="sync_metadata")
+     )
+     metadata_entry = result.scalar_one_or_none()
+
+     if metadata_entry:
+         metadata_entry.last_sync_time = new_sync_time
+     else:
+         # create it if it does not exist
+         new_metadata = SyncMetadata(table_name="sync_metadata", last_sync_time=new_sync_time)
+         target_session.add(new_metadata)
+
+     # commit the transaction asynchronously
+     await target_session.commit()  # TODO
+     print(f"Updated last sync time to: {new_sync_time}")
+
+
+
+
  class IntellectType(Enum):
  train = "train"
  inference = "inference"
@@ -146,6 +212,101 @@ class AsyncIntel():
 
  return specific_prompt
 
+     async def sync_prompt_data_to_database(self,database_url:str):
+         target_engine = create_async_engine(database_url, echo=False)
+         async with target_engine.begin() as conn:
+             await conn.run_sync(PromptBase.metadata.create_all)
+
+         async with create_async_session(self.engine) as source_session:
+             async with create_async_session(target_engine) as target_session:
+
+                 last_sync_time = await get_last_sync_time(target_session)
+                 print(f"Starting sync for sync_metadata from: {last_sync_time}")
+
+
+                 processed_count = 0
+                 #2 next_sync_watermark = last_sync_time
+                 current_batch_max_updated_at = last_sync_time
+
+                 while True:
+                     source_results = await source_session.execute(
+                         select(Prompt)
+                         .filter(Prompt.timestamp > last_sync_time)
+                         .order_by(Prompt.timestamp.asc(), Prompt.id.asc())
+                         .limit(BATCH_SIZE)
+                     )
+                     records_to_sync = source_results.scalars().all()
+                     if not records_to_sync:
+                         break  # no more records
+
+                     #2 max_timestamp_in_batch = datetime(1970, 1, 1)  # initialize to the earliest time
+
+                     # prepare the data to insert into or update in the target database
+                     for record in records_to_sync:
+                         # check whether a record with this id already exists in the target database
+                         # the `User` model here maps to target_db.users
+                         target_prompt_result = await target_session.execute(
+                             select(Prompt).filter_by(id=record.id)  # assumes prompt_id is the unique identifier
+                         )
+                         target_prompt = target_prompt_result.scalar_one_or_none()
+
+                         if target_prompt:
+                             # update it if it exists
+                             target_prompt.prompt_id = record.prompt_id
+                             target_prompt.version = record.version
+                             target_prompt.timestamp = record.timestamp
+                             target_prompt.prompt = record.prompt
+                             target_prompt.use_case = record.use_case
+                             target_prompt.action_type = record.action_type
+                             target_prompt.demand = record.demand
+                             target_prompt.score = record.score
+                             target_prompt.is_deleted = record.is_deleted
+                         else:
+                             # if it does not exist, add a new record
+                             # note: a new User instance must be created here, rather than adding the source database's record object directly
+                             new_prompt = Prompt(
+                                 prompt_id=record.prompt_id,
+                                 version=record.version,
+                                 timestamp=record.timestamp,
+                                 prompt = record.prompt,
+                                 use_case = record.use_case,
+                                 action_type = record.action_type,
+                                 demand = record.demand,
+                                 score = record.score,
+                                 is_deleted = record.is_deleted
+                             )
+                             target_session.add(new_prompt)
+
+                         # track the largest updated_at in the current batch
+                         #2
+                         # if record.timestamp > max_timestamp_in_batch:
+                         #     max_timestamp_in_batch = record.timestamp
+                         if record.timestamp > current_batch_max_updated_at:
+                             current_batch_max_updated_at = record.timestamp
+
+
+                     await target_session.commit()
+                     processed_count += len(records_to_sync)
+                     print(f"Processed {len(records_to_sync)} records. Total processed: {processed_count}")
+
+                     #2 next_sync_watermark = max_timestamp_in_batch + timedelta(microseconds=1)
+                     last_sync_time = current_batch_max_updated_at + timedelta(microseconds=1)
+
+
+                     if len(records_to_sync) < BATCH_SIZE:  # fewer records than the batch size means all matching records have been processed
+                         break
+
+                 if processed_count > 0:
+                     # finally write last_sync_time to the database, making sure it records the newest of all processed records
+                     await update_last_sync_time(target_session, current_batch_max_updated_at + timedelta(microseconds=1))
+
+                     #2 await update_last_sync_time(target_session, next_sync_watermark)
+
+                     await target_session.commit()  # make sure the final metadata update is also committed
+                 else:
+                     print("No new records to sync.")
+
+
 
  async def get_prompts_from_sql(self,
  prompt_id: str,
  version = None,
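
The new sync_prompt_data_to_database method copies Prompt rows whose timestamp is newer than the stored SyncMetadata watermark into a second database, in batches of BATCH_SIZE (100), and advances the watermark after each run. A minimal invocation sketch, not taken from the package docs; the constructor keyword arguments mirror those used in the router changes further down, and the URLs and model name are hypothetical:

import asyncio
from pro_craft import AsyncIntel

# Hypothetical values: any async SQLAlchemy URLs and a model name accepted by AsyncIntel.
intel = AsyncIntel(database_url="sqlite+aiosqlite:///./primary.db", model_name="gpt-4o-mini")
# Pushes new or changed Prompt rows from the primary database into the slave database.
asyncio.run(intel.sync_prompt_data_to_database("sqlite+aiosqlite:///./slave.db"))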
@@ -244,7 +405,7 @@ class AsyncIntel():
  chat_history = s_prompt or system_result
  await self.save_prompt_increment_version(prompt_id,
  new_prompt = chat_history,
- use_case = " summary ",
+ use_case = "",
  score = 60,
  session = session)
 
@@ -295,7 +456,7 @@ class AsyncIntel():
  new_prompt = prompt_
  await self.save_prompt_increment_version(prompt_id = prompt_id,
  new_prompt = new_prompt,
- use_case = " finetune ",
+ use_case = "",
  score = 60,
  session = session)
 
@@ -335,10 +496,6 @@ class AsyncIntel():
  return "init"


-
-
-
-

  async def intellect_remove(self,
  input_data: dict | str,
@@ -346,6 +503,7 @@ class AsyncIntel():
  prompt_id: str,
  version: str = None,
  inference_save_case = True,
+ change_case = False,
  ):
  if isinstance(input_data,dict):
  input_ = json.dumps(input_data,ensure_ascii=False)
@@ -361,7 +519,7 @@ class AsyncIntel():
  prompt = result_obj.prompt
  if result_obj.action_type == "inference":
  # run inference directly
- ai_result = await self.llm.aproduct(prompt + output_format + "\n-----input----\n" + input_)
+ ai_result = await self.llm.aproduct(prompt + output_format + "\nuser:" + input_)
  if inference_save_case:
  await self.save_use_case_by_sql(prompt_id,
  use_case = input_,
@@ -397,10 +555,35 @@ class AsyncIntel():
  # score = 60,
  # session = session)
 
- if input_ == before_input:
- new_prompt = prompt + "\nuser:" + demand
- else:
- new_prompt = prompt + "\nuser:" + input_
+
+ # version 2
+
+ # if input_ == before_input:
+ #     new_prompt = prompt + "\nuser:" + demand
+ # else:
+ #     new_prompt = prompt + "\nuser:" + input_
+
+ # ai_result = await self.llm.aproduct(new_prompt + output_format)
+
+ # save_new_prompt = new_prompt + "\nassistant:\n" + ai_result
+
+
+ # await self.save_prompt_increment_version(
+ #     prompt_id,
+ #     new_prompt=save_new_prompt,
+ #     use_case = input_,
+ #     action_type = "inference",
+ #     score = 60,
+ #     session = session)
+
+ if before_input == "" or change_case is True:
+ result_obj.use_case = input_
+ await session.commit()
+ # look up the previous record and update before_input to input_
+ prompt += input_
+
+ # use the updated data for the subsequent steps
+ new_prompt = prompt + "\nuser:" + demand
 
  ai_result = await self.llm.aproduct(new_prompt + output_format)
 
@@ -411,6 +594,7 @@ class AsyncIntel():
  prompt_id,
  new_prompt=save_new_prompt,
  use_case = input_,
+ action_type = "inference",
  score = 60,
  session = session)
 
@@ -420,7 +604,7 @@ class AsyncIntel():
  prompt = prompt,
  session = session
  )
- ai_result = await self.llm.aproduct(prompt + output_format + "\n-----input----\n" + input_)
+ ai_result = await self.llm.aproduct(prompt + output_format + "\nuser:" + input_)
 
  elif result_obj.action_type == "finetune":
  demand = result_obj.demand
@@ -430,12 +614,12 @@ class AsyncIntel():
  demand = demand,
  session = session
  )
- ai_result = await self.llm.aproduct(prompt + output_format + "\n-----input----\n" + input_)
+ ai_result = await self.llm.aproduct(prompt + output_format + "\nuser:" + input_)
  elif result_obj.action_type == "patch":
  demand = result_obj.demand
  assert demand
  chat_history = prompt + demand
- ai_result = await self.llm.aproduct(chat_history + output_format + "\n-----input----\n" + input_)
+ ai_result = await self.llm.aproduct(chat_history + output_format + "\nuser:" + input_)
  self.save_prompt_increment_version(prompt_id,
  chat_history,
  use_case = input_,
@@ -621,6 +805,13 @@ class AsyncIntel():
 
  except JSONDecodeError as e:
  raise IntellectRemoveFormatError(f"prompt_id: {prompt_id} failed JSON parsing after generation") from e
+
+ except ValidationError as e:
+ err_info = e.errors()[0]
+ raise IntellectRemoveFormatError(f"{err_info['type']}: attribute {err_info['loc']} failed with: {err_info['msg']}; format validation failed, current input: {err_info['input']}, please check") from e
+
+ except Exception as e:
+ raise Exception(f"Error {prompt_id} : {e}") from e
 
  return ai_result
 
@@ -636,6 +827,8 @@ class AsyncIntel():
  async with create_async_session(self.engine) as session:
  prompt_result = await self.get_prompts_from_sql(prompt_id=prompt_id,
  session=session)
+ if prompt_result is None:
+ raise IntellectRemoveError("prompt_id does not exist")
  if prompt_result.action_type != "inference":
  input_datas = input_datas[:1]
  tasks = []
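
The new ValidationError branch above reports only the first pydantic error (e.errors()[0]) and reads its 'type', 'loc', 'msg' and 'input' keys, which is how pydantic v2 structures per-field errors. A standalone illustration of that error shape, independent of the package code:

from pydantic import BaseModel, ValidationError

class Item(BaseModel):
    score: int

try:
    Item(score="not a number")
except ValidationError as e:
    err_info = e.errors()[0]
    # err_info looks like {'type': 'int_parsing', 'loc': ('score',), 'msg': 'Input should be a valid integer, ...', 'input': 'not a number', ...}
    print(err_info['type'], err_info['loc'], err_info['msg'], err_info['input'])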
@@ -22,6 +22,8 @@ BATCH_SIZE = 1000
  class IntellectRemoveFormatError(Exception):
  pass

+ class IntellectRemoveError(Exception):
+ pass

  def slog(s, target: str = "target",logger = None):
  COLOR_GREEN = "\033[92m"
@@ -354,17 +356,28 @@ class Intel():
  # check whether it already exists
  with create_session(self.engine) as session:
  latest_prompt = self.get_prompts_from_sql(prompt_id=prompt_id,session=session)
-
- self.save_prompt_increment_version(prompt_id=latest_prompt.prompt_id,
- new_prompt = latest_prompt.prompt,
- use_case = latest_prompt.use_case,
- action_type=action_type,
+
+ if latest_prompt:
+ self.save_prompt_increment_version(prompt_id=latest_prompt.prompt_id,
+ new_prompt = latest_prompt.prompt,
+ use_case = latest_prompt.use_case,
+ action_type=action_type,
+ demand=demand,
+ score=latest_prompt.score,
+ session=session
+ )
+
+ return "success"
+ else:
+ self.save_prompt_increment_version(prompt_id=prompt_id,
+ new_prompt = demand,
+ use_case = "init",
+ action_type="inference",
  demand=demand,
- score=latest_prompt.score,
+ score=60,
  session=session
  )
-
- return "success"
+ return "init"
 
  def intellect_remove(self,
  input_data: dict | str,
@@ -393,15 +406,7 @@ class Intel():
  result_obj = self.get_prompts_from_sql(prompt_id=prompt_id,session=session)
 
  if result_obj is None:
- self.save_prompt_increment_version(
- prompt_id = prompt_id,
- new_prompt = "do some processing",
- use_case = input_,
- score = 60,
- session = session
- )
- ai_result = "initialization complete"
- return ai_result
+ raise IntellectRemoveError("prompt_id does not exist")
  prompt = result_obj.prompt
  if result_obj.action_type == "inference":
  # run inference directly
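
With this change the synchronous intellect_remove no longer bootstraps an unknown prompt_id with a placeholder prompt; it raises IntellectRemoveError, and initialization is handled by push_action_order (see the hunk above, which creates an "init" version when no prompt exists yet). A caller-side sketch; the intellect_remove keyword names are assumed from the async signature shown earlier, and the prompt_id, demand, and constructor values are hypothetical:

from pro_craft import Intel
from pro_craft.prompt_craft.sync import IntellectRemoveError  # defined in sync.py per this diff

intel = Intel(database_url="sqlite:///./prompts.db", model_name="gpt-4o-mini")  # hypothetical arguments
try:
    result = intel.intellect_remove(input_data={"text": "hello"},
                                    output_format="Return JSON.",
                                    prompt_id="demo_001")
except IntellectRemoveError:
    # The prompt is not registered yet: push an order to create it, then retry.
    intel.push_action_order(demand="Summarize the input", prompt_id="demo_001", action_type="train")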
@@ -1,13 +1,15 @@
 
 
  from fastapi import APIRouter
- from pro_craft import Intel
+ from pro_craft import Intel,AsyncIntel
  from pro_craft.utils import create_session
 
  def create_router(database_url: str,
  slave_database_url: str,
- model_name: str):
+ model_name: str,
+ logger = None):
  """
+ # TODO refactor this to be fully async
  Create a FastAPI APIRouter instance containing the ProCraft routes.
 
  Args:
@@ -20,9 +22,10 @@ def create_router(database_url: str,
  APIRouter: the configured FastAPI APIRouter instance.
  """
 
- intels = Intel(
+ intels = AsyncIntel(
  database_url=database_url,
- model_name=model_name
+ model_name=model_name,
+ logger=logger
  )
 
  router = APIRouter(
@@ -32,7 +35,7 @@ def create_router(database_url: str,
  @router.get("/push_order",
  description="options: train,inference,summary,finetune,patch",)
  async def push_order(demand: str, prompt_id: str, action_type: str = "train"):
- result = intels.push_action_order(
+ result = await intels.push_action_order(
  demand=demand,
  prompt_id=prompt_id,
  action_type=action_type
@@ -42,7 +45,7 @@ def create_router(database_url: str,
  @router.get("/get_latest_prompt")
  async def get_latest_prompt(prompt_id: str):
  with create_session(intels.engine) as session:
- result = intels.get_prompts_from_sql(
+ result = await intels.get_prompts_from_sql(
  prompt_id=prompt_id,
  session=session
  )
@@ -50,20 +53,20 @@ def create_router(database_url: str,
 
  @router.get("/sync_database")
  async def sync_database():
- result = intels.sync_prompt_data_to_database(slave_database_url)
+ result = await intels.sync_prompt_data_to_database(slave_database_url)
  return {"message": "success","result":result}
 
 
  @router.get("/roll_back")
  async def roll_back(prompt_id:str,version:str):
  with create_session(intels.engine) as session:
- result = intels.get_prompts_from_sql(
+ result = await intels.get_prompts_from_sql(
  prompt_id=prompt_id,
  version = version,
  session=session
  )
  assert result.version == version
- intels.save_prompt_increment_version(
+ await intels.save_prompt_increment_version(
  prompt_id = prompt_id,
  new_prompt = result.prompt,
  use_case = result.use_case,
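
The router factory now instantiates AsyncIntel, awaits its coroutine methods, and accepts an optional logger. A minimal wiring sketch with hypothetical URLs and model name (the import path follows the file list above):

import logging
from fastapi import FastAPI
from pro_craft.server.router.prompt import create_router

app = FastAPI()
app.include_router(create_router(
    database_url="sqlite+aiosqlite:///./primary.db",      # hypothetical primary database
    slave_database_url="sqlite+aiosqlite:///./slave.db",  # hypothetical replica used by /sync_database
    model_name="gpt-4o-mini",                             # hypothetical model name
    logger=logging.getLogger("pro_craft"),
))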
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pro-craft
- Version: 0.1.23
+ Version: 0.1.25
  Summary: Add your description here
  Requires-Python: >=3.12
  Description-Content-Type: text/markdown