dtflow 0.5.6__py3-none-any.whl → 0.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dtflow/parallel.py ADDED
@@ -0,0 +1,115 @@
+ """
+ Parallel processing module.
+
+ Provides multiprocessing helpers to speed up token statistics and schema validation on large datasets.
+ """
+
+ from multiprocessing import Pool, cpu_count
+ from typing import Callable, List, Optional, TypeVar
+
+ T = TypeVar("T")
+ R = TypeVar("R")
+
+
+ def parallel_map(
+     func: Callable[[T], R],
+     data: List[T],
+     workers: Optional[int] = None,
+     threshold: int = 1000,
+     chunksize: Optional[int] = None,
+ ) -> List[R]:
+     """
+     Parallel map operation.
+
+     Args:
+         func: Processing function (must be picklable; no lambdas or closures).
+         data: List of input items.
+         workers: Number of processes; None uses the CPU core count.
+         threshold: Below this data size, fall back to serial processing.
+         chunksize: Task chunk size per process; None computes it automatically.
+
+     Returns:
+         List of results (order preserved).
+     """
+     n = len(data)
+
+     # Small datasets or an explicit single worker: run serially.
+     if n < threshold or workers == 1:
+         return [func(item) for item in data]
+
+     workers = workers or cpu_count()
+     workers = min(workers, n)  # never more processes than items
+
+     # Compute chunksize automatically.
+     if chunksize is None:
+         chunksize = max(1, n // (workers * 4))
+
+     with Pool(processes=workers) as pool:
+         return pool.map(func, data, chunksize=chunksize)
+
+
+ def parallel_imap(
+     func: Callable[[T], R],
+     data: List[T],
+     workers: Optional[int] = None,
+     threshold: int = 1000,
+     chunksize: Optional[int] = None,
+ ):
+     """
+     Parallel imap operation (lazy iterator variant; supports progress callbacks).
+
+     Args:
+         func: Processing function (must be picklable).
+         data: List of input items.
+         workers: Number of processes; None uses the CPU core count.
+         threshold: Below this data size, fall back to serial processing.
+         chunksize: Task chunk size per process.
+
+     Yields:
+         Results, in order.
+     """
+     n = len(data)
+
+     # Small datasets or an explicit single worker: run serially.
+     if n < threshold or workers == 1:
+         for item in data:
+             yield func(item)
+         return
+
+     workers = workers or cpu_count()
+     workers = min(workers, n)
+
+     if chunksize is None:
+         chunksize = max(1, n // (workers * 4))
+
+     with Pool(processes=workers) as pool:
+         for result in pool.imap(func, data, chunksize=chunksize):
+             yield result
+
+
+ def get_optimal_workers(data_size: int, default: Optional[int] = None) -> int:
+     """
+     Compute the optimal number of worker processes for a given data size.
+
+     Args:
+         data_size: Number of items.
+         default: User-specified worker count; None computes it automatically.
+
+     Returns:
+         Optimal number of worker processes.
+     """
+     if default is not None:
+         return default
+
+     cpu_cores = cpu_count()
+
+     # Below the threshold, use a single process.
+     if data_size < 1000:
+         return 1
+
+     # Medium-sized data: use half the CPU cores.
+     if data_size < 10000:
+         return max(1, cpu_cores // 2)
+
+     # Large data: use all CPU cores.
+     return cpu_cores
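The new module is self-contained, so a quick sanity check is easy. A minimal sketch (the `square` helper, sizes, and worker count are made up for illustration); note that the mapped function must be a module-level, picklable callable and that a `__main__` guard is needed on spawn-based platforms:

```python
# Hypothetical usage sketch of dtflow.parallel (not from the package docs).
from dtflow.parallel import get_optimal_workers, parallel_map

def square(x: int) -> int:      # must be a top-level function, not a lambda
    return x * x

if __name__ == "__main__":      # required where multiprocessing uses "spawn"
    nums = list(range(5_000))
    print(get_optimal_workers(len(nums)))            # medium dataset -> half the CPU cores
    results = parallel_map(square, nums, workers=4)  # falls back to serial below threshold=1000
    print(results[:5])                               # [0, 1, 4, 9, 16]
```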
dtflow/schema.py CHANGED
@@ -26,10 +26,35 @@ Schema validation module
      results = dt.validate_schema(schema)
  """

- from dataclasses import dataclass, field as dataclass_field
- from typing import Any, Callable, Dict, List, Literal, Optional, Set, Union
+ from dataclasses import dataclass
+ from dataclasses import field as dataclass_field
+ from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
+
+ from .utils.field_path import _parse_path, get_field
+
+
+ def _validate_item_wrapper(args: tuple) -> Tuple[int, bool, list]:
+     """
+     Validate a single record (used by the multiprocessing path).
+
+     Args:
+         args: An (index, item, schema_fields) tuple.
+
+     Returns:
+         (index, is_valid, errors_as_dicts) - errors come back as dicts rather than objects (pickle-compatible).
+     """
+     idx, item, fields = args
+     # Rebuild the Schema inside the worker process.
+     schema = Schema(fields)
+     result = schema.validate(item)
+
+     if result.valid:
+         return (idx, True, [])
+     else:
+         # Convert errors to dicts (pickle-friendly).
+         errors = [{"path": e.path, "message": e.message, "value": e.value} for e in result.errors]
+         return (idx, False, errors)

- from .utils.field_path import get_field, _parse_path, _get_value_by_segments

  # Supported types
  FieldType = Literal["str", "int", "float", "bool", "list", "dict", "any"]
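Because `Pool` workers have to exchange picklable values, the wrapper trades `ValidationError` objects for plain dicts and lets the parent rebuild them. A small sketch of that call contract (the field map and records are invented, and `_validate_item_wrapper` is an internal helper):

```python
# Hypothetical sketch of the wrapper contract used by the parallel path.
from dtflow.schema import Field, _validate_item_wrapper

fields = {"text": Field(type="str", required=True)}   # assumed minimal schema
idx, ok, errors = _validate_item_wrapper((0, {"text": "hello"}, fields))
print(idx, ok, errors)   # 0 True []

idx, ok, errors = _validate_item_wrapper((1, {}, fields))
print(ok, errors)        # False, plus one error dict with "path" / "message" / "value" keys
```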
@@ -162,9 +187,7 @@ class Field:

          # Choices check
          if self.choices is not None and value not in self.choices:
-             errors.append(
-                 ValidationError(path, f"值必须是 {self.choices} 之一", value)
-             )
+             errors.append(ValidationError(path, f"值必须是 {self.choices} 之一", value))

          # Regular-expression check
          if self.pattern is not None and isinstance(value, str):
@@ -324,9 +347,7 @@ class Schema:

          return errors

-     def validate_batch(
-         self, data: List[dict], max_errors: int = 100
-     ) -> List[tuple]:
+     def validate_batch(self, data: List[dict], max_errors: int = 100) -> List[tuple]:
          """
          Validate data in batch.

@@ -350,9 +371,76 @@

          return failed

+     def validate_parallel(
+         self,
+         data: List[dict],
+         workers: Optional[int] = None,
+         progress_callback: Optional[Callable[[int, int], None]] = None,
+     ) -> tuple:
+         """
+         Validate a list of records in parallel.
+
+         Args:
+             data: List of records.
+             workers: Number of processes; None auto-detects, 1 disables parallelism.
+             progress_callback: Progress callback function.
+
+         Returns:
+             A (valid_data, invalid_indices_results) tuple:
+             - valid_data: list of valid records
+             - invalid_indices_results: [(index, ValidationResult), ...] for invalid records
+         """
+         if not data:
+             return [], []
+
+         total = len(data)
+         use_parallel = workers != 1 and total >= 1000
+
+         valid_data = []
+         invalid_results = []
+
+         if use_parallel:
+             from .parallel import get_optimal_workers, parallel_imap
+
+             actual_workers = get_optimal_workers(total, workers)
+             # Prepare arguments: (index, item, schema_fields)
+             args_list = [(i, item, self._fields) for i, item in enumerate(data)]
+
+             for i, (idx, is_valid, result_data) in enumerate(
+                 parallel_imap(
+                     _validate_item_wrapper,
+                     args_list,
+                     workers=actual_workers,
+                     threshold=1000,
+                 )
+             ):
+                 if is_valid:
+                     valid_data.append(data[idx])
+                 else:
+                     # Rebuild ValidationResult (it cannot be pickled directly).
+                     errors = [
+                         ValidationError(path=e["path"], message=e["message"], value=e.get("value"))
+                         for e in result_data
+                     ]
+                     invalid_results.append((idx, ValidationResult(valid=False, errors=errors)))
+                 if progress_callback:
+                     progress_callback(i + 1, total)
+         else:
+             # Serial fallback.
+             for i, item in enumerate(data):
+                 result = self.validate(item)
+                 if result.valid:
+                     valid_data.append(item)
+                 else:
+                     invalid_results.append((i, result))
+                 if progress_callback:
+                     progress_callback(i + 1, total)
+
+         return valid_data, invalid_results
+
      def __repr__(self) -> str:
          field_strs = [f" {path}: {field_def}" for path, field_def in self._fields.items()]
-         return f"Schema({{\n" + ",\n".join(field_strs) + "\n}})"
+         return "Schema({\n" + ",\n".join(field_strs) + "\n}})"


  # ============================================================================
@@ -461,9 +549,7 @@ def sharegpt_schema(
      """
      return Schema(
          {
-             "conversations": Field(
-                 type="list", required=True, min_length=min_conversations
-             ),
+             "conversations": Field(type="list", required=True, min_length=min_conversations),
              "conversations[*].from": Field(
                  type="str", required=True, choices=[human_role, gpt_role]
              ),
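Taken together, `validate_parallel` leaves `validate` itself untouched and only changes how records are fanned out to workers. A minimal sketch with a hypothetical one-field schema and made-up records (the multiprocessing path only engages at 1000+ items):

```python
# Hypothetical usage sketch of Schema.validate_parallel (fields and data invented).
from dtflow.schema import Field, Schema

schema = Schema({"text": Field(type="str", required=True)})
data = [{"text": f"sample {i}"} for i in range(2000)] + [{"label": "missing text"}]

if __name__ == "__main__":   # multiprocessing-safe entry point
    valid, invalid = schema.validate_parallel(data, workers=4)
    print(len(valid), len(invalid))                 # 2000 1
    for idx, result in invalid:
        print(idx, [e.path for e in result.errors])
```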
dtflow/tokenizers.py CHANGED
@@ -122,8 +122,8 @@ def _get_tiktoken_encoder(model: str):
              _tokenizer_cache[model] = tiktoken.get_encoding(model)
          else:
              _tokenizer_cache[model] = tiktoken.encoding_for_model(model)
-     except ImportError:
-         raise ImportError("需要安装 tiktoken: pip install tiktoken")
+     except ImportError as e:
+         raise ImportError("需要安装 tiktoken: pip install tiktoken") from e
      return _tokenizer_cache[model]


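The only behavioral change in this hunk and the next is exception chaining: re-raising with `from e` keeps the original import failure attached as `__cause__`. A small standalone illustration (the module name is fictitious):

```python
# Standalone illustration of `raise ... from e`; the module name is made up.
try:
    try:
        import not_a_real_module  # stand-in for a missing optional dependency
    except ImportError as e:
        raise ImportError("install the optional dependency first") from e
except ImportError as err:
    print(err)            # install the optional dependency first
    print(err.__cause__)  # No module named 'not_a_real_module'
```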
@@ -149,12 +149,12 @@ def _get_hf_tokenizer(model: str):

              tokenizer = AutoTokenizer.from_pretrained(resolved, trust_remote_code=True)
              _tokenizer_cache[resolved] = ("transformers", tokenizer)
-     except ImportError:
+     except ImportError as e:
          raise ImportError(
              "需要安装 tokenizers 或 transformers:\n"
              " pip install tokenizers huggingface_hub (推荐,更轻量)\n"
              " pip install transformers"
-         )
+         ) from e
      return _tokenizer_cache[resolved]


@@ -309,12 +309,29 @@ def _std(counts: List[int], avg: float) -> float:
      return variance**0.5


+ def _count_item_tokens(args: tuple) -> int:
+     """
+     Count the tokens of a single record (used by the multiprocessing path).
+
+     Args:
+         args: An (item, fields, model, backend) tuple.
+     """
+     item, fields, model, backend = args
+     total = 0
+     for field in fields:
+         value = get_field_with_spec(item, field, default="")
+         if value:
+             total += count_tokens(str(value), model=model, backend=backend)
+     return total
+
+
  def token_stats(
      data: List[Dict[str, Any]],
      fields: Union[str, List[str]],
      model: str = DEFAULT_MODEL,
      backend: Optional[str] = None,
      progress_callback: Optional[Callable[[int, int], None]] = None,
+     workers: Optional[int] = None,
  ) -> Dict[str, Any]:
      """
      Compute token statistics for a dataset.
@@ -325,6 +342,7 @@ def token_stats(
          model: Model name or alias, e.g. "qwen2.5", "gpt-4".
          backend: Backend selection; None auto-detects.
          progress_callback: Progress callback; receives (current, total).
+         workers: Number of processes; None auto-detects, 1 disables parallelism.

      Returns:
          A statistics dict containing:
@@ -342,17 +360,42 @@ def token_stats(
      if not data:
          return {"total_tokens": 0, "count": 0}

-     counts = []
      total_items = len(data)
-     for i, item in enumerate(data):
-         total = 0
-         for field in fields:
-             value = get_field_with_spec(item, field, default="")
-             if value:
-                 total += count_tokens(str(value), model=model, backend=backend)
-         counts.append(total)
-         if progress_callback:
-             progress_callback(i + 1, total_items)
+     _backend = backend or _auto_backend(model)
+
+     # Decide whether to use multiprocessing.
+     use_parallel = workers != 1 and total_items >= 1000
+
+     if use_parallel:
+         from .parallel import get_optimal_workers, parallel_imap
+
+         actual_workers = get_optimal_workers(total_items, workers)
+         # Prepare arguments.
+         args_list = [(item, fields, model, _backend) for item in data]
+         counts = []
+         for i, result in enumerate(
+             parallel_imap(
+                 _count_item_tokens,
+                 args_list,
+                 workers=actual_workers,
+                 threshold=1000,
+             )
+         ):
+             counts.append(result)
+             if progress_callback:
+                 progress_callback(i + 1, total_items)
+     else:
+         # Serial fallback.
+         counts = []
+         for i, item in enumerate(data):
+             total = 0
+             for field in fields:
+                 value = get_field_with_spec(item, field, default="")
+                 if value:
+                     total += count_tokens(str(value), model=model, backend=_backend)
+             counts.append(total)
+             if progress_callback:
+                 progress_callback(i + 1, total_items)

      sorted_counts = sorted(counts)
      avg = sum(counts) / len(counts)
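A hedged sketch of calling `token_stats` with the new `workers` parameter (the records and field name are invented, and a tokenizer backend such as tiktoken must be installed for a GPT model):

```python
# Hypothetical sketch: token_stats with the new workers parameter.
from dtflow.tokenizers import token_stats

data = [{"text": "hello world, this is a sample"} for _ in range(5000)]

if __name__ == "__main__":   # multiprocessing-safe entry point
    stats = token_stats(data, fields="text", model="gpt-4", workers=4)
    print(stats["count"], stats["total_tokens"])
```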
@@ -548,12 +591,27 @@ def messages_token_filter(
      return filter_func


+ def _count_messages_tokens_wrapper(args: tuple) -> Optional[Dict[str, int]]:
+     """
+     Count the tokens of a single messages record (used by the multiprocessing path).
+
+     Args:
+         args: An (item, messages_field, model, backend) tuple.
+     """
+     item, messages_field, model, backend = args
+     messages = get_field_with_spec(item, messages_field, default=[])
+     if messages:
+         return _count_messages_tokens(messages, model=model, backend=backend)
+     return None
+
+
  def messages_token_stats(
      data: List[Dict[str, Any]],
      messages_field: str = "messages",
      model: str = DEFAULT_MODEL,
      backend: Optional[str] = None,
      progress_callback: Optional[Callable[[int, int], None]] = None,
+     workers: Optional[int] = None,
  ) -> Dict[str, Any]:
      """
      Compute token statistics for the messages in a dataset.
@@ -564,6 +622,7 @@ def messages_token_stats(
          model: Model name or alias.
          backend: Backend; None auto-detects.
          progress_callback: Progress callback; receives (current, total).
+         workers: Number of processes; None auto-detects, 1 disables parallelism.

      Returns:
          A statistics dict containing:
@@ -581,14 +640,38 @@
      if not data:
          return {"count": 0, "total_tokens": 0}

-     all_stats = []
      total_items = len(data)
-     for i, item in enumerate(data):
-         messages = get_field_with_spec(item, messages_field, default=[])
-         if messages:
-             all_stats.append(_count_messages_tokens(messages, model=model, backend=_backend))
-         if progress_callback:
-             progress_callback(i + 1, total_items)
+
+     # Decide whether to use multiprocessing.
+     use_parallel = workers != 1 and total_items >= 1000
+
+     all_stats = []
+     if use_parallel:
+         from .parallel import get_optimal_workers, parallel_imap
+
+         actual_workers = get_optimal_workers(total_items, workers)
+         args_list = [(item, messages_field, model, _backend) for item in data]
+
+         for i, result in enumerate(
+             parallel_imap(
+                 _count_messages_tokens_wrapper,
+                 args_list,
+                 workers=actual_workers,
+                 threshold=1000,
+             )
+         ):
+             if result is not None:
+                 all_stats.append(result)
+             if progress_callback:
+                 progress_callback(i + 1, total_items)
+     else:
+         # Serial fallback.
+         for i, item in enumerate(data):
+             messages = get_field_with_spec(item, messages_field, default=[])
+             if messages:
+                 all_stats.append(_count_messages_tokens(messages, model=model, backend=_backend))
+             if progress_callback:
+                 progress_callback(i + 1, total_items)

      if not all_stats:
          return {"count": 0, "total_tokens": 0}
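Both statistics functions report progress through the same `(current, total)` callback whether the serial or the parallel path runs, so wiring up a progress bar is straightforward. A sketch using tqdm (an assumption on my part — any callable with that signature works; the sample messages are invented, and a tokenizer backend must be installed):

```python
# Hypothetical sketch: driving a tqdm bar from progress_callback.
from tqdm import tqdm
from dtflow.tokenizers import messages_token_stats

data = [{"messages": [{"role": "user", "content": "hi"},
                      {"role": "assistant", "content": "hello"}]}] * 3000

if __name__ == "__main__":
    bar = tqdm(total=len(data))
    stats = messages_token_stats(
        data,
        messages_field="messages",
        workers=4,
        progress_callback=lambda current, total: bar.update(1),
    )
    bar.close()
    print(stats["count"], stats["total_tokens"])
```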
dtflow-0.5.6.dist-info/METADATA → dtflow-0.5.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dtflow
- Version: 0.5.6
+ Version: 0.5.8
  Summary: A flexible data transformation tool for ML training formats (SFT, RLHF, Pretrain)
  Project-URL: Homepage, https://github.com/yourusername/DataTransformer
  Project-URL: Documentation, https://github.com/yourusername/DataTransformer#readme
@@ -69,8 +69,6 @@ Requires-Dist: tokenizers>=0.15.0; extra == 'full'
  Requires-Dist: toolong>=1.5.0; extra == 'full'
  Provides-Extra: logs
  Requires-Dist: toolong>=1.5.0; extra == 'logs'
- Provides-Extra: mcp
- Requires-Dist: mcp>=1.0.0; extra == 'mcp'
  Provides-Extra: similarity
  Requires-Dist: datasketch>=1.5.0; extra == 'similarity'
  Requires-Dist: scikit-learn>=0.24.0; extra == 'similarity'
@@ -99,6 +97,17 @@ pip install transformers   # Token statistics (HuggingFace models)
  pip install datasets       # HuggingFace Dataset conversion
  ```

+ ## 🤖 Claude Code Integration
+
+ dtflow ships with a built-in [Claude Code](https://docs.anthropic.com/en/docs/claude-code) skill:
+
+ ```bash
+ dt install-skill   # install the skill
+ dt skill-status    # check installation status
+ ```
+
+ Once installed, type `/dtflow` in Claude Code; Claude then knows the full dtflow usage and can assist with data-processing tasks directly.
+
  ## Quick Start

  ```python
@@ -443,6 +452,7 @@ dt run pipeline.yaml --input=new_data.jsonl --output=result.jsonl
  dt token-stats data.jsonl --field=messages --model=gpt-4
  dt token-stats data.jsonl --field=messages[-1].content   # stats for the last message only
  dt token-stats data.jsonl --field=text --detailed
+ dt token-stats data.jsonl --workers=4                     # multiprocessing speed-up (enabled automatically on large datasets)

  # Data diffing
  dt diff v1/train.jsonl v2/train.jsonl
@@ -471,13 +481,22 @@ dt dedupe data.jsonl --key=text --similar=0.8   # similarity-based dedup
  dt concat a.jsonl b.jsonl -o merged.jsonl

  # Dataset statistics
- dt stats data.jsonl
+ dt stats data.jsonl                                       # quick mode
+ dt stats data.jsonl --full                                # full mode (with value distributions)
+ dt stats data.jsonl --full --field=category               # statistics for a specific field
+ dt stats data.jsonl --full --expand=tags                  # expand a list field and count its elements
+ dt stats data.jsonl --full --expand='messages[*].role'    # expand a nested list field
+
+ # Claude Code skill installation
+ dt install-skill   # installs into ~/.claude/skills/
+ dt skill-status    # check installation status

  # Data validation
  dt validate data.jsonl --preset=openai_chat                              # validate against a preset schema
  dt validate data.jsonl --preset=alpaca --verbose                         # verbose output
  dt validate data.jsonl --preset=sharegpt --filter-invalid -o valid.jsonl # keep only the valid records
  dt validate data.jsonl --preset=dpo --max-errors=100                     # limit the number of reported errors
+ dt validate data.jsonl --preset=openai_chat --workers=4                  # multiprocessing speed-up
  ```

  ### Field Path Syntax
@@ -506,6 +525,18 @@ Field arguments in CLI commands support a nested path syntax for reaching deeply nested
  | `token-stats` | `--field=` | `--field=messages[-1].content` |
  | `diff` | `--key=` | `--key=meta.uuid` |

+ Operators supported by `--where`:
+
+ | Operator | Meaning | Example |
+ |----------|---------|---------|
+ | `=` | equals | `--where="category=tech"` |
+ | `!=` | not equal | `--where="source!=wiki"` |
+ | `~=` | contains | `--where="content~=机器学习"` |
+ | `>` | greater than | `--where="score>0.8"` |
+ | `>=` | greater than or equal | `--where="messages.#>=2"` |
+ | `<` | less than | `--where="length<1000"` |
+ | `<=` | less than or equal | `--where="turns<=10"` |
+
  Sample data:
  ```json
  {"meta": {"source": "wiki"}, "messages": [{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}]}
@@ -603,6 +634,18 @@ dt history processed.jsonl
  dt history processed.jsonl --json   # JSON output
  ```

+ ### Log Viewing
+
+ dtflow bundles the [toolong](https://github.com/Textualize/toolong) log viewer:
+
+ ```bash
+ pip install dtflow[logs]   # install the log tooling
+
+ tl app.log          # interactive TUI viewer
+ tl --tail app.log   # live follow (similar to tail -f)
+ dt logs             # show usage notes
+ ```
+
  ### Streaming for Large Files

  A streaming interface built for very large files, with O(1) memory usage; supports the JSONL, CSV, Parquet, and Arrow formats:
dtflow-0.5.6.dist-info/RECORD → dtflow-0.5.8.dist-info/RECORD CHANGED
@@ -1,37 +1,35 @@
- dtflow/__init__.py,sha256=_KUxZUD08hQhhLugGbjo_jlP5JuMCFAcCs0o0SCCoVM,3031
- dtflow/__main__.py,sha256=OJ60M0PbA0PcsQfA7FP9k9CflJgzexKhIl-yc-CPXkw,12675
+ dtflow/SKILL.md,sha256=nh12TTq_eRzl5O2CTgsiS809BBVR49kmpZ8n7UprMHI,9552
+ dtflow/__init__.py,sha256=tofhUr_PMnsONnB3Hu-mwUrD4Q3bV7Kw_0S6dQw6ig8,3031
+ dtflow/__main__.py,sha256=p8oZKQhwq04shCB3y_pkXjf-SZ4PZvg5PXdyUP-5rYA,13497
  dtflow/converters.py,sha256=X3qeFD7FCOMnfiP3MicL5MXimOm4XUYBs5pczIkudU0,22331
  dtflow/core.py,sha256=qMo6B3LK--TWRK7ZBKObGcs3pKFnd0NPoaM0T8JC7Jw,38135
  dtflow/framework.py,sha256=jyICi_RWHjX7WfsXdSbWmP1SL7y1OWSPyd5G5Y-lvg4,17578
  dtflow/lineage.py,sha256=jie3OL1qK90-_cOOqqLbhSJ1oGUktDM1x5HRpQ5Qiyc,12800
+ dtflow/parallel.py,sha256=EnIdGEGMrZUNT2-CBIV93UFfpqr_jU_heqqvdGXcP-Y,3046
  dtflow/pipeline.py,sha256=zZaC4fg5vsp_30Fhbg75vu0yggsdvf28bWBiVDWzZ6Y,13901
  dtflow/presets.py,sha256=qa8WQJhbNMuGxqqgA9BFadEBwDB9s0zWNxxhzF3q1K8,4701
- dtflow/schema.py,sha256=IFcij22_UFKcgKT1YWwRg2QJO0vcAvCb1arZmsGByts,16824
+ dtflow/schema.py,sha256=zCZNEAqTMT1BS_p2t0CYczR5S9rqyDREa7ZsYI5pFGA,19885
  dtflow/streaming.py,sha256=dxpNd1-Wz_PTLTdvM5qn06_2TJr5NRlIIuw0LOSS2Iw,24755
- dtflow/tokenizers.py,sha256=7ZAelSmcDxLWH5kICgH9Q1ULH3_BfDZb9suHMjJJRZU,20589
+ dtflow/tokenizers.py,sha256=GFQsuLSLn2GHn2kaXhJkP8G85lgsdLzYtJNbppQhYPE,23408
  dtflow/cli/__init__.py,sha256=QhZ-thgx9IBTFII7T_hdoWFUl0CCsdGQHN5ZEZw2XB0,423
- dtflow/cli/clean.py,sha256=y9VCRibgK1j8WIY3h0XZX0m93EdELQC7TdnseMWwS-0,17799
- dtflow/cli/commands.py,sha256=ST65Ox_MKu-CKAtPVaxECAPXYOJiF7BhL32A4nsZZl0,1175
+ dtflow/cli/clean.py,sha256=KuE9ODjD9gSZUIHaD2mQLTDO-1PDwN7EqUpj8EQfVCs,25663
+ dtflow/cli/commands.py,sha256=zKUG-B9Az-spqyqM00cR8Sgc2UgeOPQDThJFHWDNO_w,1336
  dtflow/cli/common.py,sha256=gCwnF5Sw2ploqfZJO_z3Ms9mR1HNT7Lj6ydHn0uVaIw,13817
  dtflow/cli/io_ops.py,sha256=BMDisP6dxzzmSjYwmeFwaHmpHHPqirmXAWeNTD-9MQM,13254
  dtflow/cli/lineage.py,sha256=_lNh35nF9AA0Zy6FyZ4g8IzrXH2ZQnp3inF-o2Hs1pw,1383
  dtflow/cli/pipeline.py,sha256=QNEo-BJlaC1CVnVeRZr7TwfuZYloJ4TebIzJ5ALzry0,1426
  dtflow/cli/sample.py,sha256=pubpx4AIzsarBEalD150MC2apYQSt4bal70IZkTfFO0,15475
- dtflow/cli/stats.py,sha256=u4ehCfgw1X8WuOyAjrApMRgcIO3BVmINbsTjxEscQro,24086
+ dtflow/cli/skill.py,sha256=opiTEBejA7JHKrEMftMOPDQlOgZ4n59rwaHXGU1Nukk,2022
+ dtflow/cli/stats.py,sha256=HkTZD80h4tzYXTtMnfpjLUMP6kl_es6ifcmExxzGdMU,31813
  dtflow/cli/transform.py,sha256=w6xqMOxPxQvL2u_BPCfpDHuPSC9gmcqMPVN8s-B6bbY,15052
- dtflow/cli/validate.py,sha256=65aGVlMS_Rq0Ch0YQ-TclVJ03RQP4CnG137wthzb8Ao,4384
- dtflow/mcp/__init__.py,sha256=huEJ3rXDbxDRjsLPEvjNT2u3tWs6Poiv6fokPIrByjw,897
- dtflow/mcp/__main__.py,sha256=PoT2ZZmJq9xDZxDACJfqDW9Ld_ukHrGNK-0XUd7WGnY,448
- dtflow/mcp/cli.py,sha256=ck0oOS_642cNktxULaMRE7BJfMxsBCwotmCj3PSPwVk,13110
- dtflow/mcp/docs.py,sha256=DI2Vf-eFo4chRP_bDLsv4Uc3kJt8_1emz8N-NBSVirM,8834
- dtflow/mcp/server.py,sha256=Nf0UlqDGhV55ndGuEglfr7VRjDWAC_9rRsNhdr0-ssM,4275
+ dtflow/cli/validate.py,sha256=Frs-jKcDHmYozpmIYZueDSX5o2i1Xn-WW81FGUyUrng,5796
  dtflow/storage/__init__.py,sha256=C0jpWNQU808Ezz7lWneddABal3wILy8ijFUNiSKbHV4,362
  dtflow/storage/io.py,sha256=ZH2aSE-S89gpy3z4oTqhcqWf4u10OdkDoyul7o_YBDI,23374
  dtflow/utils/__init__.py,sha256=Pn-ltwV04fBQmeZG7FxInDQmzH29LYOi90LgeLMEuQk,506
  dtflow/utils/display.py,sha256=OeOdTh6mbDwSkDWlmkjfpTjy2QG8ZUaYU0NpHUWkpEQ,5881
  dtflow/utils/field_path.py,sha256=K8nU196RxTSJ1OoieTWGcYOWl9KjGq2iSxCAkfjECuM,7621
  dtflow/utils/helpers.py,sha256=JXN176_B2pm53GLVyZ1wj3wrmBJG52Tkw6AMQSdj7M8,791
- dtflow-0.5.6.dist-info/METADATA,sha256=TPSDq-fQDini8uKERCdm_4cZYw-b9t6V8UQ1MlTJ7iA,22698
- dtflow-0.5.6.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- dtflow-0.5.6.dist-info/entry_points.txt,sha256=dadIDOK7Iu9pMxnMPBfpb4aAPe4hQbBOshpQYjVYpGc,44
- dtflow-0.5.6.dist-info/RECORD,,
+ dtflow-0.5.8.dist-info/METADATA,sha256=Tm_dfdQfGlShyDt95fNQ87JXiBRnf6mfDgx827h3Rnc,24487
+ dtflow-0.5.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ dtflow-0.5.8.dist-info/entry_points.txt,sha256=dadIDOK7Iu9pMxnMPBfpb4aAPe4hQbBOshpQYjVYpGc,44
+ dtflow-0.5.8.dist-info/RECORD,,
dtflow/mcp/__init__.py DELETED
@@ -1,29 +0,0 @@
- """DataTransformer MCP (Model Context Protocol) service.
-
- Exposes DataTransformer usage lookups for AI models to call.
-
- Usage:
-     # Install the MCP service into Claude Code
-     dt mcp install
-
-     # Run the MCP service (normally launched by Claude automatically)
-     dt-mcp
-
- Note: the MCP feature requires the mcp dependency: pip install dtflow[mcp]
- """
-
- __all__ = ["main", "mcp"]
-
-
- def __getattr__(name):
-     """Lazily import the server module so the package still imports when the mcp dependency is missing."""
-     if name in ("main", "mcp"):
-         try:
-             from .server import main, mcp
-
-             return main if name == "main" else mcp
-         except ImportError as e:
-             raise ImportError(
-                 f"MCP 功能需要安装 mcp 依赖: pip install dtflow[mcp]\n原始错误: {e}"
-             ) from e
-     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
dtflow/mcp/__main__.py DELETED
@@ -1,18 +0,0 @@
- """Datatron MCP service entry point.
-
- Usage:
-     python -m dtflow.mcp
- """
-
- if __name__ == "__main__":
-     try:
-         from .server import main
-
-         main()
-     except ImportError as e:
-         import sys
-
-         print(f"错误: MCP 功能需要安装 mcp 依赖", file=sys.stderr)
-         print(f"请运行: pip install dtflow[mcp]", file=sys.stderr)
-         print(f"\n原始错误: {e}", file=sys.stderr)
-         sys.exit(1)