cifter-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cifter/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from cifter.cli import main
2
+
3
+ __all__ = ["main"]
cifter/__main__.py ADDED
@@ -0,0 +1,4 @@
1
# Module entry point: lets `python -m cifter` invoke the CLI.
from cifter.cli import main

if __name__ == "__main__":
    main()
cifter/cli.py ADDED
@@ -0,0 +1,90 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Callable
4
+ from pathlib import Path
5
+ from typing import Annotated
6
+
7
+ import typer
8
+
9
+ from cifter.errors import CiftError
10
+ from cifter.extract_flow import extract_flow
11
+ from cifter.extract_function import extract_function
12
+ from cifter.extract_path import extract_path
13
+ from cifter.model import TrackPath
14
+ from cifter.parser import parse_source
15
+ from cifter.render import render_result
16
+
17
# Typer application; shows help when invoked with no arguments.
app = typer.Typer(no_args_is_help=True, help="C/C++ の関数実装を抽出する CLI")


def main() -> None:
    """Console-script entry point: run the Typer application."""
    app()
22
+
23
+
24
# Shared --source option: an existing, readable file, resolved to an absolute path.
SourceOption = Annotated[
    Path,
    typer.Option(
        "--source",
        exists=True,
        file_okay=True,
        dir_okay=False,
        readable=True,
        resolve_path=True,
        help="解析するソースファイル",
    ),
]
36
+
37
+
38
@app.command("function")
def function_command(
    name: Annotated[str, typer.Option("--name", help="抽出する関数名")],
    source: SourceOption,
    defines: Annotated[
        list[str] | None,
        typer.Option("--define", "-D", help="条件分岐評価に使うマクロ定義"),
    ] = None,
) -> None:
    """Print the full implementation of the function *name* found in *source*."""
    def build_output() -> str:
        parsed = parse_source(source, defines or [])
        return render_result(extract_function(parsed, name))

    _run(build_output)
48
+
49
+
50
@app.command("flow")
def flow_command(
    function_name: Annotated[str, typer.Option("--function", help="対象関数名")],
    source: SourceOption,
    # Fixed: the Option metadata must live inside Annotated together with the
    # optional type (Annotated[list[str] | None, ...] = None), matching the
    # --define options below. The previous `Annotated[list[str], ...] | None`
    # wrapped the Annotated form in a bare union, which Typer does not resolve
    # option metadata from.
    track: Annotated[
        list[str] | None,
        typer.Option("--track", help="保持するアクセスパス"),
    ] = None,
    defines: Annotated[
        list[str] | None,
        typer.Option("--define", "-D", help="条件分岐評価に使うマクロ定義"),
    ] = None,
) -> None:
    """Print the control-flow skeleton of a function, keeping tracked access paths."""
    def task() -> str:
        parsed = parse_source(source, defines or [])
        tracks = tuple(TrackPath.parse(value) for value in (track or []))
        return render_result(extract_flow(parsed, function_name, tracks))

    _run(task)
66
+
67
+
68
@app.command("path")
def path_command(
    function_name: Annotated[str, typer.Option("--function", help="対象関数名")],
    source: SourceOption,
    route: Annotated[str, typer.Option("--route", help="抽出する経路 DSL")],
    defines: Annotated[
        list[str] | None,
        typer.Option("--define", "-D", help="条件分岐評価に使うマクロ定義"),
    ] = None,
) -> None:
    """Print only the lines of a function that lie along one branch route."""
    def build_output() -> str:
        parsed = parse_source(source, defines or [])
        return render_result(extract_path(parsed, function_name, route))

    _run(build_output)
83
+
84
+
85
def _run(producer: Callable[[], str]) -> None:
    """Run *producer* and echo its output; a CiftError goes to stderr with exit code 1."""
    try:
        output = producer()
    except CiftError as error:
        typer.echo(error.message, err=True)
        raise typer.Exit(code=1) from error
    else:
        typer.echo(output)
cifter/errors.py ADDED
@@ -0,0 +1,4 @@
1
class CiftError(Exception):
    """Base error for cifter; carries a user-facing message."""

    def __init__(self, message: str) -> None:
        super().__init__(message)
        # keep the message accessible without str() for CLI error reporting
        self.message = message
cifter/extract_flow.py ADDED
@@ -0,0 +1,178 @@
1
+ from __future__ import annotations
2
+
3
+ from tree_sitter import Node
4
+
5
+ from cifter.model import ExtractedLine, ExtractionResult, TrackPath
6
+ from cifter.parser import ParsedSource, find_function, function_body, node_text
7
+
8
# Node types treated as control flow by the skeleton extraction.
CONTROL_TYPES = {
    "break_statement",
    "case_statement",
    "continue_statement",
    "do_statement",
    "for_statement",
    "goto_statement",
    "if_statement",
    "labeled_statement",
    "return_statement",
    "switch_statement",
    "while_statement",
}

# Plain statement types eligible for --track matching.
STATEMENT_TYPES = {
    "break_statement",
    "continue_statement",
    "declaration",
    "expression_statement",
    "goto_statement",
    "return_statement",
}
30
+
31
+
32
def extract_flow(parsed: ParsedSource, function_name: str, tracks: tuple[TrackPath, ...]) -> ExtractionResult:
    """Extract the control-flow skeleton of *function_name*, plus tracked statements."""
    function_node = find_function(parsed, function_name)
    body = function_body(function_node)
    keep: set[int] = set()
    # function signature through the body's opening line, plus the closing brace
    _keep_range(keep, function_node.start_point.row + 1, body.start_point.row + 1)
    keep.add(body.end_point.row + 1)
    _collect_from_container(parsed, body, keep, tracks)
    ordered = sorted(keep)
    lines = tuple(
        ExtractedLine(line_no=number, text=parsed.source.line_text(number))
        for number in ordered
    )
    return ExtractionResult(span=parsed.source.span_for_lines(ordered), lines=lines)
45
+
46
+
47
def _collect_from_container(
    parsed: ParsedSource,
    container: Node,
    keep: set[int],
    tracks: tuple[TrackPath, ...],
) -> None:
    """Run statement collection over every statement directly held by *container*."""
    for child in _container_statements(container):
        _collect_statement(parsed, child, keep, tracks)
55
+
56
+
57
def _collect_statement(
    parsed: ParsedSource,
    statement: Node,
    keep: set[int],
    tracks: tuple[TrackPath, ...],
) -> None:
    """Decide which lines of *statement* to keep, recursing into control flow."""
    if statement.type == "switch_statement":
        # keep the switch header through the body's opening line plus the
        # closing brace, then recurse into the body
        body = statement.named_children[-1]
        _keep_range(keep, statement.start_point.row + 1, body.start_point.row + 1)
        keep.add(body.end_point.row + 1)
        _collect_from_container(parsed, body, keep, tracks)
        return
    if statement.type == "case_statement":
        # keep only the `case X:` label line; body statements are filtered below
        keep.add(statement.start_point.row + 1)
        _collect_from_container(parsed, statement, keep, tracks)
        return
    if statement.type == "if_statement":
        _collect_if_chain(parsed, statement, keep, tracks)
        return
    if statement.type in {"for_statement", "while_statement"}:
        # keep the loop header up to the body's opening line
        body = statement.named_children[-1]
        _keep_range(keep, statement.start_point.row + 1, body.start_point.row + 1)
        if body.type == "compound_statement":
            keep.add(body.end_point.row + 1)
            _collect_from_container(parsed, body, keep, tracks)
        else:
            # braceless single-statement body
            _collect_statement(parsed, body, keep, tracks)
        return
    if statement.type == "do_statement":
        # keep the `do` line and the trailing `while (...)` line
        body = statement.named_children[0]
        keep.add(statement.start_point.row + 1)
        keep.add(statement.end_point.row + 1)
        if body.type == "compound_statement":
            keep.add(body.end_point.row + 1)
            _collect_from_container(parsed, body, keep, tracks)
        else:
            _collect_statement(parsed, body, keep, tracks)
        return
    if statement.type == "labeled_statement":
        # keep the `label:` line, then handle the labeled statement itself
        keep.add(statement.start_point.row + 1)
        nested = statement.named_children[-1]
        _collect_statement(parsed, nested, keep, tracks)
        return
    if statement.type in CONTROL_TYPES:
        # remaining control statements (goto/break/continue/return) kept whole
        _keep_range(keep, statement.start_point.row + 1, statement.end_point.row + 1)
        return
    # plain statements are kept only when they mention a tracked access path
    if statement.type in STATEMENT_TYPES and _matches_track(parsed, statement, tracks):
        _keep_range(keep, statement.start_point.row + 1, statement.end_point.row + 1)
105
+
106
+
107
def _collect_if_chain(
    parsed: ParsedSource,
    if_node: Node,
    keep: set[int],
    tracks: tuple[TrackPath, ...],
) -> None:
    """Keep the headers/braces of an if / else-if / else chain, recursing into bodies."""
    # named_children[1] is the consequence (index 0 is the condition)
    consequence = if_node.named_children[1]
    _keep_range(keep, if_node.start_point.row + 1, consequence.start_point.row + 1)
    if consequence.type == "compound_statement":
        keep.add(consequence.end_point.row + 1)
        _collect_from_container(parsed, consequence, keep, tracks)
    else:
        # braceless single-statement consequence
        _collect_statement(parsed, consequence, keep, tracks)

    else_clause = _else_clause(if_node)
    if else_clause is None:
        return
    alternative = else_clause.named_children[-1]
    _keep_range(keep, else_clause.start_point.row + 1, alternative.start_point.row + 1)
    if alternative.type == "if_statement":
        # `else if`: continue down the chain
        _collect_if_chain(parsed, alternative, keep, tracks)
        return
    if alternative.type == "compound_statement":
        keep.add(alternative.end_point.row + 1)
        _collect_from_container(parsed, alternative, keep, tracks)
        return
    _collect_statement(parsed, alternative, keep, tracks)
134
+
135
+
136
+ def _matches_track(parsed: ParsedSource, statement: Node, tracks: tuple[TrackPath, ...]) -> bool:
137
+ if not tracks:
138
+ return False
139
+ candidates = _collect_track_candidates(parsed, statement)
140
+ return any(track.normalized in candidates for track in tracks)
141
+
142
+
143
def _collect_track_candidates(parsed: ParsedSource, node: Node) -> set[str]:
    """Gather access-path strings (field expressions and bare identifiers) under *node*."""
    found: set[str] = set()
    pending = [node]
    while pending:
        current = pending.pop()
        if current.type == "field_expression":
            # normalize `a -> b` to `a->b` so it compares against TrackPath.normalized
            found.add(node_text(parsed.source, current).replace(" ", ""))
        elif (
            current.type == "identifier"
            and current.parent is not None
            and current.parent.type != "field_expression"
        ):
            found.add(node_text(parsed.source, current))
        pending.extend(reversed(current.named_children))
    return found
154
+
155
+
156
+ def _container_statements(container: Node) -> list[Node]:
157
+ if container.type == "compound_statement":
158
+ return list(container.named_children)
159
+ if container.type == "case_statement":
160
+ return [child for child in container.named_children if _is_body_statement(child)]
161
+ if container.type == "labeled_statement":
162
+ return [container.named_children[-1]]
163
+ return [container]
164
+
165
+
166
+ def _is_body_statement(node: Node) -> bool:
167
+ return node.type.endswith("_statement") or node.type == "declaration"
168
+
169
+
170
+ def _else_clause(if_node: Node) -> Node | None:
171
+ for child in if_node.named_children:
172
+ if child.type == "else_clause":
173
+ return child
174
+ return None
175
+
176
+
177
+ def _keep_range(keep: set[int], start_line: int, end_line: int) -> None:
178
+ keep.update(range(start_line, end_line + 1))
@@ -0,0 +1,15 @@
1
+ from __future__ import annotations
2
+
3
+ from cifter.model import ExtractedLine, ExtractionResult
4
+ from cifter.parser import ParsedSource, find_function
5
+
6
+
7
def extract_function(parsed: ParsedSource, name: str) -> ExtractionResult:
    """Return every source line of the function *name* as an ExtractionResult."""
    function_node = find_function(parsed, name)
    # build the inclusive 1-based line list once and reuse it for lines and span
    line_numbers = list(
        range(function_node.start_point.row + 1, function_node.end_point.row + 2)
    )
    lines = tuple(
        ExtractedLine(line_no=number, text=parsed.source.line_text(number))
        for number in line_numbers
    )
    return ExtractionResult(span=parsed.source.span_for_lines(line_numbers), lines=lines)
cifter/extract_path.py ADDED
@@ -0,0 +1,351 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+
5
+ from tree_sitter import Node
6
+
7
+ from cifter.errors import CiftError
8
+ from cifter.model import (
9
+ ExtractedLine,
10
+ ExtractionResult,
11
+ RouteSegment,
12
+ normalize_condition_text,
13
+ parse_route,
14
+ )
15
+ from cifter.parser import ParsedSource, condition_text, find_function, function_body, node_text
16
+
17
+
18
@dataclass(frozen=True)
class _RouteMatch:
    """A route segment resolved against one concrete branch in the parse tree."""

    # statement owning the matched branch (if_statement or switch_statement)
    owner: Node
    # index of *owner* inside its container's statement list
    owner_index: int
    # "if", "else_if", "else" or "case" (default labels also use "case")
    kind: str
    # branch node the route descends into
    branch: Node
    # first/last line of the header to render for this match
    header_start_line: int
    header_end_line: int
    # byte offset at which to cut the closing line (set when an `else` shares it)
    trim_end_byte: int | None = None
    # start byte of the selected node, used to recognize it when re-walking the chain
    selected_start_byte: int | None = None
28
+
29
+
30
def extract_path(parsed: ParsedSource, function_name: str, route: str) -> ExtractionResult:
    """Render only the lines of *function_name* along the branch path given by *route*."""
    function_node = find_function(parsed, function_name)
    segments = parse_route(route)
    body = function_body(function_node)
    rendered: dict[int, str] = {}
    closing = body.end_point.row + 1
    # signature through the body's opening line, plus the closing brace
    _keep_original_range(rendered, parsed, function_node.start_point.row + 1, body.start_point.row + 1)
    rendered[closing] = parsed.source.line_text(closing)
    _collect_path_from_container(parsed, body, segments, rendered)
    ordered = sorted(rendered)
    lines = tuple(ExtractedLine(line_no=number, text=rendered[number]) for number in ordered)
    return ExtractionResult(span=parsed.source.span_for_lines(ordered), lines=lines)
41
+
42
+
43
def _collect_path_from_container(
    parsed: ParsedSource,
    container: Node,
    segments: tuple[RouteSegment, ...],
    rendered: dict[int, str],
) -> None:
    """Render *container* along the branch chosen by the head route segment.

    Recurses with the remaining segments into the matched branch; straight-line
    (non-branching) statements around the match are kept verbatim.
    """
    statements = _container_statements(container)
    match = _find_unique_match(parsed, container, segments[0])
    if not _is_function_body(container):
        # inside a branch: keep the linear statements before the match
        _keep_linear_statements(rendered, parsed, statements[: match.owner_index])

    if match.kind == "case":
        # show the switch header/closing plus the chosen case label line
        _render_switch_context(rendered, parsed, match.owner)
        rendered[match.header_start_line] = parsed.source.line_text(match.header_start_line)
        if len(segments) == 1:
            _keep_full_statement(rendered, parsed, match.branch)
            return
        _collect_path_from_container(parsed, match.branch, segments[1:], rendered)
        return

    _render_if_context(rendered, parsed, match)
    if len(segments) == 1:
        # last segment: keep the whole branch body and the statements after it
        _keep_branch_body(rendered, parsed, match)
        _keep_linear_statements(rendered, parsed, statements[match.owner_index + 1 :])
        return

    _collect_path_from_container(parsed, match.branch, segments[1:], rendered)
    _keep_branch_closing(rendered, parsed, match)
71
+
72
+
73
def _find_unique_match(parsed: ParsedSource, container: Node, segment: RouteSegment) -> _RouteMatch:
    """Resolve *segment* to exactly one branch in *container*, or raise CiftError."""
    candidates = _find_matches(parsed, container, segment)
    if len(candidates) == 1:
        return candidates[0]
    if not candidates:
        raise CiftError(f"route に一致する枝が見つかりません: {segment.raw}")
    raise CiftError(f"route に一致する枝が複数あります: {segment.raw}")
80
+
81
+
82
def _find_matches(parsed: ParsedSource, container: Node, segment: RouteSegment) -> list[_RouteMatch]:
    """Collect every branch in *container* that matches one route segment."""
    matches: list[_RouteMatch] = []
    statements = _container_statements(container)
    for index, statement in enumerate(statements):
        # case / default segments match against the cases of a switch statement
        if segment.kind in {"case", "default"} and statement.type == "switch_statement":
            for case_statement in _switch_cases(statement):
                if segment.kind == "case" and _case_label(parsed, case_statement) == segment.label:
                    matches.append(
                        _RouteMatch(
                            owner=statement,
                            owner_index=index,
                            kind="case",
                            branch=case_statement,
                            header_start_line=case_statement.start_point.row + 1,
                            header_end_line=case_statement.start_point.row + 1,
                            selected_start_byte=case_statement.start_byte,
                        )
                    )
                if segment.kind == "default" and _is_default_case(case_statement):
                    matches.append(
                        _RouteMatch(
                            owner=statement,
                            owner_index=index,
                            kind="case",
                            branch=case_statement,
                            header_start_line=case_statement.start_point.row + 1,
                            header_end_line=case_statement.start_point.row + 1,
                            selected_start_byte=case_statement.start_byte,
                        )
                    )
        # everything below only applies to if statements
        if statement.type != "if_statement":
            continue
        if segment.kind == "if" and _normalized_if_condition(parsed, statement) == segment.condition:
            consequence = statement.named_children[1]
            matches.append(
                _RouteMatch(
                    owner=statement,
                    owner_index=index,
                    kind="if",
                    branch=consequence,
                    header_start_line=statement.start_point.row + 1,
                    header_end_line=consequence.start_point.row + 1,
                    trim_end_byte=_trim_end_byte(statement, consequence),
                    selected_start_byte=statement.start_byte,
                )
            )
        if segment.kind == "else":
            # trailing plain `else` at the end of this statement's chain, if any
            else_match = _final_else_match(statement, index)
            if else_match is not None:
                matches.append(else_match)
        if segment.kind == "else_if":
            matches.extend(_else_if_matches(parsed, statement, index, segment.condition or ""))
    return matches
135
+
136
+
137
def _else_if_matches(
    parsed: ParsedSource,
    if_node: Node,
    owner_index: int,
    condition: str,
) -> list[_RouteMatch]:
    """Walk the else-if chain of *if_node*, matching branches whose condition equals *condition*."""
    matches: list[_RouteMatch] = []
    current = if_node
    while True:
        else_clause = _else_clause(current)
        if else_clause is None:
            return matches
        alternative = else_clause.named_children[-1]
        if alternative.type != "if_statement":
            # a plain `else` terminates the chain
            return matches
        consequence = alternative.named_children[1]
        if _normalized_if_condition(parsed, alternative) == condition:
            matches.append(
                _RouteMatch(
                    owner=if_node,
                    owner_index=owner_index,
                    kind="else_if",
                    branch=consequence,
                    header_start_line=else_clause.start_point.row + 1,
                    header_end_line=consequence.start_point.row + 1,
                    trim_end_byte=_trim_end_byte(alternative, consequence),
                    selected_start_byte=alternative.start_byte,
                )
            )
        current = alternative
167
+
168
+
169
def _final_else_match(if_node: Node, owner_index: int) -> _RouteMatch | None:
    """Follow the else-if chain of *if_node* to its trailing plain `else`, if any."""
    current = if_node
    else_clause = _else_clause(current)
    while else_clause is not None:
        alternative = else_clause.named_children[-1]
        if alternative.type != "if_statement":
            # found the chain-terminating plain `else`
            return _RouteMatch(
                owner=if_node,
                owner_index=owner_index,
                kind="else",
                branch=alternative,
                header_start_line=else_clause.start_point.row + 1,
                header_end_line=alternative.start_point.row + 1,
            )
        current = alternative
        else_clause = _else_clause(current)
    return None
187
+
188
+
189
def _keep_branch_body(rendered: dict[int, str], parsed: ParsedSource, match: _RouteMatch) -> None:
    """Keep the matched branch's whole body (closing line re-rendered when braced)."""
    if match.kind == "case":
        _keep_full_statement(rendered, parsed, match.branch)
        return
    branch = match.branch
    _keep_original_range(rendered, parsed, branch.start_point.row + 1, branch.end_point.row + 1)
    if branch.type == "compound_statement":
        # may overwrite the closing line with a trimmed version
        _keep_branch_closing(rendered, parsed, match)
199
+
200
+
201
def _keep_branch_closing(rendered: dict[int, str], parsed: ParsedSource, match: _RouteMatch) -> None:
    """Render the matched branch's closing line, trimmed at match.trim_end_byte if set."""
    branch, trim = match.branch, match.trim_end_byte
    _keep_compound_closing(rendered, parsed, branch, trim)
203
+
204
+
205
def _keep_full_statement(rendered: dict[int, str], parsed: ParsedSource, statement: Node) -> None:
    """Keep every original line spanned by *statement*."""
    first = statement.start_point.row + 1
    last = statement.end_point.row + 1
    _keep_original_range(rendered, parsed, first, last)
207
+
208
+
209
def _render_switch_context(rendered: dict[int, str], parsed: ParsedSource, switch_node: Node) -> None:
    """Keep the switch header (through the body's opening line) and its closing line."""
    body = switch_node.named_children[-1]
    closing = body.end_point.row + 1
    _keep_original_range(rendered, parsed, switch_node.start_point.row + 1, body.start_point.row + 1)
    rendered[closing] = parsed.source.line_text(closing)
213
+
214
+
215
def _render_if_context(rendered: dict[int, str], parsed: ParsedSource, match: _RouteMatch) -> None:
    """Render the if/else-if headers leading up to the matched branch."""
    if match.kind == "if":
        # a plain `if` match needs only its own header lines
        _keep_original_range(rendered, parsed, match.header_start_line, match.header_end_line)
        return

    # for else / else-if matches, render the owning `if` header and the closing
    # brace of every branch that precedes the match in the chain
    owner = match.owner
    owner_consequence = owner.named_children[1]
    _keep_original_range(rendered, parsed, owner.start_point.row + 1, owner_consequence.start_point.row + 1)
    _keep_compound_closing(
        rendered,
        parsed,
        owner_consequence,
        _trim_end_byte(owner, owner_consequence),
    )

    current = owner
    while True:
        else_clause = _else_clause(current)
        if else_clause is None:
            raise CiftError("else 連鎖を特定できません")

        alternative = else_clause.named_children[-1]
        if match.kind == "else" and alternative.start_byte == match.branch.start_byte:
            # reached the matched trailing `else`
            _keep_original_range(rendered, parsed, else_clause.start_point.row + 1, match.header_end_line)
            return
        if alternative.type != "if_statement":
            raise CiftError("else if 連鎖を特定できません")

        consequence = alternative.named_children[1]
        if match.kind == "else_if" and alternative.start_byte == match.selected_start_byte:
            # reached the matched `else if`
            _keep_original_range(rendered, parsed, else_clause.start_point.row + 1, match.header_end_line)
            return

        # intermediate `else if`: keep only its header and closing brace
        _keep_original_range(rendered, parsed, else_clause.start_point.row + 1, consequence.start_point.row + 1)
        _keep_compound_closing(
            rendered,
            parsed,
            consequence,
            _trim_end_byte(alternative, consequence),
        )
        current = alternative
256
+
257
+
258
+ def _keep_compound_closing(
259
+ rendered: dict[int, str],
260
+ parsed: ParsedSource,
261
+ branch: Node,
262
+ trim_end_byte: int | None,
263
+ ) -> None:
264
+ if branch.type != "compound_statement":
265
+ return
266
+ line_no = branch.end_point.row + 1
267
+ if trim_end_byte is None:
268
+ rendered[line_no] = parsed.source.line_text(line_no)
269
+ return
270
+ rendered[line_no] = parsed.source.slice_from_line_start(line_no, trim_end_byte)
271
+
272
+
273
def _keep_linear_statements(
    rendered: dict[int, str],
    parsed: ParsedSource,
    statements: list[Node],
) -> None:
    """Keep every statement in *statements* that is not itself a branching construct."""
    for node in statements:
        if _is_branching_statement(node):
            continue
        _keep_full_statement(rendered, parsed, node)
281
+
282
+
283
+ def _keep_original_range(
284
+ rendered: dict[int, str],
285
+ parsed: ParsedSource,
286
+ start_line: int,
287
+ end_line: int,
288
+ ) -> None:
289
+ for line_no in range(start_line, end_line + 1):
290
+ rendered[line_no] = parsed.source.line_text(line_no)
291
+
292
+
293
+ def _container_statements(container: Node) -> list[Node]:
294
+ if container.type == "compound_statement":
295
+ return list(container.named_children)
296
+ if container.type == "case_statement":
297
+ return [child for child in container.named_children if _is_body_statement(child)]
298
+ return [container]
299
+
300
+
301
+ def _is_body_statement(node: Node) -> bool:
302
+ return node.type.endswith("_statement") or node.type == "declaration"
303
+
304
+
305
+ def _switch_cases(switch_node: Node) -> list[Node]:
306
+ body = switch_node.named_children[-1]
307
+ return [child for child in body.named_children if child.type == "case_statement"]
308
+
309
+
310
def _case_label(parsed: ParsedSource, case_node: Node) -> str | None:
    """Label text of a `case` statement, or None for `default` / unlabeled cases."""
    if _is_default_case(case_node):
        return None
    for child in case_node.named_children:
        kind = child.type
        if kind == "identifier":
            return node_text(parsed.source, child)
        if kind.endswith("_expression") or kind.endswith("_literal"):
            return node_text(parsed.source, child).strip()
    return None
319
+
320
+
321
+ def _is_default_case(case_node: Node) -> bool:
322
+ return bool(case_node.children) and case_node.children[0].type == "default"
323
+
324
+
325
def _normalized_if_condition(parsed: ParsedSource, if_node: Node) -> str:
    """Whitespace/paren-normalized text of the if statement's condition node."""
    condition_node = if_node.named_children[0]
    raw = condition_text(parsed.source, condition_node)
    return normalize_condition_text(raw)
328
+
329
+
330
+ def _else_clause(if_node: Node) -> Node | None:
331
+ for child in if_node.named_children:
332
+ if child.type == "else_clause":
333
+ return child
334
+ return None
335
+
336
+
337
+ def _trim_end_byte(if_node: Node, branch: Node) -> int | None:
338
+ else_clause = _else_clause(if_node)
339
+ if else_clause is None:
340
+ return None
341
+ if branch.end_point.row != else_clause.start_point.row:
342
+ return None
343
+ return branch.end_byte
344
+
345
+
346
+ def _is_branching_statement(node: Node) -> bool:
347
+ return node.type in {"if_statement", "switch_statement", "for_statement", "while_statement", "do_statement"}
348
+
349
+
350
+ def _is_function_body(node: Node) -> bool:
351
+ return node.type == "compound_statement" and node.parent is not None and node.parent.type == "function_definition"
cifter/model.py ADDED
@@ -0,0 +1,142 @@
1
+ from __future__ import annotations
2
+
3
+ import re
4
+ from dataclasses import dataclass
5
+ from itertools import pairwise
6
+ from pathlib import Path
7
+
8
+ from cifter.errors import CiftError
9
+
10
# Access-path syntax: identifier followed by any number of `->member` / `.member` hops.
TRACK_PATH_PATTERN = re.compile(r"^[A-Za-z_]\w*(?:(?:->|\.)[A-Za-z_]\w*)*$")


@dataclass(frozen=True)
class SourceSpan:
    """Inclusive 1-based line range inside one source file."""

    file: Path
    start_line: int
    end_line: int


@dataclass(frozen=True)
class ExtractedLine:
    """One output line together with its 1-based line number."""

    line_no: int
    text: str


@dataclass(frozen=True)
class ExtractionResult:
    """Lines selected by an extraction plus the span they cover."""

    span: SourceSpan
    lines: tuple[ExtractedLine, ...]


@dataclass(frozen=True)
class TrackPath:
    """A validated --track access path (raw input plus whitespace-free form)."""

    raw: str
    normalized: str

    @classmethod
    def parse(cls, raw: str) -> TrackPath:
        """Validate *raw* against TRACK_PATH_PATTERN; raise CiftError if malformed."""
        candidate = raw.strip()
        if not candidate or TRACK_PATH_PATTERN.fullmatch(candidate) is None:
            raise CiftError(f"不正な --track です: {raw}")
        return cls(raw=candidate, normalized=candidate.replace(" ", ""))
43
+
44
+
45
@dataclass(frozen=True)
class RouteSegment:
    """One element of a --route chain (if / else if / else / case / default)."""

    kind: str
    raw: str
    label: str | None = None
    condition: str | None = None

    @classmethod
    def parse(cls, raw: str) -> RouteSegment:
        """Parse one route element; raise CiftError on empty or unknown syntax."""
        value = raw.strip()
        if not value:
            raise CiftError("空の route 要素は指定できません")
        # keyword-only segments first (exact matches)
        if value == "default":
            return cls(kind="default", raw=value)
        if value == "else":
            return cls(kind="else", raw=value)
        # prefixed segments carry a label or a condition
        if value.startswith("case "):
            label = value[5:].strip()
            if not label:
                raise CiftError(f"不正な --route 要素です: {raw}")
            return cls(kind="case", raw=value, label=label)
        if value.startswith("else if "):
            condition = value[8:].strip()
            if not condition:
                raise CiftError(f"不正な --route 要素です: {raw}")
            return cls(kind="else_if", raw=value, condition=normalize_condition_text(condition))
        if value.startswith("if "):
            condition = value[3:].strip()
            if not condition:
                raise CiftError(f"不正な --route 要素です: {raw}")
            return cls(kind="if", raw=value, condition=normalize_condition_text(condition))
        raise CiftError(f"不正な --route 要素です: {raw}")
81
+
82
+
83
+ def normalize_condition_text(text: str) -> str:
84
+ value = "".join(text.split())
85
+ while value.startswith("(") and value.endswith(")") and _covers_entire_text(value):
86
+ value = value[1:-1]
87
+ return value
88
+
89
+
90
+ def _covers_entire_text(text: str) -> bool:
91
+ depth = 0
92
+ for index, char in enumerate(text):
93
+ if char == "(":
94
+ depth += 1
95
+ elif char == ")":
96
+ depth -= 1
97
+ if depth == 0 and index != len(text) - 1:
98
+ return False
99
+ if depth < 0:
100
+ return False
101
+ return depth == 0
102
+
103
+
104
def parse_route(route: str) -> tuple[RouteSegment, ...]:
    """Split and parse --route; reject empty routes and `else > if` sequences."""
    parts = _split_route(route)
    if not parts:
        raise CiftError("空の --route は指定できません")
    segments = tuple(RouteSegment.parse(part) for part in parts)
    if any(a.kind == "else" and b.kind == "if" for a, b in pairwise(segments)):
        raise CiftError("`else > if ...` は非対応です。`else if ...` を使ってください")
    return segments
113
+
114
+
115
+ def _split_route(route: str) -> list[str]:
116
+ parts: list[str] = []
117
+ current: list[str] = []
118
+ index = 0
119
+ while index < len(route):
120
+ char = route[index]
121
+ if char == ">":
122
+ lookahead = index + 1
123
+ while lookahead < len(route) and route[lookahead].isspace():
124
+ lookahead += 1
125
+ tail = route[lookahead:]
126
+ if current and _starts_route_segment(tail):
127
+ parts.append("".join(current).strip())
128
+ current = []
129
+ index = lookahead
130
+ continue
131
+ current.append(char)
132
+ index += 1
133
+ if current:
134
+ parts.append("".join(current).strip())
135
+ return [part for part in parts if part]
136
+
137
+
138
+ def _starts_route_segment(value: str) -> bool:
139
+ return any(
140
+ value.startswith(prefix)
141
+ for prefix in ("case ", "default", "if ", "else ", "else", "else if ")
142
+ )
cifter/parser.py ADDED
@@ -0,0 +1,138 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ from pathlib import Path
5
+
6
+ import tree_sitter_c
7
+ import tree_sitter_cpp
8
+ from tree_sitter import Language, Node, Parser, Tree
9
+
10
+ from cifter.errors import CiftError
11
+ from cifter.model import SourceSpan
12
+ from cifter.preprocessor import preprocess_source
13
+
14
# File suffixes (lowercased before lookup) that select the C++ grammar.
CPP_EXTENSIONS = {".cc", ".cpp", ".cxx", ".c++", ".hpp", ".hh", ".hxx", ".h++"}
15
+
16
+
17
@dataclass(frozen=True)
class SourceFile:
    """Preprocessed source text plus per-line UTF-8 byte offsets for slicing."""

    path: Path
    text: str
    lines: tuple[str, ...]
    trailing_newline: bool
    line_start_bytes: tuple[int, ...]

    @classmethod
    def from_text(cls, path: Path, text: str) -> SourceFile:
        """Split *text* into lines and record each line's starting byte offset."""
        trailing_newline = text.endswith("\n")
        lines = tuple(text.splitlines())
        last_index = len(lines) - 1
        offsets: list[int] = []
        position = 0
        for index, line in enumerate(lines):
            offsets.append(position)
            position += len(line.encode("utf-8"))
            # every line but the last is newline-terminated; the last one
            # only when the file itself ends with a newline
            if index != last_index or trailing_newline:
                position += 1
        return cls(
            path=path,
            text=text,
            lines=lines,
            trailing_newline=trailing_newline,
            line_start_bytes=tuple(offsets),
        )

    def span_for_lines(self, line_numbers: list[int]) -> SourceSpan:
        """Smallest SourceSpan covering all given 1-based line numbers."""
        return SourceSpan(self.path, min(line_numbers), max(line_numbers))

    def line_text(self, line_no: int) -> str:
        """Text of 1-based line *line_no* (without its newline)."""
        return self.lines[line_no - 1]

    def line_start_byte(self, line_no: int) -> int:
        """UTF-8 byte offset at which 1-based line *line_no* starts."""
        return self.line_start_bytes[line_no - 1]

    def slice_from_line_start(self, line_no: int, end_byte: int) -> str:
        """Decode the bytes from the start of *line_no* up to *end_byte*, right-stripped."""
        begin = self.line_start_byte(line_no)
        return self.text.encode("utf-8")[begin:end_byte].decode("utf-8").rstrip()
56
+
57
+
58
@dataclass(frozen=True)
class ParsedSource:
    """A parsed source file: preprocessed text plus its tree-sitter parse tree."""

    # preprocessed file contents and line/byte bookkeeping
    source: SourceFile
    # tree-sitter parse tree of source.text
    tree: Tree
    # "c" or "cpp", as chosen by _build_parser
    language_name: str
63
+
64
+
65
def parse_source(path: Path, defines: list[str]) -> ParsedSource:
    """Read, preprocess and parse *path*, choosing the C or C++ grammar by suffix."""
    preprocessed = preprocess_source(path.read_text(encoding="utf-8"), defines)
    source = SourceFile.from_text(path, preprocessed)
    parser, language_name = _build_parser(path)
    tree = parser.parse(source.text.encode("utf-8"))
    return ParsedSource(source=source, tree=tree, language_name=language_name)
72
+
73
+
74
def find_function(parsed: ParsedSource, name: str) -> Node:
    """Locate exactly one function definition named *name*; raise CiftError otherwise."""
    matches = [
        node
        for node in _iter_nodes(parsed.tree.root_node)
        if _is_function_named(node, parsed.source, name)
    ]
    if len(matches) == 1:
        return matches[0]
    if not matches:
        raise CiftError(f"関数が見つかりません: {name}")
    raise CiftError(f"同名関数が複数見つかりました: {name}")
81
+
82
+
83
def function_body(function_node: Node) -> Node:
    """Return the compound_statement child that is the function's body."""
    body = next(
        (child for child in function_node.named_children if child.type == "compound_statement"),
        None,
    )
    if body is None:
        raise CiftError("関数本体を特定できません")
    return body
88
+
89
+
90
def node_text(source: SourceFile, node: Node) -> str:
    """Exact source text spanned by *node* (start/end offsets are UTF-8 bytes)."""
    encoded = source.text.encode("utf-8")
    return encoded[node.start_byte:node.end_byte].decode("utf-8")


def condition_text(source: SourceFile, node: Node) -> str:
    """Raw text of a condition node; a thin alias over node_text."""
    return node_text(source, node)
97
+
98
+
99
def _build_parser(path: Path) -> tuple[Parser, str]:
    """Pick the tree-sitter grammar from the file suffix (CPP_EXTENSIONS -> cpp)."""
    is_cpp = path.suffix.lower() in CPP_EXTENSIONS
    if is_cpp:
        return Parser(Language(tree_sitter_cpp.language())), "cpp"
    return Parser(Language(tree_sitter_c.language())), "c"
103
+
104
+
105
+ def _iter_nodes(root: Node) -> list[Node]:
106
+ nodes: list[Node] = []
107
+ stack = [root]
108
+ while stack:
109
+ node = stack.pop()
110
+ nodes.append(node)
111
+ stack.extend(reversed(node.named_children))
112
+ return nodes
113
+
114
+
115
def _is_function_named(node: Node, source: SourceFile, name: str) -> bool:
    """True when *node* is a function_definition whose declarator resolves to *name*."""
    if node.type != "function_definition":
        return False
    # only the first declarator child is considered
    for child in node.named_children:
        if child.type.endswith("declarator"):
            return _extract_declarator_name(source, child) == name
    return False
125
+
126
+
127
def _extract_declarator_name(source: SourceFile, node: Node) -> str | None:
    """Recursively dig the identifier out of a (possibly nested) declarator."""
    if node.type in {"identifier", "field_identifier"}:
        return node_text(source, node)
    if node.type == "qualified_identifier":
        # keep only the final component of e.g. Namespace::Class::method
        return node_text(source, node).split("::")[-1]
    skipped = {"parameter_list", "template_parameter_list"}
    for child in node.named_children:
        if child.type in skipped:
            continue
        found = _extract_declarator_name(source, child)
        if found is not None:
            return found
    return None
cifter/preprocessor.py ADDED
@@ -0,0 +1,132 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+
5
+ from pcpp import Preprocessor
6
+
7
+ from cifter.errors import CiftError
8
+
9
+
10
@dataclass
class _ConditionalFrame:
    """Mutable state for one open #if/#ifdef/#ifndef region."""

    # Whether the enclosing region (everything outside this #if) is active.
    parent_active: bool
    # Whether lines in the currently open branch should be kept.
    current_active: bool
    # Whether any branch of this region has already been taken
    # (guards #elif/#else from re-activating).
    branch_taken: bool
    # Whether #else was seen (rejects duplicate #else and late #elif).
    saw_else: bool = False
16
+
17
+
18
def preprocess_source(source: str, defines: list[str]) -> str:
    """Resolve #if/#ifdef/#ifndef regions in *source*, preserving line numbers.

    Conditional-directive lines and lines inside inactive branches are
    replaced with empty lines, so every surviving line keeps its original
    line number. #define/#undef encountered in active regions update the
    macro table used to evaluate later conditions.

    Args:
        source: Full source text.
        defines: CLI-style macro definitions, "NAME" or "NAME=VALUE".

    Raises:
        CiftError: when a conditional region is left unclosed.
    """
    lines = source.splitlines()
    trailing_newline = source.endswith("\n")
    processor = Preprocessor()
    for define in defines:
        processor.define(_normalize_define(define))

    output: list[str] = []
    stack: list[_ConditionalFrame] = []

    for line in lines:
        directive = _parse_directive(line)
        active = stack[-1].current_active if stack else True

        # Conditional directives are handled even inside inactive regions:
        # the nesting stack must be tracked regardless of whether lines are kept.
        if directive is not None and directive.name in {"if", "ifdef", "ifndef", "elif", "else", "endif"}:
            _handle_conditional_directive(processor, stack, directive)
            output.append("")
            continue

        if not active:
            output.append("")
            continue

        # Only #define/#undef in active regions mutate the macro table.
        if directive is not None and directive.name == "define" and directive.body:
            processor.define(_normalize_define(directive.body))
        elif directive is not None and directive.name == "undef" and directive.body:
            processor.undef(directive.body.strip())

        output.append(line)

    if stack:
        raise CiftError("条件分岐ディレクティブが閉じていません")

    # splitlines() drops the final newline; restore it when the input had one.
    text = "\n".join(output)
    if trailing_newline:
        text += "\n"
    return text
55
+
56
+
57
+ @dataclass(frozen=True)
58
+ class _Directive:
59
+ name: str
60
+ body: str
61
+
62
+
63
+ def _parse_directive(line: str) -> _Directive | None:
64
+ stripped = line.lstrip()
65
+ if not stripped.startswith("#"):
66
+ return None
67
+ body = stripped[1:].lstrip()
68
+ if not body:
69
+ return None
70
+ name, _, tail = body.partition(" ")
71
+ if not tail and "\t" in body:
72
+ name, _, tail = body.partition("\t")
73
+ return _Directive(name=name, body=tail.lstrip())
74
+
75
+
76
def _handle_conditional_directive(
    processor: Preprocessor,
    stack: list[_ConditionalFrame],
    directive: _Directive,
) -> None:
    """Update the conditional-inclusion stack for one #if/#elif/#else/#endif.

    Mutates the top frame in place (frames on the stack are shared objects).
    Condition evaluation is short-circuited away inside inactive regions.
    """
    kind = directive.name

    if kind in {"if", "ifdef", "ifndef"}:
        enclosing_active = stack[-1].current_active if stack else True
        taken = enclosing_active and _evaluate_condition(processor, directive)
        stack.append(
            _ConditionalFrame(
                parent_active=enclosing_active,
                current_active=taken,
                branch_taken=taken,
            )
        )
        return

    if not stack:
        raise CiftError(f"対応する開始ディレクティブがありません: #{kind}")

    frame = stack[-1]
    if kind == "endif":
        stack.pop()
    elif kind == "elif":
        if frame.saw_else:
            raise CiftError("#else の後に #elif は指定できません")
        if frame.branch_taken:
            frame.current_active = False
        else:
            frame.current_active = frame.parent_active and _evaluate_condition(processor, directive)
            frame.branch_taken = frame.current_active
    elif kind == "else":
        if frame.saw_else:
            raise CiftError("#else は 1 回だけ指定できます")
        frame.saw_else = True
        frame.current_active = frame.parent_active and not frame.branch_taken
        frame.branch_taken = True
117
+
118
+
119
def _evaluate_condition(processor: Preprocessor, directive: _Directive) -> bool:
    """Evaluate an #if/#ifdef/#ifndef condition against the current macro table."""
    macro_name = directive.body.strip()
    if directive.name == "ifdef":
        return macro_name in processor.macros
    if directive.name == "ifndef":
        return macro_name not in processor.macros
    # Generic #if / #elif: delegate expression evaluation to pcpp.
    value, _ = processor.evalexpr(processor.tokenize(directive.body))
    return bool(value)
126
+
127
+
128
+ def _normalize_define(value: str) -> str:
129
+ name, separator, body = value.partition("=")
130
+ if not separator:
131
+ return value
132
+ return f"{name.strip()} {body.strip()}"
cifter/render.py ADDED
@@ -0,0 +1,8 @@
1
+ from __future__ import annotations
2
+
3
+ from cifter.model import ExtractionResult
4
+
5
+
6
def render_result(result: ExtractionResult) -> str:
    """Render extracted lines as "NN: text" with right-aligned line numbers.

    Returns "" for an empty result; the previous implementation indexed
    ``result.lines[-1]`` and raised IndexError when nothing was extracted.
    The padding width is taken from the widest line number rather than the
    last one, so unsorted line lists still align correctly.
    """
    if not result.lines:
        return ""
    width = max(len(str(line.line_no)) for line in result.lines)
    return "\n".join(f"{line.line_no:>{width}}: {line.text}" for line in result.lines)
@@ -0,0 +1,201 @@
1
+ Metadata-Version: 2.4
2
+ Name: cifter-cli
3
+ Version: 0.1.0
4
+ Summary: C/C++ の関数実装を抽出する軽量 CLI
5
+ Keywords: c,c++,cpp,cli,tree-sitter,source-extraction,static-analysis
6
+ Author: t-kenji
7
+ Author-email: t-kenji <protect.2501@gmail.com>
8
+ License-Expression: MIT
9
+ License-File: LICENSE
10
+ Classifier: Development Status :: 3 - Alpha
11
+ Classifier: Environment :: Console
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Operating System :: OS Independent
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.12
17
+ Classifier: Programming Language :: C
18
+ Classifier: Programming Language :: C++
19
+ Classifier: Topic :: Software Development
20
+ Requires-Dist: pcpp>=1.30
21
+ Requires-Dist: tree-sitter>=0.25.2
22
+ Requires-Dist: tree-sitter-c>=0.24.1
23
+ Requires-Dist: tree-sitter-cpp>=0.23.4
24
+ Requires-Dist: typer>=0.24.1
25
+ Requires-Python: >=3.12
26
+ Project-URL: Homepage, https://github.com/t-kenji/cifter
27
+ Project-URL: Repository, https://github.com/t-kenji/cifter
28
+ Project-URL: Issues, https://github.com/t-kenji/cifter/issues
29
+ Project-URL: Changelog, https://github.com/t-kenji/cifter/blob/main/CHANGELOG.md
30
+ Description-Content-Type: text/markdown
31
+
32
+ # cifter
33
+
34
+ `cifter` は、C/C++ の関数実装を機械的かつ高速に抽出する CLI です。
35
+ `tree-sitter` で構文を捉え、行番号付き text として返します。重い意味解析や LLM 連携は行いません。
36
+
37
+ ## 概要
38
+
39
+ - 単一の `--source` ファイルから抽出します
40
+ - 公開サブコマンドは `function` / `flow` / `path` の 3 つです
41
+ - 出力は元ソースと対応付け可能な行番号付き text です
42
+ - `-D NAME[=VALUE]` により条件分岐前処理を評価できます
43
+
44
+ ## Why cifter
45
+
46
+ - 関数全体をそのまま抜き出したい
47
+ - 分岐の骨格だけを見たい
48
+ - 特定の route だけを細く追いたい
49
+ - 元の行番号を失わずにレビューや調査へ貼りたい
50
+
51
+ ## Installation
52
+
53
+ PyPI から install:
54
+
55
+ ```sh
56
+ python -m pip install cifter-cli
57
+ ```
58
+
59
+ 最小確認:
60
+
61
+ ```sh
62
+ cift --help
63
+ python -m cifter --help
64
+ ```
65
+
66
+ GitHub Release の `wheel` / `sdist` から install することもできます。
67
+
68
+ ```sh
69
+ python -m pip install ./cifter_cli-0.1.0-py3-none-any.whl
70
+ ```
71
+
72
+ 開発用:
73
+
74
+ ```sh
75
+ uv sync
76
+ uv run cift --help
77
+ ```
78
+
79
+ ## Quick Start
80
+
81
+ サンプルソース:
82
+
83
+ ```c
84
+ int FooFunction(int x)
85
+ {
86
+ if (x > 0) {
87
+ return 1;
88
+ }
89
+
90
+ return 0;
91
+ }
92
+ ```
93
+
94
+ 関数全体を抽出:
95
+
96
+ ```sh
97
+ cift function --name FooFunction --source foo.c
98
+ ```
99
+
100
+ 出力:
101
+
102
+ ```text
103
+ 1: int FooFunction(int x)
104
+ 2: {
105
+ 3: if (x > 0) {
106
+ 4: return 1;
107
+ 5: }
108
+ 6:
109
+ 7: return 0;
110
+ 8: }
111
+ ```
112
+
113
+ ## Commands
114
+
115
+ `function`:
116
+ 指定した関数の実装全体をそのまま抽出します。レビュー対象の最小切り出しに向きます。
117
+
118
+ ```sh
119
+ cift function --name FooFunction --source examples/demo.c
120
+ ```
121
+
122
+ `flow`:
123
+ 制御構造の骨格だけを残します。`--track` を付けると、完全一致したアクセスパスを含む文を追加保持します。
124
+
125
+ ```sh
126
+ cift flow --function FooFunction --source examples/demo.c --track state
127
+ cift flow --function FooFunction --source examples/demo.c --track 'ctx->state'
128
+ ```
129
+
130
+ `path`:
131
+ 指定した route だけを細く抽出します。親構造は残し、route 終端に達したコンテナでは後続の通常文も残します。
132
+
133
+ ```sh
134
+ cift path --function FooFunction --source examples/demo.c --route 'case CMD_HOGE > if ret == OK'
135
+ cift path --function FooFunction --source examples/demo.c --route 'case CMD_HOGE > else if errno == EINT'
136
+ cift path --function ElseRoute --source examples/demo.c --route 'else'
137
+ ```
138
+
139
+ ## Preprocessor / Track / Route
140
+
141
+ `-D`:
142
+ 条件分岐前処理の評価に使うマクロを追加します。
143
+
144
+ ```sh
145
+ cift function --name FooFunction --source examples/demo.c -D DEF_FOO -D ENABLE_BAR=1
146
+ ```
147
+
148
+ `--track`:
149
+ `flow` で保持したいアクセスパスです。構文上の完全一致だけを扱います。
150
+
151
+ - `state`
152
+ - `ctx->state`
153
+ - `a->b.c`
154
+
155
+ `--route`:
156
+ `path` で辿る最小 DSL です。
157
+
158
+ - `case CMD_HOGE`
159
+ - `case CMD_HOGE > if ret == OK`
160
+ - `case CMD_HOGE > else if errno == EINT`
161
+ - `default`
162
+ - `else`
163
+
164
+ ## Limitations
165
+
166
+ - 対象は C/C++ のみです
167
+ - 入力は単一ファイルのみです
168
+ - 出力形式は text のみです
169
+ - 入力文字コードは UTF-8 前提です
170
+ - `.h` は現状 C 扱いです
171
+ - `--route` は `case` / `default` / `if` / `else` / `else if` のみ対応です
172
+ - `--track` は名前解決やスコープ解析を行いません
173
+ - ループ経路、`goto` 横断、意味解析、CFG 構築、JSON 出力は対象外です
174
+
175
+ ## Examples
176
+
177
+ リポジトリには `examples/demo.c` を含めています。
178
+
179
+ ```sh
180
+ cift function --name FooFunction --source examples/demo.c
181
+ cift flow --function FooFunction --source examples/demo.c --track 'ctx->state'
182
+ cift path --function FooFunction --source examples/demo.c --route 'case CMD_LOOP > if ret == OK'
183
+ ```
184
+
185
+ ## Development
186
+
187
+ 開発者向け文書は `docs/` にまとめています。
188
+
189
+ - [docs/overview.md](/home/tkenji/Repos/cifter/docs/overview.md)
190
+ - [docs/cli.md](/home/tkenji/Repos/cifter/docs/cli.md)
191
+ - [docs/output-format.md](/home/tkenji/Repos/cifter/docs/output-format.md)
192
+ - [docs/pipeline.md](/home/tkenji/Repos/cifter/docs/pipeline.md)
193
+ - [docs/data-model.md](/home/tkenji/Repos/cifter/docs/data-model.md)
194
+ - [docs/architecture.md](/home/tkenji/Repos/cifter/docs/architecture.md)
195
+ - [docs/release.md](/home/tkenji/Repos/cifter/docs/release.md)
196
+
197
+ 仕様の正本は `docs/specs/` にあります。
198
+
199
+ ## License
200
+
201
+ MIT License で配布します。詳細は `LICENSE` を参照してください。
@@ -0,0 +1,16 @@
1
+ cifter/__init__.py,sha256=68T9Qi3STMn4RIJnaB5LxEMDE1M2ODGPRAhl-FLJBQo,48
2
+ cifter/__main__.py,sha256=gO8rJ7Ta_JUJqXjSGrshtxWJfafLZod1S8pha5SEO6M,67
3
+ cifter/cli.py,sha256=uMJzJjUnUwvH47fIrBgr68AypMyzD2TssJoeEFcaJ1k,2664
4
+ cifter/errors.py,sha256=Q3yAAXBFK86c7IZgP-SZXDh-H0_AMr5oIshvZNGPwSE,139
5
+ cifter/extract_flow.py,sha256=DZHKIXRfatbvb2Lgqgy_UbD9LrAIozmusVrQqinpe7I,6403
6
+ cifter/extract_function.py,sha256=7xEeEspOlp5gDG0F7Uz7_AkBCJvutelqEvhgUquoNU4,643
7
+ cifter/extract_path.py,sha256=W0m8KpkpWYXWvDGCffHQXFl2zpveI2YYHlUX83WZjMc,13469
8
+ cifter/model.py,sha256=vxwjyQg-iTqjq1nGFsQEihIOb69gcjlTSfjFFihPsiE,4234
9
+ cifter/parser.py,sha256=48-eh3pVoVrWfC-SeMkzmzelhRCDOmRR_Qf0U3CEThE,4502
10
+ cifter/preprocessor.py,sha256=hliAakYaPHS40_xhLiAFoFcGRakkXHpYEMZpZFiohm8,3981
11
+ cifter/render.py,sha256=xvfB-2FrMdOl2ccaz1XBSM86veHL9RJpu9RiceF0khA,266
12
+ cifter_cli-0.1.0.dist-info/licenses/LICENSE,sha256=E53cO1q07FMYhxvM3JtMzhEx47_29e-090jiA1tTWxs,1064
13
+ cifter_cli-0.1.0.dist-info/WHEEL,sha256=Sb1dMJuf3wy6TqB8bzcqZpk8WSKCV8HbGz39HaP5dwE,81
14
+ cifter_cli-0.1.0.dist-info/entry_points.txt,sha256=C6lSWuf3ymAsnFoMp-VJG1_veGSVqL81-g8brdJpwqA,38
15
+ cifter_cli-0.1.0.dist-info/METADATA,sha256=ejqFOg5xuXGXXnB4GObKHIzHRU0svRxJS23BWYDz1lo,5621
16
+ cifter_cli-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: uv 0.10.10
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,3 @@
1
+ [console_scripts]
2
+ cift = cifter:main
3
+
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 t-kenji
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.