aicodestat 0.0.1 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aicodestat-0.0.1/PKG-INFO +110 -0
- aicodestat-0.0.1/README.md +93 -0
- aicodestat-0.0.1/aicodestat.egg-info/PKG-INFO +110 -0
- aicodestat-0.0.1/aicodestat.egg-info/SOURCES.txt +40 -0
- aicodestat-0.0.1/aicodestat.egg-info/dependency_links.txt +1 -0
- aicodestat-0.0.1/aicodestat.egg-info/entry_points.txt +5 -0
- aicodestat-0.0.1/aicodestat.egg-info/requires.txt +11 -0
- aicodestat-0.0.1/aicodestat.egg-info/top_level.txt +10 -0
- aicodestat-0.0.1/cli/__init__.py +2 -0
- aicodestat-0.0.1/cli/exporter.py +111 -0
- aicodestat-0.0.1/cli/main.py +213 -0
- aicodestat-0.0.1/cli/menus.py +540 -0
- aicodestat-0.0.1/cli/views.py +277 -0
- aicodestat-0.0.1/compute/__init__.py +2 -0
- aicodestat-0.0.1/compute/cache.py +90 -0
- aicodestat-0.0.1/compute/diff_engine.py +69 -0
- aicodestat-0.0.1/compute/lcs_engine.py +73 -0
- aicodestat-0.0.1/compute/metrics_service.py +362 -0
- aicodestat-0.0.1/config.py +120 -0
- aicodestat-0.0.1/local_mcp_server.py +260 -0
- aicodestat-0.0.1/logging_config.py +68 -0
- aicodestat-0.0.1/main.py +164 -0
- aicodestat-0.0.1/mcp/__init__.py +2 -0
- aicodestat-0.0.1/mcp/agent_adapter.py +69 -0
- aicodestat-0.0.1/mcp/api_schemas.py +26 -0
- aicodestat-0.0.1/mcp/routes_after.py +121 -0
- aicodestat-0.0.1/mcp/routes_before.py +68 -0
- aicodestat-0.0.1/mcp/routes_tools.py +100 -0
- aicodestat-0.0.1/pyproject.toml +38 -0
- aicodestat-0.0.1/service_manager.py +221 -0
- aicodestat-0.0.1/setup.cfg +4 -0
- aicodestat-0.0.1/storage/__init__.py +2 -0
- aicodestat-0.0.1/storage/backup.py +185 -0
- aicodestat-0.0.1/storage/db.py +156 -0
- aicodestat-0.0.1/storage/models.py +338 -0
- aicodestat-0.0.1/storage/scheduler.py +111 -0
- aicodestat-0.0.1/tests/test_api.py +101 -0
- aicodestat-0.0.1/tests/test_compute.py +51 -0
- aicodestat-0.0.1/tests/test_storage.py +108 -0
- aicodestat-0.0.1/utils/__init__.py +2 -0
- aicodestat-0.0.1/utils/port_utils.py +59 -0
- aicodestat-0.0.1/utils/time_utils.py +37 -0
aicodestat-0.0.1/PKG-INFO
@@ -0,0 +1,110 @@
+Metadata-Version: 2.4
+Name: aicodestat
+Version: 0.0.1
+Summary: A local-first metrics tool that analyzes how you use AI coding assistants.
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+Requires-Dist: fastapi>=0.104.0
+Requires-Dist: uvicorn[standard]>=0.24.0
+Requires-Dist: pydantic>=2.0.0
+Requires-Dist: rich>=13.0.0
+Requires-Dist: questionary>=2.0.0
+Requires-Dist: httpx>=0.25.0
+Requires-Dist: mcp>=1.0.0
+Provides-Extra: dev
+Requires-Dist: pytest>=7.4.0; extra == "dev"
+Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
+
+## CodeStat · AI Code Metrics
+
+> Quantify how much AI actually contributes to your codebase.
+
+[](https://github.com/2hangchen/CodeStat/actions)
+[](LICENSE)
+
+`CodeStat` is a local metrics tool that analyzes how you use AI coding assistants:
+how many lines are generated by AI, how many are kept, and how this evolves over time.
+
+> For the Chinese documentation, see [`README.zh-CN.md`](./README.zh-CN.md)
+
+---
+
+## Features
+
+- **Global dashboard for all data**
+  - AI generated lines, adopted lines, adoption & generation rates
+  - File count, session count, quick bar chart overview
+
+- **Multi‑dimension queries**
+  - **By file**: see how much of a file comes from AI and how much you kept
+  - **By session**: analyze one coding session with detailed diff lines
+  - **By project**: aggregate metrics for an entire repository
+
+- **Agent / model comparison**
+  - Compare multiple sessions (agents / models / settings) side‑by‑side
+  - See which one actually produces more adopted code instead of just more tokens
+
+- **Local‑first & privacy‑friendly**
+  - All metrics are computed locally from your own diffs
+  - No source code or prompts are sent to any remote service
+
+- **Nice CLI UX**
+  - Rich‑based tables & colors, arrow‑key navigation
+  - Minimal but informative header (MCP status + repo info)
+
+---
+
+## Demo
+
+> TODO: add real screenshots / GIFs from your terminal
+
+- **Global dashboard**
+
+  *(insert GIF or screenshot here)*
+
+- **Session metrics with diff lines**
+
+  *(insert GIF or screenshot here)*
+
+---
+
+## Quickstart
+
+### Install
+
+```bash
+git clone https://github.com/2hangchen/CodeStat.git
+cd CodeStat
+pip install -r requirements.txt
+```
+
+> Once published to PyPI you can alternatively run:
+> `pip install codestat-ai`
+
+### Start the CLI
+
+```bash
+python .\cli\main.py
+```
+
+Use `↑/↓` to move, `Enter` to confirm.
+Choose **“📈 Global Dashboard (All Data)”** to see an overview of your local metrics.
+
+---
+
+## Typical Workflows
+
+- **Measure your own AI usage**
+  - Record one or more coding sessions with your IDE + MCP server
+  - Run `CodeStat` and inspect:
+    - AI generated vs adopted lines
+    - Which files receive the most AI help
+
+- **Compare agents / models / prompts**
+  - Map different sessions to different agents / models
+  - Use **Compare Agents** to get a per‑session comparison table
+
+- **Project‑level health check**
+  - For a given repo, run project metrics to see:
+    - Where AI contributes the most
+    - Whether AI‑generated code is actually being kept
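The README above reports "adoption & generation rates" without defining them, and the defining code (`compute/metrics_service.py`) is not included in this excerpt. A minimal sketch of one plausible reading, assuming adoption rate = adopted AI lines / AI-generated lines and generation rate = AI-generated lines / total changed lines, with key names borrowed from `cli/exporter.py` further down in this diff:

```python
# Assumed definitions only; the package's actual formulas live in
# compute/metrics_service.py, which is not part of this excerpt.

def adoption_rate(adopted_lines: int, ai_total_lines: int) -> float:
    """Percentage of AI-generated lines that were actually kept."""
    return 100.0 * adopted_lines / ai_total_lines if ai_total_lines else 0.0

def generation_rate(ai_total_lines: int, total_changed_lines: int) -> float:
    """Percentage of all changed lines that came from the AI assistant."""
    return 100.0 * ai_total_lines / total_changed_lines if total_changed_lines else 0.0
```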
aicodestat-0.0.1/README.md
@@ -0,0 +1,93 @@
(93 added lines, identical to the long description embedded in aicodestat-0.0.1/PKG-INFO above)
aicodestat-0.0.1/aicodestat.egg-info/PKG-INFO
@@ -0,0 +1,110 @@
(110 added lines, identical to aicodestat-0.0.1/PKG-INFO above)
aicodestat-0.0.1/aicodestat.egg-info/SOURCES.txt
@@ -0,0 +1,40 @@
+README.md
+config.py
+local_mcp_server.py
+logging_config.py
+main.py
+pyproject.toml
+service_manager.py
+aicodestat.egg-info/PKG-INFO
+aicodestat.egg-info/SOURCES.txt
+aicodestat.egg-info/dependency_links.txt
+aicodestat.egg-info/entry_points.txt
+aicodestat.egg-info/requires.txt
+aicodestat.egg-info/top_level.txt
+cli/__init__.py
+cli/exporter.py
+cli/main.py
+cli/menus.py
+cli/views.py
+compute/__init__.py
+compute/cache.py
+compute/diff_engine.py
+compute/lcs_engine.py
+compute/metrics_service.py
+mcp/__init__.py
+mcp/agent_adapter.py
+mcp/api_schemas.py
+mcp/routes_after.py
+mcp/routes_before.py
+mcp/routes_tools.py
+storage/__init__.py
+storage/backup.py
+storage/db.py
+storage/models.py
+storage/scheduler.py
+tests/test_api.py
+tests/test_compute.py
+tests/test_storage.py
+utils/__init__.py
+utils/port_utils.py
+utils/time_utils.py
aicodestat-0.0.1/aicodestat.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
aicodestat-0.0.1/cli/exporter.py
@@ -0,0 +1,111 @@
+"""Export query results to CSV/JSON"""
+import json
+import csv
+import logging
+from pathlib import Path
+from typing import Dict, Any, List
+from datetime import datetime
+from utils.time_utils import format_datetime
+
+logger = logging.getLogger(__name__)
+
+
+def export_to_json(data: Dict[str, Any], output_path: str) -> bool:
+    """
+    Export data as JSON format
+
+    Args:
+        data: Data to export
+        output_path: Output file path
+
+    Returns:
+        Whether successful
+    """
+    try:
+        output_file = Path(output_path)
+        output_file.parent.mkdir(parents=True, exist_ok=True)
+
+        export_data = {
+            "export_time": format_datetime(datetime.now()),
+            "data": data
+        }
+
+        with open(output_file, 'w', encoding='utf-8') as f:
+            json.dump(export_data, f, ensure_ascii=False, indent=2)
+
+        logger.info(f"Data exported to JSON: {output_path}")
+        return True
+    except Exception as e:
+        logger.error(f"Failed to export to JSON: {e}")
+        return False
+
+
+def export_to_csv(metrics: Dict[str, Any], output_path: str) -> bool:
+    """
+    Export metrics data as CSV format
+
+    Args:
+        metrics: Metrics data
+        output_path: Output file path
+
+    Returns:
+        Whether successful
+    """
+    try:
+        output_file = Path(output_path)
+        output_file.parent.mkdir(parents=True, exist_ok=True)
+
+        with open(output_file, 'w', newline='', encoding='utf-8') as f:
+            writer = csv.writer(f)
+
+            # Write header row
+            writer.writerow(["Metric Name", "Value"])
+
+            # Write metrics data
+            writer.writerow(["AI Generated Lines", metrics.get("ai_total_lines", 0)])
+            writer.writerow(["Adopted Lines", metrics.get("adopted_lines", 0)])
+            writer.writerow(["Code Adoption Rate (%)", metrics.get("adoption_rate", 0.0)])
+            writer.writerow(["Code Generation Rate (%)", metrics.get("generation_rate", 0.0)])
+
+            if "file_count" in metrics:
+                writer.writerow(["Files Involved", metrics.get("file_count", 0)])
+
+            if "session_count" in metrics:
+                writer.writerow(["Sessions", metrics.get("session_count", 0)])
+
+            # If there are diff lines details, write them
+            diff_lines = metrics.get("diff_lines", [])
+            if diff_lines:
+                writer.writerow([])  # Empty row
+                writer.writerow(["Diff Lines Details"])
+                writer.writerow(["Diff Type", "Line Number", "Code Content"])
+                for diff_line in diff_lines:
+                    writer.writerow([
+                        diff_line.get("diff_type", ""),
+                        diff_line.get("line_number", ""),
+                        diff_line.get("line_content", "")
+                    ])
+
+        logger.info(f"Data exported to CSV: {output_path}")
+        return True
+    except Exception as e:
+        logger.error(f"Failed to export to CSV: {e}")
+        return False
+
+
+def export_metrics(metrics: Dict[str, Any], output_path: str, format: str = "json") -> bool:
+    """
+    Export metrics data
+
+    Args:
+        metrics: Metrics data
+        output_path: Output file path
+        format: Export format (json or csv)
+
+    Returns:
+        Whether successful
+    """
+    if format.lower() == "csv":
+        return export_to_csv(metrics, output_path)
+    else:
+        return export_to_json(metrics, output_path)