chatgpt-to-markdown 1.5.5 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +22 -0
- package/index.js +73 -56
- package/package.json +11 -8
- package/thinktime.js +281 -0
- package/index.test.js +0 -380
package/README.md
CHANGED
@@ -18,6 +18,26 @@ npx chatgpt-to-markdown path/to/your/conversations.json
 
 This will generate one Markdown file for each chat same directory as the conversations JSON file. The file name will be the chat title, with invalid filename characters replaced by spaces.
 
+## Thinking Time Analysis
+
+Analyze thinking time statistics from your ChatGPT conversations:
+
+```bash
+npx -p chatgpt-to-markdown thinktime path/to/your/conversations.json
+```
+
+This will analyze all conversations and show statistics about thinking/reasoning time, including:
+
+- Total conversations with thinking
+- Total thinking time and blocks
+- Distribution of thinking times
+- Top conversations by thinking time
+- Top individual thinking blocks
+
+Optional flag:
+
+- `--save-details`: Save detailed results to `thinking_analysis_results_node.json`
+
 ## Example
 
 Here's an example of the Markdown output for a chat with the title `Medium-Style Table CSS`:
@@ -97,6 +117,8 @@ git push --follow-tags
 
 ## Release notes
 
+- 1.7.0: 29 Jun 2025. Add thinktime analysis tool as npx executable. Analyze thinking/reasoning time statistics from ChatGPT conversations.
+- 1.6.0: 18 Jul 2025. Handle `thoughts`, `reasoning_recap`, `sonic_webpage`. Include projects
 - 1.5.5: 02 Nov 2024. Add conversation link. Use conversation ID as fallback title if title is empty.
 - 1.5.4: 02 Nov 2024. Skip `user_editable_context` to avoid polluting Markdown with custom instructions
 - 1.5.3: 05 Aug 2024. Show text from multimodal prompts
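For illustration (not part of the published README), the analysis script reads the JSON path from its first argument and checks the argument after it for `--save-details`, so a run that also writes the detailed JSON report looks like this:

```bash
# Prints the summary statistics and, because of --save-details, also writes
# thinking_analysis_results_node.json to the current working directory.
npx -p chatgpt-to-markdown thinktime path/to/your/conversations.json --save-details
```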
package/index.js
CHANGED
@@ -38,6 +38,74 @@ function indent(str) {
     .join("");
 }
 
+function blockquote(str) {
+  return str
+    .split("\n")
+    .map((v) => (v ? `> ${v}` : ">"))
+    .join("\n");
+}
+
+function nodeToMarkdown(node) {
+  try {
+    const content = node.message?.content;
+    if (!content) return "";
+    let body;
+    switch (content.content_type) {
+      case "text":
+        body = content.parts.join("\n");
+        break;
+      case "code":
+        body = "```" + content.language.replace("unknown", "") + "\n" + content.text + "\n```";
+        break;
+      case "execution_output":
+        body = "```\n" + content.text + "\n```";
+        break;
+      case "multimodal_text":
+        body = content.parts
+          .map((part) =>
+            typeof part == "string"
+              ? `${part}\n\n`
+              : part.content_type === "image_asset_pointer"
+                ? `Image (${part.width}x${part.height}): ${part?.metadata?.dalle?.prompt ?? ""}\n\n`
+                : `${part.content_type}\n\n`,
+          )
+          .join("");
+        break;
+      case "tether_browsing_display":
+        body = "```\n" + (content.summary ? `${content.summary}\n` : "") + content.result + "\n```";
+        break;
+      case "tether_quote":
+        body = blockquote(`${content.title} (${content.url})\n\n${content.text}`);
+        break;
+      case "system_error":
+        body = `${content.name}\n\n${content.text}\n\n`;
+        break;
+      case "user_editable_context":
+        body = "";
+        break;
+      case "thoughts":
+        body = content.thoughts.map((t) => `##### ${t.summary}\n\n${t.content}\n`).join("\n");
+        break;
+      case "reasoning_recap":
+        body = blockquote(content.content);
+        break;
+      case "sonic_webpage":
+        body = "```\n" + `${content.title} (${content.url})\n\n${content.text}` + "\n```";
+        break;
+      default:
+        body = String(content);
+    }
+    if (!body.trim()) return "";
+    const author = node.message.author;
+    if (author.role == "user") body = indent(body);
+    if (author.role == "tool" && !body.startsWith("```") && !body.endsWith("```")) body = indent(body);
+    return `## ${author.role}${author.name ? ` (${author.name})` : ""}\n\n${body}\n\n`;
+  } catch (err) {
+    err.message += `\nNode: ${JSON.stringify(node)}`;
+    throw err;
+  }
+}
+
 const dateFormat = new Intl.DateTimeFormat("en-US", {
   day: "numeric",
   month: "short",
@@ -73,65 +141,14 @@ async function chatgptToMarkdown(json, sourceDir, { dateFormat } = { dateFormat:
     const fileName = `${sanitizedTitle}.md`;
     const filePath = path.join(sourceDir, fileName);
     const title = `# ${wrapHtmlTagsInBackticks(conversation.title)}\n`;
-    const
+    const lines = [
       `- Created: ${dateFormat(new Date(conversation.create_time * 1000))}\n`,
       `- Updated: ${dateFormat(new Date(conversation.update_time * 1000))}\n`,
       `- Link: https://chatgpt.com/c/${conversation.conversation_id}\n`,
-    ]
-
-
-
-        if (!content) return "";
-        // Format the body based on the content type
-        let body;
-        switch (content.content_type) {
-          case "text":
-            body = content.parts.join("\n");
-            break;
-          case "code":
-            body = "```" + content.language.replace("unknown", "") + "\n" + content.text + "\n```";
-            break;
-          case "execution_output":
-            body = "```\n" + content.text + "\n```";
-            break;
-          case "multimodal_text":
-            body = content.parts
-              .map((part) =>
-                typeof part == "string"
-                  ? `${part}\n\n`
-                  : part.content_type === "image_asset_pointer"
-                    ? `Image (${part.width}x${part.height}): ${part?.metadata?.dalle?.prompt ?? ""}\n\n`
-                    : `${part.content_type}\n\n`,
-              )
-              .join("");
-            break;
-          case "tether_browsing_display":
-            body = "```\n" + (content.summary ? `${content.summary}\n` : "") + content.result + "\n```";
-            break;
-          case "tether_quote":
-            body = "```\n" + `${content.title} (${content.url})\n\n${content.text}` + "\n```";
-            break;
-          case "system_error":
-            body = `${content.name}\n\n${content.text}\n\n`;
-            break;
-          case "user_editable_context":
-            // We don't want to pollute all Markdown with custom instuctions
-            // in content.user_instructions. So skip it
-            body = "";
-            break;
-          default:
-            body = content;
-            break;
-        }
-        // Ignore empty content
-        if (!body.trim()) return "";
-        // Indent user / tool messages. The sometimes contain code and whitespaces are relevant
-        const author = node.message.author;
-        if (author.role == "user") body = indent(body);
-        if (author.role == "tool" && !body.startsWith("```") && !body.endsWith("```")) body = indent(body);
-        return `## ${author.role}${author.name ? ` (${author.name})` : ""}\n\n${body}\n\n`;
-      })
-      .join("");
+    ];
+    if (conversation.gizmo_id) lines.push(`- Project: https://chatgpt.com/g/${conversation.gizmo_id}/project\n`);
+    const metadata = lines.join("");
+    const messages = Object.values(conversation.mapping).map(nodeToMarkdown).join("");
     const markdownContent = `${title}\n${metadata}\n${messages}`;
     await fs.writeFile(filePath, markdownContent, "utf8");
     await fs.utimes(filePath, conversation.update_time, conversation.create_time);
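The net effect of the two hunks above is that per-message rendering moves into the top-level `nodeToMarkdown(node)` helper (now covering `thoughts`, `reasoning_recap`, and `sonic_webpage`), while `chatgptToMarkdown` only assembles the metadata lines and joins the rendered nodes. A minimal sketch of driving the exported function directly, the same way the removed tests did; the package's real entry point is `cli.js`, which this diff does not show, so treat the wiring below as illustrative only:

```js
// sketch.mjs - assumes a ChatGPT export at ./conversations.json
import { promises as fs } from "fs";
import chatgptToMarkdown from "chatgpt-to-markdown";

const source = "conversations.json";
const conversations = JSON.parse(await fs.readFile(source, "utf8"));

// Writes one "<sanitized title>.md" per conversation into the given directory,
// optionally with a custom date formatter as the third argument.
await chatgptToMarkdown(conversations, ".", { dateFormat: (date) => date.toISOString() });
```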
package/package.json
CHANGED
@@ -1,15 +1,22 @@
 {
   "name": "chatgpt-to-markdown",
-  "version": "1.
+  "version": "1.7.0",
   "description": "Convert ChatGPT exported conversations.json to Markdown",
   "main": "index.js",
   "type": "module",
+  "files": [
+    "cli.js",
+    "index.js",
+    "thinktime.js",
+    "README.md"
+  ],
   "bin": {
-    "chatgpt-to-markdown": "cli.js"
+    "chatgpt-to-markdown": "cli.js",
+    "thinktime": "thinktime.js"
   },
   "scripts": {
-    "prepublishOnly": "npx prettier --write *.js *.md package.json && npm test",
-    "test": "
+    "prepublishOnly": "npx -y prettier@3.5 --write *.js *.md package.json && npm test",
+    "test": "npx -y vitest@3 run --globals"
   },
   "keywords": [
     "chatgpt",
@@ -20,9 +27,5 @@
   "license": "MIT",
   "prettier": {
     "printWidth": 120
-  },
-  "devDependencies": {
-    "jest": "^29.7.0",
-    "prettier": "^3.2.5"
   }
 }
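Packaging notes on the hunk above: the new `files` whitelist limits the published tarball to `cli.js`, `index.js`, `thinktime.js`, and the README (plus the files npm always includes), and the second `bin` entry is what lets npx resolve the `thinktime` command. As a usage note (standard npm behaviour, not something shown in this diff), a global install puts both executables on the PATH:

```bash
npm install -g chatgpt-to-markdown
chatgpt-to-markdown path/to/your/conversations.json   # convert to Markdown
thinktime path/to/your/conversations.json             # thinking time analysis
```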
package/thinktime.js
ADDED
@@ -0,0 +1,281 @@
+#!/usr/bin/env node
+/**
+ * Thinking Time Analysis for ChatGPT Conversations (Node.js version)
+ *
+ * This program analyzes a conversations.json file to calculate thinking time for each conversation.
+ *
+ * APPROACH:
+ * 1. The JSON contains conversations with a "mapping" field containing message nodes
+ * 2. Thinking is identified by metadata field "reasoning_status" with values:
+ *    - "is_reasoning": indicates the model is currently thinking/reasoning
+ *    - "reasoning_ended": indicates thinking has completed
+ * 3. Time measurement uses "finished_duration_sec" field in metadata, which represents
+ *    actual generation time (not wall time including user wait time)
+ * 4. Each conversation is analyzed to find thinking blocks and sum their durations
+ *
+ * KEY FINDINGS FROM DATA ANALYSIS:
+ * - reasoning_status="is_reasoning" marks start of thinking
+ * - reasoning_status="reasoning_ended" marks end with "finished_duration_sec"
+ * - finished_duration_sec contains the actual model generation time in seconds
+ * - Not all conversations have thinking - many are regular chat without reasoning
+ */
+
+import { promises as fs } from "fs";
+
+function findPrecedingUserMessage(mapping, targetNodeId) {
+  /**
+   * Find the user message that precedes a thinking block.
+   */
+  const targetNode = mapping[targetNodeId] || {};
+  const parentId = targetNode.parent;
+
+  // Traverse up the conversation tree to find the last user message
+  const visited = new Set();
+  let currentId = parentId;
+
+  while (currentId && !visited.has(currentId)) {
+    visited.add(currentId);
+    const currentNode = mapping[currentId] || {};
+    const message = currentNode.message || {};
+
+    if (message) {
+      const author = message.author || {};
+      if (author.role === "user") {
+        // Found user message, extract content
+        const content = message.content || {};
+        const parts = content.parts || [];
+        if (parts.length > 0 && typeof parts[0] === "string") {
+          const text = parts[0].replace(/\s+/g, " ").trim();
+          return text.length > 200 ? text.substring(0, 200) + "..." : text;
+        }
+      }
+    }
+
+    // Move to parent
+    currentId = currentNode.parent;
+  }
+
+  return "No preceding user message found";
+}
+
+function analyzeConversationThinking(conversation) {
+  /**
+   * Analyze a single conversation for thinking time.
+   *
+   * Returns:
+   * - totalThinkingTime: Total seconds spent thinking
+   * - thinkingBlocks: Number of separate thinking sessions
+   * - thinkingDetails: Array of thinking block details with preceding user messages
+   */
+  const mapping = conversation.mapping || {};
+  let totalThinkingTime = 0.0;
+  let thinkingBlocks = 0;
+  const thinkingDetails = [];
+
+  for (const [nodeId, node] of Object.entries(mapping)) {
+    const message = node.message || {};
+    if (!message) continue;
+
+    const metadata = message.metadata || {};
+    const reasoningStatus = metadata.reasoning_status;
+
+    if (reasoningStatus === "reasoning_ended") {
+      // This node marks the end of a thinking block
+      const duration = metadata.finished_duration_sec || 0;
+      if (duration && duration > 0) {
+        totalThinkingTime += duration;
+        thinkingBlocks++;
+
+        // Find the user message that preceded this thinking block
+        const precedingMessage = findPrecedingUserMessage(mapping, nodeId);
+
+        thinkingDetails.push({
+          nodeId: nodeId,
+          durationSec: duration,
+          createTime: message.create_time,
+          precedingUserMessage: precedingMessage,
+          conversationId: conversation.conversation_id || "unknown",
+          conversationTitle: conversation.title || "Untitled",
+        });
+      }
+    }
+  }
+
+  return { totalThinkingTime, thinkingBlocks, thinkingDetails };
+}
+
+async function analyzeAllConversations(filename) {
+  /**
+   * Analyze all conversations in the JSON file.
+   *
+   * Returns object with analysis results.
+   */
+  console.log(`Loading ${filename}...`);
+  const data = JSON.parse(await fs.readFile(filename, "utf8"));
+
+  console.log(`Found ${data.length} conversations`);
+
+  const results = {
+    totalConversations: data.length,
+    conversationsWithThinking: 0,
+    totalThinkingTime: 0.0,
+    totalThinkingBlocks: 0,
+    conversationDetails: [],
+    thinkingDistribution: {
+      "0-5 seconds": 0,
+      "5-15 seconds": 0,
+      "15-30 seconds": 0,
+      "30-60 seconds": 0,
+      "60+ seconds": 0,
+    },
+    allThinkingBlocks: [], // New: collect all individual thinking blocks
+  };
+
+  for (let i = 0; i < data.length; i++) {
+    if (i % 500 === 0) {
+      console.log(`Processing conversation ${i}...`);
+    }
+
+    const conversation = data[i];
+    const convId = conversation.conversation_id || `unknown_${i}`;
+    const title = conversation.title || "Untitled";
+
+    const { totalThinkingTime, thinkingBlocks, thinkingDetails } = analyzeConversationThinking(conversation);
+
+    if (totalThinkingTime > 0) {
+      results.conversationsWithThinking++;
+      results.totalThinkingTime += totalThinkingTime;
+      results.totalThinkingBlocks += thinkingBlocks;
+
+      // Add all individual thinking blocks to global list
+      results.allThinkingBlocks.push(...thinkingDetails);
+
+      // Categorize thinking time for distribution analysis
+      if (totalThinkingTime < 5) {
+        results.thinkingDistribution["0-5 seconds"]++;
+      } else if (totalThinkingTime < 15) {
+        results.thinkingDistribution["5-15 seconds"]++;
+      } else if (totalThinkingTime < 30) {
+        results.thinkingDistribution["15-30 seconds"]++;
+      } else if (totalThinkingTime < 60) {
+        results.thinkingDistribution["30-60 seconds"]++;
+      } else {
+        results.thinkingDistribution["60+ seconds"]++;
+      }
+
+      results.conversationDetails.push({
+        conversationId: convId,
+        title: title,
+        thinkingTimeSec: totalThinkingTime,
+        thinkingBlocks: thinkingBlocks,
+        thinkingDetails: thinkingDetails,
+      });
+    }
+  }
+
+  return results;
+}
+
+function printResults(results) {
+  /**
+   * Print analysis results in a readable format.
+   */
+  console.log("\n" + "=".repeat(60));
+  console.log("THINKING TIME ANALYSIS RESULTS");
+  console.log("=".repeat(60));
+
+  const totalConvs = results.totalConversations;
+  const thinkingConvs = results.conversationsWithThinking;
+
+  console.log(`Total conversations: ${totalConvs.toLocaleString()}`);
+  console.log(
+    `Conversations with thinking: ${thinkingConvs.toLocaleString()} (${((thinkingConvs / totalConvs) * 100).toFixed(1)}%)`,
+  );
+  console.log(`Conversations without thinking: ${(totalConvs - thinkingConvs).toLocaleString()}`);
+
+  if (thinkingConvs > 0) {
+    console.log(`\nTHINKING TIME STATISTICS:`);
+    console.log(
+      `Total thinking time: ${results.totalThinkingTime.toFixed(1)} seconds (${(results.totalThinkingTime / 60).toFixed(1)} minutes)`,
+    );
+    console.log(`Total thinking blocks: ${results.totalThinkingBlocks.toLocaleString()}`);
+    console.log(
+      `Average thinking time per conversation: ${(results.totalThinkingTime / thinkingConvs).toFixed(1)} seconds`,
+    );
+    console.log(
+      `Average thinking blocks per conversation: ${(results.totalThinkingBlocks / thinkingConvs).toFixed(1)}`,
+    );
+
+    console.log(`\nTHINKING TIME DISTRIBUTION:`);
+    for (const [category, count] of Object.entries(results.thinkingDistribution)) {
+      const percentage = ((count / thinkingConvs) * 100).toFixed(1);
+      console.log(` ${category}: ${count.toLocaleString()} conversations (${percentage}%)`);
+    }
+
+    console.log(`\nTOP 10 CONVERSATIONS BY THINKING TIME:`);
+    const sortedConvs = results.conversationDetails.sort((a, b) => b.thinkingTimeSec - a.thinkingTimeSec).slice(0, 10);
+
+    sortedConvs.forEach((conv, i) => {
+      const title = conv.title.length > 50 ? conv.title.substring(0, 50) + "..." : conv.title;
+      console.log(
+        ` ${(i + 1).toString().padStart(2)}. ${conv.thinkingTimeSec.toFixed(1).padStart(6)}s (${conv.thinkingBlocks} blocks) - ${title}`,
+      );
+    });
+
+    console.log(`\nTOP 10 INDIVIDUAL THINKING BLOCKS:`);
+    const sortedBlocks = results.allThinkingBlocks.sort((a, b) => b.durationSec - a.durationSec).slice(0, 10);
+
+    sortedBlocks.forEach((block, i) => {
+      const title =
+        block.conversationTitle.length > 30
+          ? block.conversationTitle.substring(0, 30) + "..."
+          : block.conversationTitle;
+      const userMsg =
+        block.precedingUserMessage.length > 80
+          ? block.precedingUserMessage.substring(0, 80) + "..."
+          : block.precedingUserMessage;
+      console.log(` ${(i + 1).toString().padStart(2)}. ${block.durationSec.toFixed(1).padStart(6)}s - ${title}`);
+      console.log(` User: ${userMsg}`);
+      console.log();
+    });
+  }
+}
+
+async function main() {
+  const filename = process.argv[2] || "conversations.json";
+
+  try {
+    const startTime = process.hrtime.bigint();
+    const results = await analyzeAllConversations(filename);
+    const endTime = process.hrtime.bigint();
+
+    printResults(results);
+
+    // Print execution time
+    const executionTimeMs = Number(endTime - startTime) / 1000000;
+    console.log(`\nExecution time: ${(executionTimeMs / 1000).toFixed(2)} seconds`);
+
+    // Optionally save detailed results to JSON
+    if (process.argv[3] === "--save-details") {
+      const outputFile = "thinking_analysis_results_node.json";
+      await fs.writeFile(outputFile, JSON.stringify(results, null, 2));
+      console.log(`\nDetailed results saved to ${outputFile}`);
+    }
+  } catch (error) {
+    if (error.code === "ENOENT") {
+      console.error(`Error: File '${filename}' not found`);
+      console.error("Usage: node thinking_time.js [conversations.json] [--save-details]");
+      process.exit(1);
+    } else if (error instanceof SyntaxError) {
+      console.error(`Error: Invalid JSON in file '${filename}': ${error.message}`);
+      process.exit(1);
+    } else {
+      console.error(`Error: ${error.message}`);
+      process.exit(1);
+    }
+  }
+}
+
+if (import.meta.url === `file://${process.argv[1]}`) {
+  main();
+}
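As the header comment above explains, the analyzer only keys off `reasoning_status` and `finished_duration_sec` in a message's metadata, plus the `parent` links used to walk back to the user prompt. A hand-written sketch of the mapping-node shape it expects follows; the field names come from the code above, while the values are made up for illustration:

```js
// Illustrative only - a single entry of conversation.mapping as thinktime.js reads it.
const exampleMappingNode = {
  parent: "previous-node-id", // followed upward by findPrecedingUserMessage()
  message: {
    author: { role: "assistant" },
    create_time: 1719650000.0, // copied into thinkingDetails.createTime
    metadata: {
      reasoning_status: "reasoning_ended", // marks the end of one thinking block
      finished_duration_sec: 12.4, // seconds of reasoning that get summed
    },
  },
};
```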
package/index.test.js
DELETED
@@ -1,380 +0,0 @@
-// index.test.js
-
-import { promises as fs } from "fs";
-import path from "path";
-import os from "os";
-import { default as chatgptToMarkdown, formatDate } from "./index";
-
-describe("chatgptToMarkdown", () => {
-  let tempDir;
-
-  beforeEach(async () => {
-    tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "chatgptToMarkdown-"));
-  });
-
-  afterEach(async () => {
-    await fs.rm(tempDir, { recursive: true, force: true });
-  });
-
-  it("should write a markdown file for each conversation", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        conversation_id: "abc123",
-        mapping: {
-          0: {
-            message: {
-              author: { role: "user", name: "John" },
-              content: { content_type: "text", parts: ["Hello"] },
-            },
-          },
-        },
-      },
-    ];
-
-    await chatgptToMarkdown(json, tempDir);
-
-    const filePath = path.join(tempDir, "Test Conversation.md");
-    const fileContent = await fs.readFile(filePath, "utf8");
-
-    expect(fileContent).toBe(`# Test Conversation
-
-- Created: ${formatDate(1630454400 * 1000)}
-- Updated: ${formatDate(1630458000 * 1000)}
-- Link: https://chatgpt.com/c/abc123
-
-## user (John)
-
-Hello
-
-
-`);
-  });
-
-  it("should handle titles with HTML tags", async () => {
-    const json = [
-      {
-        title: "<h1>Test Conversation</h1>",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {},
-      },
-    ];
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "h1 Test Conversation h1.md"), "utf8");
-    expect(fileContent).toContain("# `<h1>`Test Conversation`</h1>`\n");
-  });
-
-  it("should sanitize titles with invalid filename characters", async () => {
-    const json = [
-      {
-        title: ":/In\\<>*valid|?",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {},
-      },
-    ];
-    await chatgptToMarkdown(json, tempDir);
-    // Check that the file exists with the sanitized title
-    await expect(fs.access(path.join(tempDir, "In valid.md"))).resolves.not.toThrow();
-  });
-
-  it("should handle custom date format functions", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {},
-      },
-    ];
-    const customDateFormat = (date) => date.toISOString();
-    await chatgptToMarkdown(json, tempDir, { dateFormat: customDateFormat });
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).toContain("- Created: 2021-09-01T00:00:00.000Z\n");
-  });
-
-  it("should ignore messages with no content", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              /* no content property */
-            },
-          },
-        },
-      },
-    ];
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).not.toContain("## user (John)");
-  });
-
-  it("should ignore messages with empty content", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: { message: { content: { content_type: "text", parts: [] } } },
-        },
-      },
-    ];
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).not.toContain("## user (John)");
-  });
-
-  it("should handle tether_browsing_display content", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              author: { role: "tool", name: "browser" },
-              content: { content_type: "tether_browsing_display", result: "L0: x" },
-            },
-          },
-        },
-      },
-    ];
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).toContain("```\nL0: x\n```");
-  });
-
-  it("should handle tether_quote content", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              author: { role: "tool", name: "browser" },
-              content: { content_type: "tether_quote", url: "x.com", domain: "x.com", title: "T", text: "X" },
-            },
-          },
-        },
-      },
-    ];
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).toContain("```\nT (x.com)\n\nX\n```");
-  });
-
-  it("should handle multimodal_text content", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              author: { role: "tool", name: "dalle.text2im" },
-              content: {
-                content_type: "multimodal_text",
-                parts: [
-                  {
-                    content_type: "image_asset_pointer",
-                    width: 1024,
-                    height: 1024,
-                    metadata: { dalle: { prompt: "Photo" } },
-                  },
-                  { content_type: "image_asset_pointer", width: 1024, height: 1024 },
-                  { content_type: "some_other_type" },
-                ],
-              },
-            },
-          },
-        },
-      },
-    ];
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).toContain("Image (1024x1024): Photo\n");
-    expect(fileContent).toContain("some_other_type\n");
-  });
-
-  it("should indent messages with tool role that contain ``` fenced code blocks", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              author: { role: "tool" },
-              content: { content_type: "code", language: "javascript", text: 'console.log("Hello, world!");' },
-            },
-          },
-        },
-      },
-    ];
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).toContain('```javascript\nconsole.log("Hello, world!");\n```\n');
-  });
-
-  it("should skip user_editable_context content", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              author: { role: "user" },
-              content: {
-                content_type: "user_editable_context",
-                user_profile: "test profile",
-                user_instructions: "test instructions",
-              },
-            },
-          },
-        },
-      },
-    ];
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).not.toContain("test profile");
-    expect(fileContent).not.toContain("test instructions");
-  });
-
-  it("should handle system_error content", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              author: { role: "system" },
-              content: {
-                content_type: "system_error",
-                name: "Error Name",
-                text: "Error details",
-              },
-            },
-          },
-        },
-      },
-    ];
-
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).toContain("Error Name\n\nError details\n\n");
-  });
-
-  it("should handle execution_output content", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              author: { role: "assistant" },
-              content: {
-                content_type: "execution_output",
-                text: "Program output",
-              },
-            },
-          },
-        },
-      },
-    ];
-
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).toContain("```\nProgram output\n```");
-  });
-
-  it("should handle code content with unknown language", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              author: { role: "assistant" },
-              content: {
-                content_type: "code",
-                language: "unknown",
-                text: "some code",
-              },
-            },
-          },
-        },
-      },
-    ];
-
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).toContain("```\nsome code\n```");
-  });
-
-  it("should handle tether_browsing_display with summary", async () => {
-    const json = [
-      {
-        title: "Test Conversation",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {
-          0: {
-            message: {
-              author: { role: "tool" },
-              content: {
-                content_type: "tether_browsing_display",
-                summary: "Page Summary",
-                result: "Search Result",
-              },
-            },
-          },
-        },
-      },
-    ];
-
-    await chatgptToMarkdown(json, tempDir);
-    const fileContent = await fs.readFile(path.join(tempDir, "Test Conversation.md"), "utf8");
-    expect(fileContent).toContain("```\nPage Summary\nSearch Result\n```");
-  });
-
-  it("should throw TypeError for invalid arguments", async () => {
-    await expect(chatgptToMarkdown("not an array", tempDir)).rejects.toThrow(TypeError);
-    await expect(chatgptToMarkdown([], 123)).rejects.toThrow(TypeError);
-  });
-
-  it("should use conversation_id as filename when sanitized title is empty", async () => {
-    const json = [
-      {
-        title: "?????", // Will be sanitized to empty string
-        conversation_id: "abc123",
-        create_time: 1630454400,
-        update_time: 1630458000,
-        mapping: {},
-      },
-    ];
-
-    await chatgptToMarkdown(json, tempDir);
-    await expect(fs.access(path.join(tempDir, "abc123.md"))).resolves.not.toThrow();
-  });
-});