@amirdaraee/namewise 0.5.2 → 0.5.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +9 -0
- package/dist/index.js +0 -0
- package/package.json +2 -2
- package/.github/ISSUE_TEMPLATE/bug_report.yml +0 -82
- package/.github/ISSUE_TEMPLATE/feature_request.yml +0 -61
- package/.github/workflows/auto-release.yml +0 -78
- package/.github/workflows/build.yml +0 -55
- package/.github/workflows/publish.yml +0 -134
- package/.github/workflows/test.yml +0 -47
- package/eng.traineddata +0 -0
- package/src/cli/commands.ts +0 -64
- package/src/cli/rename.ts +0 -171
- package/src/index.ts +0 -54
- package/src/parsers/excel-parser.ts +0 -66
- package/src/parsers/factory.ts +0 -38
- package/src/parsers/pdf-parser.ts +0 -99
- package/src/parsers/text-parser.ts +0 -43
- package/src/parsers/word-parser.ts +0 -50
- package/src/services/ai-factory.ts +0 -39
- package/src/services/claude-service.ts +0 -119
- package/src/services/file-renamer.ts +0 -141
- package/src/services/lmstudio-service.ts +0 -161
- package/src/services/ollama-service.ts +0 -191
- package/src/services/openai-service.ts +0 -117
- package/src/types/index.ts +0 -76
- package/src/types/pdf-extraction.d.ts +0 -7
- package/src/utils/ai-prompts.ts +0 -76
- package/src/utils/file-templates.ts +0 -275
- package/src/utils/naming-conventions.ts +0 -67
- package/src/utils/pdf-to-image.ts +0 -137
- package/tests/data/console-test-1.txt +0 -1
- package/tests/data/console-test-2.txt +0 -1
- package/tests/data/console-test-long-filename-for-display-testing.txt +0 -1
- package/tests/data/empty-file.txt +0 -0
- package/tests/data/failure.txt +0 -1
- package/tests/data/file1.txt +0 -1
- package/tests/data/file2.txt +0 -1
- package/tests/data/much-longer-filename-to-test-clearing.txt +0 -1
- package/tests/data/sample-markdown.md +0 -9
- package/tests/data/sample-pdf.pdf +0 -0
- package/tests/data/sample-text.txt +0 -25
- package/tests/data/short.txt +0 -1
- package/tests/data/single-file.txt +0 -1
- package/tests/data/success.txt +0 -1
- package/tests/data/this-is-a-very-long-filename-that-should-be-truncated-for-better-display-purposes.txt +0 -1
- package/tests/data/very-long-filename-that-should-be-cleared-properly.txt +0 -1
- package/tests/data/x.txt +0 -1
- package/tests/integration/ai-prompting.test.ts +0 -386
- package/tests/integration/end-to-end.test.ts +0 -209
- package/tests/integration/person-name-extraction.test.ts +0 -440
- package/tests/integration/workflow.test.ts +0 -336
- package/tests/mocks/mock-ai-service.ts +0 -58
- package/tests/unit/cli/commands.test.ts +0 -169
- package/tests/unit/parsers/factory.test.ts +0 -100
- package/tests/unit/parsers/pdf-parser.test.ts +0 -63
- package/tests/unit/parsers/text-parser.test.ts +0 -85
- package/tests/unit/services/ai-factory.test.ts +0 -85
- package/tests/unit/services/claude-service.test.ts +0 -188
- package/tests/unit/services/file-renamer.test.ts +0 -514
- package/tests/unit/services/lmstudio-service.test.ts +0 -326
- package/tests/unit/services/ollama-service.test.ts +0 -264
- package/tests/unit/services/openai-service.test.ts +0 -196
- package/tests/unit/utils/ai-prompts.test.ts +0 -213
- package/tests/unit/utils/file-templates.test.ts +0 -199
- package/tests/unit/utils/naming-conventions.test.ts +0 -88
- package/tests/unit/utils/pdf-to-image.test.ts +0 -127
- package/tsconfig.json +0 -20
- package/vitest.config.ts +0 -30
|
@@ -1,141 +0,0 @@
|
|
|
1
|
-
import { promises as fs } from 'fs';
|
|
2
|
-
import path from 'path';
|
|
3
|
-
import { FileInfo, Config, RenameResult, AIProvider } from '../types/index.js';
|
|
4
|
-
import { DocumentParserFactory } from '../parsers/factory.js';
|
|
5
|
-
import { categorizeFile, applyTemplate } from '../utils/file-templates.js';
|
|
6
|
-
|
|
7
|
-
export class FileRenamer {
|
|
8
|
-
constructor(
|
|
9
|
-
private parserFactory: DocumentParserFactory,
|
|
10
|
-
private aiService: AIProvider,
|
|
11
|
-
private config: Config
|
|
12
|
-
) {}
|
|
13
|
-
|
|
14
|
-
async renameFiles(files: FileInfo[]): Promise<RenameResult[]> {
|
|
15
|
-
const results: RenameResult[] = [];
|
|
16
|
-
let lastProgressLength = 0;
|
|
17
|
-
|
|
18
|
-
for (let i = 0; i < files.length; i++) {
|
|
19
|
-
const file = files[i];
|
|
20
|
-
|
|
21
|
-
// Create progress message with better formatting
|
|
22
|
-
const progressBar = `[${i + 1}/${files.length}]`;
|
|
23
|
-
const truncatedName = file.name.length > 50 ? file.name.substring(0, 47) + '...' : file.name;
|
|
24
|
-
const progressMessage = `🔄 Processing ${progressBar} ${truncatedName}`;
|
|
25
|
-
|
|
26
|
-
// Clear the previous line completely by using the actual length
|
|
27
|
-
const clearLine = '\r' + ' '.repeat(Math.max(lastProgressLength, progressMessage.length)) + '\r';
|
|
28
|
-
process.stdout.write(clearLine + progressMessage);
|
|
29
|
-
|
|
30
|
-
// Store the length for next iteration
|
|
31
|
-
lastProgressLength = progressMessage.length;
|
|
32
|
-
|
|
33
|
-
try {
|
|
34
|
-
const result = await this.renameFile(file);
|
|
35
|
-
results.push(result);
|
|
36
|
-
} catch (error) {
|
|
37
|
-
results.push({
|
|
38
|
-
originalPath: file.path,
|
|
39
|
-
newPath: file.path,
|
|
40
|
-
suggestedName: file.name,
|
|
41
|
-
success: false,
|
|
42
|
-
error: error instanceof Error ? error.message : 'Unknown error'
|
|
43
|
-
});
|
|
44
|
-
}
|
|
45
|
-
}
|
|
46
|
-
|
|
47
|
-
// Clear the final processing line completely and show completion
|
|
48
|
-
const clearFinal = '\r' + ' '.repeat(lastProgressLength) + '\r';
|
|
49
|
-
if (files.length > 0) {
|
|
50
|
-
const successCount = results.filter(r => r.success).length;
|
|
51
|
-
const completionMessage = `✅ Processed ${files.length} file${files.length === 1 ? '' : 's'} (${successCount} successful)`;
|
|
52
|
-
process.stdout.write(clearFinal + completionMessage + '\n');
|
|
53
|
-
} else {
|
|
54
|
-
process.stdout.write(clearFinal);
|
|
55
|
-
}
|
|
56
|
-
|
|
57
|
-
return results;
|
|
58
|
-
}
|
|
59
|
-
|
|
60
|
-
private async renameFile(file: FileInfo): Promise<RenameResult> {
|
|
61
|
-
// Check file size
|
|
62
|
-
if (file.size > this.config.maxFileSize) {
|
|
63
|
-
throw new Error(`File size (${Math.round(file.size / 1024 / 1024)}MB) exceeds maximum allowed size (${Math.round(this.config.maxFileSize / 1024 / 1024)}MB)`);
|
|
64
|
-
}
|
|
65
|
-
|
|
66
|
-
// Get appropriate parser
|
|
67
|
-
const parser = this.parserFactory.getParser(file.path);
|
|
68
|
-
if (!parser) {
|
|
69
|
-
throw new Error(`No parser available for file type: ${file.extension}`);
|
|
70
|
-
}
|
|
71
|
-
|
|
72
|
-
// Extract content and metadata
|
|
73
|
-
const parseResult = await parser.parse(file.path);
|
|
74
|
-
const content = parseResult.content;
|
|
75
|
-
if (!content || content.trim().length === 0) {
|
|
76
|
-
throw new Error('No content could be extracted from the file');
|
|
77
|
-
}
|
|
78
|
-
|
|
79
|
-
// Update file info with extracted document metadata
|
|
80
|
-
file.documentMetadata = parseResult.metadata;
|
|
81
|
-
|
|
82
|
-
// Determine file category (use configured category or auto-categorize)
|
|
83
|
-
const fileCategory = this.config.templateOptions.category === 'auto'
|
|
84
|
-
? categorizeFile(file.path, content, file)
|
|
85
|
-
: this.config.templateOptions.category;
|
|
86
|
-
|
|
87
|
-
// Generate core filename using AI with all available metadata
|
|
88
|
-
const coreFileName = await this.aiService.generateFileName(
|
|
89
|
-
content,
|
|
90
|
-
file.name,
|
|
91
|
-
this.config.namingConvention,
|
|
92
|
-
fileCategory,
|
|
93
|
-
file // Pass the entire file info with all metadata
|
|
94
|
-
);
|
|
95
|
-
if (!coreFileName || coreFileName.trim().length === 0) {
|
|
96
|
-
throw new Error('AI service failed to generate a filename');
|
|
97
|
-
}
|
|
98
|
-
|
|
99
|
-
// Apply template to include personal info, dates, etc.
|
|
100
|
-
const templatedName = applyTemplate(
|
|
101
|
-
coreFileName,
|
|
102
|
-
fileCategory,
|
|
103
|
-
this.config.templateOptions,
|
|
104
|
-
this.config.namingConvention
|
|
105
|
-
);
|
|
106
|
-
|
|
107
|
-
// Create new filename with original extension
|
|
108
|
-
const newFileName = `${templatedName}${file.extension}`;
|
|
109
|
-
const newPath = path.join(path.dirname(file.path), newFileName);
|
|
110
|
-
|
|
111
|
-
// Check if new filename would conflict with existing file
|
|
112
|
-
if (newPath !== file.path) {
|
|
113
|
-
await this.checkForConflicts(newPath);
|
|
114
|
-
}
|
|
115
|
-
|
|
116
|
-
// Perform the rename (or simulate if dry run)
|
|
117
|
-
if (!this.config.dryRun && newPath !== file.path) {
|
|
118
|
-
await fs.rename(file.path, newPath);
|
|
119
|
-
}
|
|
120
|
-
|
|
121
|
-
return {
|
|
122
|
-
originalPath: file.path,
|
|
123
|
-
newPath,
|
|
124
|
-
suggestedName: newFileName,
|
|
125
|
-
success: true
|
|
126
|
-
};
|
|
127
|
-
}
|
|
128
|
-
|
|
129
|
-
private async checkForConflicts(newPath: string): Promise<void> {
|
|
130
|
-
try {
|
|
131
|
-
await fs.access(newPath);
|
|
132
|
-
// If we reach here, the file exists
|
|
133
|
-
throw new Error(`Target filename already exists: ${path.basename(newPath)}`);
|
|
134
|
-
} catch (error) {
|
|
135
|
-
// If the error is ENOENT (file doesn't exist), that's what we want
|
|
136
|
-
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
|
|
137
|
-
throw error;
|
|
138
|
-
}
|
|
139
|
-
}
|
|
140
|
-
}
|
|
141
|
-
}
|
|
@@ -1,161 +0,0 @@
|
|
|
1
|
-
import { AIProvider, FileInfo } from '../types/index.js';
|
|
2
|
-
import { buildFileNamePrompt, AI_SYSTEM_PROMPT } from '../utils/ai-prompts.js';
|
|
3
|
-
import { NamingConvention } from '../utils/naming-conventions.js';
|
|
4
|
-
import { FileCategory } from '../utils/file-templates.js';
|
|
5
|
-
|
|
6
|
-
/**
 * Shape of a chat-completion response from an OpenAI-compatible API
 * (only the subset of fields this service actually reads).
 */
interface OpenAICompatibleResponse {
  choices: Array<{
    message: {
      content: string;
      role: string;
    };
    finish_reason: string;
  }>;
  // Token accounting; optional because local servers may omit it.
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

/** A single message in an OpenAI-style chat-completions payload. */
interface OpenAIMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

/** Entry returned by the /v1/models listing endpoint. */
interface ModelInfo {
  id: string;
  object: string;
  created: number;
  owned_by: string;
}
|
|
32
|
-
|
|
33
|
-
export class LMStudioService implements AIProvider {
|
|
34
|
-
name = 'LMStudio';
|
|
35
|
-
private baseUrl: string;
|
|
36
|
-
private model: string;
|
|
37
|
-
|
|
38
|
-
constructor(
|
|
39
|
-
baseUrl = 'http://localhost:1234',
|
|
40
|
-
model = 'local-model'
|
|
41
|
-
) {
|
|
42
|
-
this.baseUrl = baseUrl;
|
|
43
|
-
this.model = model;
|
|
44
|
-
}
|
|
45
|
-
|
|
46
|
-
async generateFileName(
|
|
47
|
-
content: string,
|
|
48
|
-
originalName: string,
|
|
49
|
-
namingConvention = 'kebab-case',
|
|
50
|
-
category = 'general',
|
|
51
|
-
fileInfo?: FileInfo
|
|
52
|
-
): Promise<string> {
|
|
53
|
-
try {
|
|
54
|
-
// Check if this is a scanned PDF image
|
|
55
|
-
const isScannedPDF = content.startsWith('[SCANNED_PDF_IMAGE]:');
|
|
56
|
-
|
|
57
|
-
if (isScannedPDF) {
|
|
58
|
-
// LM Studio has limited vision support, so we'll fall back to using the original filename
|
|
59
|
-
console.log('⚠️ Scanned PDF detected but LMStudio has limited vision support. Using original filename.');
|
|
60
|
-
return this.sanitizeFilename(originalName);
|
|
61
|
-
}
|
|
62
|
-
|
|
63
|
-
const prompt = this.buildPrompt(content, originalName, namingConvention, category, fileInfo);
|
|
64
|
-
|
|
65
|
-
const response = await this.makeRequest('/v1/chat/completions', {
|
|
66
|
-
model: this.model,
|
|
67
|
-
messages: [
|
|
68
|
-
{
|
|
69
|
-
role: 'system',
|
|
70
|
-
content: AI_SYSTEM_PROMPT
|
|
71
|
-
},
|
|
72
|
-
{
|
|
73
|
-
role: 'user',
|
|
74
|
-
content: prompt
|
|
75
|
-
}
|
|
76
|
-
] as OpenAIMessage[],
|
|
77
|
-
temperature: 0.3,
|
|
78
|
-
max_tokens: 100,
|
|
79
|
-
stream: false
|
|
80
|
-
});
|
|
81
|
-
|
|
82
|
-
if (response.choices?.[0]?.message?.content) {
|
|
83
|
-
return this.sanitizeFilename(response.choices[0].message.content);
|
|
84
|
-
} else {
|
|
85
|
-
throw new Error('No response content from LMStudio');
|
|
86
|
-
}
|
|
87
|
-
} catch (error) {
|
|
88
|
-
console.error('LMStudio API error:', error);
|
|
89
|
-
throw new Error(`LMStudio service failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
|
|
90
|
-
}
|
|
91
|
-
}
|
|
92
|
-
|
|
93
|
-
private buildPrompt(
|
|
94
|
-
content: string,
|
|
95
|
-
originalName: string,
|
|
96
|
-
namingConvention: string,
|
|
97
|
-
category: string,
|
|
98
|
-
fileInfo?: FileInfo
|
|
99
|
-
): string {
|
|
100
|
-
return buildFileNamePrompt({
|
|
101
|
-
content,
|
|
102
|
-
originalName,
|
|
103
|
-
namingConvention: namingConvention as NamingConvention,
|
|
104
|
-
category: category as FileCategory,
|
|
105
|
-
fileInfo
|
|
106
|
-
});
|
|
107
|
-
}
|
|
108
|
-
|
|
109
|
-
private sanitizeFilename(filename: string): string {
|
|
110
|
-
return filename
|
|
111
|
-
.trim()
|
|
112
|
-
.replace(/^["']|["']$/g, '') // Remove quotes
|
|
113
|
-
.replace(/\.(txt|pdf|docx?|xlsx?|md|rtf)$/i, '') // Remove extensions
|
|
114
|
-
.replace(/[<>:"/\\|?*]/g, '-') // Replace invalid characters
|
|
115
|
-
.replace(/\s+/g, '-') // Replace spaces with hyphens
|
|
116
|
-
.toLowerCase();
|
|
117
|
-
}
|
|
118
|
-
|
|
119
|
-
private async makeRequest(endpoint: string, payload: any): Promise<OpenAICompatibleResponse> {
|
|
120
|
-
const url = `${this.baseUrl}${endpoint}`;
|
|
121
|
-
|
|
122
|
-
const response = await fetch(url, {
|
|
123
|
-
method: 'POST',
|
|
124
|
-
headers: {
|
|
125
|
-
'Content-Type': 'application/json',
|
|
126
|
-
},
|
|
127
|
-
body: JSON.stringify(payload),
|
|
128
|
-
});
|
|
129
|
-
|
|
130
|
-
if (!response.ok) {
|
|
131
|
-
const errorText = await response.text();
|
|
132
|
-
throw new Error(`LMStudio API request failed: ${response.status} ${response.statusText} - ${errorText}`);
|
|
133
|
-
}
|
|
134
|
-
|
|
135
|
-
const data = await response.json();
|
|
136
|
-
return data as OpenAICompatibleResponse;
|
|
137
|
-
}
|
|
138
|
-
|
|
139
|
-
// Method to check if LMStudio service is available
|
|
140
|
-
async isAvailable(): Promise<boolean> {
|
|
141
|
-
try {
|
|
142
|
-
const response = await fetch(`${this.baseUrl}/v1/models`);
|
|
143
|
-
return response.ok;
|
|
144
|
-
} catch {
|
|
145
|
-
return false;
|
|
146
|
-
}
|
|
147
|
-
}
|
|
148
|
-
|
|
149
|
-
// Method to list available models
|
|
150
|
-
async listModels(): Promise<string[]> {
|
|
151
|
-
try {
|
|
152
|
-
const response = await fetch(`${this.baseUrl}/v1/models`);
|
|
153
|
-
if (!response.ok) return [];
|
|
154
|
-
|
|
155
|
-
const data = await response.json();
|
|
156
|
-
return data.data?.map((model: ModelInfo) => model.id) || [];
|
|
157
|
-
} catch {
|
|
158
|
-
return [];
|
|
159
|
-
}
|
|
160
|
-
}
|
|
161
|
-
}
|
|
@@ -1,191 +0,0 @@
|
|
|
1
|
-
import { AIProvider, FileInfo } from '../types/index.js';
|
|
2
|
-
import { buildFileNamePrompt, AI_SYSTEM_PROMPT } from '../utils/ai-prompts.js';
|
|
3
|
-
import { NamingConvention } from '../utils/naming-conventions.js';
|
|
4
|
-
import { FileCategory } from '../utils/file-templates.js';
|
|
5
|
-
|
|
6
|
-
/**
 * Response envelope from the Ollama HTTP API. Exactly one of `response`
 * (/api/generate) or `message` (/api/chat) is populated, depending on
 * which endpoint was called.
 */
interface OllamaResponse {
  model: string;
  response?: string; // For /api/generate
  message?: { // For /api/chat
    content: string;
    role: string;
  };
  done: boolean;
}

/** A single chat message sent to /api/chat. */
interface OllamaChatMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
  images?: string[]; // For vision models
}
|
|
21
|
-
|
|
22
|
-
export class OllamaService implements AIProvider {
|
|
23
|
-
name = 'Ollama';
|
|
24
|
-
private baseUrl: string;
|
|
25
|
-
private model: string;
|
|
26
|
-
|
|
27
|
-
constructor(
|
|
28
|
-
baseUrl = 'http://localhost:11434',
|
|
29
|
-
model = 'llama3.1'
|
|
30
|
-
) {
|
|
31
|
-
this.baseUrl = baseUrl;
|
|
32
|
-
this.model = model;
|
|
33
|
-
}
|
|
34
|
-
|
|
35
|
-
async generateFileName(
|
|
36
|
-
content: string,
|
|
37
|
-
originalName: string,
|
|
38
|
-
namingConvention = 'kebab-case',
|
|
39
|
-
category = 'general',
|
|
40
|
-
fileInfo?: FileInfo
|
|
41
|
-
): Promise<string> {
|
|
42
|
-
try {
|
|
43
|
-
// Check if this is a scanned PDF image
|
|
44
|
-
const isScannedPDF = content.startsWith('[SCANNED_PDF_IMAGE]:');
|
|
45
|
-
|
|
46
|
-
let response;
|
|
47
|
-
|
|
48
|
-
if (isScannedPDF) {
|
|
49
|
-
// Extract base64 image data and use a vision model
|
|
50
|
-
const imageBase64 = content.replace('[SCANNED_PDF_IMAGE]:', '');
|
|
51
|
-
const imageData = imageBase64.split(',')[1]; // Remove data:image/format;base64, prefix
|
|
52
|
-
|
|
53
|
-
const prompt = this.buildPrompt(
|
|
54
|
-
'This is a scanned PDF document converted to an image. Please analyze the image and extract the main content to generate an appropriate filename.',
|
|
55
|
-
originalName,
|
|
56
|
-
namingConvention,
|
|
57
|
-
category,
|
|
58
|
-
fileInfo
|
|
59
|
-
);
|
|
60
|
-
|
|
61
|
-
// Use LLaVA model for vision capabilities
|
|
62
|
-
const visionModel = this.getVisionModel();
|
|
63
|
-
|
|
64
|
-
response = await this.makeRequest('/api/chat', {
|
|
65
|
-
model: visionModel,
|
|
66
|
-
messages: [
|
|
67
|
-
{
|
|
68
|
-
role: 'system',
|
|
69
|
-
content: AI_SYSTEM_PROMPT
|
|
70
|
-
},
|
|
71
|
-
{
|
|
72
|
-
role: 'user',
|
|
73
|
-
content: prompt,
|
|
74
|
-
images: [imageData]
|
|
75
|
-
}
|
|
76
|
-
] as OllamaChatMessage[],
|
|
77
|
-
stream: false
|
|
78
|
-
});
|
|
79
|
-
} else {
|
|
80
|
-
// Standard text processing
|
|
81
|
-
const prompt = this.buildPrompt(content, originalName, namingConvention, category, fileInfo);
|
|
82
|
-
|
|
83
|
-
response = await this.makeRequest('/api/chat', {
|
|
84
|
-
model: this.model,
|
|
85
|
-
messages: [
|
|
86
|
-
{
|
|
87
|
-
role: 'system',
|
|
88
|
-
content: AI_SYSTEM_PROMPT
|
|
89
|
-
},
|
|
90
|
-
{
|
|
91
|
-
role: 'user',
|
|
92
|
-
content: prompt
|
|
93
|
-
}
|
|
94
|
-
] as OllamaChatMessage[],
|
|
95
|
-
stream: false
|
|
96
|
-
});
|
|
97
|
-
}
|
|
98
|
-
|
|
99
|
-
if (response.message?.content) {
|
|
100
|
-
return this.sanitizeFilename(response.message.content);
|
|
101
|
-
} else {
|
|
102
|
-
throw new Error('No response content from Ollama');
|
|
103
|
-
}
|
|
104
|
-
} catch (error) {
|
|
105
|
-
console.error('Ollama API error:', error);
|
|
106
|
-
throw new Error(`Ollama service failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
|
|
107
|
-
}
|
|
108
|
-
}
|
|
109
|
-
|
|
110
|
-
private buildPrompt(
|
|
111
|
-
content: string,
|
|
112
|
-
originalName: string,
|
|
113
|
-
namingConvention: string,
|
|
114
|
-
category: string,
|
|
115
|
-
fileInfo?: FileInfo
|
|
116
|
-
): string {
|
|
117
|
-
return buildFileNamePrompt({
|
|
118
|
-
content,
|
|
119
|
-
originalName,
|
|
120
|
-
namingConvention: namingConvention as NamingConvention,
|
|
121
|
-
category: category as FileCategory,
|
|
122
|
-
fileInfo
|
|
123
|
-
});
|
|
124
|
-
}
|
|
125
|
-
|
|
126
|
-
private getVisionModel(): string {
|
|
127
|
-
// Try to use a vision-capable model, fallback to default if not specified
|
|
128
|
-
const visionModels = ['llava', 'llava:7b', 'llava:13b', 'llava:34b', 'llama3.2-vision', 'qwen2-vl'];
|
|
129
|
-
|
|
130
|
-
// If the current model is already a vision model, use it
|
|
131
|
-
if (visionModels.some(vm => this.model.toLowerCase().includes(vm.split(':')[0]))) {
|
|
132
|
-
return this.model;
|
|
133
|
-
}
|
|
134
|
-
|
|
135
|
-
// Otherwise, default to llava (most common vision model in Ollama)
|
|
136
|
-
return 'llava';
|
|
137
|
-
}
|
|
138
|
-
|
|
139
|
-
private sanitizeFilename(filename: string): string {
|
|
140
|
-
return filename
|
|
141
|
-
.trim()
|
|
142
|
-
.replace(/^["']|["']$/g, '') // Remove quotes
|
|
143
|
-
.replace(/\.(txt|pdf|docx?|xlsx?|md|rtf)$/i, '') // Remove extensions
|
|
144
|
-
.replace(/[<>:"/\\|?*]/g, '-') // Replace invalid characters
|
|
145
|
-
.replace(/\s+/g, '-') // Replace spaces with hyphens
|
|
146
|
-
.toLowerCase();
|
|
147
|
-
}
|
|
148
|
-
|
|
149
|
-
private async makeRequest(endpoint: string, payload: any): Promise<OllamaResponse> {
|
|
150
|
-
const url = `${this.baseUrl}${endpoint}`;
|
|
151
|
-
|
|
152
|
-
const response = await fetch(url, {
|
|
153
|
-
method: 'POST',
|
|
154
|
-
headers: {
|
|
155
|
-
'Content-Type': 'application/json',
|
|
156
|
-
},
|
|
157
|
-
body: JSON.stringify(payload),
|
|
158
|
-
});
|
|
159
|
-
|
|
160
|
-
if (!response.ok) {
|
|
161
|
-
const errorText = await response.text();
|
|
162
|
-
throw new Error(`Ollama API request failed: ${response.status} ${response.statusText} - ${errorText}`);
|
|
163
|
-
}
|
|
164
|
-
|
|
165
|
-
const data = await response.json();
|
|
166
|
-
return data as OllamaResponse;
|
|
167
|
-
}
|
|
168
|
-
|
|
169
|
-
// Method to check if Ollama service is available
|
|
170
|
-
async isAvailable(): Promise<boolean> {
|
|
171
|
-
try {
|
|
172
|
-
const response = await fetch(`${this.baseUrl}/api/tags`);
|
|
173
|
-
return response.ok;
|
|
174
|
-
} catch {
|
|
175
|
-
return false;
|
|
176
|
-
}
|
|
177
|
-
}
|
|
178
|
-
|
|
179
|
-
// Method to list available models
|
|
180
|
-
async listModels(): Promise<string[]> {
|
|
181
|
-
try {
|
|
182
|
-
const response = await fetch(`${this.baseUrl}/api/tags`);
|
|
183
|
-
if (!response.ok) return [];
|
|
184
|
-
|
|
185
|
-
const data = await response.json();
|
|
186
|
-
return data.models?.map((model: any) => model.name) || [];
|
|
187
|
-
} catch {
|
|
188
|
-
return [];
|
|
189
|
-
}
|
|
190
|
-
}
|
|
191
|
-
}
|
|
@@ -1,117 +0,0 @@
|
|
|
1
|
-
import OpenAI from 'openai';
|
|
2
|
-
import { AIProvider, FileInfo } from '../types/index.js';
|
|
3
|
-
import { applyNamingConvention, NamingConvention } from '../utils/naming-conventions.js';
|
|
4
|
-
import { FileCategory } from '../utils/file-templates.js';
|
|
5
|
-
import { buildFileNamePrompt } from '../utils/ai-prompts.js';
|
|
6
|
-
|
|
7
|
-
export class OpenAIService implements AIProvider {
|
|
8
|
-
name = 'OpenAI';
|
|
9
|
-
private client: OpenAI;
|
|
10
|
-
|
|
11
|
-
constructor(apiKey: string) {
|
|
12
|
-
this.client = new OpenAI({
|
|
13
|
-
apiKey: apiKey
|
|
14
|
-
});
|
|
15
|
-
}
|
|
16
|
-
|
|
17
|
-
async generateFileName(content: string, originalName: string, namingConvention: string = 'kebab-case', category: string = 'general', fileInfo?: FileInfo): Promise<string> {
|
|
18
|
-
const convention = namingConvention as NamingConvention;
|
|
19
|
-
const fileCategory = category as FileCategory;
|
|
20
|
-
|
|
21
|
-
// Check if this is a scanned PDF image
|
|
22
|
-
const isScannedPDF = content.startsWith('[SCANNED_PDF_IMAGE]:');
|
|
23
|
-
|
|
24
|
-
try {
|
|
25
|
-
let response;
|
|
26
|
-
|
|
27
|
-
if (isScannedPDF) {
|
|
28
|
-
// Extract base64 image data
|
|
29
|
-
const imageBase64 = content.replace('[SCANNED_PDF_IMAGE]:', '');
|
|
30
|
-
|
|
31
|
-
const prompt = buildFileNamePrompt({
|
|
32
|
-
content: 'This is a scanned PDF document converted to an image. Please analyze the image and extract the main content to generate an appropriate filename.',
|
|
33
|
-
originalName,
|
|
34
|
-
namingConvention: convention,
|
|
35
|
-
category: fileCategory,
|
|
36
|
-
fileInfo
|
|
37
|
-
});
|
|
38
|
-
|
|
39
|
-
response = await this.client.chat.completions.create({
|
|
40
|
-
model: 'gpt-4o', // Use GPT-4 with vision capabilities
|
|
41
|
-
messages: [
|
|
42
|
-
{
|
|
43
|
-
role: 'user',
|
|
44
|
-
content: [
|
|
45
|
-
{
|
|
46
|
-
type: 'text',
|
|
47
|
-
text: prompt
|
|
48
|
-
},
|
|
49
|
-
{
|
|
50
|
-
type: 'image_url',
|
|
51
|
-
image_url: {
|
|
52
|
-
url: imageBase64
|
|
53
|
-
}
|
|
54
|
-
}
|
|
55
|
-
]
|
|
56
|
-
}
|
|
57
|
-
],
|
|
58
|
-
max_tokens: 100,
|
|
59
|
-
temperature: 0.3
|
|
60
|
-
});
|
|
61
|
-
} else {
|
|
62
|
-
// Standard text processing
|
|
63
|
-
const prompt = buildFileNamePrompt({
|
|
64
|
-
content,
|
|
65
|
-
originalName,
|
|
66
|
-
namingConvention: convention,
|
|
67
|
-
category: fileCategory,
|
|
68
|
-
fileInfo
|
|
69
|
-
});
|
|
70
|
-
|
|
71
|
-
response = await this.client.chat.completions.create({
|
|
72
|
-
model: 'gpt-3.5-turbo',
|
|
73
|
-
messages: [
|
|
74
|
-
{
|
|
75
|
-
role: 'user',
|
|
76
|
-
content: prompt
|
|
77
|
-
}
|
|
78
|
-
],
|
|
79
|
-
max_tokens: 100,
|
|
80
|
-
temperature: 0.3
|
|
81
|
-
});
|
|
82
|
-
}
|
|
83
|
-
|
|
84
|
-
const suggestedName = response.choices[0]?.message?.content?.trim() || 'untitled-document';
|
|
85
|
-
|
|
86
|
-
// Clean and validate the suggested name
|
|
87
|
-
return this.sanitizeFileName(suggestedName, convention);
|
|
88
|
-
} catch (error) {
|
|
89
|
-
console.error('OpenAI API error:', error);
|
|
90
|
-
throw new Error(`Failed to generate filename with OpenAI: ${error instanceof Error ? error.message : 'Unknown error'}`);
|
|
91
|
-
}
|
|
92
|
-
}
|
|
93
|
-
|
|
94
|
-
private sanitizeFileName(name: string, convention: NamingConvention): string {
|
|
95
|
-
// Remove any potential file extensions from the suggestion
|
|
96
|
-
const nameWithoutExt = name.replace(/\.[^/.]+$/, '');
|
|
97
|
-
|
|
98
|
-
// Apply the naming convention
|
|
99
|
-
let cleaned = applyNamingConvention(nameWithoutExt, convention);
|
|
100
|
-
|
|
101
|
-
// Ensure it's not empty and not too long
|
|
102
|
-
if (!cleaned) {
|
|
103
|
-
cleaned = applyNamingConvention('untitled document', convention);
|
|
104
|
-
} else if (cleaned.length > 100) {
|
|
105
|
-
// Truncate while preserving naming convention structure
|
|
106
|
-
cleaned = cleaned.substring(0, 100);
|
|
107
|
-
// Clean up any broken separators at the end
|
|
108
|
-
if (convention === 'kebab-case') {
|
|
109
|
-
cleaned = cleaned.replace(/-[^-]*$/, '');
|
|
110
|
-
} else if (convention === 'snake_case') {
|
|
111
|
-
cleaned = cleaned.replace(/_[^_]*$/, '');
|
|
112
|
-
}
|
|
113
|
-
}
|
|
114
|
-
|
|
115
|
-
return cleaned;
|
|
116
|
-
}
|
|
117
|
-
}
|
package/src/types/index.ts
DELETED
|
@@ -1,76 +0,0 @@
|
|
|
1
|
-
/** Metadata extracted from a document's own contents (e.g. PDF info dictionary). */
export interface DocumentMetadata {
  title?: string;
  author?: string;
  creator?: string;
  subject?: string;
  keywords?: string[];
  creationDate?: Date;
  modificationDate?: Date;
  pages?: number;
  wordCount?: number;
}

/** Everything known about a file being renamed: path, fs stats, and parsed metadata. */
export interface FileInfo {
  path: string;
  name: string;
  extension: string;
  size: number;
  content?: string;
  // File system metadata
  createdAt: Date;
  modifiedAt: Date;
  accessedAt: Date;
  // Context metadata
  parentFolder: string;
  folderPath: string[];
  // Document metadata (extracted from file contents)
  documentMetadata?: DocumentMetadata;
}

/** Outcome of a single rename attempt; `error` is set only when `success` is false. */
export interface RenameResult {
  originalPath: string;
  newPath: string;
  suggestedName: string;
  success: boolean;
  error?: string;
}

/** Contract every AI backend (Claude, OpenAI, Ollama, LM Studio) implements. */
export interface AIProvider {
  name: string;
  generateFileName: (content: string, originalName: string, namingConvention?: string, category?: string, fileInfo?: FileInfo) => Promise<string>;
}

/** Supported output casing styles for generated filenames. */
export type NamingConvention = 'kebab-case' | 'snake_case' | 'camelCase' | 'PascalCase' | 'lowercase' | 'UPPERCASE';
/** File categories for template selection; 'auto' means detect from content. */
export type FileCategory = 'document' | 'movie' | 'music' | 'series' | 'photo' | 'book' | 'general' | 'auto';
/** Date rendering styles for templated filenames; 'none' omits the date. */
export type DateFormat = 'YYYY-MM-DD' | 'YYYY' | 'YYYYMMDD' | 'none';

/** Controls how the core AI-generated name is expanded into a full filename. */
export interface TemplateOptions {
  category: FileCategory;
  personalName?: string;
  dateFormat: DateFormat;
}

/** Top-level runtime configuration for the renamer. */
export interface Config {
  aiProvider: 'claude' | 'openai' | 'ollama' | 'lmstudio';
  apiKey?: string; // Optional for local providers
  maxFileSize: number;
  supportedExtensions: string[];
  dryRun: boolean;
  namingConvention: NamingConvention;
  templateOptions: TemplateOptions;
  // Local LLM specific options
  localLLMConfig?: {
    baseUrl?: string;
    model?: string;
  };
}

/** What a parser returns: raw text plus any metadata it could extract. */
export interface ParseResult {
  content: string;
  metadata?: DocumentMetadata;
}

/** Contract for per-format document parsers (text, PDF, Word, Excel, …). */
export interface DocumentParser {
  supports: (filePath: string) => boolean;
  parse: (filePath: string) => Promise<ParseResult>;
}
|