langtrain 0.1.3 → 0.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/publish.yml +39 -0
- package/README.md +54 -20
- package/dist/cli.d.mts +3 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.js +12726 -0
- package/dist/cli.js.map +1 -0
- package/dist/cli.mjs +12731 -0
- package/dist/cli.mjs.map +1 -0
- package/package.json +11 -2
- package/src/cli.ts +213 -0
- package/tsup.config.ts +1 -1
package/package.json
CHANGED
|
@@ -1,10 +1,13 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "langtrain",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.6",
|
|
4
4
|
"description": "Unified JavaScript SDK for Langtrain Ecosystem",
|
|
5
5
|
"main": "./dist/index.js",
|
|
6
6
|
"module": "./dist/index.mjs",
|
|
7
7
|
"types": "./dist/index.d.ts",
|
|
8
|
+
"bin": {
|
|
9
|
+
"langtrain": "./dist/cli.js"
|
|
10
|
+
},
|
|
8
11
|
"scripts": {
|
|
9
12
|
"build": "tsup",
|
|
10
13
|
"dev": "tsup --watch",
|
|
@@ -26,9 +29,15 @@
|
|
|
26
29
|
"author": "Langtrain AI",
|
|
27
30
|
"license": "MIT",
|
|
28
31
|
"devDependencies": {
|
|
29
|
-
"
|
|
32
|
+
"@types/node": "^25.2.3",
|
|
30
33
|
"langtune": "file:../langtune/js",
|
|
34
|
+
"langvision": "file:../langvision/js",
|
|
31
35
|
"tsup": "^8.0.2",
|
|
32
36
|
"typescript": "^5.4.2"
|
|
37
|
+
},
|
|
38
|
+
"dependencies": {
|
|
39
|
+
"@clack/prompts": "^1.0.1",
|
|
40
|
+
"commander": "^14.0.3",
|
|
41
|
+
"kleur": "^4.1.5"
|
|
33
42
|
}
|
|
34
43
|
}
|
package/src/cli.ts
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import { intro, outro, select, text, spinner, isCancel, cancel } from '@clack/prompts';
|
|
3
|
+
import { bgCyan, black, red, green } from 'kleur/colors';
|
|
4
|
+
import { Command } from 'commander';
|
|
5
|
+
import { Langvision, Langtune } from './index';
|
|
6
|
+
|
|
7
|
+
// Initialize clients
// Shared SDK clients used by every handler below; constructed once at module load.
// NOTE(review): constructors take no arguments here — presumably they read config
// from the environment; confirm against the Langvision/Langtune SDK docs.
const vision = new Langvision();
const tune = new Langtune();
|
|
10
|
+
|
|
11
|
+
async function main() {
|
|
12
|
+
const program = new Command();
|
|
13
|
+
|
|
14
|
+
program
|
|
15
|
+
.name('langtrain')
|
|
16
|
+
.description('Langtrain CLI for AI Model Fine-tuning and Generation')
|
|
17
|
+
.version('0.1.5');
|
|
18
|
+
|
|
19
|
+
program.action(async () => {
|
|
20
|
+
console.clear();
|
|
21
|
+
intro(`${bgCyan(black(' langtrain '))}`);
|
|
22
|
+
|
|
23
|
+
const operation = await select({
|
|
24
|
+
message: 'Select an operation:',
|
|
25
|
+
options: [
|
|
26
|
+
{ value: 'tune-finetune', label: '🧠 Fine-tune Text Model (Langtune)' },
|
|
27
|
+
{ value: 'tune-generate', label: '📝 Generate Text (Langtune)' },
|
|
28
|
+
{ value: 'vision-finetune', label: '👁️ Fine-tune Vision Model (Langvision)' },
|
|
29
|
+
{ value: 'vision-generate', label: '🖼️ Generate Vision Response (Langvision)' },
|
|
30
|
+
{ value: 'exit', label: '🚪 Exit' }
|
|
31
|
+
],
|
|
32
|
+
});
|
|
33
|
+
|
|
34
|
+
if (isCancel(operation) || operation === 'exit') {
|
|
35
|
+
outro('Goodbye!');
|
|
36
|
+
process.exit(0);
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
try {
|
|
40
|
+
if (operation === 'tune-finetune') {
|
|
41
|
+
await handleTuneFinetune();
|
|
42
|
+
} else if (operation === 'tune-generate') {
|
|
43
|
+
await handleTuneGenerate();
|
|
44
|
+
} else if (operation === 'vision-finetune') {
|
|
45
|
+
await handleVisionFinetune();
|
|
46
|
+
} else if (operation === 'vision-generate') {
|
|
47
|
+
await handleVisionGenerate();
|
|
48
|
+
}
|
|
49
|
+
} catch (error: any) {
|
|
50
|
+
outro(red(`Error: ${error.message}`));
|
|
51
|
+
process.exit(1);
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
outro(green('Operation completed successfully!'));
|
|
55
|
+
});
|
|
56
|
+
|
|
57
|
+
program.parse(process.argv);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
// Handler for Langtune Fine-tuning
|
|
61
|
+
async function handleTuneFinetune() {
|
|
62
|
+
const model = await text({
|
|
63
|
+
message: 'Enter base model (e.g., gpt-3.5-turbo):',
|
|
64
|
+
placeholder: 'gpt-3.5-turbo',
|
|
65
|
+
validate(value) {
|
|
66
|
+
if (!value || value.length === 0) return 'Value is required!';
|
|
67
|
+
},
|
|
68
|
+
});
|
|
69
|
+
if (isCancel(model)) cancel('Operation cancelled.');
|
|
70
|
+
|
|
71
|
+
const trainFile = await text({
|
|
72
|
+
message: 'Enter path to training file:',
|
|
73
|
+
placeholder: './data.jsonl',
|
|
74
|
+
validate(value) {
|
|
75
|
+
if (!value || value.length === 0) return 'Value is required!';
|
|
76
|
+
},
|
|
77
|
+
});
|
|
78
|
+
if (isCancel(trainFile)) cancel('Operation cancelled.');
|
|
79
|
+
|
|
80
|
+
const epochs = await text({
|
|
81
|
+
message: 'Num Epochs:',
|
|
82
|
+
placeholder: '3',
|
|
83
|
+
initialValue: '3'
|
|
84
|
+
});
|
|
85
|
+
if (isCancel(epochs)) cancel('Operation cancelled.');
|
|
86
|
+
|
|
87
|
+
const s = spinner();
|
|
88
|
+
s.start('Starting fine-tuning job...');
|
|
89
|
+
|
|
90
|
+
try {
|
|
91
|
+
// Check if FinetuneConfig types match what's needed.
|
|
92
|
+
// Casting to any to bypass strict type checking for this demo or ensure types are imported correctly.
|
|
93
|
+
// In a real scenario, we'd construct the full config object.
|
|
94
|
+
const config: any = {
|
|
95
|
+
model: model as string,
|
|
96
|
+
trainFile: trainFile as string,
|
|
97
|
+
preset: 'default', // simplified
|
|
98
|
+
epochs: parseInt(epochs as string),
|
|
99
|
+
batchSize: 1,
|
|
100
|
+
learningRate: 2e-5,
|
|
101
|
+
loraRank: 16,
|
|
102
|
+
outputDir: './output'
|
|
103
|
+
};
|
|
104
|
+
|
|
105
|
+
await tune.finetune(config);
|
|
106
|
+
s.stop(green('Fine-tuning job started!'));
|
|
107
|
+
} catch (e: any) {
|
|
108
|
+
s.stop(red('Failed to start job.'));
|
|
109
|
+
throw e;
|
|
110
|
+
}
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
// Handler for Langtune Generation
|
|
114
|
+
async function handleTuneGenerate() {
|
|
115
|
+
const model = await text({
|
|
116
|
+
message: 'Enter model path:',
|
|
117
|
+
placeholder: './output/model',
|
|
118
|
+
initialValue: './output/model'
|
|
119
|
+
});
|
|
120
|
+
if (isCancel(model)) cancel('Operation cancelled');
|
|
121
|
+
|
|
122
|
+
const prompt = await text({
|
|
123
|
+
message: 'Enter prompt:',
|
|
124
|
+
placeholder: 'Hello world',
|
|
125
|
+
});
|
|
126
|
+
if (isCancel(prompt)) cancel('Operation cancelled');
|
|
127
|
+
|
|
128
|
+
const s = spinner();
|
|
129
|
+
s.start('Generating response...');
|
|
130
|
+
|
|
131
|
+
try {
|
|
132
|
+
const response = await tune.generate(model as string, { prompt: prompt as string });
|
|
133
|
+
s.stop('Generation complete');
|
|
134
|
+
intro('Response:');
|
|
135
|
+
console.log(response);
|
|
136
|
+
} catch (e: any) {
|
|
137
|
+
s.stop(red('Generation failed.'));
|
|
138
|
+
throw e;
|
|
139
|
+
}
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
// Handler for Langvision Fine-tuning
|
|
143
|
+
async function handleVisionFinetune() {
|
|
144
|
+
const model = await text({
|
|
145
|
+
message: 'Enter base vision model:',
|
|
146
|
+
placeholder: 'llava-v1.5-7b',
|
|
147
|
+
initialValue: 'llava-v1.5-7b'
|
|
148
|
+
});
|
|
149
|
+
if (isCancel(model)) cancel('Operation cancelled');
|
|
150
|
+
|
|
151
|
+
const dataset = await text({
|
|
152
|
+
message: 'Enter dataset path:',
|
|
153
|
+
placeholder: './dataset',
|
|
154
|
+
});
|
|
155
|
+
if (isCancel(dataset)) cancel('Operation cancelled');
|
|
156
|
+
|
|
157
|
+
const epochs = await text({
|
|
158
|
+
message: 'Num Epochs:',
|
|
159
|
+
placeholder: '3',
|
|
160
|
+
initialValue: '3'
|
|
161
|
+
});
|
|
162
|
+
|
|
163
|
+
const s = spinner();
|
|
164
|
+
s.start('Starting vision fine-tuning...');
|
|
165
|
+
|
|
166
|
+
try {
|
|
167
|
+
const config: any = {
|
|
168
|
+
model: model as string,
|
|
169
|
+
dataset: dataset as string,
|
|
170
|
+
epochs: parseInt(epochs as string),
|
|
171
|
+
batchSize: 1,
|
|
172
|
+
learningRate: 2e-5,
|
|
173
|
+
loraRank: 16,
|
|
174
|
+
outputDir: './vision-output'
|
|
175
|
+
};
|
|
176
|
+
await vision.finetune(config);
|
|
177
|
+
s.stop(green('Vision fine-tuning started!'));
|
|
178
|
+
} catch (e: any) {
|
|
179
|
+
s.stop(red('Failed to start vision job.'));
|
|
180
|
+
throw e;
|
|
181
|
+
}
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
// Handler for Langvision Generation
|
|
185
|
+
async function handleVisionGenerate() {
|
|
186
|
+
const model = await text({
|
|
187
|
+
message: 'Enter model path:',
|
|
188
|
+
placeholder: './vision-output/model',
|
|
189
|
+
initialValue: './vision-output/model'
|
|
190
|
+
});
|
|
191
|
+
if (isCancel(model)) cancel('Operation cancelled');
|
|
192
|
+
|
|
193
|
+
const prompt = await text({
|
|
194
|
+
message: 'Enter prompt/image path:', // Simplified for CLI
|
|
195
|
+
placeholder: 'Describe this image...',
|
|
196
|
+
});
|
|
197
|
+
if (isCancel(prompt)) cancel('Operation cancelled');
|
|
198
|
+
|
|
199
|
+
const s = spinner();
|
|
200
|
+
s.start('Generating vision response...');
|
|
201
|
+
|
|
202
|
+
try {
|
|
203
|
+
const response = await vision.generate(model as string, { prompt: prompt as string });
|
|
204
|
+
s.stop('Generation complete');
|
|
205
|
+
intro('Response:');
|
|
206
|
+
console.log(response);
|
|
207
|
+
} catch (e: any) {
|
|
208
|
+
s.stop(red('Generation failed.'));
|
|
209
|
+
throw e;
|
|
210
|
+
}
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
// Entry point: surface any unexpected rejection instead of leaving an
// unhandled promise rejection (which would change the exit behavior).
main().catch(console.error);
|