@decartai/sdk 0.0.11 → 0.0.13
- package/README.md +28 -0
- package/dist/index.d.ts +12 -0
- package/dist/process/request.js +1 -0
- package/dist/process/types.d.ts +136 -3
- package/dist/realtime/client.d.ts +1 -1
- package/dist/shared/model.d.ts +57 -11
- package/dist/shared/model.js +77 -39
- package/dist/version.js +1 -1
- package/package.json +1 -1
package/README.md
CHANGED
@@ -82,6 +82,34 @@ const result = await client.process({
 videoElement.src = URL.createObjectURL(result);
 ```

+## Development
+
+### Setup
+
+```bash
+pnpm install
+```
+
+### Development Commands
+
+- `pnpm build` - Build the project
+- `pnpm dev:example` - Run Vite dev server for examples
+- `pnpm test` - Run unit tests
+- `pnpm test:e2e` - Run end-to-end tests
+- `pnpm typecheck` - Type check with TypeScript
+- `pnpm format` - Format code with Biome
+- `pnpm lint` - Lint code with Biome
+
+### Publishing
+
+1. **Version bump**: Run `pnpm release` to bump the version (this uses `bumpp` to create a new version tag) and push it to GitHub
+2. **Automated publish**: The GitHub Actions workflow will:
+   - Build the project
+   - Publish to npm
+   - Create a GitHub release with changelog
+
+The package is published to npm as `@decartai/sdk`.
+
 ## License

 MIT
package/dist/index.d.ts
CHANGED
@@ -17,6 +17,18 @@ declare const createDecartClient: (options: DecartClientOptions) => {
   realtime: {
     connect: (stream: MediaStream, options: RealTimeClientConnectOptions) => Promise<RealTimeClient>;
   };
+  /**
+   * Client for video and image generation.
+   *
+   * @example
+   * ```ts
+   * const client = createDecartClient({ apiKey: "your-api-key" });
+   * const result = await client.process({
+   *   model: models.video("lucy-pro-t2v"),
+   *   prompt: "A beautiful sunset over the ocean"
+   * });
+   * ```
+   */
   process: ProcessClient;
 };
 //#endregion
package/dist/process/request.js
CHANGED
@@ -17,6 +17,7 @@ async function fileInputToBlob(input) {
 async function sendRequest({ baseUrl, apiKey, model, inputs, signal, integration }) {
 	const formData = new FormData();
 	for (const [key, value] of Object.entries(inputs)) if (value !== void 0 && value !== null) if (value instanceof Blob) formData.append(key, value);
+	else if (typeof value === "object" && value !== null) formData.append(key, JSON.stringify(value));
 	else formData.append(key, String(value));
 	const endpoint = `${baseUrl}${model.urlPath}`;
 	const response = await fetch(endpoint, {
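The new branch lets structured values, such as the `trajectory` array introduced for `lucy-motion` later in this diff, travel through the same multipart form as files and scalars. A minimal sketch of the resulting behavior (the input values here are illustrative, not part of the SDK):

```ts
// Mirror of the three append branches in sendRequest, with made-up inputs.
const inputs: Record<string, unknown> = {
  data: new Blob(["..."], { type: "image/png" }), // Blob -> appended as-is
  trajectory: [{ frame: 0, x: 10, y: 20 }],       // object -> JSON.stringify
  seed: 42,                                       // primitive -> String(value)
};

const formData = new FormData();
for (const [key, value] of Object.entries(inputs)) {
  if (value === undefined || value === null) continue;
  if (value instanceof Blob) formData.append(key, value);
  else if (typeof value === "object") formData.append(key, JSON.stringify(value));
  else formData.append(key, String(value));
}

console.log(formData.get("trajectory")); // '[{"frame":0,"x":10,"y":20}]'
```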
package/dist/process/types.d.ts
CHANGED
@@ -1,12 +1,145 @@
-import { ModelDefinition, ModelInputSchemas } from "../shared/model.js";
+import { ImageModels, ModelDefinition, ModelInputSchemas, VideoModels } from "../shared/model.js";
 import { z } from "zod";

 //#region src/process/types.d.ts
+type FileInput = File | Blob | ReadableStream | URL | string;
 type InferModelInputs<T extends ModelDefinition> = T["name"] extends keyof ModelInputSchemas ? z.input<ModelInputSchemas[T["name"]]> : Record<string, never>;
+/**
+ * Model-specific input documentation for image generation models.
+ */
+interface ImageGenerationInputs {
+  /**
+   * Text description to use for the generation.
+   *
+   * See our [Prompt Engineering](https://docs.platform.decart.ai/models/image/image-generation#prompt-engineering) guide for how to write prompt for Decart image models effectively.
+   */
+  prompt: string;
+}
+/**
+ * Model-specific input documentation for image editing models.
+ */
+interface ImageEditingInputs {
+  /**
+   * Text description of the changes to apply to the image.
+   *
+   * It's highly recommended to read our [Prompt Engineering for Edits](https://docs.platform.decart.ai/models/image/image-editing#prompt-engineering-for-edits) guide for how to write effective editing prompts.
+   */
+  prompt: string;
+}
+/**
+ * Model-specific input documentation for video models.
+ */
+interface VideoModelInputs {
+  /**
+   * Text description to use for the generation.
+   *
+   * See our [Prompt Engineering](https://docs.platform.decart.ai/models/video/video-generation#prompt-engineering) guide for how to write prompt for Decart video models effectively.
+   */
+  prompt: string;
+}
+/**
+ * Default inputs for models that only require a prompt.
+ */
+interface PromptInput {
+  /**
+   * Text description to use for the generation.
+   */
+  prompt: string;
+}
+/**
+ * Conditional type that selects the appropriate model-specific input documentation based on the model type.
+ * This allows different models to have field-specific documentation while maintaining type safety.
+ * Specific models are checked first, then falls back to category-based selection.
+ */
+type ModelSpecificInputs<T extends ModelDefinition> = T["name"] extends "lucy-pro-i2i" ? ImageEditingInputs : T["name"] extends ImageModels ? ImageGenerationInputs : T["name"] extends VideoModels ? VideoModelInputs : PromptInput;
+interface ProcessInputs {
+  /**
+   * Random seed for reproducible results.
+   *
+   * Using the same seed with the same prompt and settings will produce the same output every time.
+   * This is useful for testing, debugging, or when you want to recreate a specific result.
+   *
+   */
+  seed?: number;
+  /**
+   * The output resolution to use for the generation.
+   *
+   * @default "720p"
+   */
+  resolution?: "480p" | "720p";
+  /**
+   * The output orientation to use for the generation.
+   *
+   * @default "landscape"
+   */
+  orientation?: "landscape" | "portrait";
+  /**
+   * The data to use for generation (for image-to-image and video-to-video).
+   * Can be a File, Blob, ReadableStream, URL, or string URL.
+   */
+  data?: FileInput;
+  /**
+   * The start frame image (for first-last-frame models).
+   * Can be a File, Blob, ReadableStream, URL, or string URL.
+   */
+  start?: FileInput;
+  /**
+   * The end frame image (for first-last-frame models).
+   * Can be a File, Blob, ReadableStream, URL, or string URL.
+   */
+  end?: FileInput;
+  /**
+   * Whether to enhance the prompt.
+   *
+   * @remarks
+   * For best results, keep this `true` (default) to let Decart's AI enhance your prompts.
+   * Only disable it if you need exact prompt control.
+   *
+   * @default true
+   */
+  enhance_prompt?: boolean;
+  /**
+   * The number of inference steps.
+   *
+   * @default 50
+   */
+  num_inference_steps?: number;
+}
+/**
+ * ProcessInputs combined with model-specific inputs.
+ * This ensures fields have the correct descriptions based on the model type.
+ * Add fields to ImageGenerationInputs, ImageEditingInputs, VideoModelInputs, or PromptInput
+ * to provide model-specific documentation for any field.
+ */
+type ModelSpecificProcessInputs<T extends ModelDefinition> = ProcessInputs & ModelSpecificInputs<T>;
+/**
+ * Pick only the fields from ModelSpecificProcessInputs that exist in the inferred model inputs,
+ * so JSDoc comments will be preserved, while type inference will be accurate.
+ */
+type PickDocumentedInputs<T extends ModelDefinition> = Pick<ModelSpecificProcessInputs<T>, keyof ModelSpecificProcessInputs<T> & keyof InferModelInputs<T>>;
+/**
+ * Merge documented inputs with inferred inputs, ensuring zod types take precedence
+ * while preserving JSDoc comments from ModelSpecificProcessInputs.
+ *
+ * By intersecting PickDocumentedInputs with InferModelInputs, we get:
+ * - JSDoc comments from ModelSpecificProcessInputs (from PickDocumentedInputs)
+ * - Accurate types from zod schemas (from InferModelInputs, takes precedence in intersection)
+ */
+type MergeDocumentedInputs<T extends ModelDefinition> = PickDocumentedInputs<T> & InferModelInputs<T>;
+/**
+ * Options for the process client to generate video or image content.
+ *
+ * @template T - The model definition type
+ */
 type ProcessOptions<T extends ModelDefinition = ModelDefinition> = {
+  /**
+   * The model definition to use.
+   */
   model: T;
+  /**
+   * Optional `AbortSignal` for canceling the request.
+   */
   signal?: AbortSignal;
-} &
-type FileInput = File | Blob | ReadableStream | URL | string;
+} & MergeDocumentedInputs<T>;
 //#endregion
 export { FileInput, ProcessOptions };
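The `PickDocumentedInputs`/`MergeDocumentedInputs` pair is a general TypeScript trick: keep hand-written JSDoc on option fields while letting the zod-inferred types decide what is actually accepted. A stripped-down sketch of the same technique, using illustrative names that are not part of the SDK:

```ts
// Hand-documented shape: carries JSDoc, but its types may be looser than reality.
interface Documented {
  /** Random seed for reproducible results. */
  seed?: number;
  /** The output resolution to use for the generation. */
  resolution?: string;
}

// Stand-in for z.input<typeof someModelSchema>: exact types, no JSDoc.
type Inferred = {
  seed?: number;
  resolution?: "480p" | "720p";
};

// Keep only documented keys that the inferred shape also has, then intersect so
// the narrower inferred types win while hover documentation survives per field.
type PickDocumented = Pick<Documented, keyof Documented & keyof Inferred>;
type Merged = PickDocumented & Inferred;

const ok: Merged = { seed: 1, resolution: "720p" };
// const bad: Merged = { resolution: "1080p" }; // rejected: not in the inferred union
```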
package/dist/realtime/client.d.ts
CHANGED
@@ -14,7 +14,7 @@ type OnRemoteStreamFn = (stream: MediaStream) => void;
 type RealTimeClientInitialState = z.infer<typeof realTimeClientInitialStateSchema>;
 declare const realTimeClientConnectOptionsSchema: z.ZodObject<{
   model: z.ZodObject<{
-    name: z.ZodUnion<readonly [z.ZodUnion<readonly [z.ZodLiteral<"mirage">, z.ZodLiteral<"mirage_v2">, z.ZodLiteral<"lucy_v2v_720p_rt">]>, z.ZodUnion<readonly [z.ZodLiteral<"lucy-dev-i2v">, z.ZodLiteral<"lucy-dev-v2v">, z.ZodLiteral<"lucy-pro-t2v">, z.ZodLiteral<"lucy-pro-i2v">, z.ZodLiteral<"lucy-pro-v2v">, z.ZodLiteral<"lucy-pro-flf2v">]>, z.ZodUnion<readonly [z.ZodLiteral<"lucy-pro-t2i">, z.ZodLiteral<"lucy-pro-i2i">]>]>;
+    name: z.ZodUnion<readonly [z.ZodUnion<readonly [z.ZodLiteral<"mirage">, z.ZodLiteral<"mirage_v2">, z.ZodLiteral<"lucy_v2v_720p_rt">]>, z.ZodUnion<readonly [z.ZodLiteral<"lucy-dev-i2v">, z.ZodLiteral<"lucy-dev-v2v">, z.ZodLiteral<"lucy-pro-t2v">, z.ZodLiteral<"lucy-pro-i2v">, z.ZodLiteral<"lucy-pro-v2v">, z.ZodLiteral<"lucy-pro-flf2v">, z.ZodLiteral<"lucy-motion">]>, z.ZodUnion<readonly [z.ZodLiteral<"lucy-pro-t2i">, z.ZodLiteral<"lucy-pro-i2i">]>]>;
     urlPath: z.ZodString;
     fps: z.ZodNumber;
     width: z.ZodNumber;
package/dist/shared/model.d.ts
CHANGED
@@ -2,9 +2,9 @@ import { z } from "zod";

 //#region src/shared/model.d.ts
 declare const realtimeModels: z.ZodUnion<readonly [z.ZodLiteral<"mirage">, z.ZodLiteral<"mirage_v2">, z.ZodLiteral<"lucy_v2v_720p_rt">]>;
-declare const videoModels: z.ZodUnion<readonly [z.ZodLiteral<"lucy-dev-i2v">, z.ZodLiteral<"lucy-dev-v2v">, z.ZodLiteral<"lucy-pro-t2v">, z.ZodLiteral<"lucy-pro-i2v">, z.ZodLiteral<"lucy-pro-v2v">, z.ZodLiteral<"lucy-pro-flf2v">]>;
+declare const videoModels: z.ZodUnion<readonly [z.ZodLiteral<"lucy-dev-i2v">, z.ZodLiteral<"lucy-dev-v2v">, z.ZodLiteral<"lucy-pro-t2v">, z.ZodLiteral<"lucy-pro-i2v">, z.ZodLiteral<"lucy-pro-v2v">, z.ZodLiteral<"lucy-pro-flf2v">, z.ZodLiteral<"lucy-motion">]>;
 declare const imageModels: z.ZodUnion<readonly [z.ZodLiteral<"lucy-pro-t2i">, z.ZodLiteral<"lucy-pro-i2i">]>;
-declare const modelSchema: z.ZodUnion<readonly [z.ZodUnion<readonly [z.ZodLiteral<"mirage">, z.ZodLiteral<"mirage_v2">, z.ZodLiteral<"lucy_v2v_720p_rt">]>, z.ZodUnion<readonly [z.ZodLiteral<"lucy-dev-i2v">, z.ZodLiteral<"lucy-dev-v2v">, z.ZodLiteral<"lucy-pro-t2v">, z.ZodLiteral<"lucy-pro-i2v">, z.ZodLiteral<"lucy-pro-v2v">, z.ZodLiteral<"lucy-pro-flf2v">]>, z.ZodUnion<readonly [z.ZodLiteral<"lucy-pro-t2i">, z.ZodLiteral<"lucy-pro-i2i">]>]>;
+declare const modelSchema: z.ZodUnion<readonly [z.ZodUnion<readonly [z.ZodLiteral<"mirage">, z.ZodLiteral<"mirage_v2">, z.ZodLiteral<"lucy_v2v_720p_rt">]>, z.ZodUnion<readonly [z.ZodLiteral<"lucy-dev-i2v">, z.ZodLiteral<"lucy-dev-v2v">, z.ZodLiteral<"lucy-pro-t2v">, z.ZodLiteral<"lucy-pro-i2v">, z.ZodLiteral<"lucy-pro-v2v">, z.ZodLiteral<"lucy-pro-flf2v">, z.ZodLiteral<"lucy-motion">]>, z.ZodUnion<readonly [z.ZodLiteral<"lucy-pro-t2i">, z.ZodLiteral<"lucy-pro-i2i">]>]>;
 type Model = z.infer<typeof modelSchema>;
 type RealTimeModels = z.infer<typeof realtimeModels>;
 type VideoModels = z.infer<typeof videoModels>;
@@ -13,32 +13,44 @@ declare const modelInputSchemas: {
   readonly "lucy-pro-t2v": z.ZodObject<{
     prompt: z.ZodString;
     seed: z.ZodOptional<z.ZodNumber>;
-    resolution: z.ZodOptional<z.
+    resolution: z.ZodDefault<z.ZodOptional<z.ZodEnum<{
+      "720p": "720p";
+      "480p": "480p";
+    }>>>;
     orientation: z.ZodOptional<z.ZodString>;
   }, z.core.$strip>;
   readonly "lucy-pro-t2i": z.ZodObject<{
     prompt: z.ZodString;
     seed: z.ZodOptional<z.ZodNumber>;
-    resolution: z.ZodOptional<z.
+    resolution: z.ZodDefault<z.ZodOptional<z.ZodEnum<{
+      "720p": "720p";
+      "480p": "480p";
+    }>>>;
     orientation: z.ZodOptional<z.ZodString>;
   }, z.core.$strip>;
   readonly "lucy-pro-i2v": z.ZodObject<{
     prompt: z.ZodString;
     data: z.ZodUnion<readonly [z.ZodCustom<File, File>, z.ZodCustom<Blob, Blob>, z.ZodCustom<ReadableStream<unknown>, ReadableStream<unknown>>, z.ZodCustom<URL, URL>, z.ZodURL]>;
     seed: z.ZodOptional<z.ZodNumber>;
-    resolution: z.ZodOptional<z.
+    resolution: z.ZodDefault<z.ZodOptional<z.ZodEnum<{
+      "720p": "720p";
+      "480p": "480p";
+    }>>>;
   }, z.core.$strip>;
   readonly "lucy-dev-i2v": z.ZodObject<{
     prompt: z.ZodString;
     data: z.ZodUnion<readonly [z.ZodCustom<File, File>, z.ZodCustom<Blob, Blob>, z.ZodCustom<ReadableStream<unknown>, ReadableStream<unknown>>, z.ZodCustom<URL, URL>, z.ZodURL]>;
     seed: z.ZodOptional<z.ZodNumber>;
-    resolution: z.ZodOptional<z.
+    resolution: z.ZodOptional<z.ZodDefault<z.ZodLiteral<"720p">>>;
   }, z.core.$strip>;
   readonly "lucy-pro-v2v": z.ZodObject<{
     prompt: z.ZodString;
     data: z.ZodUnion<readonly [z.ZodCustom<File, File>, z.ZodCustom<Blob, Blob>, z.ZodCustom<ReadableStream<unknown>, ReadableStream<unknown>>, z.ZodCustom<URL, URL>, z.ZodURL]>;
     seed: z.ZodOptional<z.ZodNumber>;
-    resolution: z.ZodOptional<z.
+    resolution: z.ZodDefault<z.ZodOptional<z.ZodEnum<{
+      "720p": "720p";
+      "480p": "480p";
+    }>>>;
     enhance_prompt: z.ZodOptional<z.ZodBoolean>;
     num_inference_steps: z.ZodOptional<z.ZodNumber>;
   }, z.core.$strip>;
@@ -46,7 +58,7 @@ declare const modelInputSchemas: {
     prompt: z.ZodString;
     data: z.ZodUnion<readonly [z.ZodCustom<File, File>, z.ZodCustom<Blob, Blob>, z.ZodCustom<ReadableStream<unknown>, ReadableStream<unknown>>, z.ZodCustom<URL, URL>, z.ZodURL]>;
     seed: z.ZodOptional<z.ZodNumber>;
-    resolution: z.ZodOptional<z.
+    resolution: z.ZodOptional<z.ZodDefault<z.ZodLiteral<"720p">>>;
     enhance_prompt: z.ZodOptional<z.ZodBoolean>;
   }, z.core.$strip>;
   readonly "lucy-pro-flf2v": z.ZodObject<{
@@ -54,15 +66,31 @@ declare const modelInputSchemas: {
     start: z.ZodUnion<readonly [z.ZodCustom<File, File>, z.ZodCustom<Blob, Blob>, z.ZodCustom<ReadableStream<unknown>, ReadableStream<unknown>>, z.ZodCustom<URL, URL>, z.ZodURL]>;
     end: z.ZodUnion<readonly [z.ZodCustom<File, File>, z.ZodCustom<Blob, Blob>, z.ZodCustom<ReadableStream<unknown>, ReadableStream<unknown>>, z.ZodCustom<URL, URL>, z.ZodURL]>;
     seed: z.ZodOptional<z.ZodNumber>;
-    resolution: z.ZodOptional<z.
+    resolution: z.ZodDefault<z.ZodOptional<z.ZodEnum<{
+      "720p": "720p";
+      "480p": "480p";
+    }>>>;
   }, z.core.$strip>;
   readonly "lucy-pro-i2i": z.ZodObject<{
     prompt: z.ZodString;
     data: z.ZodUnion<readonly [z.ZodCustom<File, File>, z.ZodCustom<Blob, Blob>, z.ZodCustom<ReadableStream<unknown>, ReadableStream<unknown>>, z.ZodCustom<URL, URL>, z.ZodURL]>;
     seed: z.ZodOptional<z.ZodNumber>;
-    resolution: z.ZodOptional<z.
+    resolution: z.ZodDefault<z.ZodOptional<z.ZodEnum<{
+      "720p": "720p";
+      "480p": "480p";
+    }>>>;
     enhance_prompt: z.ZodOptional<z.ZodBoolean>;
   }, z.core.$strip>;
+  readonly "lucy-motion": z.ZodObject<{
+    data: z.ZodUnion<readonly [z.ZodCustom<File, File>, z.ZodCustom<Blob, Blob>, z.ZodCustom<ReadableStream<unknown>, ReadableStream<unknown>>, z.ZodCustom<URL, URL>, z.ZodURL]>;
+    trajectory: z.ZodArray<z.ZodObject<{
+      frame: z.ZodNumber;
+      x: z.ZodNumber;
+      y: z.ZodNumber;
+    }, z.core.$strip>>;
+    seed: z.ZodOptional<z.ZodNumber>;
+    resolution: z.ZodOptional<z.ZodDefault<z.ZodLiteral<"720p">>>;
+  }, z.core.$strip>;
 };
 type ModelInputSchemas = typeof modelInputSchemas;
 type ModelDefinition<T extends Model = Model> = {
@@ -71,11 +99,29 @@ type ModelDefinition<T extends Model = Model> = {
   fps: number;
   width: number;
   height: number;
-  inputSchema: T extends keyof ModelInputSchemas ? ModelInputSchemas[T] : z.
+  inputSchema: T extends keyof ModelInputSchemas ? ModelInputSchemas[T] : z.ZodTypeAny;
 };
 declare const models: {
   realtime: <T extends RealTimeModels>(model: T) => ModelDefinition<T>;
+  /**
+   * Get a video model identifier.
+   *
+   * Available options:
+   * - `"lucy-pro-t2v"` - Text-to-video
+   * - `"lucy-pro-i2v"` - Image-to-video
+   * - `"lucy-pro-v2v"` - Video-to-video
+   * - `"lucy-pro-flf2v"` - First-last-frame-to-video
+   * - `"lucy-dev-i2v"` - Image-to-video (Dev quality)
+   * - `"lucy-dev-v2v"` - Video-to-video (Dev quality)
+   */
   video: <T extends VideoModels>(model: T) => ModelDefinition<T>;
+  /**
+   * Get an image model identifier.
+   *
+   * Available options:
+   * - `"lucy-pro-t2i"` - Text-to-image
+   * - `"lucy-pro-i2i"` - Image-to-image
+   */
   image: <T extends ImageModels>(model: T) => ModelDefinition<T>;
 };
 //#endregion
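Together with the new JSDoc, the `models` helpers are the entry point for picking a generation model. A usage sketch, assuming `models` is exported from the package root alongside `createDecartClient`, as the `@example` in `index.d.ts` implies:

```ts
import { createDecartClient, models } from "@decartai/sdk";

const client = createDecartClient({ apiKey: "your-api-key" });

// Text-to-image with the documented pro options; resolution defaults to "720p"
// and orientation to "landscape" when omitted.
const image = await client.process({
  model: models.image("lucy-pro-t2i"),
  prompt: "A watercolor lighthouse at dawn",
  resolution: "480p",
  orientation: "portrait",
  seed: 42,
});
```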
package/dist/shared/model.js
CHANGED
@@ -13,7 +13,8 @@ const videoModels = z.union([
 	z.literal("lucy-pro-t2v"),
 	z.literal("lucy-pro-i2v"),
 	z.literal("lucy-pro-v2v"),
-	z.literal("lucy-pro-flf2v")
+	z.literal("lucy-pro-flf2v"),
+	z.literal("lucy-motion")
 ]);
 const imageModels = z.union([z.literal("lucy-pro-t2i"), z.literal("lucy-pro-i2i")]);
 const modelSchema = z.union([
@@ -28,59 +29,88 @@ const fileInputSchema = z.union([
 	z.instanceof(URL),
 	z.url()
 ]);
+/**
+ * Resolution schema for dev models. Supports only 720p.
+ */
+const devResolutionSchema = z.literal("720p").default("720p").optional().describe("The resolution to use for the generation. For dev models, only `720p` is supported.");
+/**
+ * Resolution schema for pro models.
+ * @param defaultValue - Optional default value (e.g., "720p")
+ */
+const proResolutionSchema = () => {
+	return z.enum(["720p", "480p"]).optional().describe("The resolution to use for the generation").default("720p");
+};
+/**
+ * Resolution schema for lucy-motion.
+ */
+const motionResolutionSchema = z.literal("720p").default("720p").optional().describe("The resolution to use for the generation");
+/**
+ * Resolution schema for lucy-pro-v2v (supports 720p and 480p).
+ */
+const proV2vResolutionSchema = z.enum(["720p", "480p"]).optional().describe("The resolution to use for the generation").default("720p");
 const modelInputSchemas = {
 	"lucy-pro-t2v": z.object({
-		prompt: z.string(),
-		seed: z.number().optional(),
-		resolution:
-		orientation: z.string().optional()
+		prompt: z.string().describe("The prompt to use for the generation"),
+		seed: z.number().optional().describe("The seed to use for the generation"),
+		resolution: proResolutionSchema(),
+		orientation: z.string().optional().describe("The orientation to use for the generation")
 	}),
 	"lucy-pro-t2i": z.object({
-		prompt: z.string(),
-		seed: z.number().optional(),
-		resolution:
-		orientation: z.string().optional()
+		prompt: z.string().describe("The prompt to use for the generation"),
+		seed: z.number().optional().describe("The seed to use for the generation"),
+		resolution: proResolutionSchema(),
+		orientation: z.string().optional().describe("The orientation to use for the generation")
 	}),
 	"lucy-pro-i2v": z.object({
-		prompt: z.string(),
-		data: fileInputSchema,
-		seed: z.number().optional(),
-		resolution:
+		prompt: z.string().describe("The prompt to use for the generation"),
+		data: fileInputSchema.describe("The image data to use for generation (File, Blob, ReadableStream, URL, or string URL)"),
+		seed: z.number().optional().describe("The seed to use for the generation"),
+		resolution: proResolutionSchema()
 	}),
 	"lucy-dev-i2v": z.object({
-		prompt: z.string(),
-		data: fileInputSchema,
-		seed: z.number().optional(),
-		resolution:
+		prompt: z.string().describe("The prompt to use for the generation"),
+		data: fileInputSchema.describe("The image data to use for generation (File, Blob, ReadableStream, URL, or string URL)"),
+		seed: z.number().optional().describe("The seed to use for the generation"),
+		resolution: devResolutionSchema
 	}),
 	"lucy-pro-v2v": z.object({
-		prompt: z.string(),
-		data: fileInputSchema,
-		seed: z.number().optional(),
-		resolution:
-		enhance_prompt: z.boolean().optional(),
-		num_inference_steps: z.number().optional()
+		prompt: z.string().describe("The prompt to use for the generation"),
+		data: fileInputSchema.describe("The video data to use for generation (File, Blob, ReadableStream, URL, or string URL)"),
+		seed: z.number().optional().describe("The seed to use for the generation"),
+		resolution: proV2vResolutionSchema,
+		enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt"),
+		num_inference_steps: z.number().optional().describe("The number of inference steps")
 	}),
 	"lucy-dev-v2v": z.object({
-		prompt: z.string(),
-		data: fileInputSchema,
-		seed: z.number().optional(),
-		resolution:
-		enhance_prompt: z.boolean().optional()
+		prompt: z.string().describe("The prompt to use for the generation"),
+		data: fileInputSchema.describe("The video data to use for generation (File, Blob, ReadableStream, URL, or string URL)"),
+		seed: z.number().optional().describe("The seed to use for the generation"),
+		resolution: devResolutionSchema,
+		enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt")
 	}),
 	"lucy-pro-flf2v": z.object({
-		prompt: z.string(),
-		start: fileInputSchema,
-		end: fileInputSchema,
-		seed: z.number().optional(),
-		resolution:
+		prompt: z.string().describe("The prompt to use for the generation"),
+		start: fileInputSchema.describe("The start frame image (File, Blob, ReadableStream, URL, or string URL)"),
+		end: fileInputSchema.describe("The end frame image (File, Blob, ReadableStream, URL, or string URL)"),
+		seed: z.number().optional().describe("The seed to use for the generation"),
+		resolution: proResolutionSchema()
 	}),
 	"lucy-pro-i2i": z.object({
-		prompt: z.string(),
-		data: fileInputSchema,
-		seed: z.number().optional(),
-		resolution:
-		enhance_prompt: z.boolean().optional()
+		prompt: z.string().describe("The prompt to use for the generation"),
+		data: fileInputSchema.describe("The image data to use for generation (File, Blob, ReadableStream, URL, or string URL)"),
+		seed: z.number().optional().describe("The seed to use for the generation"),
+		resolution: proResolutionSchema(),
+		enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt")
+	}),
+	"lucy-motion": z.object({
+		data: fileInputSchema.describe("The image data to use for generation (File, Blob, ReadableStream, URL, or string URL)"),
+		trajectory: z.array(z.object({
+			frame: z.number().min(0),
+			x: z.number().min(0),
+			y: z.number().min(0)
+		})).min(2).max(121).describe("The trajectory of the desired movement of the object in the image"),
+		seed: z.number().optional().describe("The seed to use for the generation"),
+		resolution: motionResolutionSchema
 	})
 };
 const modelDefinitionSchema = z.object({
@@ -104,7 +134,7 @@ const _models = {
 	mirage_v2: {
 		urlPath: "/v1/stream",
 		name: "mirage_v2",
-		fps:
+		fps: 22,
 		width: 1280,
 		height: 704,
 		inputSchema: z.object({})
@@ -184,6 +214,14 @@ const _models = {
 		width: 1280,
 		height: 704,
 		inputSchema: modelInputSchemas["lucy-pro-flf2v"]
+	},
+	"lucy-motion": {
+		urlPath: "/v1/generate/lucy-motion",
+		name: "lucy-motion",
+		fps: 25,
+		width: 1280,
+		height: 704,
+		inputSchema: modelInputSchemas["lucy-motion"]
 	}
 }
 };
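The headline change in this release is the new `lucy-motion` model: it takes a still image plus a trajectory of 2 to 121 points describing how an object should move over the frames. A usage sketch, assuming `lucy-motion` is reached through `models.video` (it sits in the `videoModels` union) and that `x`/`y` are pixel coordinates, which is an interpretation rather than documented fact:

```ts
import { createDecartClient, models } from "@decartai/sdk";

const client = createDecartClient({ apiKey: "your-api-key" });

// Any FileInput works for `data`: File, Blob, ReadableStream, URL, or string URL.
const imageUrl = "https://example.com/subject.png"; // illustrative URL

const result = await client.process({
  model: models.video("lucy-motion"),
  data: imageUrl,
  // Each point pins the object at (x, y) on a given frame; the schema requires
  // between 2 and 121 points, with all values >= 0.
  trajectory: [
    { frame: 0, x: 100, y: 400 },
    { frame: 60, x: 640, y: 360 },
    { frame: 120, x: 1180, y: 300 },
  ],
  seed: 7,
});
```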
package/dist/version.js
CHANGED