@umituz/react-native-ai-generation-content 1.27.4 → 1.27.6
- package/package.json +1 -1
- package/src/domain/interfaces/ai-provider.interface.ts +87 -100
- package/src/domains/generation/wizard/infrastructure/strategies/image-generation.strategy.ts +30 -3
- package/src/domains/generation/wizard/infrastructure/strategies/video-generation.strategy.ts +30 -3
- package/src/domains/generation/wizard/infrastructure/strategies/wizard-strategy.constants.ts +2 -0
- package/src/domains/generation/wizard/presentation/hooks/useWizardGeneration.ts +2 -1
- package/src/domains/scenarios/domain/scenario.types.ts +2 -1
- package/src/infrastructure/utils/media-actions.util.ts +6 -12
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@umituz/react-native-ai-generation-content",
-  "version": "1.27.4",
+  "version": "1.27.6",
   "description": "Provider-agnostic AI generation orchestration for React Native with result preview components",
   "main": "src/index.ts",
   "types": "src/index.ts",
package/src/domain/interfaces/ai-provider.interface.ts
CHANGED

@@ -3,6 +3,36 @@
  * Provider-agnostic interface for AI generation services
  */
 
+// =============================================================================
+// Feature Types (must be defined first for use in other interfaces)
+// =============================================================================
+
+/**
+ * Feature types for image processing (output: image)
+ */
+export type ImageFeatureType =
+  | "upscale"
+  | "photo-restore"
+  | "face-swap"
+  | "anime-selfie"
+  | "remove-background"
+  | "remove-object"
+  | "hd-touch-up"
+  | "replace-background";
+
+/**
+ * Feature types for video generation (output: video)
+ */
+export type VideoFeatureType =
+  | "ai-hug"
+  | "ai-kiss"
+  | "image-to-video"
+  | "text-to-video";
+
+// =============================================================================
+// Provider Configuration
+// =============================================================================
+
 export interface AIProviderConfig {
   apiKey: string;
   maxRetries?: number;
@@ -17,40 +47,26 @@ export interface AIProviderConfig {
   imageEditModel?: string;
   /** Video generation model ID */
   videoGenerationModel?: string;
+  /** Video feature model mapping - app provides models for each feature */
+  videoFeatureModels?: Partial<Record<VideoFeatureType, string>>;
+  /** Image feature model mapping - app provides models for each feature */
+  imageFeatureModels?: Partial<Record<ImageFeatureType, string>>;
 }
 
-export interface ProviderProgressInfo {
-  /** Progress percentage (0-100) */
-  progress: number;
-  /** Current job status */
-  status?: AIJobStatusType;
-  /** Human-readable message */
-  message?: string;
-  /** Estimated time remaining in ms */
-  estimatedTimeRemaining?: number;
-}
+// =============================================================================
+// Status Types
+// =============================================================================
 
-  /** Supports text-to-video generation */
-  textToVideo: boolean;
-  /** Supports image-to-video generation */
-  imageToVideo: boolean;
-  /** Supports text-to-voice generation */
-  textToVoice: boolean;
-  /** Supports text-to-text (LLM) generation */
-  textToText: boolean;
+export type AIJobStatusType =
+  | "IN_QUEUE"
+  | "IN_PROGRESS"
+  | "COMPLETED"
+  | "FAILED";
+
+export interface AILogEntry {
+  message: string;
+  level: "info" | "warn" | "error";
+  timestamp?: string;
 }
 
 export interface JobSubmission {
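The two new config fields above let the host app map each feature to its own model ID instead of relying on a single global model. A minimal sketch of such a config, assuming the types are re-exported from the package root; every model ID below is a placeholder, not a real model name:

import type { AIProviderConfig } from "@umituz/react-native-ai-generation-content";

// Placeholder model IDs - the real values depend on the provider the app wires in.
const providerConfig: AIProviderConfig = {
  apiKey: "YOUR_API_KEY",
  maxRetries: 3,
  imageFeatureModels: {
    upscale: "example/upscale-model",
    "face-swap": "example/face-swap-model",
    "remove-background": "example/remove-background-model",
  },
  videoFeatureModels: {
    "ai-hug": "example/ai-hug-model",
    "image-to-video": "example/image-to-video-model",
  },
};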
@@ -66,16 +82,19 @@ export interface JobStatus {
   eta?: number;
 }
 
-  | "COMPLETED"
-  | "FAILED";
+// =============================================================================
+// Progress & Options
+// =============================================================================
 
-export interface
+export interface ProviderProgressInfo {
+  /** Progress percentage (0-100) */
+  progress: number;
+  /** Current job status */
+  status?: AIJobStatusType;
+  /** Human-readable message */
+  message?: string;
+  /** Estimated time remaining in ms */
+  estimatedTimeRemaining?: number;
 }
 
 export interface SubscribeOptions<T = unknown> {
@@ -89,29 +108,31 @@ export interface RunOptions {
   onProgress?: (progress: ProviderProgressInfo) => void;
 }
 
-export type ImageFeatureType =
-  | "upscale"
-  | "photo-restore"
-  | "face-swap"
-  | "anime-selfie"
-  | "remove-background"
-  | "remove-object"
-  | "hd-touch-up"
-  | "replace-background";
+// =============================================================================
+// Capabilities
+// =============================================================================
 
+export interface ProviderCapabilities {
+  /** Supported image features */
+  imageFeatures: readonly ImageFeatureType[];
+  /** Supported video features */
+  videoFeatures: readonly VideoFeatureType[];
+  /** Supports text-to-image generation */
+  textToImage: boolean;
+  /** Supports text-to-video generation */
+  textToVideo: boolean;
+  /** Supports image-to-video generation */
+  imageToVideo: boolean;
+  /** Supports text-to-voice generation */
+  textToVoice: boolean;
+  /** Supports text-to-text (LLM) generation */
+  textToText: boolean;
+}
+
+// =============================================================================
+// Feature Input Data
+// =============================================================================
 
-/**
- * Input data for image features
- */
 export interface ImageFeatureInputData {
   imageBase64: string;
   targetImageBase64?: string;
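Because ProviderCapabilities now carries explicit feature lists, a host app can gate its UI on what the configured provider actually supports. A small sketch, assuming `provider` is an already-initialized implementation of the IAIProvider interface defined later in this file:

import type { IAIProvider, ImageFeatureType } from "@umituz/react-native-ai-generation-content";

// Hypothetical helper: the image features this provider both advertises and accepts.
function listUsableImageFeatures(provider: IAIProvider): ImageFeatureType[] {
  const { imageFeatures } = provider.getCapabilities();
  return imageFeatures.filter((feature) => provider.isFeatureSupported(feature));
}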
@@ -119,9 +140,6 @@ export interface ImageFeatureInputData {
   options?: Record<string, unknown>;
 }
 
-/**
- * Input data for video features
- */
 export interface VideoFeatureInputData {
   sourceImageBase64: string;
   targetImageBase64: string;
@@ -129,34 +147,21 @@ export interface VideoFeatureInputData {
   options?: Record<string, unknown>;
 }
 
+// =============================================================================
+// Provider Interface
+// =============================================================================
+
 export interface IAIProvider {
   readonly providerId: string;
   readonly providerName: string;
 
   initialize(config: AIProviderConfig): void;
   isInitialized(): boolean;
-  /**
-   * Get provider capabilities
-   */
   getCapabilities(): ProviderCapabilities;
-  /**
-   * Check if a specific feature is supported
-   */
   isFeatureSupported(feature: ImageFeatureType | VideoFeatureType): boolean;
 
-  submitJob(
-    model: string,
-    input: Record<string, unknown>,
-  ): Promise<JobSubmission>;
+  submitJob(model: string, input: Record<string, unknown>): Promise<JobSubmission>;
   getJobStatus(model: string, requestId: string): Promise<JobStatus>;
   getJobResult<T = unknown>(model: string, requestId: string): Promise<T>;
 
   subscribe<T = unknown>(
@@ -173,31 +178,13 @@ export interface IAIProvider {
 
   reset(): void;
 
-  /**
-   * Get model ID for an image feature
-   * @throws Error if feature is not supported
-   */
   getImageFeatureModel(feature: ImageFeatureType): string;
-  /**
-   * Build provider-specific input for an image feature
-   * @throws Error if feature is not supported
-   */
   buildImageFeatureInput(
     feature: ImageFeatureType,
     data: ImageFeatureInputData,
   ): Record<string, unknown>;
 
-  /**
-   * Get model ID for a video feature
-   * @throws Error if feature is not supported
-   */
   getVideoFeatureModel(feature: VideoFeatureType): string;
-  /**
-   * Build provider-specific input for a video feature
-   * @throws Error if feature is not supported
-   */
   buildVideoFeatureInput(
     feature: VideoFeatureType,
    data: VideoFeatureInputData,
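Putting the reorganized interface together, a single feature run resolves the model, builds the provider-specific input, submits the job, and fetches the result. The sketch below assumes JobSubmission exposes a requestId (the same ID getJobStatus and getJobResult take), since its fields are not shown in this diff; the "photo-restore" feature choice is only an example:

import type { IAIProvider } from "@umituz/react-native-ai-generation-content";

// Hypothetical end-to-end run of one image feature against an initialized provider.
async function runPhotoRestore(provider: IAIProvider, imageBase64: string): Promise<unknown> {
  const model = provider.getImageFeatureModel("photo-restore");
  const input = provider.buildImageFeatureInput("photo-restore", { imageBase64 });

  // `requestId` is assumed here; JobSubmission's exact shape is not part of this diff.
  const { requestId } = (await provider.submitJob(model, input)) as { requestId: string };
  return provider.getJobResult(model, requestId);
}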
package/src/domains/generation/wizard/infrastructure/strategies/image-generation.strategy.ts
CHANGED
@@ -142,6 +142,29 @@ async function executeImageGeneration(
   }
 }
 
+// ============================================================================
+// Prompt Extraction
+// ============================================================================
+
+function extractPromptFromWizardData(wizardData: Record<string, unknown>): string | null {
+  // Check for text_input key (standard wizard text input step)
+  if (wizardData.text_input && typeof wizardData.text_input === "string") {
+    return wizardData.text_input.trim() || null;
+  }
+
+  // Check for nested prompt in text_input object
+  const textInput = wizardData.text_input as { value?: string; text?: string } | undefined;
+  if (textInput?.value) return textInput.value.trim() || null;
+  if (textInput?.text) return textInput.text.trim() || null;
+
+  // Check for direct prompt key
+  if (wizardData.prompt && typeof wizardData.prompt === "string") {
+    return wizardData.prompt.trim() || null;
+  }
+
+  return null;
+}
+
 // ============================================================================
 // Input Builder
 // ============================================================================
@@ -153,11 +176,15 @@ export async function buildImageInput(
   const photos = await extractPhotosFromWizardData(wizardData);
   if (!photos) return null;
 
+  // Priority: 1) User input from wizard, 2) Scenario prompt
+  const userPrompt = extractPromptFromWizardData(wizardData);
+  const basePrompt = userPrompt || scenario.aiPrompt?.trim();
+
+  if (!basePrompt) {
+    throw new Error(`No prompt found. Provide text_input in wizard or aiPrompt in scenario "${scenario.id}".`);
   }
 
-  let prompt =
+  let prompt = basePrompt;
 
   const styleEnhancements: string[] = [];
 
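The new extractPromptFromWizardData checks, in order, a plain text_input string, a nested text_input.value / text_input.text, and finally a top-level prompt key; only if all of those are empty does buildImageInput fall back to scenario.aiPrompt. Some hypothetical wizard payloads and how they would resolve:

// Each of these resolves to a prompt via extractPromptFromWizardData.
const plainString = { text_input: "a watercolor portrait of my dog" };
const nestedValue = { text_input: { value: "a watercolor portrait of my dog" } };
const directPrompt = { prompt: "a watercolor portrait of my dog" };

// Whitespace-only input yields null, so buildImageInput falls back to
// scenario.aiPrompt - or throws "No prompt found" if that is missing too.
const whitespaceOnly = { text_input: "   " };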
package/src/domains/generation/wizard/infrastructure/strategies/video-generation.strategy.ts
CHANGED
@@ -74,6 +74,29 @@ function getVideoFeatureType(scenarioId: string): VideoFeatureType {
   throw new Error(`Unknown video feature type for scenario "${scenarioId}". Add pattern to VIDEO_FEATURE_PATTERNS.`);
 }
 
+// ============================================================================
+// Prompt Extraction
+// ============================================================================
+
+function extractPromptFromWizardData(wizardData: Record<string, unknown>): string | null {
+  // Check for text_input key (standard wizard text input step)
+  if (wizardData.text_input && typeof wizardData.text_input === "string") {
+    return wizardData.text_input.trim() || null;
+  }
+
+  // Check for nested prompt in text_input object
+  const textInput = wizardData.text_input as { value?: string; text?: string } | undefined;
+  if (textInput?.value) return textInput.value.trim() || null;
+  if (textInput?.text) return textInput.text.trim() || null;
+
+  // Check for direct prompt key
+  if (wizardData.prompt && typeof wizardData.prompt === "string") {
+    return wizardData.prompt.trim() || null;
+  }
+
+  return null;
+}
+
 // ============================================================================
 // Input Builder
 // ============================================================================
@@ -85,14 +108,18 @@ export async function buildVideoInput(
   const photos = await extractPhotosFromWizardData(wizardData);
   if (!photos || photos.length < 1) return null;
 
+  // Priority: 1) User input from wizard, 2) Scenario prompt
+  const userPrompt = extractPromptFromWizardData(wizardData);
+  const prompt = userPrompt || scenario.aiPrompt?.trim();
+
+  if (!prompt) {
+    throw new Error(`No prompt found. Provide text_input in wizard or aiPrompt in scenario "${scenario.id}".`);
   }
 
   return {
     sourceImageBase64: photos[0],
     targetImageBase64: photos[1] || photos[0],
-    prompt
+    prompt,
   };
 }
 
@@ -14,7 +14,8 @@ export type WizardOutputType = "image" | "video";
 
 export interface WizardScenarioData {
   readonly id: string;
+  /** AI prompt - optional if prompt comes from wizard data (text_input step) */
+  readonly aiPrompt?: string;
   readonly outputType?: WizardOutputType;
   readonly model?: string;
   readonly title?: string;
@@ -198,7 +198,8 @@ export interface ScenarioData {
   readonly icon?: string;
   readonly imageUrl?: string;
   readonly previewImageUrl?: string;
+  /** AI prompt - optional if prompt comes from wizard data */
+  readonly aiPrompt?: string;
   readonly storyTemplate?: string;
   readonly requiresPhoto?: boolean;
   readonly hidden?: boolean;
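With aiPrompt now optional on both scenario types, a scenario either ships a fixed prompt or defers to the wizard's text_input step. Two hypothetical WizardScenarioData values, assuming the type is importable from the package root:

import type { WizardScenarioData } from "@umituz/react-native-ai-generation-content";

// Fixed prompt baked into the scenario.
const animeSelfie: WizardScenarioData = {
  id: "anime-selfie",
  aiPrompt: "Turn the uploaded selfie into an anime-style portrait",
  outputType: "image",
};

// No aiPrompt: the prompt must come from the wizard's text_input step,
// otherwise the input builders throw "No prompt found".
const textToVideo: WizardScenarioData = {
  id: "text-to-video",
  outputType: "video",
};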
package/src/infrastructure/utils/media-actions.util.ts
CHANGED

@@ -3,6 +3,8 @@
  * Provides save to gallery and share functionality for generated media
  */
 
+declare const __DEV__: boolean;
+
 export interface MediaActionResult {
   readonly success: boolean;
   readonly error?: string;
@@ -85,12 +87,8 @@ export const saveMediaToGallery = async (
     return { success: true };
   } catch (error) {
     // Debug logging in development
-        console.error("[MediaActions] Save failed:", error);
-      }
-    } catch {
-      // Ignore if __DEV__ check fails
+    if (typeof __DEV__ !== "undefined" && __DEV__) {
+      console.error("[MediaActions] Save failed:", error);
     }
     const errorMsg = translations?.saveFailed || "Failed to save media";
     toast?.show({
@@ -139,12 +137,8 @@ export const shareMedia = async (
     return { success: true };
   } catch (error) {
     // Debug logging in development
-        console.error("[MediaActions] Share failed:", error);
-      }
-    } catch {
-      // Ignore if __DEV__ check fails
+    if (typeof __DEV__ !== "undefined" && __DEV__) {
+      console.error("[MediaActions] Share failed:", error);
     }
     const errorMsg = translations?.shareFailed || "Failed to share media";
     toast?.show({
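The rewritten catch blocks drop the old nested try/catch around __DEV__ in favor of a module-level declare plus a typeof guard, which stays safe in environments where React Native's __DEV__ global is not defined. The same pattern in isolation, with an illustrative helper name:

// React Native injects __DEV__ globally; `declare` only informs TypeScript and emits no code.
declare const __DEV__: boolean;

// Hypothetical helper mirroring the guarded logging used in media-actions.util.ts.
function logDevError(tag: string, error: unknown): void {
  // typeof never throws for an undeclared global, unlike reading __DEV__ directly.
  if (typeof __DEV__ !== "undefined" && __DEV__) {
    console.error(tag, error);
  }
}

logDevError("[MediaActions] Save failed:", new Error("example"));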