@umituz/react-native-ai-generation-content 1.26.6 → 1.26.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/domains/generation/wizard/infrastructure/strategies/wizard-strategy.factory.ts +6 -8
- package/src/domains/generation/wizard/presentation/hooks/usePhotoUploadState.ts +1 -1
- package/src/domains/generation/wizard/presentation/screens/GenericPhotoUploadScreen.tsx +1 -12
- package/src/domains/generation/wizard/presentation/steps/PhotoUploadStep.tsx +1 -3
- package/src/index.ts +0 -30
- package/src/presentation/hooks/generation/useAIGenerateState.ts +3 -0
- package/src/presentation/layouts/types/layout-props.ts +30 -4
- package/src/presentation/hooks/generation/useAIFeatureGeneration.ts +0 -180
- package/src/presentation/screens/ai-feature/AIFeatureScreen.tsx +0 -167
- package/src/presentation/screens/ai-feature/index.ts +0 -21
- package/src/presentation/screens/ai-feature/registry.ts +0 -155
- package/src/presentation/screens/ai-feature/translations.ts +0 -115
- package/src/presentation/screens/ai-feature/types.ts +0 -160
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@umituz/react-native-ai-generation-content",
-  "version": "1.26.6",
+  "version": "1.26.8",
   "description": "Provider-agnostic AI generation orchestration for React Native with result preview components",
   "main": "src/index.ts",
   "types": "src/index.ts",
package/src/domains/generation/wizard/infrastructure/strategies/wizard-strategy.factory.ts
CHANGED
@@ -9,8 +9,6 @@ import type { GenerationStrategy } from "../../../../presentation/hooks/generati
 import type { VideoFeatureType } from "../../../../domain/interfaces";
 import { executeVideoFeature } from "../../../../infrastructure/services/video-feature-executor.service";
 import { createCreationsRepository } from "../../../creations/infrastructure/adapters";
-import { enhanceCouplePrompt } from "../../../../features/couple-future/infrastructure/couplePromptEnhancer";
-import type { CoupleFeatureSelection } from "../../../../features/couple-future/domain/types";
 import type { WizardOutputType, WizardScenarioData } from "../../presentation/hooks/useWizardGeneration";

 declare const __DEV__: boolean;
@@ -179,29 +177,29 @@ async function buildGenerationInput(

   // For image generation, enhance prompt with style selections
   if (outputType === "image") {
-    const
+    const styleEnhancements: string[] = [];

     // Romantic mood (multi-select array)
     const romanticMoods = wizardData.selection_romantic_mood as string[] | undefined;
     if (romanticMoods && romanticMoods.length > 0) {
-
+      styleEnhancements.push(`Mood: ${romanticMoods.join(", ")}`);
     }

     // Art style (single select)
     const artStyle = wizardData.selection_art_style as string | undefined;
     if (artStyle && artStyle !== "original") {
-
+      styleEnhancements.push(`Art style: ${artStyle}`);
     }

     // Artist style (single select)
     const artist = wizardData.selection_artist_style as string | undefined;
     if (artist && artist !== "original") {
-
+      styleEnhancements.push(`Artist style: ${artist}`);
     }

     // Enhance prompt with selected styles
-    if (
-    prompt =
+    if (styleEnhancements.length > 0) {
+      prompt = `${prompt}. ${styleEnhancements.join(", ")}`;
     }

   return {
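Note: taken together, these hunks drop the couple-future prompt enhancer and instead append the wizard's style selections directly to the image prompt. A minimal standalone sketch of the added logic; the helper name and simplified signature below are illustrative, not part of the package:

```ts
// Illustrative sketch of the style-enhancement logic added above.
// In the package this runs inside buildGenerationInput(); names here are simplified.
function applyStyleSelections(
  prompt: string,
  wizardData: Record<string, unknown>,
): string {
  const styleEnhancements: string[] = [];

  const romanticMoods = wizardData.selection_romantic_mood as string[] | undefined;
  if (romanticMoods && romanticMoods.length > 0) {
    styleEnhancements.push(`Mood: ${romanticMoods.join(", ")}`);
  }

  const artStyle = wizardData.selection_art_style as string | undefined;
  if (artStyle && artStyle !== "original") {
    styleEnhancements.push(`Art style: ${artStyle}`);
  }

  const artist = wizardData.selection_artist_style as string | undefined;
  if (artist && artist !== "original") {
    styleEnhancements.push(`Artist style: ${artist}`);
  }

  return styleEnhancements.length > 0
    ? `${prompt}. ${styleEnhancements.join(", ")}`
    : prompt;
}

// e.g. applyStyleSelections("a couple at sunset", {
//   selection_romantic_mood: ["dreamy"],
//   selection_art_style: "watercolor",
// }) === "a couple at sunset. Mood: dreamy, Art style: watercolor"
```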
package/src/domains/generation/wizard/presentation/hooks/usePhotoUploadState.ts
CHANGED
@@ -7,7 +7,7 @@
 import { useState, useCallback } from "react";
 import * as ImagePicker from "expo-image-picker";
 import { Alert } from "react-native";
-import type { UploadedImage } from "
+import type { UploadedImage } from "../../../../../presentation/hooks/generation/useAIGenerateState";

 export interface PhotoUploadConfig {
   readonly maxFileSizeMB?: number;
package/src/domains/generation/wizard/presentation/screens/GenericPhotoUploadScreen.tsx
CHANGED
@@ -17,8 +17,7 @@ import {
 } from "@umituz/react-native-design-system";
 import { PhotoUploadCard } from "../../../../presentation/components";
 import { FaceDetectionToggle } from "../../../../domains/face-detection";
-import {
-import type { UploadedImage } from "../../../../features/partner-upload/domain/types";
+import type { UploadedImage } from "../../../../presentation/hooks/generation/useAIGenerateState";
 import { usePhotoUploadState } from "../hooks/usePhotoUploadState";

 export interface PhotoUploadScreenTranslations {
@@ -135,16 +134,6 @@ export const GenericPhotoUploadScreen: React.FC<PhotoUploadScreenProps> = ({
           {translations.subtitle}
         </AtomicText>

-        {/* Photo Tips - InfoGrid version */}
-        {showPhotoTips && (
-          <PhotoTips
-            t={t}
-            titleKey="photoUpload.tips.title"
-            headerIcon="bulb"
-            style={{ marginHorizontal: 24, marginBottom: 20 }}
-          />
-        )}
-
         {showFaceDetection && onFaceDetectionToggle && (
           <FaceDetectionToggle
             isEnabled={faceDetectionEnabled}
package/src/domains/generation/wizard/presentation/steps/PhotoUploadStep.tsx
CHANGED
@@ -6,10 +6,8 @@

 import React from "react";
 import type { PhotoUploadStepConfig } from "../../domain/entities/wizard-config.types";
-
-// Use wizard domain's generic photo upload screen - NO feature-specific references!
+import type { UploadedImage } from "../../../../../presentation/hooks/generation/useAIGenerateState";
 import { GenericPhotoUploadScreen } from "../screens/GenericPhotoUploadScreen";
-import type { UploadedImage } from "../../../../features/partner-upload/domain/types";

 export interface PhotoUploadStepProps {
   readonly config: PhotoUploadStepConfig;
package/src/index.ts
CHANGED
@@ -152,35 +152,5 @@ export {
   type GenerationConfigProviderProps,
 } from "./infrastructure/providers";

-// Result Preview Domain
 export * from "./domains/result-preview";
-
-// Unified AI Feature Screen
-export {
-  AIFeatureScreen,
-  AI_FEATURE_CONFIGS,
-  getAIFeatureConfig,
-  hasAIFeature,
-  getAllAIFeatureIds,
-  getAIFeaturesByMode,
-  createFeatureTranslations,
-  createSingleImageTranslations,
-  createDualImageTranslations,
-  createComparisonTranslations,
-  createPromptTranslations,
-} from "./presentation/screens/ai-feature";
-export type {
-  AIFeatureId,
-  AIFeatureMode,
-  AIFeatureOutputType,
-  AIFeatureCreditType,
-  AIFeatureConfig,
-  AIFeatureScreenProps,
-  SingleImageTranslationKeys,
-  DualImageTranslationKeys,
-  ComparisonTranslationKeys,
-  PromptTranslationKeys,
-} from "./presentation/screens/ai-feature";
-
-
 export * from "./domains/generation";
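Note: this hunk removes the unified AI feature screen API from the package root. Assuming a consuming app imported these symbols from the package entry point, those imports stop resolving in 1.26.8; a hedged before/after sketch:

```ts
// Before (1.26.6), an app could import the unified feature screen from the package root:
import {
  AIFeatureScreen,
  getAIFeatureConfig,
  type AIFeatureId,
} from "@umituz/react-native-ai-generation-content";

// In 1.26.8 these symbols are no longer exported from src/index.ts (and the
// presentation/screens/ai-feature module itself is deleted further down in this diff),
// so the import above fails to resolve; callers would need to remove it or migrate
// to whatever replacement the package provides (not shown in this diff).
```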
package/src/presentation/layouts/types/layout-props.ts
CHANGED
@@ -4,10 +4,6 @@
  */

 import type { ReactNode } from "react";
-import type {
-  BaseSingleImageHookReturn,
-  BaseDualImageHookReturn,
-} from "../../../features/image-to-image/domain/types";
 import type {
   ModalTranslations,
   BaseLayoutTranslations,
@@ -24,6 +20,36 @@ import type {
   SingleImageWithPromptFeatureState,
 } from "./feature-states";

+/**
+ * Base hook return for single image features
+ */
+export interface BaseSingleImageHookReturn {
+  readonly imageUri: string | null;
+  readonly processedUrl: string | null;
+  readonly isProcessing: boolean;
+  readonly progress: number;
+  selectImage(): Promise<void>;
+  process(): Promise<void>;
+  save(): Promise<void>;
+  reset(): void;
+}
+
+/**
+ * Base hook return for dual image features
+ */
+export interface BaseDualImageHookReturn {
+  readonly sourceImageUri: string | null;
+  readonly targetImageUri: string | null;
+  readonly processedUrl: string | null;
+  readonly isProcessing: boolean;
+  readonly progress: number;
+  selectSourceImage(): Promise<void>;
+  selectTargetImage(): Promise<void>;
+  process(): Promise<void>;
+  save(): Promise<void>;
+  reset(): void;
+}
+
 /**
  * Single image feature layout props
  * Note: No modal - shows fullscreen progress when processing (FutureUS pattern)
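Note: the base hook return shapes previously imported from features/image-to-image are now declared directly in layout-props.ts. A minimal sketch of a hook typed against the relocated BaseSingleImageHookReturn; the hook name and body are hypothetical, and the relative import path depends on the caller's location:

```ts
import { useState, useCallback } from "react";
import type { BaseSingleImageHookReturn } from "./layout-props";

// Hypothetical single-image feature hook constrained by the relocated interface.
export function useExampleSingleImageFeature(): BaseSingleImageHookReturn {
  const [imageUri, setImageUri] = useState<string | null>(null);
  const [processedUrl, setProcessedUrl] = useState<string | null>(null);
  const [isProcessing, setIsProcessing] = useState(false);
  const [progress, setProgress] = useState(0);

  const selectImage = useCallback(async () => {
    // e.g. open an image picker and store the chosen URI
    setImageUri("file:///example.jpg");
  }, []);

  const process = useCallback(async () => {
    setIsProcessing(true);
    setProgress(1);
    setProcessedUrl("https://example.com/result.png"); // placeholder result
    setIsProcessing(false);
  }, []);

  const save = useCallback(async () => {
    // persist processedUrl somewhere (gallery, backend, etc.)
  }, []);

  const reset = useCallback(() => {
    setImageUri(null);
    setProcessedUrl(null);
    setProgress(0);
  }, []);

  return { imageUri, processedUrl, isProcessing, progress, selectImage, process, save, reset };
}
```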
package/src/presentation/hooks/generation/useAIFeatureGeneration.ts
REMOVED
@@ -1,180 +0,0 @@
-
-import { useCallback } from "react";
-import { useImageGeneration } from "./useImageGeneration";
-import { useVideoGeneration } from "./useVideoGeneration";
-import { executeImageToVideo } from "../../../features/image-to-video";
-import { executeTextToVideo } from "../../../features/text-to-video";
-import { useGenerationOrchestrator } from "./orchestrator";
-import { prepareImage } from "../../../infrastructure/utils";
-import type { AIFeatureId } from "../../screens/ai-feature/types";
-import type { ImageFeatureType, VideoFeatureType } from "../../../domain/interfaces";
-import type { AlertMessages, GenerationError } from "./types";
-
-interface FeatureGenerationConfig {
-  featureType: AIFeatureId;
-  userId?: string;
-  alertMessages: AlertMessages;
-  onSuccess?: (result: string) => void;
-  onError?: (error: GenerationError) => void;
-  creditCost?: number;
-  onCreditsExhausted?: () => void;
-  /** REQUIRED for video features: Video generation model ID from app config */
-  videoModel?: string;
-}
-
-export function useAIFeatureGeneration({
-  featureType,
-  userId,
-  alertMessages,
-  onSuccess,
-  onError,
-  creditCost = 1,
-  onCreditsExhausted,
-  videoModel,
-}: FeatureGenerationConfig) {
-
-  // Hook for standard image features
-  const { generate: generateImage } = useImageGeneration({
-    featureType: featureType as ImageFeatureType,
-    userId,
-    processResult: (imageUrl) => imageUrl,
-    alertMessages,
-    onSuccess,
-    onError,
-    creditCost,
-    onCreditsExhausted,
-  });
-
-  // Hook for standard video features (ai-hug, ai-kiss)
-  const { generate: generateVideo } = useVideoGeneration({
-    featureType: featureType as VideoFeatureType,
-    userId,
-    processResult: (videoUrl) => videoUrl,
-    alertMessages,
-    onSuccess,
-    onError,
-    creditCost,
-    onCreditsExhausted,
-  });
-
-  // Orchestrator for Image-to-Video
-  const { generate: generateImageToVideo } = useGenerationOrchestrator(
-    {
-      execute: async (input: { imageUri: string; prompt: string; duration: number }, onProgress) => {
-        if (!videoModel) {
-          throw new Error(
-            "videoModel is required for image-to-video feature. " +
-            "Please provide videoModel from app's generation config."
-          );
-        }
-
-        const result = await executeImageToVideo(
-          {
-            imageUri: input.imageUri, // Pass URI directly
-            imageBase64: await prepareImage(input.imageUri),
-            motionPrompt: input.prompt,
-            options: { duration: input.duration },
-            userId: userId || "anonymous",
-          },
-          {
-            model: videoModel,
-            buildInput: (image, prompt, opts) => ({
-              image,
-              prompt,
-              ...opts
-            }),
-            onProgress,
-          }
-        );
-        if (!result.success || !result.videoUrl) throw new Error(result.error || "Generation failed");
-        return result.videoUrl;
-      },
-      getCreditCost: () => creditCost,
-    },
-    { userId, alertMessages, onSuccess, onError, onCreditsExhausted }
-  );
-
-  // Orchestrator for Text-to-Video
-  const { generate: generateTextToVideo } = useGenerationOrchestrator(
-    {
-      execute: async (input: { prompt: string; duration: number }, onProgress) => {
-        if (!videoModel) {
-          throw new Error(
-            "videoModel is required for text-to-video feature. " +
-            "Please provide videoModel from app's generation config."
-          );
-        }
-
-        const result = await executeTextToVideo(
-          {
-            prompt: input.prompt,
-            options: { duration: input.duration },
-            userId: userId || "anonymous",
-          },
-          {
-            model: videoModel,
-            buildInput: (prompt, opts) => ({ prompt, ...opts }),
-            onProgress,
-          }
-        );
-        if (!result.success || !result.videoUrl) throw new Error(result.error || "Generation failed");
-        return result.videoUrl;
-      },
-      getCreditCost: () => creditCost,
-    },
-    { userId, alertMessages, onSuccess, onError, onCreditsExhausted }
-  );
-
-  const generate = useCallback(async (data: {
-    prompt: string;
-    style: string;
-    duration: number;
-    images: { uri: string }[];
-  }) => {
-    switch (featureType) {
-      case "image-to-video":
-        if (!data.images[0]?.uri) throw new Error("Image required for image-to-video");
-        return await generateImageToVideo({
-          imageUri: data.images[0].uri,
-          prompt: data.prompt,
-          duration: data.duration
-        });
-
-      case "text-to-video":
-        return await generateTextToVideo({
-          prompt: data.prompt,
-          duration: data.duration
-        });
-
-      case "ai-hug":
-      case "ai-kiss":
-        if (data.images.length < 2) throw new Error("Two images required");
-        return await generateVideo({
-          sourceImageBase64: await prepareImage(data.images[0].uri),
-          targetImageBase64: await prepareImage(data.images[1].uri),
-        });
-
-      default:
-        // Default to Image Generation
-        if (data.images.length > 0) {
-          // Single or dual image
-          if (data.images.length === 2 && (featureType === "face-swap")) {
-            return await generateImage({
-              sourceImageBase64: await prepareImage(data.images[0].uri),
-              targetImageBase64: await prepareImage(data.images[1].uri),
-              options: { style: data.style }
-            });
-          }
-          // Single image features
-          return await generateImage({
-            imageBase64: await prepareImage(data.images[0].uri),
-            prompt: data.prompt,
-            options: { style: data.style }
-          });
-        }
-        throw new Error(`Unsupported feature or missing input: ${featureType}`);
-    }
-  }, [featureType, generateImage, generateVideo, generateImageToVideo, generateTextToVideo]);
-
-  return { generate };
-}
package/src/presentation/screens/ai-feature/AIFeatureScreen.tsx
REMOVED
@@ -1,167 +0,0 @@
-/**
- * AIFeatureScreen
- * Unified screen component for all AI features
- * Reduces 9 screens to 1 configurable component
- */
-
-import React, { useMemo, useCallback } from "react";
-import { View } from "react-native";
-import { ScreenLayout } from "@umituz/react-native-design-system";
-import { useLocalization } from "@umituz/react-native-localization";
-
-import { AIGenScreenHeader } from "../../components/headers/AIGenScreenHeader";
-import { CreditBadge } from "../../components/headers/CreditBadge";
-import { prepareImage } from "../../../infrastructure/utils/feature-utils";
-import { useCreationPersistence } from "../../../domains/creations/presentation/hooks/useCreationPersistence";
-
-// Feature components
-import { AnimeSelfieFeature } from "../../../features/anime-selfie/presentation/components/AnimeSelfieFeature";
-import { RemoveBackgroundFeature } from "../../../features/remove-background/presentation/components/RemoveBackgroundFeature";
-import { HDTouchUpFeature } from "../../../features/hd-touch-up/presentation/components/HDTouchUpFeature";
-import { UpscaleFeature } from "../../../features/upscaling/presentation/components/UpscaleFeature";
-import { PhotoRestoreFeature } from "../../../features/photo-restoration/presentation/components/PhotoRestoreFeature";
-import { RemoveObjectFeature } from "../../../features/remove-object/presentation/components/RemoveObjectFeature";
-import { ReplaceBackgroundFeature } from "../../../features/replace-background/presentation/components/ReplaceBackgroundFeature";
-import { FaceSwapFeature } from "../../../features/face-swap/presentation/components/FaceSwapFeature";
-import { AIHugFeature } from "../../../features/ai-hug/presentation/components/AIHugFeature";
-import { AIKissFeature } from "../../../features/ai-kiss/presentation/components/AIKissFeature";
-import { MemeGeneratorFeature } from "../../../features/meme-generator/presentation/components/MemeGeneratorFeature";
-
-import { createFeatureTranslations } from "./translations";
-import type { AIFeatureScreenProps, AIFeatureId } from "./types";
-
-/**
- * Feature component mapping
- * Using explicit any type for component registry to allow dynamic prop injection
- */
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-const FEATURE_COMPONENTS: Record<AIFeatureId, React.ComponentType<any>> = {
-  "anime-selfie": AnimeSelfieFeature,
-  "remove-background": RemoveBackgroundFeature,
-  "hd-touch-up": HDTouchUpFeature,
-  upscale: UpscaleFeature,
-  "photo-restore": PhotoRestoreFeature,
-  "remove-object": RemoveObjectFeature,
-  "replace-background": ReplaceBackgroundFeature,
-  "face-swap": FaceSwapFeature,
-  "ai-hug": AIHugFeature,
-  "ai-kiss": AIKissFeature,
-  "meme-generator": MemeGeneratorFeature,
-  "image-to-video": React.Fragment,
-  "text-to-video": React.Fragment,
-};
-
-/**
- * AIFeatureScreen - Unified component for all AI features
- */
-export const AIFeatureScreen: React.FC<AIFeatureScreenProps> = ({
-  config,
-  creditCost,
-  onDeductCredits,
-  onSelectImage,
-  onSaveMedia,
-  onCheckCreditGuard,
-  imageCredits,
-  headerRightContent,
-}) => {
-  const { t } = useLocalization();
-
-  // Create persistence callbacks
-  const persistence = useCreationPersistence({
-    type: config.id,
-    creditCost,
-    onCreditDeduct: onDeductCredits,
-  });
-
-  // Create translations based on feature mode
-  const translations = useMemo(
-    () => createFeatureTranslations(config, t),
-    [config, t],
-  );
-
-  // Create feature config
-  const featureConfig = useMemo(
-    () => ({
-      prepareImage,
-      ...config.extraConfig,
-      ...persistence,
-    }),
-    [config.extraConfig, persistence],
-  );
-
-  // Credit guard callback
-  const handleBeforeProcess = useCallback(async () => {
-    // Convert featureId to PascalCase for analytics
-    const featureName = config.id
-      .split("-")
-      .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
-      .join("");
-    return onCheckCreditGuard(creditCost, featureName);
-  }, [config.id, creditCost, onCheckCreditGuard]);
-
-  // Get the feature component
-  const FeatureComponent = FEATURE_COMPONENTS[config.id];
-
-  // Build props based on feature mode
-  const featureProps = useMemo(() => {
-    const baseProps = {
-      config: featureConfig,
-      translations,
-      onBeforeProcess: handleBeforeProcess,
-    };
-
-    // Add mode-specific props
-    switch (config.mode) {
-      case "single":
-      case "single-with-prompt":
-        return {
-          ...baseProps,
-          onSelectImage,
-          onSaveImage: onSaveMedia,
-        };
-      case "text-input":
-        return {
-          ...baseProps,
-          onSaveImage: onSaveMedia,
-          // Text input doesn't need image selection
-        };
-      case "dual":
-        return {
-          ...baseProps,
-          onSelectSourceImage: onSelectImage,
-          onSelectTargetImage: onSelectImage,
-          onSaveImage: onSaveMedia,
-        };
-      case "dual-video":
-        return {
-          ...baseProps,
-          onSelectSourceImage: onSelectImage,
-          onSelectTargetImage: onSelectImage,
-          onSaveVideo: onSaveMedia,
-        };
-      default:
-        return baseProps;
-    }
-  }, [config.mode, featureConfig, translations, handleBeforeProcess, onSelectImage, onSaveMedia]);
-
-  // Default header right content with credit badge
-  const defaultHeaderRight = (
-    <View style={{ flexDirection: "row", alignItems: "center", gap: 8 }}>
-      <CreditBadge credits={imageCredits} compact />
-    </View>
-  );
-
-  return (
-    <ScreenLayout
-      header={
-        <AIGenScreenHeader
-          title={t(`${config.translationPrefix}.title`)}
-          rightContent={headerRightContent ?? defaultHeaderRight}
-        />
-      }
-      scrollable={false}
-    >
-      <FeatureComponent {...featureProps} />
-    </ScreenLayout>
-  );
-};
package/src/presentation/screens/ai-feature/index.ts
REMOVED
@@ -1,21 +0,0 @@
-/**
- * AI Feature Screen
- * Unified screen component for all AI features
- */
-
-export { AIFeatureScreen } from "./AIFeatureScreen";
-export { AI_FEATURE_CONFIGS, getAIFeatureConfig, hasAIFeature, getAllAIFeatureIds, getAIFeaturesByMode } from "./registry";
-export { createFeatureTranslations, createSingleImageTranslations, createDualImageTranslations, createComparisonTranslations, createPromptTranslations, createTextInputTranslations } from "./translations";
-export type {
-  AIFeatureId,
-  AIFeatureMode,
-  AIFeatureOutputType,
-  AIFeatureCreditType,
-  AIFeatureConfig,
-  AIFeatureScreenProps,
-  SingleImageTranslationKeys,
-  DualImageTranslationKeys,
-  ComparisonTranslationKeys,
-  PromptTranslationKeys,
-  TextInputTranslationKeys,
-} from "./types";
package/src/presentation/screens/ai-feature/registry.ts
REMOVED
@@ -1,155 +0,0 @@
-/**
- * AI Feature Registry
- * Static configuration for all AI features
- */
-
-import type { AIFeatureConfig, AIFeatureId } from "./types";
-
-/**
- * Feature configurations registry
- */
-export const AI_FEATURE_CONFIGS: Record<AIFeatureId, AIFeatureConfig> = {
-  // Single image features
-  "anime-selfie": {
-    id: "anime-selfie",
-    mode: "single",
-    outputType: "image",
-    creditType: "image",
-    translationPrefix: "anime-selfie",
-  },
-  "remove-background": {
-    id: "remove-background",
-    mode: "single",
-    outputType: "image",
-    creditType: "image",
-    translationPrefix: "remove-background",
-    extraConfig: { featureType: "remove-background" },
-  },
-  "hd-touch-up": {
-    id: "hd-touch-up",
-    mode: "single",
-    outputType: "image",
-    creditType: "image",
-    translationPrefix: "hd-touch-up",
-    extraConfig: { featureType: "hd-touch-up" },
-  },
-
-  // Comparison result features
-  upscale: {
-    id: "upscale",
-    mode: "single",
-    outputType: "image",
-    creditType: "image",
-    translationPrefix: "upscale",
-    hasComparisonResult: true,
-    extraConfig: { featureType: "upscale", defaultScaleFactor: 2 },
-  },
-  "photo-restore": {
-    id: "photo-restore",
-    mode: "single",
-    outputType: "image",
-    creditType: "image",
-    translationPrefix: "photo-restore",
-    hasComparisonResult: true,
-  },
-
-  // Prompt features
-  "remove-object": {
-    id: "remove-object",
-    mode: "single-with-prompt",
-    outputType: "image",
-    creditType: "image",
-    translationPrefix: "remove-object",
-  },
-  "replace-background": {
-    id: "replace-background",
-    mode: "single-with-prompt",
-    outputType: "image",
-    creditType: "image",
-    translationPrefix: "replace-background",
-    extraConfig: { featureType: "replace-background" },
-  },
-
-  // Text-input features (no image upload)
-  "meme-generator": {
-    id: "meme-generator",
-    mode: "text-input",
-    outputType: "image",
-    creditType: "image",
-    translationPrefix: "meme-generator",
-  },
-
-  // Dual image features
-  "face-swap": {
-    id: "face-swap",
-    mode: "dual",
-    outputType: "image",
-    creditType: "image",
-    translationPrefix: "face-swap",
-    extraConfig: { featureType: "face-swap" },
-  },
-
-  // Dual image video features
-  "ai-hug": {
-    id: "ai-hug",
-    mode: "dual-video",
-    outputType: "video",
-    creditType: "image",
-    translationPrefix: "ai-hug",
-  },
-  "ai-kiss": {
-    id: "ai-kiss",
-    mode: "dual-video",
-    outputType: "video",
-    creditType: "image",
-    translationPrefix: "ai-kiss",
-  },
-
-  // Generic Video Features
-  "image-to-video": {
-    id: "image-to-video",
-    mode: "single-with-prompt",
-    outputType: "video",
-    creditType: "video",
-    translationPrefix: "image-to-video",
-  },
-  "text-to-video": {
-    id: "text-to-video",
-    mode: "text-input",
-    outputType: "video",
-    creditType: "video",
-    translationPrefix: "text-to-video",
-  },
-};
-
-/**
- * Get feature config by ID
- */
-export function getAIFeatureConfig(featureId: AIFeatureId): AIFeatureConfig {
-  const config = AI_FEATURE_CONFIGS[featureId];
-  if (!config) {
-    throw new Error(`Unknown AI feature: ${featureId}`);
-  }
-  return config;
-}
-
-/**
- * Check if feature exists
- */
-export function hasAIFeature(featureId: string): featureId is AIFeatureId {
-  return featureId in AI_FEATURE_CONFIGS;
-}
-
-/**
- * Get all feature IDs
- */
-export function getAllAIFeatureIds(): AIFeatureId[] {
-  return Object.keys(AI_FEATURE_CONFIGS) as AIFeatureId[];
-}
-
-/**
- * Get features by mode
- */
-export function getAIFeaturesByMode(mode: AIFeatureConfig["mode"]): AIFeatureConfig[] {
-  return Object.values(AI_FEATURE_CONFIGS).filter((config) => config.mode === mode);
-}
package/src/presentation/screens/ai-feature/translations.ts
REMOVED
@@ -1,115 +0,0 @@
-/**
- * AI Feature Translations Factory
- * Creates translation objects from prefix and t function
- */
-
-import type { AIFeatureConfig } from "./types";
-
-type TranslateFunction = (key: string) => string;
-
-/**
- * Create single image translations
- */
-export function createSingleImageTranslations(prefix: string, t: TranslateFunction) {
-  return {
-    uploadTitle: t(`${prefix}.uploadTitle`),
-    uploadSubtitle: t(`${prefix}.uploadSubtitle`),
-    uploadChange: t(`${prefix}.uploadChange`),
-    uploadAnalyzing: t(`${prefix}.uploadAnalyzing`),
-    description: t(`${prefix}.description`),
-    processingText: t(`${prefix}.processingText`),
-    processButtonText: t(`${prefix}.processButtonText`),
-    successText: t(`${prefix}.successText`),
-    saveButtonText: t(`${prefix}.saveButtonText`),
-    tryAnotherText: t(`${prefix}.tryAnotherText`),
-  };
-}
-
-/**
- * Create comparison result translations (upscale, photo-restore)
- */
-export function createComparisonTranslations(prefix: string, t: TranslateFunction) {
-  return {
-    ...createSingleImageTranslations(prefix, t),
-    beforeLabel: t(`${prefix}.beforeLabel`),
-    afterLabel: t(`${prefix}.afterLabel`),
-  };
-}
-
-/**
- * Create prompt feature translations (remove-object, replace-background)
- */
-export function createPromptTranslations(prefix: string, t: TranslateFunction) {
-  return {
-    ...createSingleImageTranslations(prefix, t),
-    promptPlaceholder: t(`${prefix}.promptPlaceholder`),
-    maskTitle: t(`${prefix}.maskTitle`),
-    maskSubtitle: t(`${prefix}.maskSubtitle`),
-  };
-}
-
-/**
- * Create text-input translations (text-to-image, meme-generator)
- * For pure text-to-image features without image upload
- */
-export function createTextInputTranslations(prefix: string, t: TranslateFunction) {
-  return {
-    title: t(`${prefix}.title`),
-    description: t(`${prefix}.description`),
-    promptPlaceholder: t(`${prefix}.promptPlaceholder`),
-    processButtonText: t(`${prefix}.processButtonText`),
-    processingText: t(`${prefix}.processingText`),
-    successText: t(`${prefix}.successText`),
-    saveButtonText: t(`${prefix}.saveButtonText`),
-    tryAnotherText: t(`${prefix}.tryAnotherText`),
-    styleLabel: t(`${prefix}.styleLabel`),
-    tipsLabel: t(`${prefix}.tipsLabel`),
-  };
-}
-
-/**
- * Create dual image translations (face-swap, ai-hug, ai-kiss)
- */
-export function createDualImageTranslations(prefix: string, t: TranslateFunction) {
-  return {
-    sourceUploadTitle: t(`${prefix}.sourceUploadTitle`),
-    sourceUploadSubtitle: t(`${prefix}.sourceUploadSubtitle`),
-    targetUploadTitle: t(`${prefix}.targetUploadTitle`),
-    targetUploadSubtitle: t(`${prefix}.targetUploadSubtitle`),
-    uploadChange: t(`${prefix}.uploadChange`),
-    uploadAnalyzing: t(`${prefix}.uploadAnalyzing`),
-    description: t(`${prefix}.description`),
-    processingText: t(`${prefix}.processingText`),
-    processButtonText: t(`${prefix}.processButtonText`),
-    successText: t(`${prefix}.successText`),
-    saveButtonText: t(`${prefix}.saveButtonText`),
-    tryAnotherText: t(`${prefix}.tryAnotherText`),
-    modalTitle: t(`${prefix}.modalTitle`),
-    modalMessage: t(`${prefix}.modalMessage`),
-    modalHint: t(`${prefix}.modalHint`),
-    modalBackgroundHint: t(`${prefix}.modalBackgroundHint`),
-  };
-}
-
-/**
- * Create translations based on feature config
- */
-export function createFeatureTranslations(config: AIFeatureConfig, t: TranslateFunction) {
-  const { translationPrefix, mode, hasComparisonResult } = config;
-
-  switch (mode) {
-    case "single":
-      return hasComparisonResult
-        ? createComparisonTranslations(translationPrefix, t)
-        : createSingleImageTranslations(translationPrefix, t);
-    case "single-with-prompt":
-      return createPromptTranslations(translationPrefix, t);
-    case "text-input":
-      return createTextInputTranslations(translationPrefix, t);
-    case "dual":
-    case "dual-video":
-      return createDualImageTranslations(translationPrefix, t);
-    default:
-      return createSingleImageTranslations(translationPrefix, t);
-  }
-}
package/src/presentation/screens/ai-feature/types.ts
REMOVED
@@ -1,160 +0,0 @@
-/**
- * AIFeatureScreen Types
- * Unified type definitions for all AI feature screens
- */
-
-import type { ReactNode } from "react";
-
-/**
- * All supported AI feature types
- */
-export type AIFeatureId =
-  | "anime-selfie"
-  | "remove-background"
-  | "hd-touch-up"
-  | "upscale"
-  | "photo-restore"
-  | "remove-object"
-  | "replace-background"
-  | "face-swap"
-  | "ai-hug"
-  | "ai-kiss"
-  | "meme-generator"
-  | "image-to-video"
-  | "text-to-video";
-
-/**
- * Image mode for the feature
- */
-export type AIFeatureMode = "single" | "single-with-prompt" | "dual" | "dual-video" | "text-input";
-
-/**
- * Output type of the feature
- */
-export type AIFeatureOutputType = "image" | "video";
-
-/**
- * Credit type for the feature
- */
-export type AIFeatureCreditType = "image" | "video";
-
-/**
- * Translation keys structure for single image features
- */
-export interface SingleImageTranslationKeys {
-  uploadTitle: string;
-  uploadSubtitle: string;
-  uploadChange: string;
-  uploadAnalyzing: string;
-  description: string;
-  processingText: string;
-  processButtonText: string;
-  successText: string;
-  saveButtonText: string;
-  tryAnotherText: string;
-}
-
-/**
- * Translation keys structure for comparison result features (upscale, photo-restore)
- */
-export interface ComparisonTranslationKeys extends SingleImageTranslationKeys {
-  beforeLabel: string;
-  afterLabel: string;
-}
-
-/**
- * Translation keys structure for prompt features (remove-object, replace-background)
- */
-export interface PromptTranslationKeys extends SingleImageTranslationKeys {
-  promptPlaceholder: string;
-  maskTitle?: string;
-  maskSubtitle?: string;
-}
-
-/**
- * Translation keys structure for dual image features
- */
-export interface DualImageTranslationKeys {
-  sourceUploadTitle: string;
-  sourceUploadSubtitle: string;
-  targetUploadTitle: string;
-  targetUploadSubtitle: string;
-  uploadChange: string;
-  uploadAnalyzing: string;
-  description: string;
-  processingText: string;
-  processButtonText: string;
-  successText: string;
-  saveButtonText: string;
-  tryAnotherText: string;
-  modalTitle?: string;
-  modalMessage?: string;
-  modalHint?: string;
-  modalBackgroundHint?: string;
-}
-
-/**
- * Translation keys structure for text-input features (text-to-image, meme-generator)
- */
-export interface TextInputTranslationKeys {
-  title: string;
-  description: string;
-  promptPlaceholder: string;
-  processButtonText: string;
-  processingText: string;
-  successText: string;
-  saveButtonText: string;
-  tryAnotherText: string;
-  styleLabel?: string;
-  tipsLabel?: string;
-}
-
-/**
- * Static feature configuration (doesn't change at runtime)
- */
-export interface AIFeatureConfig {
-  /** Unique feature identifier */
-  readonly id: AIFeatureId;
-  /** Feature mode: single image, dual image, etc. */
-  readonly mode: AIFeatureMode;
-  /** Output type: image or video */
-  readonly outputType: AIFeatureOutputType;
-  /** Credit type to deduct */
-  readonly creditType: AIFeatureCreditType;
-  /** Translation key prefix (e.g., "anime-selfie" for t("anime-selfie.uploadTitle")) */
-  readonly translationPrefix: string;
-  /** Whether this feature has comparison result (before/after slider) */
-  readonly hasComparisonResult?: boolean;
-  /** Feature-specific extra config */
-  readonly extraConfig?: Record<string, unknown>;
-}
-
-/**
- * Runtime props for AIFeatureScreen
- */
-export interface AIFeatureScreenProps {
-  /** Feature configuration from registry */
-  readonly config: AIFeatureConfig;
-  /** Credit cost for this feature */
-  readonly creditCost: number;
-  /** Deduct credits callback */
-  readonly onDeductCredits: (cost: number) => Promise<void | boolean>;
-  /** Select image callback */
-  readonly onSelectImage: () => Promise<string | null>;
-  /** Save media callback */
-  readonly onSaveMedia: (url: string) => Promise<void>;
-  /** Credit guard check callback */
-  readonly onCheckCreditGuard: (cost: number, featureName: string) => Promise<boolean>;
-  /** Current image credits count */
-  readonly imageCredits: number;
-  /** Custom header right content */
-  readonly headerRightContent?: ReactNode;
-}
-
-/**
- * Feature registry entry with component
- */
-export interface AIFeatureRegistryEntry extends AIFeatureConfig {
-  /** The Feature component to render */
-  readonly Component: React.ComponentType<unknown>;
-}