@umituz/react-native-ai-fal-provider 2.0.14 → 2.0.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/package.json +1 -1
  2. package/src/domain/entities/error.types.ts +2 -0
  3. package/src/domain/types/provider.types.ts +1 -0
  4. package/src/exports/infrastructure.ts +0 -3
  5. package/src/exports/presentation.ts +0 -9
  6. package/src/index.ts +0 -3
  7. package/src/infrastructure/services/fal-feature-models.ts +3 -1
  8. package/src/infrastructure/services/fal-provider-subscription.ts +35 -13
  9. package/src/infrastructure/services/fal-provider.ts +6 -0
  10. package/src/infrastructure/services/fal-queue-operations.ts +30 -1
  11. package/src/infrastructure/services/fal-status-mapper.ts +2 -0
  12. package/src/infrastructure/services/request-store.ts +30 -2
  13. package/src/infrastructure/utils/cost-tracker.ts +34 -8
  14. package/src/infrastructure/utils/error-mapper.ts +17 -3
  15. package/src/infrastructure/utils/image-feature-builders.util.ts +10 -5
  16. package/src/infrastructure/utils/index.ts +7 -6
  17. package/src/infrastructure/utils/input-validator.util.ts +92 -0
  18. package/src/infrastructure/utils/type-guards.util.ts +7 -3
  19. package/src/infrastructure/utils/video-feature-builders.util.ts +6 -3
  20. package/src/infrastructure/validators/nsfw-validator.ts +62 -4
  21. package/src/presentation/hooks/index.ts +3 -21
  22. package/src/presentation/hooks/use-fal-generation.ts +5 -4
  23. package/src/domain/constants/default-models.constants.README.md +0 -378
  24. package/src/domain/constants/models/image-to-video.README.md +0 -266
  25. package/src/domain/constants/models/index.README.md +0 -269
  26. package/src/domain/constants/models/text-to-image.README.md +0 -237
  27. package/src/domain/constants/models/text-to-text.README.md +0 -249
  28. package/src/domain/constants/models/text-to-video.README.md +0 -259
  29. package/src/domain/constants/models/text-to-voice.README.md +0 -264
  30. package/src/domain/entities/error.types.README.md +0 -292
  31. package/src/domain/entities/fal.types.README.md +0 -460
  32. package/src/domain/types/index.README.md +0 -229
  33. package/src/domain/types/model-selection.types.README.md +0 -311
  34. package/src/exports/registry.ts +0 -39
  35. package/src/index.README.md +0 -420
  36. package/src/infrastructure/builders/image-feature-builder.README.md +0 -435
  37. package/src/infrastructure/builders/index.ts +0 -7
  38. package/src/infrastructure/services/fal-models-service.README.md +0 -293
  39. package/src/infrastructure/services/fal-provider-subscription.README.md +0 -257
  40. package/src/infrastructure/services/fal-provider.README.md +0 -474
  41. package/src/infrastructure/services/fal-status-mapper.README.md +0 -246
  42. package/src/infrastructure/services/nsfw-content-error.README.md +0 -215
  43. package/src/infrastructure/utils/base-builders.util.README.md +0 -313
  44. package/src/infrastructure/utils/cost-tracker-queries.ts +0 -67
  45. package/src/infrastructure/utils/error-categorizer.README.md +0 -395
  46. package/src/infrastructure/utils/error-mapper.README.md +0 -367
  47. package/src/infrastructure/utils/helpers.util.README.md +0 -395
  48. package/src/infrastructure/utils/image-feature-builders.util.README.md +0 -411
  49. package/src/infrastructure/utils/index.README.md +0 -338
  50. package/src/infrastructure/utils/job-metadata/index.README.md +0 -267
  51. package/src/infrastructure/utils/job-metadata/job-metadata-format.util.README.md +0 -209
  52. package/src/infrastructure/utils/job-metadata/job-metadata-lifecycle.util.README.md +0 -311
  53. package/src/infrastructure/utils/job-metadata/job-metadata-queries.util.README.md +0 -332
  54. package/src/infrastructure/utils/job-metadata/job-metadata.types.README.md +0 -446
  55. package/src/infrastructure/utils/job-metadata.README.md +0 -268
  56. package/src/infrastructure/utils/timing-helpers.util.ts +0 -56
  57. package/src/infrastructure/utils/type-guards.util.README.md +0 -371
  58. package/src/infrastructure/validators/index.README.md +0 -205
  59. package/src/infrastructure/validators/nsfw-validator.README.md +0 -309
  60. package/src/presentation/hooks/index.README.md +0 -224
  61. package/src/presentation/hooks/use-fal-generation.README.md +0 -398
  62. package/src/presentation/hooks/use-model-capabilities.ts +0 -99
  63. package/src/presentation/hooks/use-models.README.md +0 -318
  64. package/src/registry/global-capabilities.ts +0 -75
  65. package/src/registry/index.ts +0 -50
  66. package/src/registry/model-registry.service.ts +0 -93
  67. package/src/registry/model-registry.types.ts +0 -106
  68. package/src/registry/models/index.ts +0 -6
  69. package/src/registry/models/sora-2.config.ts +0 -95
@@ -1,237 +0,0 @@
1
- # Text-to-Image Generation
2
-
3
- Generates high-quality images from text-based prompts.
4
-
5
- **Location:** `src/domain/constants/models/text-to-image.ts`
6
-
7
- ## Overview
8
-
9
- Text-to-image generation creates visual content from textual descriptions using AI models. This module provides model configurations for various image generation capabilities with different quality levels and cost structures.
10
-
11
- ## Purpose
12
-
13
- Provides image generation models by:
14
- - Supporting multiple quality tiers (fast, quality, professional)
15
- - Offering flexible image sizes and aspect ratios
16
- - Enabling batch image generation
17
- - Providing safety checking
18
- - Supporting configurable inference steps
19
-
20
- ## Import
21
-
22
- ```typescript
23
- import {
24
- falProvider,
25
- useFalGeneration
26
- } from '@umituz/react-native-ai-fal-provider';
27
- ```
28
-
29
- ## Available Models
30
-
31
- ### Flux Schnell
32
- - **Model ID:** `fal-ai/flux/schnell`
33
- - **Cost:** 1 credit (Free), 0.5 credit (Premium)
34
- - **Description:** Fast and efficient image generation (Default)
35
- - **Use Cases:** Quick iterations, drafts, preview generations
36
-
37
- ### Flux Dev
38
- - **Model ID:** `fal-ai/flux/dev`
39
- - **Cost:** 2 credits (Free), 1 credit (Premium)
40
- - **Description:** High-quality image generation
41
- - **Use Cases:** Production images, enhanced quality
42
-
43
- ### Flux Pro
44
- - **Model ID:** `fal-ai/flux-pro`
45
- - **Cost:** 3 credits (Free), 1.5 credits (Premium)
46
- - **Description:** Professional-level image generation
47
- - **Use Cases:** Professional work, highest quality requirements
48
-
49
- ## Parameters
50
-
51
- ### Common Parameters
52
-
53
- | Parameter | Type | Description | Default |
54
- |-----------|------|-------------|---------|
55
- | `prompt` | `string` | Description of image to generate | - |
56
- | `image_size` | `string` | Output image dimensions | `square_hd` |
57
- | `num_inference_steps` | `number` | Number of inference steps | 4 |
58
- | `num_images` | `number` | Number of images to generate | 1 |
59
- | `enable_safety_checker` | `boolean` | Enable safety checking | true |
60
-
61
- ### Image Sizes
62
-
63
- - `square_hd`: 1024x1024
64
- - `square`: 512x512
65
- - `portrait_4_3`: 832x1104
66
- - `portrait_16_9`: 832x1216
67
- - `landscape_4_3`: 1104x832
68
- - `landscape_16_9`: 1216x832
69
-
70
- ## Usage Guidelines
71
-
72
- ### For Image Generation
73
-
74
- **Generation Pattern:**
75
- 1. Select model based on quality needs
76
- 2. Construct descriptive prompt
77
- 3. Set appropriate image size
78
- 4. Configure inference steps (quality vs speed)
79
- 5. Handle generation result
80
-
81
- **Best Practices:**
82
- - Use descriptive, detailed prompts
83
- - Match image size to intended use
84
- - Start with Schnell for quick iterations
85
- - Upgrade to Dev/Pro for final output
86
- - Enable safety checker for production
87
-
88
- ### For Model Selection
89
-
90
- **Selection Pattern:**
91
- 1. Evaluate quality requirements
92
- 2. Consider cost constraints
93
- 3. Balance speed vs quality
94
- 4. Use appropriate model for task
95
- 5. Scale up as needed
96
-
97
- **Model Choice Guidelines:**
98
- - Schnell: Testing, drafts, previews
99
- - Dev: Standard production use
100
- - Pro: Professional, highest quality
101
-
102
- ## Best Practices
103
-
104
- ### 1. Write Effective Prompts
105
-
106
- Create detailed descriptions:
107
- - Be specific about subjects and style
108
- - Include lighting and atmosphere details
109
- - Specify composition and framing
110
- - Mention mood and emotion
111
- - Use natural language descriptions
112
-
113
- ### 2. Choose Right Model
114
-
115
- Select appropriate quality tier:
116
- - Use Schnell for rapid iteration
117
- - Upgrade to Dev for better quality
118
- - Use Pro for professional work
119
- - Consider cost vs quality trade-off
120
- - Test with lower tier first
121
-
122
- ### 3. Optimize Parameters
123
-
124
- Balance quality and speed:
125
- - Lower steps for faster generation
126
- - Higher steps for better quality
127
- - Adjust image size for use case
128
- - Generate multiple images when needed
129
- - Enable safety checking
130
-
131
- ### 4. Handle Results
132
-
133
- Process generated images:
134
- - Validate image quality
135
- - Check safety filter results
136
- - Handle generation failures
137
- - Display images appropriately
138
- - Store results efficiently
139
-
140
- ### 5. Manage Costs
141
-
142
- Control generation costs:
143
- - Start with lower tier models
144
- - Limit batch generation
145
- - Use appropriate image sizes
146
- - Cache successful generations
147
- - Monitor credit usage
148
-
149
- ## For AI Agents
150
-
151
- ### When Using Text-to-Image Models
152
-
153
- **DO:**
154
- - Select appropriate model for task
155
- - Write detailed descriptive prompts
156
- - Set correct image sizes
157
- - Enable safety checking
158
- - Handle generation errors
159
- - Monitor credit usage
160
- - Follow parameter constraints
161
-
162
- **DON'T:**
163
- - Use highest tier for testing
164
- - Write vague or short prompts
165
- - Ignore safety checking
166
- - Forget to handle failures
167
- - Generate unnecessary batches
168
- - Exceed credit limits
169
- - Use wrong aspect ratios
170
-
171
- ### When Writing Prompts
172
-
173
- **DO:**
174
- - Be specific and detailed
175
- - Include style information
176
- - Describe lighting and mood
177
- - Specify composition
178
- - Use natural language
179
- - Include important details
180
- - Reference art styles if needed
181
-
182
- **DON'T:**
183
- - Use vague descriptions
184
- - Forget key details
185
- - Ignore style guidance
186
- - Skip composition details
187
- - Use overly complex language
188
- - Include contradictory elements
189
- - Exceed reasonable length
190
-
191
- ### When Selecting Models
192
-
193
- **DO:**
194
- - Start with Schnell for testing
195
- - Upgrade to Dev for production
196
- - Use Pro for professional work
197
- - Consider cost implications
198
- - Balance quality vs speed
199
- - Test before final generation
200
- - Scale appropriately
201
-
202
- **DON'T:**
203
- - Always use highest tier
204
- - Ignore cost constraints
205
- - Skip testing phase
206
- - Use Pro for drafts
207
- - Forget quality requirements
208
- - Waste credits on iterations
209
- - Select model randomly
210
-
211
- ## Implementation Notes
212
-
213
- **Location:** `src/domain/constants/models/text-to-image.ts`
214
-
215
- **Dependencies:**
216
- - FAL provider service
217
- - useFalGeneration hook
218
- - Image format utilities
219
- - Safety validators
220
-
221
- **Model Categories:**
222
- - Fast generation (Flux Schnell)
223
- - Quality generation (Flux Dev)
224
- - Professional generation (Flux Pro)
225
-
226
- **Import:**
227
- ```typescript
228
- import {
229
- falProvider,
230
- useFalGeneration
231
- } from '@umituz/react-native-ai-fal-provider';
232
- ```
233
-
234
- **Related:**
235
- - FAL provider: `src/infrastructure/services/fal-provider.ts`
236
- - Generation hook: `src/presentation/hooks/use-fal-generation.ts`
237
- - Default models: `src/domain/constants/default-models.constants.ts`
@@ -1,249 +0,0 @@
1
- # Text-to-Text Generation
2
-
3
- Generates text from text using LLM-based models.
4
-
5
- **Location:** `src/domain/constants/models/text-to-text.ts`
6
-
7
- ## Overview
8
-
9
- Text-to-text generation uses Large Language Models (LLMs) to generate, transform, or analyze text content. This module provides model configurations for various text generation tasks with controllable parameters.
10
-
11
- ## Purpose
12
-
13
- Provides text generation models by:
14
- - Supporting question answering
15
- - Enabling content generation
16
- - Facilitating text transformation
17
- - Providing summarization
18
- - Supporting translation tasks
19
-
20
- ## Import
21
-
22
- ```typescript
23
- import {
24
- falProvider,
25
- useFalGeneration
26
- } from '@umituz/react-native-ai-fal-provider';
27
- ```
28
-
29
- ## Available Models
30
-
31
- ### Llama 3 8B Instruct
32
- - **Model ID:** `fal-ai/llama-3-8b-instruct`
33
- - **Cost:** 0.1 credits (Free), 0.05 credits (Premium)
34
- - **Description:** Fast and reliable text generation (Default)
35
- - **Use Cases:** QA, content generation, summarization, translation
36
-
37
- ## Parameters
38
-
39
- ### Basic Parameters
40
-
41
- **Configuration:**
42
- - `prompt`: Input prompt or question
43
- - `max_tokens`: Maximum tokens to generate (default: 512)
44
- - `temperature`: Creativity level 0.0 - 1.0 (default: 0.7)
45
- - `top_p`: Nucleus sampling 0.0 - 1.0 (default: 0.9)
46
- - `top_k`: Top-k sampling (default: 40)
47
- - `repetition_penalty`: Reduce repetitions 1.0 - 2.0
48
-
49
- ### Parameter Descriptions
50
-
51
- | Parameter | Range | Description |
52
- |-----------|-------|-------------|
53
- | `prompt` | string | Input text or question |
54
- | `max_tokens` | 1 - 4096 | Maximum tokens to generate |
55
- | `temperature` | 0.0 - 1.0 | Low = focused, High = creative |
56
- | `top_p` | 0.0 - 1.0 | Nucleus sampling threshold |
57
- | `top_k` | 1 - 100 | Top-k sampling value |
58
- | `repetition_penalty` | 1.0 - 2.0 | Reduce repetitions |
59
-
60
- ## Usage Guidelines
61
-
62
- ### For Text Generation
63
-
64
- **Generation Pattern:**
65
- 1. Construct clear prompt
66
- 2. Set appropriate max_tokens
67
- 3. Configure temperature for task
68
- 4. Generate text
69
- 5. Handle output appropriately
70
-
71
- **Best Practices:**
72
- - Write clear, specific prompts
73
- - Use system prompts for behavior
74
- - Adjust temperature for task type
75
- - Set appropriate max_tokens
76
- - Validate output quality
77
-
78
- ### For Temperature Selection
79
-
80
- **Temperature Guidelines:**
81
-
82
- **Low (0.0 - 0.3):**
83
- - Factual responses
84
- - Code generation
85
- - Deterministic output
86
- - Technical content
87
-
88
- **Medium (0.4 - 0.7):**
89
- - Balanced output
90
- - General assistance
91
- - Standard responses
92
- - Most use cases
93
-
94
- **High (0.8 - 1.0):**
95
- - Creative writing
96
- - Diverse outputs
97
- - Brainstorming
98
- - Fiction generation
99
-
100
- ## Best Practices
101
-
102
- ### 1. Engineer Effective Prompts
103
-
104
- Create well-structured prompts:
105
- - Define role or context
106
- - Provide clear instructions
107
- - Specify output format
108
- - Include examples if needed
109
- - Set constraints explicitly
110
-
111
- **Prompt Structure:**
112
- ```
113
- Role/Context
114
- Task/Instructions
115
- Input Content
116
- Output Format/Constraints
117
- ```
118
-
119
- ### 2. Optimize Token Usage
120
-
121
- Manage generation efficiently:
122
- - Set appropriate max_tokens for task
123
- - Use shorter prompts when possible
124
- - Limit output length for simple tasks
125
- - Monitor token consumption
126
- - Cache common responses
127
-
128
- ### 3. Select Right Temperature
129
-
130
- Choose temperature based on task:
131
- - Factual QA: 0.2 - 0.3
132
- - Code generation: 0.2 - 0.4
133
- - Translation: 0.3 - 0.5
134
- - Summarization: 0.5 - 0.7
135
- - General assistance: 0.6 - 0.7
136
- - Creative writing: 0.8 - 1.0
137
-
138
- ### 4. Use System Prompts
139
-
140
- Guide model behavior:
141
- - Define role explicitly
142
- - Set behavioral constraints
143
- - Specify response style
144
- - Include format requirements
145
- - Maintain consistency
146
-
147
- ### 5. Handle Edge Cases
148
-
149
- Manage challenging scenarios:
150
- - Validate input prompts
151
- - Handle timeout errors
152
- - Check quota limits
153
- - Validate output quality
154
- - Implement fallback strategies
155
-
156
- ## For AI Agents
157
-
158
- ### When Using Text-to-Text Models
159
-
160
- **DO:**
161
- - Write clear specific prompts
162
- - Use system prompts for guidance
163
- - Set appropriate temperature
164
- - Configure max_tokens correctly
165
- - Validate output quality
166
- - Handle errors appropriately
167
- - Monitor token usage
168
- - Test with various prompts
169
-
170
- **DON'T:**
171
- - Write vague prompts
172
- - Ignore temperature settings
173
- - Set excessive max_tokens
174
- - Skip output validation
175
- - Forget error handling
176
- - Waste tokens on long outputs
177
- - Use wrong temperature
178
- - Assume perfect output
179
-
180
- ### When Engineering Prompts
181
-
182
- **DO:**
183
- - Define role or context
184
- - Provide clear instructions
185
- - Specify output format
186
- - Include examples when helpful
187
- - Set explicit constraints
188
- - Test prompt variations
189
- - Iterate on prompt design
190
-
191
- **DON'T:**
192
- - Use vague instructions
193
- - Skip context setting
194
- - Forget format specification
195
- - Ignore constraint setting
196
- - Use overly complex prompts
197
- - Skip testing
198
- - Use first draft
199
-
200
- ### When Configuring Parameters
201
-
202
- **DO:**
203
- - Match temperature to task
204
- - Set appropriate max_tokens
205
- - Use repetition penalty
206
- - Configure sampling parameters
207
- - Test different settings
208
- - Monitor output quality
209
- - Adjust based on results
210
-
211
- **DON'T:**
212
- - Use default temperature always
213
- - Set excessive max_tokens
214
- - Ignore sampling parameters
215
- - Skip parameter testing
216
- - Use arbitrary values
217
- - Forget output quality
218
- - Waste tokens
219
-
220
- ## Implementation Notes
221
-
222
- **Location:** `src/domain/constants/models/text-to-text.ts`
223
-
224
- **Dependencies:**
225
- - FAL provider service
226
- - useFalGeneration hook
227
- - Text processing utilities
228
- - Prompt templates
229
-
230
- **Common Use Cases:**
231
- - Question answering
232
- - Content generation
233
- - Text summarization
234
- - Translation
235
- - Code generation
236
- - Creative writing
237
-
238
- **Import:**
239
- ```typescript
240
- import {
241
- falProvider,
242
- useFalGeneration
243
- } from '@umituz/react-native-ai-fal-provider';
244
- ```
245
-
246
- **Related:**
247
- - FAL provider: `src/infrastructure/services/fal-provider.ts`
248
- - Generation hook: `src/presentation/hooks/use-fal-generation.ts`
249
- - Default models: `src/domain/constants/default-models.constants.ts`