@runware/sdk-js 1.0.30 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@runware/sdk-js",
3
- "version": "1.0.30",
3
+ "version": "1.1.0",
4
4
  "description": "The SDK is used to run image inference with the Runware API, powered by the RunWare inference platform. It can be used to generate imaged with text-to-image and image-to-image. It also allows the use of an existing gallery of models or selecting any model or LoRA from the CivitAI gallery. The API also supports upscaling, background removal, inpainting and outpainting, and a series of other ControlNet models.",
5
5
  "main": "dist/index.js",
6
6
  "module": "dist/index.mjs",
@@ -14,7 +14,7 @@
14
14
  "pkg": "npx pkgfiles",
15
15
  "dev:test": "vitest --reporter verbose",
16
16
  "test": "vitest run --reporter verbose",
17
- "test:single": "vitest run --reporter verbose tests/Runware/request-images.test.ts"
17
+ "test:single": "vitest run --reporter verbose tests/Runware/upscale-gan.test.ts"
18
18
  },
19
19
  "keywords": [
20
20
  "runware",
package/readme.md CHANGED
@@ -47,27 +47,42 @@ NB: All errors can be caught in the catch block of each request
47
47
  const runware = new Runware({ apiKey: "API_KEY" });
48
48
  const images = await runware.requestImages({
49
49
  positivePrompt: string;
50
- imageSize: number;
51
- modelId: number;
52
- numberOfImages?: number;
53
50
  negativePrompt?: string;
54
- useCache?: boolean;
55
- lora?: ILora[];
56
- controlNet?: IControlNet[];
57
- imageInitiator?: File | string;
58
- imageMaskInitiator?: File | string;
51
+ width: number;
52
+ height: number;
53
+ model: string;
54
+ numberResults?: number;
55
+ outputType?: "URL" | "base64Data" | "dataURI";
56
+ outputFormat?: "JPG" | "PNG" | "WEBP";
57
+ uploadEndpoint?: string;
58
+ checkNSFW?: boolean;
59
+ seedImage?: File | string;
60
+ maskImage?: File | string;
61
+ strength?: number;
59
62
  steps?: number;
63
+ scheduler?: string;
64
+ seed?: number;
65
+ CFGScale?: number;
66
+ clipSkip?: number;
67
+ usePromptWeighting?: boolean;
68
+ controlNet?: IControlNet[];
69
+ lora?: ILora[];
70
+
71
+ useCache?: boolean;
60
72
  returnBase64Image?: boolean;
61
73
  onPartialImages?: (images: IImage[], error: IError) => void;
62
74
  })
63
- console.log(images)
64
75
 
65
76
  return interface IImage {
66
- imageSrc: string;
77
+ taskType: ETaskType;
67
78
  imageUUID: string;
79
+ inputImageUUID?: string;
68
80
  taskUUID: string;
69
- bNSFWContent: boolean;
70
- cost: string;
81
+ imageURL?: string;
82
+ imageBase64Data?: string;
83
+ imageDataURI?: string;
84
+ NSFWContent?: boolean;
85
+ cost: number;
71
86
  }[]
72
87
  ```
73
88
 
@@ -79,20 +94,19 @@ const runware = new Runware({ apiKey: "API_KEY" });
79
94
  const [firstImagesRequest, secondImagesRequest] = await Promise.all([
80
95
  runware.requestImages({
81
96
  positivePrompt: string;
82
- imageSize: number;
83
- modelId: number;
84
- numberOfImages?: number;
97
+ width: number;
98
+ height: number;
99
+ numberResults: number;
100
+ model: string;
85
101
  negativePrompt?: string;
86
- useCache?: boolean;
87
102
  onPartialImages?: (images: IImage[], error: IError) => void;
88
103
  }),
89
104
  runware.requestImages({
90
105
  positivePrompt: string;
91
- imageSize: number;
92
- modelId: number;
93
- numberOfImages?: number;
94
- negativePrompt?: string;
95
- useCache?: boolean;
106
+ width: number;
107
+ height: number;
108
+ numberResults: number;
109
+ model: string;
96
110
  onPartialImages?: (images: IImage[], error: IError) => void;
97
111
  })
98
112
  ])
@@ -100,38 +114,57 @@ const [firstImagesRequest, secondImagesRequest] = await Promise.all([
100
114
  console.log({firstImagesRequest, secondImagesRequest})
101
115
 
102
116
  return interface IImage {
103
- imageSrc: string;
117
+ taskType: ETaskType;
104
118
  imageUUID: string;
119
+ inputImageUUID?: string;
105
120
  taskUUID: string;
106
- bNSFWContent: boolean;
121
+ imageURL?: string;
122
+ imageBase64Data?: string;
123
+ imageDataURI?: string;
124
+ NSFWContent?: boolean;
125
+ cost: number;
107
126
  }[]
108
127
  ```
109
128
 
110
- | Parameter | Type | Use |
111
- | ------------------ | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
112
- | positivePrompt | string | Defines the positive prompt description of the image. |
113
- | imageSize | number | Controls the image size. |
114
- | modelId | number | The model id of the image to be requested. |
115
- | numberOfImages | number: `(Optional)` (default = 1) | `(Optional)` The number of images to be sent. |
116
- | useCache | string: `(Optional)` | Should use cached images (for faster response) or generate new images. |
117
- | lora | ILora[]: `(Optional)` | If provided it should be an array of objects. Each object must have two attributes: `loraCivitaiAIR` (string) and `weight` (float) with values from 0 to 1. |
118
- | controlNet | IControlNet[]: `(Optional)` | If provided, should be an array of objects. Each object must have five attributes: |
119
- | imageInitiator | string or File: `(Optional)` | The image to be used as the seed image. It can be the UUID of previously generated image, or an image from a file. |
120
- | imageMaskInitiator | string or File: `(Optional)` | The image to be used as the mask image. It can be the UUID of previously generated image, or an image from a file. |
121
- | returnBase64Image | boolean: `(Optional)` | Returns base64 image. |
122
- | onPartialImages | function: `(Optional)` | If you want to receive the images as they are generated instead of waiting for the async request, you get the images as they are generated from this function. |
129
+ | Parameter | Type | Use |
130
+ | ------------------ | ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
131
+ | positivePrompt | string | Defines the positive prompt description of the image. |
132
+ | negativePrompt | string | Defines the negative prompt description of the image. |
133
+ | width | number | Controls the image width. |
134
+ | height | number | Controls the image height. |
135
+ | model | string | The AIR system ID of the image to be requested. |
136
+ | numberResults | number: `(Optional)` (default = 1) | `(Optional)` The number of images to be generated. |
137
+ | outputType | IOutputType: `(Optional)` | Specifies the output type in which the image is returned. |
138
+ | outputFormat | IOutputFormat: `(Optional)` | Specifies the format of the output image. |
139
+ | uploadEndpoint | string: `(Optional)` | This parameter allows you to specify a URL to which the generated image will be uploaded as binary image data using the HTTP PUT method. For example, an S3 bucket URL can be used as the upload endpoint. |
140
+ | checkNSFW | boolean: `(Optional)` | This parameter is used to enable or disable the NSFW check. When enabled, the API will check if the image contains NSFW (not safe for work) content. This check is done using a pre-trained model that detects adult content in images. |
141
+ | seedImage | string or File: `(Optional)` | When doing Image-to-Image, Inpainting or Outpainting, this parameter is required. Specifies the seed image to be used for the diffusion process. |
142
+ | maskImage | string or File: `(Optional)` | The image to be used as the mask image. It can be the UUID of previously generated image, or an image from a file. |
143
+ | strength | number: `(Optional)` | When doing Image-to-Image, Inpainting or Outpainting, this parameter is used to determine the influence of the seedImage image in the generated output. A higher value results in more influence from the original image, while a lower value allows more creative deviation. |
144
+ | steps | number: `(Optional)` | The number of steps is the number of iterations the model will perform to generate the image. The higher the number of steps, the more detailed the image will be. |
145
+ | scheduler | string: `(Optional)` | A scheduler is a component that manages the inference process. Different schedulers can be used to achieve different results like more detailed images, faster inference, or more accurate results. |
146
+ | seed | number: `(Optional)` | A seed is a value used to randomize the image generation. If you want to make images reproducible (generate the same image multiple times), you can use the same seed value. |
147
+ | CFGScale | number: `(Optional)` | Guidance scale represents how closely the images will resemble the prompt or how much freedom the AI model has. Higher values are closer to the prompt. Low values may reduce the quality of the results. |
148
+ | clipSkip | number: `(Optional)` | CLIP Skip is a feature that enables skipping layers of the CLIP embedding process, leading to quicker and more varied image generation. |
149
+ | usePromptWeighting | boolean: `(Optional)` | Allows setting different weights per words or expressions in prompts. |
150
151
+ | lora | ILora[]: `(Optional)` | With LoRA (Low-Rank Adaptation), you can adapt a model to specific styles or features by emphasizing particular aspects of the data. |
152
+ | controlNet | IControlNet[]: `(Optional)` | With ControlNet, you can provide a guide image to help the model generate images that align with the desired structure. |
153
+ | onPartialImages | function: `(Optional)` | If you want to receive the images as they are generated instead of waiting for the async request, you get the images as they are generated from this function. |
154
+ | includeCost | boolean `(Optional)` | If set to true, the cost to perform the task will be included in the response object. |
123
155
 
124
156
  ##### ControlNet Params
125
157
 
126
- | Parameter | Type | Use |
127
- | --------------------- | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
128
- | preprocessor | string | Defines the positive prompt description of the image. |
129
- | weight | number | an have values between 0 and 1 and represent the weight of the ControlNet preprocessor in the image. |
130
- | startStep | number | represents the moment in which the ControlNet preprocessor starts to control the inference. It can take values from 0 to the maximum number of `steps` in the image create request. This can also be replaced with `startStepPercentage` (float) which represents the same value but in percentages. It takes values from 0 to 1. |
131
- | numberOfImages | number: `(Optional)` (default = 1) | `(Optional)` The number of images to be sent. |
132
- | endStep | number | similar with `startStep` but represents the end of the preprocessor control of the image inference. The equivalent of the percentage option is `startStepPercentage` (float). |
133
- | guideImage | file or string `(Optional)` | The image requires for the guide image. It can be the UUID of previously generated image, or an image from a file. |
134
- | guideImageUnprocessed | file or string `(Optional)` | The image requires for the guide image unprocessed. It can be the UUID of previously generated image, or an image from a file. |
158
+ | Parameter | Type | Use |
159
+ | ------------------- | --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
160
+ | model | string | Defines the model to use for the control net. |
161
+ | guideImage | file or string `(Optional)` | The image requires for the guide image. It can be the UUID of previously generated image, or an image from a file. |
162
+ | weight | number `(Optional)` | Can have values between 0 and 1 and represents the weight of the ControlNet preprocessor in the image. |
163
+ | startStep | number `(Optional)` | represents the moment in which the ControlNet preprocessor starts to control the inference. It can take values from 0 to the maximum number of `steps` in the image create request. This can also be replaced with `startStepPercentage` (float) which represents the same value but in percentages. It takes values from 0 to 1. |
164
+ | startStepPercentage | number `(Optional)` | Represents the percentage of steps in which the ControlNet model starts to control the inference process. |
165
+ | endStep | number `(Optional)` | similar with `startStep` but represents the end of the preprocessor control of the image inference. The equivalent of the percentage option is `endStepPercentage` (float). |
166
+ | endStepPercentage | number `(Optional)` | Represents the percentage of steps in which the ControlNet model ends to control the inference process. |
167
+ | controlMode | string `(Optional)` | This parameter has 3 options: prompt, controlnet and balanced |
135
168
 
136
169
   
137
170
 
@@ -141,19 +174,22 @@ return interface IImage {
141
174
 
142
175
  const runware = new Runware({ apiKey: "API_KEY" });
143
176
  const imageToText = await runware.requestImageToText({
144
- imageInitiator: string | File
177
+ inputImage: string | File
145
178
  })
146
179
  console.log(imageToText)
147
180
 
148
181
  return interface IImageToText {
149
- taskUUID: string;
150
- text: string;
182
+ taskType: string;
183
+ taskUUID: string;
184
+ text: string;
185
+ cost?: number;
151
186
  }
152
187
  ```
153
188
 
154
- | Parameter | Type | Use |
155
- | -------------- | -------------- | ------------------------------------------------------------------------------------------------------------------ |
156
- | imageInitiator | string or File | The image to be used as the seed image. It can be the UUID of previously generated image, or an image from a file. |
189
+ | Parameter | Type | Use |
190
+ | ----------- | -------------------- | ------------------------------------------------------------------------------------------------------------------ |
191
+ | inputImage | string or File | The image to be used as the seed image. It can be the UUID of previously generated image, or an image from a file. |
192
+ | includeCost | boolean `(Optional)` | If set to true, the cost to perform the task will be included in the response object. |
157
193
 
158
194
   
159
195
 
@@ -164,19 +200,42 @@ return interface IImageToText {
164
200
  const runware = new Runware({ apiKey: "API_KEY" });
165
201
  const image = await runware.removeImageBackground({
166
202
  imageInitiator: string | File
203
+ outputType?: IOutputType;
204
+ outputFormat?: IOutputFormat;
205
+ rgba?: number[];
206
+ postProcessMask?: boolean;
207
+ returnOnlyMask?: boolean;
208
+ alphaMatting?: boolean;
209
+ alphaMattingForegroundThreshold?: number;
210
+ alphaMattingBackgroundThreshold?: number;
211
+ alphaMattingErodeSize?: number;
167
212
  })
168
213
  console.log(image)
169
214
  return interface IImage {
170
- imageSrc: string;
171
- imageUUID: string;
215
+ taskType: ETaskType;
172
216
  taskUUID: string;
173
- bNSFWContent: boolean;
217
+ imageUUID: string;
218
+ inputImageUUID: string;
219
+ imageURL?: string;
220
+ imageBase64Data?: string;
221
+ imageDataURI?: string;
222
+ cost: number;
174
223
  }[]
175
224
  ```
176
225
 
177
- | Parameter | Type | Use |
178
- | -------------- | -------------- | ------------------------------------------------------------------------------------------------------------------ |
179
- | imageInitiator | string or File | The image to be used as the seed image. It can be the UUID of previously generated image, or an image from a file. |
226
+ | Parameter | Type | Use |
227
+ | ------------------------------- | --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
228
+ | inputImage | string or File | The image to be used as the seed image. It can be the UUID of previously generated image, or an image from a file. |
229
+ | outputType | IOutputType: `(Optional)` | Specifies the output type in which the image is returned. |
230
+ | outputFormat | IOutputFormat: `(Optional)` | Specifies the format of the output image. |
231
+ | includeCost | boolean `(Optional)` | If set to true, the cost to perform the task will be included in the response object. |
232
+ | rgba | number[] `(Optional)` | An array representing the [red, green, blue, alpha] values that define the color of the removed background. The alpha channel controls transparency. |
233
+ | postProcessMask | boolean `(Optional)` | Flag indicating whether to post-process the mask. Controls whether the mask should undergo additional post-processing. |
234
+ | returnOnlyMask | boolean `(Optional)` | Flag indicating whether to return only the mask. The mask is the opposite of the image background removal. |
235
+ | alphaMatting | boolean `(Optional)` | Flag indicating whether to use alpha matting. Alpha matting is a post-processing technique that enhances the quality of the output by refining the edges of the foreground object. |
236
+ | alphaMattingForegroundThreshold | number `(Optional)` | Threshold value used in alpha matting to distinguish the foreground from the background. Adjusting this parameter affects the sharpness and accuracy of the foreground object edges. |
237
+ | alphaMattingBackgroundThreshold | number `(Optional)` | Threshold value used in alpha matting to refine the background areas. It influences how aggressively the algorithm removes the background while preserving image details. |
238
+ | alphaMattingErodeSize | number `(Optional)` | Specifies the size of the erosion operation used in alpha matting. Erosion helps in smoothing the edges of the foreground object for a cleaner removal of the background. |
180
239
 
181
240
   
182
241
 
@@ -186,23 +245,34 @@ return interface IImage {
186
245
 
187
246
  const runware = new Runware({ apiKey: "API_KEY" });
188
247
  const image = await runware.upscaleGan({
189
- imageInitiator: string | File;
248
+ inputImage: File | string;
190
249
  upscaleFactor: number;
250
+ outputType?: IOutputType;
251
+ outputFormat?: IOutputFormat;
252
+ includeCost?: boolean
191
253
  })
192
254
  console.log(image)
193
255
  return interface IImage {
194
- imageSrc: string;
256
+ taskType: ETaskType;
195
257
  imageUUID: string;
258
+ inputImageUUID?: string;
196
259
  taskUUID: string;
197
- bNSFWContent: boolean;
260
+ imageURL?: string;
261
+ imageBase64Data?: string;
262
+ imageDataURI?: string;
263
+ NSFWContent?: boolean;
264
+ cost: number;
198
265
  }[]
199
266
 
200
267
  ```
201
268
 
202
- | Parameter | Type | Use |
203
- | -------------- | -------------- | ------------------------------------------------------------------------------------------------------------------ |
204
- | imageInitiator | string or File | The image to be used as the seed image. It can be the UUID of previously generated image, or an image from a file. |
205
- | upscaleFactor | number | The number of times to upscale; |
269
+ | Parameter | Type | Use |
270
+ | ------------- | --------------------------- | ------------------------------------------------------------------------------------------------------------------ |
271
+ | inputImage | string or File | The image to be used as the seed image. It can be the UUID of previously generated image, or an image from a file. |
272
+ | upscaleFactor | number | The number of times to upscale; |
273
+ | outputType | IOutputType: `(Optional)` | Specifies the output type in which the image is returned. |
274
+ | outputFormat | IOutputFormat: `(Optional)` | Specifies the format of the output image. |
275
+ | includeCost | boolean `(Optional)` | If set to true, the cost to perform the task will be included in the response object. |
206
276
 
207
277
   
208
278
 
@@ -214,23 +284,67 @@ const runware = new Runware({ apiKey: "API_KEY" });
214
284
  const enhancedPrompt = await runware.enhancePrompt({
215
285
  prompt: string;
216
286
  promptMaxLength?: number;
217
- promptLanguageId?: number;
218
287
  promptVersions?: number;
288
+ includeCost?: boolean;
219
289
  })
220
290
  console.log(enhancedPrompt)
221
291
  return interface IEnhancedPrompt {
222
292
  taskUUID: string;
223
293
  text: string;
224
- }[git
294
+ }
225
295
 
226
296
  ```
227
297
 
228
- | Parameter | Type | Use |
229
- | ---------------- | ------------------ | --------------------------------------------------------------------------------------------------------------------------- |
230
- | prompt | string | The prompt that you intend to enhance. |
231
- | promptMaxLength | number: `Optional` | Character count. Represents the maximum length of the prompt that you intend to receive. Can take values between 1 and 380. |
232
- | promptVersions | number: `Optional` | The number of prompt versions that will be received. Can take values between 1 and 5. |
233
- | promptLanguageId | number: `Optional` | The language prompt text. Can take values between 1 and 298. Default is `1` - English. Options are provided below. |
298
+ | Parameter | Type | Use |
299
+ | --------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
300
+ | prompt | string | The prompt that you intend to enhance. |
301
+ | promptMaxLength | number: `Optional` | Character count. Represents the maximum length of the prompt that you intend to receive. Can take values between 1 and 380. |
302
+ | promptVersions | number: `Optional` | The number of prompt versions that will be received. Can take values between 1 and 5. |
303
+ | includeCost | boolean: `Optional` | If set to true, the cost to perform the task will be included in the response object. |
304
+
305
+  
306
+
307
+ ### ControlNet Preprocess
308
+
309
+ ```js
310
+
311
+ const runware = new Runware({ apiKey: "API_KEY" });
312
+ const controlNetPreProcessed = await runware.controlNetPreProcess({
313
+ inputImage: string | File;
314
+ preProcessor: EPreProcessor;
315
+ height?: number;
316
+ width?: number;
317
+ outputType?: IOutputType;
318
+ outputFormat?: IOutputFormat;
319
+ highThresholdCanny?: number;
320
+ lowThresholdCanny?: number;
321
+ includeHandsAndFaceOpenPose?: boolean;
322
+ })
323
+ console.log(controlNetPreProcessed)
324
+ return interface IControlNetImage {
325
+ taskUUID: string;
326
+ inputImageUUID: string;
327
+ guideImageUUID: string;
328
+ guideImageURL?: string;
329
+ guideImageBase64Data?: string;
330
+ guideImageDataURI?: string;
331
+ cost: number;
332
+ }
333
+
334
+ ```
335
+
336
+ | Parameter | Type | Use |
337
+ | --------------------------- | --------------------------- | ------------------------------------------------------------------------------------- |
338
+ | inputImage | string or File | Specifies the input image to be preprocessed to generate a guide image. |
339
+ | width | number: `(Optional)` | Controls the image width. |
340
+ | height | number: `(Optional)` | Controls the image height. |
341
+ | outputType | IOutputType: `(Optional)` | Specifies the output type in which the image is returned. |
342
+ | outputFormat | IOutputFormat: `(Optional)` | Specifies the format of the output image. |
343
+ | preProcessor | EPreProcessor | Specifies the preprocessor to use. |
344
+ | includeCost | boolean: `Optional` | If set to true, the cost to perform the task will be included in the response object. |
345
+ | lowThresholdCanny | number `Optional` | Defines the lower threshold when using the Canny edge detection preprocessor. |
346
+ | highThresholdCanny | number `Optional` | Defines the high threshold when using the Canny edge detection preprocessor. |
347
+ | includeHandsAndFaceOpenPose | boolean `Optional` | Include the hands and face in the pose outline when using the OpenPose preprocessor. |
234
348
 
235
349
   
236
350
 
@@ -242,6 +356,12 @@ return interface IEnhancedPrompt {
242
356
 
243
357
  ## Changelog
244
358
 
359
+ ### - v1.1.0
360
+
361
+ **Added or Changed**
362
+
363
+ - Refactored the SDK to align with the new Runware API changes
364
+
245
365
  ### - v1.0.29/v1.0.30
246
366
 
247
367
  **Added or Changed**