@heyputer/puter.js 2.1.1 → 2.1.4

This diff compares the contents of the two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
package/index.d.ts CHANGED
@@ -51,11 +51,20 @@ interface AI {
     txt2img(prompt: string, testMode?: boolean): Promise<HTMLImageElement>;
     txt2img(prompt: string, options?: Txt2ImgOptions): Promise<HTMLImageElement>;
 
+    txt2vid(prompt: string, testMode?: boolean): Promise<HTMLVideoElement>;
+    txt2vid(prompt: string, options?: Txt2VidOptions): Promise<HTMLVideoElement>;
+
     txt2speech(text: string): Promise<HTMLAudioElement>;
     txt2speech(text: string, options?: Txt2SpeechOptions): Promise<HTMLAudioElement>;
     txt2speech(text: string, language?: string): Promise<HTMLAudioElement>;
     txt2speech(text: string, language?: string, voice?: string): Promise<HTMLAudioElement>;
     txt2speech(text: string, language?: string, voice?: string, engine?: string): Promise<HTMLAudioElement>;
+
+    speech2txt(source: string | File | Blob): Promise<string | Speech2TxtResult>;
+    speech2txt(source: string | File | Blob, options?: Speech2TxtOptions): Promise<string | Speech2TxtResult>;
+    speech2txt(options: Speech2TxtOptions): Promise<string | Speech2TxtResult>;
+    speech2txt(source: string | File | Blob, testMode?: boolean): Promise<string | Speech2TxtResult>;
+    speech2txt(source: Speech2TxtOptions, testMode?: boolean): Promise<string | Speech2TxtResult>;
 }
 
 type StreamingChatOptions = Omit<ChatOptions, "stream"> & { stream: true };
@@ -108,16 +117,70 @@ interface ToolCall {
 }
 
 interface Txt2ImgOptions {
-    model?: 'gpt-image-1' | 'gemini-2.5-flash-image-preview' | 'dall-e-3';
+    model?: 'gpt-image-1' | 'gpt-image-1-mini' | 'gemini-2.5-flash-image-preview' | 'dall-e-3';
     quality?: 'high' | 'medium' | 'low' | 'hd' | 'standard';
     input_image?: string;
     input_image_mime_type?: string;
 }
 
+interface Txt2VidOptions {
+    prompt?: string;
+    model?: string;
+    duration?: number;
+    seconds?: number;
+    size?: string;
+    resolution?: string;
+    width?: number;
+    height?: number;
+    fps?: number;
+    steps?: number;
+    guidance_scale?: number;
+    seed?: number;
+    output_format?: string;
+    output_quality?: number;
+    negative_prompt?: string;
+    reference_images?: string[];
+    frame_images?: Array<Record<string, unknown>>;
+    metadata?: Record<string, unknown>;
+    provider?: string;
+    service?: string;
+    driver?: string;
+    test_mode?: boolean;
+}
+
 interface Txt2SpeechOptions {
     language?: string;
     voice?: string;
-    engine?: 'standard' | 'neural' | 'generative';
+    engine?: 'standard' | 'neural' | 'long-form' | 'generative' | string;
+    provider?: 'aws-polly' | 'openai' | string;
+    model?: 'gpt-4o-mini-tts' | 'tts-1' | 'tts-1-hd' | string;
+    response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm' | string;
+    instructions?: string;
+}
+
+interface Speech2TxtOptions {
+    file?: string | File | Blob;
+    audio?: string | File | Blob;
+    model?: 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | 'gpt-4o-transcribe-diarize' | 'whisper-1' | string;
+    response_format?: 'json' | 'text' | 'diarized_json' | 'srt' | 'verbose_json' | 'vtt' | string;
+    language?: string;
+    prompt?: string;
+    temperature?: number;
+    logprobs?: boolean;
+    timestamp_granularities?: string[];
+    translate?: boolean;
+    stream?: boolean;
+    chunking_strategy?: string;
+    known_speaker_names?: string[];
+    known_speaker_references?: string[];
+    extra_body?: Record<string, unknown>;
+}
+
+interface Speech2TxtResult {
+    text?: string;
+    language?: string;
+    segments?: Array<Record<string, unknown>>;
+    [key: string]: any;
 }
 
 interface ChatResponseChunk {
@@ -146,6 +209,7 @@ interface CreateAppOptions {
     icon?: string;
     maximizeOnStart?: boolean;
     filetypeAssociations?: string[];
+    dedupeName?: boolean;
 }
 
 interface GetAppOptions {
@@ -188,6 +252,8 @@ interface Auth {
     signOut(): void;
     isSignedIn(): boolean;
     getUser(): Promise<User>;
+    getMonthlyUsage(): Promise<MonthlyUsage>;
+    getDetailedAppUsage(appId: string): Promise<DetailedAppUsage>;
 }
 
 interface User {
@@ -196,6 +262,33 @@ interface User {
     email_confirmed: boolean;
 }
 
+interface AllowanceInfo {
+    monthUsageAllowance: number;
+    remaining: number;
+}
+
+interface AppUsage {
+    count: number;
+    total: number;
+}
+
+interface APIUsage {
+    cost: number;
+    count: number;
+    units: number;
+}
+
+interface MonthlyUsage {
+    allowanceInfo: AllowanceInfo;
+    appTotals: Record<string, AppUsage>;
+    usage: Record<string, APIUsage>;
+}
+
+interface DetailedAppUsage {
+    total: number;
+    [key: string]: APIUsage;
+}
+
 // Drivers Module
 interface Drivers {
     call(interface: string, driver: string, method: string, args?: object): Promise<any>;
@@ -214,7 +307,7 @@ interface FileSystem {
     rename(path: string, newName: string): Promise<FSItem>;
     space(): Promise<SpaceInfo>;
     stat(path: string): Promise<FSItem>;
-    upload(items: FileList | File[] | Blob[], dirPath?: string, options?: object): Promise<FSItem[]>;
+    upload(items: FileList | File[] | Blob[], dirPath?: string, options?: UploadOptions): Promise<FSItem[]>;
     write(path: string, data?: string | File | Blob, options?: WriteOptions): Promise<FSItem>;
 }
 
@@ -255,6 +348,12 @@ interface WriteOptions {
     createMissingParents?: boolean;
 }
 
+interface UploadOptions {
+    overwrite?: boolean;
+    dedupeName?: boolean;
+    name?: string;
+}
+
 interface SpaceInfo {
     capacity: number;
     used: number;
@@ -511,4 +610,3 @@ export {
     WorkerExecOptions,
     WorkerInfo, Workers, WriteOptions
 };
-
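
The declaration changes above add text-to-video (txt2vid), speech-to-text (speech2txt), an UploadOptions type, and usage-reporting methods on Auth. A minimal usage sketch (not part of the package) of how the new surface could be called, assuming the methods are exposed on the usual puter.ai and puter.auth globals and that audioBlob is an audio Blob obtained elsewhere:

declare const puter: any;        // global provided by puter.js at runtime (assumption)
declare const audioBlob: Blob;   // an audio recording obtained elsewhere (assumption)

async function demoNewDeclarations(): Promise<void> {
    // txt2vid mirrors txt2img/txt2speech and resolves to an HTMLVideoElement.
    const video = await puter.ai.txt2vid('a timelapse of clouds over mountains', { seconds: 4 });
    document.body.appendChild(video);

    // speech2txt accepts a URL, File, or Blob; with response_format 'text' it resolves to a plain string.
    const transcript = await puter.ai.speech2txt(audioBlob, { response_format: 'text' });
    console.log(transcript);

    // New Auth helpers for metering data.
    const usage = await puter.auth.getMonthlyUsage();
    console.log(usage.allowanceInfo.remaining, 'of', usage.allowanceInfo.monthUsageAllowance);
}
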
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@heyputer/puter.js",
-  "version": "2.1.1",
+  "version": "2.1.4",
   "description": "Puter.js - A JavaScript library for interacting with Puter services.",
   "main": "src/index.js",
   "types": "index.d.ts",
package/src/modules/AI.js CHANGED
@@ -1,5 +1,45 @@
 import * as utils from '../lib/utils.js';
 
+const normalizeTTSProvider = (value) => {
+    if (typeof value !== 'string') {
+        return 'aws-polly';
+    }
+    const lower = value.toLowerCase();
+    if (lower === 'openai') return 'openai';
+    if (lower === 'aws' || lower === 'polly' || lower === 'aws-polly') return 'aws-polly';
+    return value;
+};
+
+const TOGETHER_IMAGE_MODEL_PREFIXES = [
+    'black-forest-labs/',
+    'stabilityai/',
+    'togethercomputer/',
+    'playgroundai/',
+    'runwayml/',
+    'lightricks/',
+    'sg161222/',
+    'wavymulder/',
+    'prompthero/',
+];
+
+const TOGETHER_IMAGE_MODEL_KEYWORDS = [
+    'flux',
+    'kling',
+    'sd3',
+    'stable-diffusion',
+    'kolors',
+];
+
+const TOGETHER_VIDEO_MODEL_PREFIXES = [
+    'minimax/',
+    'google/',
+    'bytedance/',
+    'pixverse/',
+    'kwaivgi/',
+    'vidu/',
+    'wan-ai/',
+];
+
 class AI{
     /**
      * Creates a new instance with the given authentication token, API origin, and app ID,
@@ -183,23 +223,43 @@ class AI{
             throw { message: 'Text parameter is required', code: 'text_required' };
         }
 
-        // Validate engine if provided
-        if (options.engine) {
-            const validEngines = ['standard', 'neural', 'long-form', 'generative'];
-            if (!validEngines.includes(options.engine)) {
+        const validEngines = ['standard', 'neural', 'long-form', 'generative'];
+        let provider = normalizeTTSProvider(options.provider);
+
+        if (options.engine && normalizeTTSProvider(options.engine) === 'openai' && !options.provider) {
+            provider = 'openai';
+        }
+
+        if (provider === 'openai') {
+            if (!options.model && typeof options.engine === 'string') {
+                options.model = options.engine;
+            }
+            if (!options.voice) {
+                options.voice = 'alloy';
+            }
+            if (!options.model) {
+                options.model = 'gpt-4o-mini-tts';
+            }
+            if (!options.response_format) {
+                options.response_format = 'mp3';
+            }
+            delete options.engine;
+        } else {
+            provider = 'aws-polly';
+
+            if (options.engine && !validEngines.includes(options.engine)) {
                 throw { message: 'Invalid engine. Must be one of: ' + validEngines.join(', '), code: 'invalid_engine' };
             }
-        }
 
-        // Set default values if not provided
-        if (!options.voice) {
-            options.voice = 'Joanna';
-        }
-        if (!options.engine) {
-            options.engine = 'standard';
-        }
-        if (!options.language) {
-            options.language = 'en-US';
+            if (!options.voice) {
+                options.voice = 'Joanna';
+            }
+            if (!options.engine) {
+                options.engine = 'standard';
+            }
+            if (!options.language) {
+                options.language = 'en-US';
+            }
         }
 
         // check input size
@@ -214,12 +274,28 @@ class AI{
                 break;
             }
         }
-
-        return await utils.make_driver_method(['source'], 'puter-tts', 'aws-polly', 'synthesize', {
+
+        const driverName = provider === 'openai' ? 'openai-tts' : 'aws-polly';
+
+        return await utils.make_driver_method(['source'], 'puter-tts', driverName, 'synthesize', {
             responseType: 'blob',
             test_mode: testMode ?? false,
             transform: async (result) => {
-                const url = await utils.blob_to_url(result);
+                let url;
+                if (typeof result === 'string') {
+                    url = result;
+                } else if (result instanceof Blob) {
+                    url = await utils.blob_to_url(result);
+                } else if (result instanceof ArrayBuffer) {
+                    const blob = new Blob([result]);
+                    url = await utils.blob_to_url(blob);
+                } else if (result && typeof result === 'object' && typeof result.arrayBuffer === 'function') {
+                    const arrayBuffer = await result.arrayBuffer();
+                    const blob = new Blob([arrayBuffer], { type: result.type || undefined });
+                    url = await utils.blob_to_url(blob);
+                } else {
+                    throw { message: 'Unexpected audio response format', code: 'invalid_audio_response' };
+                }
                 const audio = new Audio(url);
                 audio.toString = () => url;
                 audio.valueOf = () => url;
@@ -228,16 +304,105 @@ class AI{
         }).call(this, options);
     }
 
+    speech2txt = async (...args) => {
+        const MAX_INPUT_SIZE = 25 * 1024 * 1024;
+        if ( !args || !args.length ) {
+            throw ({ message: 'Arguments are required', code: 'arguments_required' });
+        }
+
+        const normalizeSource = async (value) => {
+            if ( value instanceof Blob ) {
+                return await utils.blobToDataUri(value);
+            }
+            return value;
+        };
+
+        let options = {};
+        let testMode = false;
+
+        const primary = args[0];
+        if ( primary && typeof primary === 'object' && !Array.isArray(primary) && !(primary instanceof Blob) ) {
+            options = { ...primary };
+        } else {
+            options.file = await normalizeSource(primary);
+        }
+
+        if ( args[1] && typeof args[1] === 'object' && !Array.isArray(args[1]) && !(args[1] instanceof Blob) ) {
+            options = { ...options, ...args[1] };
+        } else if ( typeof args[1] === 'boolean' ) {
+            testMode = args[1];
+        }
+
+        if ( typeof args[2] === 'boolean' ) {
+            testMode = args[2];
+        }
+
+        if ( options.audio ) {
+            options.file = await normalizeSource(options.audio);
+            delete options.audio;
+        }
+
+        if ( options.file instanceof Blob ) {
+            options.file = await normalizeSource(options.file);
+        }
+
+        if ( !options.file ) {
+            throw { message: 'Audio input is required', code: 'audio_required' };
+        }
+
+        if ( typeof options.file === 'string' && options.file.startsWith('data:') ) {
+            const base64 = options.file.split(',')[1] || '';
+            const padding = base64.endsWith('==') ? 2 : (base64.endsWith('=') ? 1 : 0);
+            const byteLength = Math.floor((base64.length * 3) / 4) - padding;
+            if ( byteLength > MAX_INPUT_SIZE ) {
+                throw { message: 'Input size cannot be larger than 25 MB', code: 'input_too_large' };
+            }
+        }
+
+        const driverMethod = options.translate ? 'translate' : 'transcribe';
+        const driverArgs = { ...options };
+        delete driverArgs.translate;
+
+        const responseFormat = driverArgs.response_format;
+
+        return await utils.make_driver_method([], 'puter-speech2txt', 'openai-speech2txt', driverMethod, {
+            test_mode: testMode,
+            transform: async (result) => {
+                if ( responseFormat === 'text' && result && typeof result === 'object' && typeof result.text === 'string' ) {
+                    return result.text;
+                }
+                return result;
+            },
+        }).call(this, driverArgs);
+    }
+
     // Add new methods for TTS engine management
     txt2speech = Object.assign(this.txt2speech, {
         /**
         * List available TTS engines with pricing information
         * @returns {Promise<Array>} Array of available engines
         */
-        listEngines: async () => {
-            return await utils.make_driver_method(['source'], 'puter-tts', 'aws-polly', 'list_engines', {
+        listEngines: async (options = {}) => {
+            let provider = 'aws-polly';
+            let params = {};
+
+            if (typeof options === 'string') {
+                provider = normalizeTTSProvider(options);
+            } else if (options && typeof options === 'object') {
+                provider = normalizeTTSProvider(options.provider) || provider;
+                params = { ...options };
+                delete params.provider;
+            }
+
+            if (provider === 'openai') {
+                params.provider = 'openai';
+            }
+
+            const driverName = provider === 'openai' ? 'openai-tts' : 'aws-polly';
+
+            return await utils.make_driver_method(['source'], 'puter-tts', driverName, 'list_engines', {
                 responseType: 'text',
-            }).call(this, {});
+            }).call(this, params);
        },
 
        /**
@@ -245,13 +410,26 @@ class AI{
        * @param {string} [engine] - Optional engine filter
        * @returns {Promise<Array>} Array of available voices
        */
-        listVoices: async (engine) => {
-            const params = {};
-            if (engine) {
-                params.engine = engine;
+        listVoices: async (options) => {
+            let provider = 'aws-polly';
+            let params = {};
+
+            if (typeof options === 'string') {
+                params.engine = options;
+            } else if (options && typeof options === 'object') {
+                provider = normalizeTTSProvider(options.provider) || provider;
+                params = { ...options };
+                delete params.provider;
            }
 
-            return utils.make_driver_method(['source'], 'puter-tts', 'aws-polly', 'list_voices', {
+            if (provider === 'openai') {
+                params.provider = 'openai';
+                delete params.engine;
+            }
+
+            const driverName = provider === 'openai' ? 'openai-tts' : 'aws-polly';
+
+            return utils.make_driver_method(['source'], 'puter-tts', driverName, 'list_voices', {
                responseType: 'text',
            }).call(this, params);
        }
@@ -660,21 +838,155 @@ class AI{
         if (options.model === "nano-banana")
             options.model = "gemini-2.5-flash-image-preview";
 
-        if (options.model === "gemini-2.5-flash-image-preview")
+        const driverHint = typeof options.driver === 'string' ? options.driver : undefined;
+        const providerRaw = typeof options.provider === 'string'
+            ? options.provider
+            : (typeof options.service === 'string' ? options.service : undefined);
+        const providerHint = typeof providerRaw === 'string' ? providerRaw.toLowerCase() : undefined;
+        const modelLower = typeof options.model === 'string' ? options.model.toLowerCase() : '';
+
+        const looksLikeTogetherModel =
+            typeof options.model === 'string' &&
+            (TOGETHER_IMAGE_MODEL_PREFIXES.some(prefix => modelLower.startsWith(prefix)) ||
+            TOGETHER_IMAGE_MODEL_KEYWORDS.some(keyword => modelLower.includes(keyword)));
+
+        if (driverHint) {
+            AIService = driverHint;
+        } else if (providerHint === 'gemini') {
+            AIService = "gemini-image-generation";
+        } else if (providerHint === 'together' || providerHint === 'together-ai') {
+            AIService = "together-image-generation";
+        } else if (options.model === "gemini-2.5-flash-image-preview") {
             AIService = "gemini-image-generation";
+        } else if (looksLikeTogetherModel) {
+            AIService = "together-image-generation";
+        }
         // Call the original chat.complete method
         return await utils.make_driver_method(['prompt'], 'puter-image-generation', AIService, 'generate', {
             responseType: 'blob',
             test_mode: testMode ?? false,
-            transform: async blob => {
+            transform: async result => {
+                let url;
+                if ( typeof result === 'string' ) {
+                    url = result;
+                } else if ( result instanceof Blob ) {
+                    url = await utils.blob_to_url(result);
+                } else if ( result instanceof ArrayBuffer ) {
+                    const blob = new Blob([result]);
+                    url = await utils.blob_to_url(blob);
+                } else if ( result && typeof result === 'object' && typeof result.arrayBuffer === 'function' ) {
+                    const arrayBuffer = await result.arrayBuffer();
+                    const blob = new Blob([arrayBuffer], { type: result.type || undefined });
+                    url = await utils.blob_to_url(blob);
+                } else {
+                    throw { message: 'Unexpected image response format', code: 'invalid_image_response' };
+                }
                 let img = new Image();
-                img.src = await utils.blob_to_url(blob);
+                img.src = url;
                 img.toString = () => img.src;
                 img.valueOf = () => img.src;
                 return img;
             }
         }).call(this, options);
     }
+
+    txt2vid = async (...args) => {
+        let options = {};
+        let testMode = false;
+
+        if(!args){
+            throw({message: 'Arguments are required', code: 'arguments_required'});
+        }
+
+        if (typeof args[0] === 'string') {
+            options = { prompt: args[0] };
+        }
+
+        if (typeof args[1] === 'boolean' && args[1] === true) {
+            testMode = true;
+        }
+
+        if (typeof args[0] === 'string' && typeof args[1] === "object") {
+            options = args[1];
+            options.prompt = args[0];
+        }
+
+        if (typeof args[0] === 'object') {
+            options = args[0];
+        }
+
+        if (!options.prompt) {
+            throw({message: 'Prompt parameter is required', code: 'prompt_required'});
+        }
+
+        if (!options.model) {
+            options.model = 'sora-2';
+        }
+
+        if (options.duration !== undefined && options.seconds === undefined) {
+            options.seconds = options.duration;
+        }
+
+        let videoService = 'openai-video-generation';
+        const driverHint = typeof options.driver === 'string' ? options.driver : undefined;
+        const driverHintLower = driverHint ? driverHint.toLowerCase() : undefined;
+        const providerRaw = typeof options.provider === 'string'
+            ? options.provider
+            : (typeof options.service === 'string' ? options.service : undefined);
+        const providerHint = typeof providerRaw === 'string' ? providerRaw.toLowerCase() : undefined;
+        const modelLower = typeof options.model === 'string' ? options.model.toLowerCase() : '';
+
+        const looksLikeTogetherVideoModel = typeof options.model === 'string' &&
+            TOGETHER_VIDEO_MODEL_PREFIXES.some(prefix => modelLower.startsWith(prefix));
+
+        if (driverHintLower === 'together' || driverHintLower === 'together-ai') {
+            videoService = 'together-video-generation';
+        } else if (driverHintLower === 'together-video-generation') {
+            videoService = 'together-video-generation';
+        } else if (driverHintLower === 'openai') {
+            videoService = 'openai-video-generation';
+        } else if (driverHint) {
+            videoService = driverHint;
+        } else if (providerHint === 'together' || providerHint === 'together-ai') {
+            videoService = 'together-video-generation';
+        } else if (looksLikeTogetherVideoModel) {
+            videoService = 'together-video-generation';
+        }
+
+        return await utils.make_driver_method(['prompt'], 'puter-video-generation', videoService, 'generate', {
+            responseType: 'blob',
+            test_mode: testMode ?? false,
+            transform: async result => {
+                let sourceUrl = null;
+                let mimeType = null;
+                if (result instanceof Blob) {
+                    sourceUrl = await utils.blob_to_url(result);
+                    mimeType = result.type || 'video/mp4';
+                } else if (typeof result === 'string') {
+                    sourceUrl = result;
+                } else if (result && typeof result === 'object') {
+                    sourceUrl = result.asset_url || result.url || result.href || null;
+                    mimeType = result.mime_type || result.content_type || null;
+                }
+
+                if (!sourceUrl) {
+                    return result;
+                }
+
+                const video = document.createElement('video');
+                video.src = sourceUrl;
+                video.controls = true;
+                video.preload = 'metadata';
+                if (mimeType) {
+                    video.setAttribute('data-mime-type', mimeType);
+                }
+                video.setAttribute('data-source', sourceUrl);
+                video.toString = () => video.src;
+                video.valueOf = () => video.src;
+                return video;
+            }
+        }).call(this, options);
+    }
 }
 
 export default AI;
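
Taken together, the AI.js changes route txt2speech to either the aws-polly or the openai-tts driver, and pick an image or video generation service from driver/provider hints or Together-style model identifiers. A hedged sketch of exercising that routing (again assuming the standard puter.ai global; the Together model ids are illustrative only):

declare const puter: any; // global provided by puter.js at runtime (assumption)

async function demoRouting(): Promise<void> {
    // provider: 'openai' selects the 'openai-tts' driver and fills in OpenAI defaults
    // (voice 'alloy', model 'gpt-4o-mini-tts', response_format 'mp3').
    const speech = await puter.ai.txt2speech('Hello from Puter', {
        provider: 'openai',
        instructions: 'Speak calmly.',
    });
    await speech.play();

    // A model id matching the Together prefixes/keywords (here an illustrative FLUX id)
    // makes txt2img choose the 'together-image-generation' service.
    const img = await puter.ai.txt2img('a watercolor fox', { model: 'black-forest-labs/FLUX.1-schnell' });
    document.body.appendChild(img);

    // txt2vid defaults to model 'sora-2' on 'openai-video-generation'; a provider hint
    // such as 'together' switches it to 'together-video-generation'.
    const video = await puter.ai.txt2vid('ocean waves at dusk', { provider: 'together', model: 'minimax/video-01' });
    document.body.appendChild(video);
}
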
package/src/modules/Auth.js CHANGED
@@ -251,6 +251,44 @@ class Auth{
             throw error;
         }
     }
+
+    async getGlobalUsage() {
+        try {
+            const resp = await fetch(`${this.APIOrigin}/metering/globalUsage`, {
+                headers: {
+                    Authorization: `Bearer ${this.authToken}`,
+                },
+            });
+
+            const result = await resp.json();
+
+            // Log the response
+            if ( globalThis.puter?.apiCallLogger?.isEnabled() ) {
+                globalThis.puter.apiCallLogger.logRequest({
+                    service: 'auth',
+                    operation: 'global_usage',
+                    params: {},
+                    result: result,
+                });
+            }
+
+            return result;
+        } catch( error ) {
+            // Log the error
+            if ( globalThis.puter?.apiCallLogger?.isEnabled() ) {
+                globalThis.puter.apiCallLogger.logRequest({
+                    service: 'auth',
+                    operation: 'global_usage',
+                    params: {},
+                    error: {
+                        message: error.message || error.toString(),
+                        stack: error.stack,
+                    },
+                });
+            }
+            throw error;
+        }
+    }
 }
 
 export default Auth;
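
getGlobalUsage is a thin authenticated wrapper around GET /metering/globalUsage that logs through apiCallLogger when enabled and re-throws on failure. A minimal calling sketch, assuming the Auth instance is reachable as puter.auth like the other Auth methods:

declare const puter: any; // global provided by puter.js at runtime (assumption)

async function showGlobalUsage(): Promise<void> {
    try {
        // Resolves with whatever JSON the /metering/globalUsage endpoint returns.
        const usage = await puter.auth.getGlobalUsage();
        console.log(usage);
    } catch (err) {
        // getGlobalUsage logs the failure (when the logger is enabled) and re-throws.
        console.error('globalUsage request failed', err);
    }
}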