@gptmarket/temporal-types 0.0.21 → 0.0.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@gptmarket/temporal-types",
-  "version": "0.0.21",
+  "version": "0.0.22",
   "description": "TypeScript types and workflow registry for GPTMarket Temporal workflows",
   "main": "index.ts",
   "types": "index.ts",
package/registry.ts CHANGED
@@ -398,7 +398,7 @@ export const slideshows_pinterest: WorkflowDefinition<
 
 export const speaking_doctor: WorkflowDefinition<
   Types.DoctorInput,
-  Types.SpeakingAvatarOutput
+  Types.SpeakingDoctorOutput
 > = {
   id: "speaking_doctor",
   name: "Speaking Doctor",
@@ -406,7 +406,7 @@ export const speaking_doctor: WorkflowDefinition<
     "Speaking AI Doctor - Medical professional avatar for video content.",
   workflowClass: "SpeakingDoctorWorkflow",
   inputType: "DoctorInput",
-  outputType: "SpeakingAvatarOutput",
+  outputType: "SpeakingDoctorOutput",
   fields: [
     {
       name: "gender",
@@ -516,14 +516,14 @@ export const speaking_doctor: WorkflowDefinition<
 
 export const speaking_monk: WorkflowDefinition<
   Types.MonkInput,
-  Types.SpeakingAvatarOutput
+  Types.SpeakingMonkOutput
 > = {
   id: "speaking_monk",
   name: "Speaking Monk",
   description: "Speaking AI Monk - Buddhist monk avatar for spiritual content.",
   workflowClass: "SpeakingMonkWorkflow",
   inputType: "MonkInput",
-  outputType: "SpeakingAvatarOutput",
+  outputType: "SpeakingMonkOutput",
   fields: [
     {
       name: "gender",
@@ -619,7 +619,7 @@ export const speaking_monk: WorkflowDefinition<
 
 export const speaking_priest: WorkflowDefinition<
   Types.PriestInput,
-  Types.SpeakingAvatarOutput
+  Types.SpeakingPriestOutput
 > = {
   id: "speaking_priest",
   name: "Speaking Priest",
@@ -627,7 +627,7 @@ export const speaking_priest: WorkflowDefinition<
     "Speaking AI Priest - Christian clergy avatar for religious content.",
   workflowClass: "SpeakingPriestWorkflow",
   inputType: "PriestInput",
-  outputType: "SpeakingAvatarOutput",
+  outputType: "SpeakingPriestOutput",
   fields: [
     {
       name: "gender",
@@ -797,7 +797,7 @@ export type WorkflowInputMap = {
 export type WorkflowOutputMap = {
   ruby: Types.RubyOutput;
   slideshows_pinterest: Types.SlideshowsPinterestOutput;
-  speaking_doctor: Types.SpeakingAvatarOutput;
-  speaking_monk: Types.SpeakingAvatarOutput;
-  speaking_priest: Types.SpeakingAvatarOutput;
+  speaking_doctor: Types.SpeakingDoctorOutput;
+  speaking_monk: Types.SpeakingMonkOutput;
+  speaking_priest: Types.SpeakingPriestOutput;
 };
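
The WorkflowOutputMap change above is what makes the registry lookup type-safe per workflow: indexing by a workflow id now yields its dedicated output type rather than the shared SpeakingAvatarOutput. A minimal consumer sketch follows; it assumes the package's index.ts re-exports WorkflowOutputMap (the export surface of index.ts is not shown in this diff, so that import is an assumption), and the helper function is hypothetical.

import type { WorkflowOutputMap } from "@gptmarket/temporal-types";

// Indexed lookups now resolve to the per-workflow interfaces introduced in 0.0.22.
type DoctorResult = WorkflowOutputMap["speaking_doctor"]; // Types.SpeakingDoctorOutput
type MonkResult = WorkflowOutputMap["speaking_monk"];     // Types.SpeakingMonkOutput

// All three speaking outputs share the same field shape, so video_url is
// available on the union of their types.
type SpeakingId = "speaking_doctor" | "speaking_monk" | "speaking_priest";

function videoUrlOf(output: WorkflowOutputMap[SpeakingId]): string {
  return output.video_url;
}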
@@ -1,8 +1,8 @@
 {
-  "description": "Output from speaking avatar generation.",
+  "description": "Output from Speaking AI Doctor generation.",
   "properties": {
     "face_image_url": {
-      "description": "URL of generated face image",
+      "description": "URL of generated doctor face image",
       "title": "Face Image Url",
       "type": "string"
     },
@@ -27,7 +27,7 @@
       "type": "string"
     },
     "enhanced_script": {
-      "description": "Script enhanced with ElevenLabs emotional tags",
+      "description": "Script used for TTS",
       "title": "Enhanced Script",
       "type": "string"
     },
@@ -52,6 +52,6 @@
     "image_model",
     "video_model"
   ],
-  "title": "SpeakingAvatarOutput",
+  "title": "SpeakingDoctorOutput",
   "type": "object"
 }
@@ -0,0 +1,57 @@
+{
+  "description": "Output from Speaking AI Monk generation.",
+  "properties": {
+    "face_image_url": {
+      "description": "URL of generated monk face image",
+      "title": "Face Image Url",
+      "type": "string"
+    },
+    "video_url": {
+      "description": "URL of final lip-synced video",
+      "title": "Video Url",
+      "type": "string"
+    },
+    "audio_url": {
+      "description": "URL of generated voice audio",
+      "title": "Audio Url",
+      "type": "string"
+    },
+    "enhanced_image_prompt": {
+      "description": "The enhanced prompt used for face generation",
+      "title": "Enhanced Image Prompt",
+      "type": "string"
+    },
+    "enhanced_video_prompt": {
+      "description": "The enhanced prompt used for video generation",
+      "title": "Enhanced Video Prompt",
+      "type": "string"
+    },
+    "enhanced_script": {
+      "description": "Script used for TTS",
+      "title": "Enhanced Script",
+      "type": "string"
+    },
+    "image_model": {
+      "description": "Image model used",
+      "title": "Image Model",
+      "type": "string"
+    },
+    "video_model": {
+      "description": "Video model used",
+      "title": "Video Model",
+      "type": "string"
+    }
+  },
+  "required": [
+    "face_image_url",
+    "video_url",
+    "audio_url",
+    "enhanced_image_prompt",
+    "enhanced_video_prompt",
+    "enhanced_script",
+    "image_model",
+    "video_model"
+  ],
+  "title": "SpeakingMonkOutput",
+  "type": "object"
+}
@@ -0,0 +1,57 @@
+{
+  "description": "Output from Speaking AI Priest generation.",
+  "properties": {
+    "face_image_url": {
+      "description": "URL of generated priest face image",
+      "title": "Face Image Url",
+      "type": "string"
+    },
+    "video_url": {
+      "description": "URL of final lip-synced video",
+      "title": "Video Url",
+      "type": "string"
+    },
+    "audio_url": {
+      "description": "URL of generated voice audio",
+      "title": "Audio Url",
+      "type": "string"
+    },
+    "enhanced_image_prompt": {
+      "description": "The enhanced prompt used for face generation",
+      "title": "Enhanced Image Prompt",
+      "type": "string"
+    },
+    "enhanced_video_prompt": {
+      "description": "The enhanced prompt used for video generation",
+      "title": "Enhanced Video Prompt",
+      "type": "string"
+    },
+    "enhanced_script": {
+      "description": "Script used for TTS",
+      "title": "Enhanced Script",
+      "type": "string"
+    },
+    "image_model": {
+      "description": "Image model used",
+      "title": "Image Model",
+      "type": "string"
+    },
+    "video_model": {
+      "description": "Video model used",
+      "title": "Video Model",
+      "type": "string"
+    }
+  },
+  "required": [
+    "face_image_url",
+    "video_url",
+    "audio_url",
+    "enhanced_image_prompt",
+    "enhanced_video_prompt",
+    "enhanced_script",
+    "image_model",
+    "video_model"
+  ],
+  "title": "SpeakingPriestOutput",
+  "type": "object"
+}
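
The two new schema documents above mirror the SpeakingMonkOutput and SpeakingPriestOutput interfaces field for field, with every property listed under "required". The diff does not show how the package consumes these JSON Schemas, so the following runtime-validation sketch with Ajv is an assumption rather than the package's own usage; the schema file name and import path are hypothetical, and importing JSON requires resolveJsonModule in tsconfig.

import Ajv from "ajv";
import speakingMonkSchema from "./SpeakingMonkOutput.json"; // hypothetical path to the new schema file

const ajv = new Ajv();
const validateMonkOutput = ajv.compile(speakingMonkSchema);

// Example: check a raw workflow result before treating it as a SpeakingMonkOutput.
const result: unknown = {};
if (!validateMonkOutput(result)) {
  // Every field is required by the schema, so a missing video_url, audio_url, etc. is reported here.
  console.error(validateMonkOutput.errors);
}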
package/types.ts CHANGED
@@ -360,8 +360,46 @@ export interface SlideshowsPinterestOutput {
   total_scraped?: number;
 }
 
-export interface SpeakingAvatarOutput {
-  /** URL of generated face image */
+export interface SpeakingDoctorOutput {
+  /** URL of generated doctor face image */
+  face_image_url: string;
+  /** URL of final lip-synced video */
+  video_url: string;
+  /** URL of generated voice audio */
+  audio_url: string;
+  /** The enhanced prompt used for face generation */
+  enhanced_image_prompt: string;
+  /** The enhanced prompt used for video generation */
+  enhanced_video_prompt: string;
+  /** Script used for TTS */
+  enhanced_script: string;
+  /** Image model used */
+  image_model: string;
+  /** Video model used */
+  video_model: string;
+}
+
+export interface SpeakingMonkOutput {
+  /** URL of generated monk face image */
+  face_image_url: string;
+  /** URL of final lip-synced video */
+  video_url: string;
+  /** URL of generated voice audio */
+  audio_url: string;
+  /** The enhanced prompt used for face generation */
+  enhanced_image_prompt: string;
+  /** The enhanced prompt used for video generation */
+  enhanced_video_prompt: string;
+  /** Script used for TTS */
+  enhanced_script: string;
+  /** Image model used */
+  image_model: string;
+  /** Video model used */
+  video_model: string;
+}
+
+export interface SpeakingPriestOutput {
+  /** URL of generated priest face image */
   face_image_url: string;
   /** URL of final lip-synced video */
   video_url: string;
@@ -371,7 +409,7 @@ export interface SpeakingAvatarOutput {
   enhanced_image_prompt: string;
   /** The enhanced prompt used for video generation */
   enhanced_video_prompt: string;
-  /** Script enhanced with ElevenLabs emotional tags */
+  /** Script used for TTS */
   enhanced_script: string;
   /** Image model used */
   image_model: string;
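
Taken together, the types.ts changes replace the single SpeakingAvatarOutput interface with three structurally identical but separately named interfaces. For reference, a value satisfying one of them would look like the sketch below; the import path assumes index.ts re-exports the types from types.ts (not shown in this diff), and all field values are illustrative placeholders.

import type { SpeakingMonkOutput } from "@gptmarket/temporal-types";

const sample: SpeakingMonkOutput = {
  face_image_url: "https://example.com/monk-face.png",
  video_url: "https://example.com/monk-video.mp4",
  audio_url: "https://example.com/monk-voice.mp3",
  enhanced_image_prompt: "serene Buddhist monk portrait, warm natural light",
  enhanced_video_prompt: "monk speaking calmly toward the camera",
  enhanced_script: "Welcome to today's reflection.",
  image_model: "example-image-model",
  video_model: "example-video-model",
};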