mulmocast 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +74 -0
- package/assets/audio/silent300.mp3 +0 -0
- package/assets/audio/silent800.mp3 +0 -0
- package/assets/music/StarsBeyondEx.mp3 +0 -0
- package/assets/templates/business.json +89 -0
- package/assets/templates/children_book.json +135 -0
- package/assets/templates/podcast_standard.json +5 -0
- package/assets/templates/sensei_and_taro.json +123 -0
- package/lib/actions/audio.d.ts +3 -0
- package/lib/actions/audio.js +186 -0
- package/lib/actions/images.d.ts +2 -0
- package/lib/actions/images.js +211 -0
- package/lib/actions/movie.d.ts +2 -0
- package/lib/actions/movie.js +81 -0
- package/lib/actions/translate.d.ts +3 -0
- package/lib/actions/translate.js +236 -0
- package/lib/agents/add_bgm_agent.d.ts +3 -0
- package/lib/agents/add_bgm_agent.js +61 -0
- package/lib/agents/combine_audio_files_agent.d.ts +3 -0
- package/lib/agents/combine_audio_files_agent.js +57 -0
- package/lib/agents/image_google_agent.d.ts +15 -0
- package/lib/agents/image_google_agent.js +88 -0
- package/lib/agents/image_openai_agent.d.ts +15 -0
- package/lib/agents/image_openai_agent.js +59 -0
- package/lib/agents/index.d.ts +13 -0
- package/lib/agents/index.js +31 -0
- package/lib/agents/mulmo_prompts_agent.d.ts +7 -0
- package/lib/agents/mulmo_prompts_agent.js +41 -0
- package/lib/agents/prompts_data.d.ts +15 -0
- package/lib/agents/prompts_data.js +19 -0
- package/lib/agents/tts_nijivoice_agent.d.ts +4 -0
- package/lib/agents/tts_nijivoice_agent.js +68 -0
- package/lib/agents/tts_openai_agent.d.ts +4 -0
- package/lib/agents/tts_openai_agent.js +50 -0
- package/lib/agents/validate_mulmo_script_agent.d.ts +17 -0
- package/lib/agents/validate_mulmo_script_agent.js +38 -0
- package/lib/cli/args.d.ts +10 -0
- package/lib/cli/args.js +38 -0
- package/lib/cli/cli.d.ts +2 -0
- package/lib/cli/cli.js +78 -0
- package/lib/cli/common.d.ts +8 -0
- package/lib/cli/common.js +26 -0
- package/lib/cli/tool-args.d.ts +12 -0
- package/lib/cli/tool-args.js +53 -0
- package/lib/cli/tool-cli.d.ts +2 -0
- package/lib/cli/tool-cli.js +78 -0
- package/lib/methods/index.d.ts +3 -0
- package/lib/methods/index.js +19 -0
- package/lib/methods/mulmo_script.d.ts +11 -0
- package/lib/methods/mulmo_script.js +45 -0
- package/lib/methods/mulmo_script_template.d.ts +4 -0
- package/lib/methods/mulmo_script_template.js +22 -0
- package/lib/methods/mulmo_studio_context.d.ts +4 -0
- package/lib/methods/mulmo_studio_context.js +12 -0
- package/lib/tools/dump_prompt.d.ts +3 -0
- package/lib/tools/dump_prompt.js +9 -0
- package/lib/tools/prompt.d.ts +1 -0
- package/lib/tools/prompt.js +20 -0
- package/lib/tools/seed.d.ts +3 -0
- package/lib/tools/seed.js +201 -0
- package/lib/tools/seed_from_url.d.ts +3 -0
- package/lib/tools/seed_from_url.js +178 -0
- package/lib/types/index.d.ts +1 -0
- package/lib/types/index.js +17 -0
- package/lib/types/schema.d.ts +5817 -0
- package/lib/types/schema.js +207 -0
- package/lib/types/type.d.ts +33 -0
- package/lib/types/type.js +2 -0
- package/lib/utils/const.d.ts +3 -0
- package/lib/utils/const.js +6 -0
- package/lib/utils/file.d.ts +28 -0
- package/lib/utils/file.js +112 -0
- package/lib/utils/filters.d.ts +3 -0
- package/lib/utils/filters.js +32 -0
- package/lib/utils/markdown.d.ts +1 -0
- package/lib/utils/markdown.js +27 -0
- package/lib/utils/preprocess.d.ts +247 -0
- package/lib/utils/preprocess.js +53 -0
- package/lib/utils/string.d.ts +9 -0
- package/lib/utils/string.js +60 -0
- package/lib/utils/text_hash.d.ts +1 -0
- package/lib/utils/text_hash.js +41 -0
- package/package.json +77 -0
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import { AgentFunction, AgentFunctionInfo } from "graphai";
|
|
2
|
+
/** Configuration for the Google Imagen agent: Vertex AI project id and OAuth bearer token. */
export type ImageGoogleConfig = {
    projectId?: string;
    token?: string;
};
/**
 * GraphAI agent that generates an image from a text prompt via Google's
 * Imagen "predict" endpoint on Vertex AI.
 * Params: `model` (Imagen model name) and `aspectRatio`;
 * named input: `prompt` (the text to render);
 * result: `buffer` with the decoded image bytes.
 */
export declare const imageGoogleAgent: AgentFunction<{
    model: string;
    aspectRatio: string;
}, {
    buffer: Buffer;
}, {
    prompt: string;
}, ImageGoogleConfig>;
/** GraphAI registration metadata (name, samples, license, etc.) for this agent. */
declare const imageGoogleAgentInfo: AgentFunctionInfo;
export default imageGoogleAgentInfo;
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.imageGoogleAgent = void 0;
|
|
4
|
+
async function generateImage(projectId, model, token, prompt, aspectRatio) {
|
|
5
|
+
const GOOGLE_IMAGEN_ENDPOINT = `https://us-central1-aiplatform.googleapis.com/v1/projects/${projectId}/locations/us-central1/publishers/google/models/${model}:predict`;
|
|
6
|
+
try {
|
|
7
|
+
// Prepare the payload for the API request
|
|
8
|
+
const payload = {
|
|
9
|
+
instances: [
|
|
10
|
+
{
|
|
11
|
+
prompt: prompt,
|
|
12
|
+
},
|
|
13
|
+
],
|
|
14
|
+
parameters: {
|
|
15
|
+
sampleCount: 1,
|
|
16
|
+
aspectRatio: aspectRatio,
|
|
17
|
+
safetySetting: "block_only_high",
|
|
18
|
+
},
|
|
19
|
+
};
|
|
20
|
+
// Make the API call using fetch
|
|
21
|
+
const response = await fetch(GOOGLE_IMAGEN_ENDPOINT, {
|
|
22
|
+
method: "POST",
|
|
23
|
+
headers: {
|
|
24
|
+
Authorization: `Bearer ${token}`,
|
|
25
|
+
"Content-Type": "application/json",
|
|
26
|
+
},
|
|
27
|
+
body: JSON.stringify(payload),
|
|
28
|
+
});
|
|
29
|
+
if (!response.ok) {
|
|
30
|
+
throw new Error(`Error: ${response.status} - ${response.statusText}`);
|
|
31
|
+
}
|
|
32
|
+
const responseData = await response.json();
|
|
33
|
+
// Parse and return the generated image URL or data
|
|
34
|
+
const predictions = responseData.predictions;
|
|
35
|
+
if (predictions && predictions.length > 0) {
|
|
36
|
+
const base64Image = predictions[0].bytesBase64Encoded;
|
|
37
|
+
if (base64Image) {
|
|
38
|
+
return Buffer.from(base64Image, "base64"); // Decode the base64 image to a buffer
|
|
39
|
+
}
|
|
40
|
+
else {
|
|
41
|
+
throw new Error("No base64-encoded image data returned from the API.");
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
else {
|
|
45
|
+
// console.log(response);
|
|
46
|
+
console.log("No predictions returned from the API.", responseData, prompt);
|
|
47
|
+
return undefined;
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
catch (error) {
|
|
51
|
+
console.error("Error generating image:", error);
|
|
52
|
+
throw error;
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
const imageGoogleAgent = async ({ namedInputs, params, config, }) => {
|
|
56
|
+
const { prompt } = namedInputs;
|
|
57
|
+
const aspectRatio = params.aspectRatio ?? "16:9";
|
|
58
|
+
const model = params.model ?? "imagen-3.0-fast-generate-001";
|
|
59
|
+
//const projectId = process.env.GOOGLE_PROJECT_ID; // Your Google Cloud Project ID
|
|
60
|
+
const projectId = config?.projectId;
|
|
61
|
+
const token = config?.token;
|
|
62
|
+
try {
|
|
63
|
+
const buffer = await generateImage(projectId, model, token, prompt, aspectRatio);
|
|
64
|
+
if (buffer) {
|
|
65
|
+
return { buffer };
|
|
66
|
+
}
|
|
67
|
+
throw new Error("ERROR: geneateImage returned undefined");
|
|
68
|
+
}
|
|
69
|
+
catch (error) {
|
|
70
|
+
console.error("Failed to generate image:", error);
|
|
71
|
+
throw error;
|
|
72
|
+
}
|
|
73
|
+
};
|
|
74
|
+
exports.imageGoogleAgent = imageGoogleAgent;
|
|
75
|
+
// Registration metadata consumed by GraphAI's agent registry.
const imageGoogleAgentInfo = {
    name: "imageGoogleAgent",
    agent: exports.imageGoogleAgent,
    mock: exports.imageGoogleAgent, // no dedicated mock; the real agent doubles as one
    samples: [],
    description: "Google Image agent",
    category: ["image"],
    author: "Receptron Team",
    repository: "https://github.com/receptron/mulmocast-cli/",
    // source: "https://github.com/receptron/mulmocast-cli/blob/main/src/agents/image_google_agent.ts",
    license: "MIT",
    environmentVariables: [],
};
exports.default = imageGoogleAgentInfo;
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import { AgentFunction, AgentFunctionInfo } from "graphai";
|
|
2
|
+
/** Image sizes accepted by the OpenAI image API (dall-e-3 / gpt-image-1 variants). */
type OpenAIImageSize = "1792x1024" | "auto" | "1024x1024" | "1536x1024" | "1024x1536" | "256x256";
/** Moderation strictness passed to gpt-image-1. */
type OpenAIModeration = "low" | "auto";
/**
 * GraphAI agent that generates an image from a text prompt with the OpenAI
 * image API.
 * Params: `apiKey`, `model`, optional `size` and `moderation`;
 * named input: `prompt`; result: `buffer` with the decoded image bytes.
 */
export declare const imageOpenaiAgent: AgentFunction<{
    apiKey: string;
    model: string;
    size: OpenAIImageSize | null | undefined;
    moderation: OpenAIModeration | null | undefined;
}, {
    buffer: Buffer;
}, {
    prompt: string;
}>;
/** GraphAI registration metadata for this agent. */
declare const imageOpenaiAgentInfo: AgentFunctionInfo;
export default imageOpenaiAgentInfo;
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.imageOpenaiAgent = void 0;
|
|
7
|
+
const openai_1 = __importDefault(require("openai"));
|
|
8
|
+
// https://platform.openai.com/docs/guides/image-generation
|
|
9
|
+
const imageOpenaiAgent = async ({ namedInputs, params }) => {
|
|
10
|
+
const { prompt } = namedInputs;
|
|
11
|
+
const { apiKey, model, size, moderation } = params;
|
|
12
|
+
const openai = new openai_1.default({ apiKey });
|
|
13
|
+
const imageOptions = {
|
|
14
|
+
model: model ?? "dall-e-3",
|
|
15
|
+
prompt,
|
|
16
|
+
n: 1,
|
|
17
|
+
size: size || "1792x1024",
|
|
18
|
+
};
|
|
19
|
+
if (model === "gpt-image-1") {
|
|
20
|
+
imageOptions.moderation = moderation || "auto";
|
|
21
|
+
}
|
|
22
|
+
const response = await openai.images.generate(imageOptions);
|
|
23
|
+
if (!response.data) {
|
|
24
|
+
throw new Error(`response.data is undefined: ${response}`);
|
|
25
|
+
}
|
|
26
|
+
const url = response.data[0].url;
|
|
27
|
+
if (!url) {
|
|
28
|
+
// For gpt-image-1
|
|
29
|
+
const image_base64 = response.data[0].b64_json;
|
|
30
|
+
if (!image_base64) {
|
|
31
|
+
throw new Error(`response.data[0].b64_json is undefined: ${response}`);
|
|
32
|
+
}
|
|
33
|
+
return { buffer: Buffer.from(image_base64, "base64") };
|
|
34
|
+
}
|
|
35
|
+
// For dall-e-3
|
|
36
|
+
const res = await fetch(url);
|
|
37
|
+
if (!res.ok) {
|
|
38
|
+
throw new Error(`Failed to fetch ${url}: ${res.status} ${res.statusText}`);
|
|
39
|
+
}
|
|
40
|
+
// 2. Read the response as an ArrayBuffer
|
|
41
|
+
const arrayBuffer = await res.arrayBuffer();
|
|
42
|
+
// 3. Convert the ArrayBuffer to a Node.js Buffer and return it along with url
|
|
43
|
+
return { buffer: Buffer.from(arrayBuffer) };
|
|
44
|
+
};
|
|
45
|
+
exports.imageOpenaiAgent = imageOpenaiAgent;
|
|
46
|
+
// Registration metadata consumed by GraphAI's agent registry.
const imageOpenaiAgentInfo = {
    name: "imageOpenaiAgent",
    agent: exports.imageOpenaiAgent,
    mock: exports.imageOpenaiAgent, // no dedicated mock; the real agent doubles as one
    samples: [],
    description: "OpenAI Image agent",
    category: ["image"],
    author: "Receptron Team",
    repository: "https://github.com/receptron/mulmocast-cli/",
    // source: "https://github.com/receptron/mulmocast-cli/blob/main/src/agents/image_openai_agent.ts",
    license: "MIT",
    environmentVariables: ["OPENAI_API_KEY"],
};
exports.default = imageOpenaiAgentInfo;
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import addBGMAgent from "./add_bgm_agent";
|
|
2
|
+
import combineAudioFilesAgent from "./combine_audio_files_agent";
|
|
3
|
+
import imageGoogleAgent from "./image_google_agent";
|
|
4
|
+
import imageOpenaiAgent from "./image_openai_agent";
|
|
5
|
+
import mulmoPromptsAgent from "./mulmo_prompts_agent";
|
|
6
|
+
import ttsNijivoiceAgent from "./tts_nijivoice_agent";
|
|
7
|
+
import ttsOpenaiAgent from "./tts_openai_agent";
|
|
8
|
+
import validateMulmoScriptAgent from "./validate_mulmo_script_agent";
|
|
9
|
+
import { browserlessAgent } from "@graphai/browserless_agent";
|
|
10
|
+
import { textInputAgent } from "@graphai/input_agents";
|
|
11
|
+
import { openAIAgent } from "@graphai/openai_agent";
|
|
12
|
+
import { fileWriteAgent } from "@graphai/vanilla_node_agents";
|
|
13
|
+
// Aggregated public surface of the agents module: third-party GraphAI agents plus the local mulmocast agents.
export { openAIAgent, fileWriteAgent, browserlessAgent, textInputAgent, addBGMAgent, combineAudioFilesAgent, imageGoogleAgent, imageOpenaiAgent, mulmoPromptsAgent, ttsNijivoiceAgent, ttsOpenaiAgent, validateMulmoScriptAgent, };
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.validateMulmoScriptAgent = exports.ttsOpenaiAgent = exports.ttsNijivoiceAgent = exports.mulmoPromptsAgent = exports.imageOpenaiAgent = exports.imageGoogleAgent = exports.combineAudioFilesAgent = exports.addBGMAgent = exports.textInputAgent = exports.browserlessAgent = exports.fileWriteAgent = exports.openAIAgent = void 0;
|
|
7
|
+
const add_bgm_agent_1 = __importDefault(require("./add_bgm_agent"));
|
|
8
|
+
exports.addBGMAgent = add_bgm_agent_1.default;
|
|
9
|
+
const combine_audio_files_agent_1 = __importDefault(require("./combine_audio_files_agent"));
|
|
10
|
+
exports.combineAudioFilesAgent = combine_audio_files_agent_1.default;
|
|
11
|
+
const image_google_agent_1 = __importDefault(require("./image_google_agent"));
|
|
12
|
+
exports.imageGoogleAgent = image_google_agent_1.default;
|
|
13
|
+
const image_openai_agent_1 = __importDefault(require("./image_openai_agent"));
|
|
14
|
+
exports.imageOpenaiAgent = image_openai_agent_1.default;
|
|
15
|
+
const mulmo_prompts_agent_1 = __importDefault(require("./mulmo_prompts_agent"));
|
|
16
|
+
exports.mulmoPromptsAgent = mulmo_prompts_agent_1.default;
|
|
17
|
+
const tts_nijivoice_agent_1 = __importDefault(require("./tts_nijivoice_agent"));
|
|
18
|
+
exports.ttsNijivoiceAgent = tts_nijivoice_agent_1.default;
|
|
19
|
+
const tts_openai_agent_1 = __importDefault(require("./tts_openai_agent"));
|
|
20
|
+
exports.ttsOpenaiAgent = tts_openai_agent_1.default;
|
|
21
|
+
const validate_mulmo_script_agent_1 = __importDefault(require("./validate_mulmo_script_agent"));
|
|
22
|
+
exports.validateMulmoScriptAgent = validate_mulmo_script_agent_1.default;
|
|
23
|
+
const browserless_agent_1 = require("@graphai/browserless_agent");
|
|
24
|
+
Object.defineProperty(exports, "browserlessAgent", { enumerable: true, get: function () { return browserless_agent_1.browserlessAgent; } });
|
|
25
|
+
const input_agents_1 = require("@graphai/input_agents");
|
|
26
|
+
Object.defineProperty(exports, "textInputAgent", { enumerable: true, get: function () { return input_agents_1.textInputAgent; } });
|
|
27
|
+
const openai_agent_1 = require("@graphai/openai_agent");
|
|
28
|
+
Object.defineProperty(exports, "openAIAgent", { enumerable: true, get: function () { return openai_agent_1.openAIAgent; } });
|
|
29
|
+
// import * as vanilla from "@graphai/vanilla";
|
|
30
|
+
const vanilla_node_agents_1 = require("@graphai/vanilla_node_agents");
|
|
31
|
+
Object.defineProperty(exports, "fileWriteAgent", { enumerable: true, get: function () { return vanilla_node_agents_1.fileWriteAgent; } });
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import { AgentFunction, AgentFunctionInfo } from "graphai";
|
|
2
|
+
import { prompts } from "./prompts_data";
|
|
3
|
+
/**
 * GraphAI agent that serves canned prompt texts. Given a `promptKey` param it
 * returns the matching prompt; otherwise the whole prompts table.
 */
export declare const mulmoPromptsAgent: AgentFunction<{
    promptKey: keyof typeof prompts;
}>;
/** GraphAI registration metadata for this agent. */
declare const mulmoPromptsAgentInfo: AgentFunctionInfo;
export default mulmoPromptsAgentInfo;
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.mulmoPromptsAgent = void 0;
|
|
4
|
+
const prompts_data_1 = require("./prompts_data");
|
|
5
|
+
const mulmoPromptsAgent = async ({ params }) => {
|
|
6
|
+
const { promptKey } = params;
|
|
7
|
+
if (promptKey) {
|
|
8
|
+
const prompt = prompts_data_1.prompts[promptKey];
|
|
9
|
+
if (prompt) {
|
|
10
|
+
return {
|
|
11
|
+
text: prompt,
|
|
12
|
+
};
|
|
13
|
+
}
|
|
14
|
+
}
|
|
15
|
+
return prompts_data_1.prompts;
|
|
16
|
+
};
|
|
17
|
+
exports.mulmoPromptsAgent = mulmoPromptsAgent;
|
|
18
|
+
// Registration metadata consumed by GraphAI's agent registry.
const mulmoPromptsAgentInfo = {
    name: "mulmoPromptsAgent",
    agent: exports.mulmoPromptsAgent,
    mock: exports.mulmoPromptsAgent, // no dedicated mock; the real agent doubles as one
    // One worked example: the "abstract" key resolves to its prompt text.
    samples: [
        {
            inputs: {},
            params: {
                promptKey: "abstract",
            },
            result: {
                text: "We need to add a summary at the beginning of script, which summarizes this episode, which is very engaging. Please come up with a few sentences for the announcer to read, enter them into this script, and present it as an artifact.",
            },
        },
    ],
    description: "Prompts Agent",
    category: ["prompt"],
    author: "Receptron team",
    repository: "https://github.com/receptron/mulmocast-cli",
    source: "https://github.com/receptron/mulmocast-cli/tree/main/src/agents/prompts_agent.ts",
    // package: "@graphai/prompts",
    license: "MIT",
};
exports.default = mulmoPromptsAgentInfo;
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
/**
 * Canned prompt texts bundled with mulmocast. Keys identify the scenario
 * (podcast seeds, taro/sensei dialogue formats, image prompts, etc.);
 * values are the full prompt strings defined in prompts_data.js.
 */
export declare const prompts: {
    abstract: string;
    image_prompt: string;
    prompt: string;
    prompt2: string;
    prompt3: string;
    prompt_eng3: string;
    prompt_seed: string;
    prompt_seed_materials: string;
    prompt_taro: string;
    prompt_taro3_json: string;
    prompt_taro3_json2: string;
    prompt_taro_json: string;
    summary: string;
};
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.prompts = void 0;
|
|
4
|
+
// This file is auto-generated by src/tools/prompt.ts and Do not be edited manually.
|
|
5
|
+
exports.prompts = {
|
|
6
|
+
abstract: "We need to add a summary at the beginning of script, which summarizes this episode, which is very engaging. Please come up with a few sentences for the announcer to read, enter them into this script, and present it as an artifact.",
|
|
7
|
+
image_prompt: 'We need to generate a series of images for this podcast. For each line of given json, generate an appropriate text prompt for text-2-image AI, considering the flow of whole discussion and add it as "imagePrompt" property to the script. We don\'t want to show student, teacher or classroom in the image. Do not eliminate any lines.\n\n[Examples]\nA modern tech conference stage with a speaker discussing AI advancements, futuristic lighting and a large digital screen displaying AI-related graphics.\nA close-up of an AI executive speaking at a press conference, with a backdrop displaying AI chip designs and a world map.\nA futuristic AI research lab with glowing blue data streams and a large AI model being visualized on a digital display.\nA high-tech meeting room with analysts discussing global AI trends, holographic charts displaying AI development.\nA balanced scale with AI progress on one side and economic factors on the other, symbolizing analysis and perspective.\nA newspaper headline about a breakthrough in AI technology, with digital code overlaying the article.\nA timeline showing the gradual evolution of AI models, with key milestones highlighted.\n',
|
|
8
|
+
prompt: 'generate a podcast script based on this topic in the JSON format using the opening statement below. Monologue by the Host. Complete story.\nClearly mention the news source.\nNews source:\nArticle url: ...\n\n```json\n{\n "title": "(title of this episode)",\n "description": "(short description of this episode)",\n "reference": "(url to the article)",\n "tts": "openAI", // or "nijivoice", default is "openAI"\n "speakers": {\n "Host": {\n "voiceId": "shimmer",\n "displayName": {\n "en": "Host"\n }\n },\n },\n "beats": [\n {\n "speaker": "Host",\n "text": "Hello and welcome to another episode of \'life is artificial\', where we explore the cutting edge of technology, innovation, and what the future could look like.",\n },\n {\n "speaker": "Host",\n "text": "Today, ...",\n },\n ...\n ]\n}\n```\n\n',
|
|
9
|
+
prompt2: 'generate a podcast interview script of Steve Jobs about Elon Musk and his achievements with Tesla and SpaceX, in the JSON format using the opening statement below. \n\n```json\n{\n title: "(title of this episode)",\n description: "(short description of this episode)",\n script:[\n {\n speaker: "Host",\n text: "Hello and welcome to another episode of \'life is artificial\', where we explore the cutting edge of technology, innovation, and what the future could look like.",\n },\n {\n speaker: "Host",\n text: "Today, ...",\n }\n ]\n}\n```\n\n',
|
|
10
|
+
prompt3: 'Turn this interview into a script in the follwing JSON format without remove any discussions between host and guest, but remove unnecessary text like advertisements. \n\n```json\n{\n "title": "(title of this episode)",\n "description": "(short description of this episode)",\n "script":[\n {\n "speaker": "Host",\n "text": "Hello, thank you for taking an interview from me.",\n },\n {\n "speaker": "Guest",\n "text": "You are welcome",\n }\n ]\n}\n```\n',
|
|
11
|
+
prompt_eng3: 'Please create a script as an artifact in the form of a conversation between a Student and a Teacher, with the goal of explaining everything about this topic in a way that even a middle schooler can understand. However, be sure to cover all the key points. The opening line should always start with an introduction by an Announcer, saying:\n"Welcome to another episode of Life is Beautiful by Satoshi Nakajima."\nBelow, I’ll provide an example on a different topic in JSON format. Please follow this format.\n\n{\n "title": "Martial Law in Korea and Its Impact on Japan",\n "description": "An insightful discussion about the recently declared martial law in Korea and its potential implications, including parallels with constitutional considerations in Japan.",\n "tts": "openAI",\n "voices": [\n "nova",\n "echo",\n "shimmer"\n ],\n "speakers": ["Announcer", "Student", "Teacher"],\n "script": [\n {\n "speaker": "Announcer",\n "text": "Welcome to another episode of Life is Beautiful by Satoshi Nakajima."\n },\n {\n "speaker": "Announcer",\n "text": "Today\'s topic is about the recently declared martial law in Korea."\n },\n {\n "speaker": "Student",\n "text": "Could you tell me about the martial law that was declared in Korea?"\n },\n {\n "speaker": "Teacher",\n "text": "Of course. Recently, the president in Korea suddenly declared what we call \'martial law.\'"\n },\n {\n "speaker": "Student",\n "text": "What is martial law?"\n },\n {\n "speaker": "Teacher",\n "text": "Simply put, it\'s a measure used to restrict people\'s freedoms using the military when the country is in an extremely dangerous situation. For example, they can prohibit political activities and control public gatherings."\n },\n {\n "speaker": "Student",\n "text": "That\'s scary. Why did they do that?"\n },\n {\n "speaker": "Teacher",\n "text": "While the president claimed it was because \'the National Assembly wasn\'t functioning properly,\' it appears he used it to protect his own position. 
The military then tried to enter the National Assembly and arrest the assembly members."\n },\n {\n "speaker": "Student",\n "text": "What!? Trying to arrest National Assembly members sounds really dangerous!"\n },\n {\n "speaker": "Teacher",\n "text": "That\'s right. If the military had occupied the National Assembly, they might not have been able to lift the martial law there. In other words, the president could have controlled the country as he wished indefinitely."\n },\n {\n "speaker": "Student",\n "text": "What happened in Korea?"\n },\n {\n "speaker": "Teacher",\n "text": "Fortunately, opposition party members and citizens quickly gathered to protest, and the martial law was lifted after six hours. But it was a close call - Korea\'s democracy could have been severely damaged."\n },\n {\n "speaker": "Student",\n "text": "That\'s serious... Could something like this happen in Japan?"\n },\n {\n "speaker": "Teacher",\n "text": "Actually, there\'s a similar discussion happening in Japan right now. The Liberal Democratic Party is trying to add an \'emergency declaration\' clause to the constitution."\n },\n {\n "speaker": "Student",\n "text": "Is the emergency declaration similar to Korea\'s martial law?"\n },\n {\n "speaker": "Teacher",\n "text": "There are similarities. For instance, if the Prime Minister claims there\'s a \'risk of social order disruption,\' they can use special powers to run the country. They can issue orders with the same force as laws and give directions to local governments."\n },\n {\n "speaker": "Student",\n "text": "That sounds convenient, but I\'m worried."\n },\n {\n "speaker": "Teacher",\n "text": "Yes, you\'re right. While it\'s useful to respond quickly in emergencies, it can be very dangerous if these powers are abused. 
For example, the Prime Minister might run the country in ways that benefit them or take away people\'s freedoms."\n },\n {\n "speaker": "Student",\n "text": "Could the military get involved in politics like in Korea?"\n },\n {\n "speaker": "Teacher",\n "text": "We can\'t completely rule it out, which is why we need to be careful. We citizens need to closely monitor whether the LDP\'s constitutional amendment proposal includes appropriate restrictions to prevent abuse of power and speak up. It\'s important for each of us to take an active interest in preventing damage to our democracy."\n }\n ]\n}',
|
|
12
|
+
prompt_seed: 'Please generate a podcast script based on the topic provided by the user.\nIf there are any unclear points, be sure to ask the user questions and clarify them before generating the script.\nThe output should follow the JSON format specified below.\n\n```json\n{\n "title": "(title of this episode)",\n "description": "(short description of this episode)",\n "reference": "(url to the article)",\n "tts": "openAI", // or "nijivoice", default is "openAI"\n "speakers": {\n "Host": {\n "voiceId": "shimmer",\n "displayName": {\n "en": "Host"\n }\n },\n },\n "beats": [\n {\n "speaker": "Host",\n "text": "Hello and welcome to another episode of \'life is artificial\', where we explore the cutting edge of technology, innovation, and what the future could look like.",\n },\n {\n "speaker": "Host",\n "text": "Today, ...",\n },\n ...\n ]\n}\n```\n\n',
|
|
13
|
+
prompt_seed_materials: 'Generate a podcast script based on the materials provided by the user. The script should follow the JSON format specified below.\n\n```json\n{\n "title": "(title of this episode)",\n "description": "(short description of this episode)",\n "reference": "(url to the article)",\n "tts": "openAI", // or "nijivoice", default is "openAI"\n "speechParams": {\n "speakers": {\n "Host": {\n "voiceId": "shimmer",\n "displayName": {\n "en": "Host"\n }\n }\n }\n },\n "beats": [\n {\n "speaker": "Host",\n "text": "Hello and welcome to another episode of \'life is artificial\', where we explore the cutting edge of technology, innovation, and what the future could look like.",\n },\n {\n "speaker": "Host",\n "text": "Today, ...",\n },\n ...\n ]\n}\n```\n',
|
|
14
|
+
prompt_taro: "この件について、内容全てを小学生にも分かるように、太郎くんと先生の会話、という形の台本にしてください。以下に別のトピックに関するサンプルを貼り付けます。\n\n太郎:先生、今日は原子炉の「冷温停止」について説明していただけますか?\n\n先生:もちろんだよ。でも同じ「冷温停止」でも言う人によって色々な意味があって、簡単じゃあないんだよ。\n\n太郎:そうなんですか。\n\n先生:もともとはね、「冷温停止」とは「制御棒を挿入して核反応を止めた後、冷却水を循環させて原子炉の温度を安定的に100度以下に保つ事」の意味なんだけどね、福島第一の場合はそうはいかないんだ。\n\n太郎:メルトダウンしてしまっているからですね。\n\n先生:そうなんだ、核燃料は本来なら燃料棒という形で原子炉の中に整然と並んでいるはずなんだが、福島第一の場合、それが熱で溶けて流れ落ちてしまった上に、その熱で原子炉にも、その外側にある格納容器にも穴が空いてしまっているんだ。\n\n太郎:じゃあ、いったいどうやってそれを安定して冷却するんですか?\n\n先生:それがとっても大変なんだよ。分かりやすくするために、トイレに例えてみよう。君のうちには水洗トイレがあるよね。\n\n太郎:もちろんです。ウォシュレットだって付いてます。\n\n先生:君のうちにあるトイレみたいに、使った後にはちゃんと汚いものを流してくれるトイレを「安定したトイレ」と呼ぶことにしよう。\n\n太郎:はい。\n\n先生:福島第一のトイレは、まず水を流すためのタンクがこわれちゃっているんだ。\n\n太郎:あの、トイレの上についている四角いタンクですね。\n\n先生:福島第一の場合、あのタンクが壊れているんで、仕方がなく洗面台からホースで水を引っ張って来て流しているんだ。\n\n太郎:ずいぶん不便ですね。\n\n先生:でも、問題はそれだけじゃなくて、便器も壊れて穴が空いちゃっているんだ。\n\n太郎:ええ、それじゃあ、トイレの床が水びたしじゃないですか。\n\n先生:そうだよ。それも水だけじゃなくて、便とか尿とかも穴から漏れているんだ。\n\n太郎:それはひどいですね。",
|
|
15
|
+
prompt_taro3_json: 'この件について、内容全てを中学生にも分かるように、太郎くん(Student)と先生(Teacher)の会話、という形の台本をArtifactとして作って。ただし要点はしっかりと押さえて。\n最初の一言は、Announcerによるトピックの紹介にし、常に、"米国で活躍するエンジニアが新しい技術やビジネスを分かりやすく解説する、中島聡のLife is beautiful。"でスタートして。\n以下に別のトピックに関するサンプルを貼り付けます。このJSONフォーマットに従って。\n\n{\n "title": "韓国の戒厳令とその日本への影響",\n "description": "韓国で最近発令された戒厳令とその可能性のある影響について、また日本の憲法に関する考慮事項との類似点を含めた洞察に満ちた議論。",\n "tts": "nijivoice",\n "voices": [\n "c05bf02d-bed2-4335-aa69-0798e9e85205",\n "b9277ce3-ba1c-4f6f-9a65-c05ca102ded0",\n "bc06c63f-fef6-43b6-92f7-67f919bd5dae"\n ],\n "speakers": ["Announcer", "Student", "Teacher"],\n "script": [\n {\n "speaker": "Announcer",\n "text": "米国で活躍するエンジニアが、新しい技術やビジネスを分かりやすく解説する、中島聡のLife is beautiful。今日は、韓国で最近発令された戒厳令についての解説です。"\n },\n {\n "speaker": "Student",\n "text": "先生、今日は韓国で起きた戒厳令のことを教えてもらえますか?"\n },\n {\n "speaker": "Teacher",\n "text": "もちろんだよ、太郎くん。韓国で最近、大統領が「戒厳令」っていうのを突然宣言したんだ。"\n },\n {\n "speaker": "Student",\n "text": "戒厳令ってなんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "簡単に言うと、国がすごく危ない状態にあるとき、軍隊を使って人々の自由を制限するためのものなんだ。たとえば、政治活動を禁止したり、人の集まりを取り締まったりするんだよ。"\n },\n {\n "speaker": "Student",\n "text": "それって怖いですね。なんでそんなことをしたんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "大統領は「国会がうまく機能していないから」と言っていたけど、実際には自分の立場を守るために使ったように見えるんだ。それで、軍隊が国会に突入して、議員たちを捕まえようとしたんだ。"\n },\n {\n "speaker": "Student",\n "text": "ええっ!?国会議員を捕まえようとするなんて、すごく危ないことじゃないですか。"\n },\n {\n "speaker": "Teacher",\n "text": "その通りだよ。もし軍隊が国会を占拠していたら、国会で戒厳令を解除することもできなかったかもしれない。つまり、大統領がずっと自分の好きなように国を支配できるようになってしまうんだ。"\n },\n {\n "speaker": "Student",\n "text": "韓国ではどうなったんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "幸い、野党の議員や市民たちが急いで集まって抗議して、6時間後に戒厳令は解除されたんだ。でも、ほんの少しの違いで、韓国の民主主義が大きく傷つけられるところだったんだよ。"\n },\n {\n "speaker": "Student",\n "text": "それは大変なことですね…。日本ではそんなこと起きないんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "実はね、今、日本でも似たような話があるんだよ。自民党が「緊急事態宣言」を憲法に追加しようとしているんだ。"\n },\n {\n "speaker": "Student",\n "text": "緊急事態宣言って、韓国の戒厳令と同じようなものなんですか?"\n },\n {\n "speaker": "Teacher",\n "text": 
"似ている部分があるね。たとえば、総理大臣が「社会秩序の混乱の危険があるから」と言えば、特別な権限を使って国を動かすことができるんだ。法律と同じ力を持つ命令を出したり、地方自治体に指示を出したりすることができるんだよ。"\n },\n {\n "speaker": "Student",\n "text": "それって便利そうですけど、なんだか心配です。"\n },\n {\n "speaker": "Teacher",\n "text": "そうだね。もちろん、緊急時には素早い対応が必要だから便利な面もあるけど、その権限が濫用されると、とても危険なんだ。たとえば、総理大臣が自分に都合のいいように国を動かしたり、国民の自由を奪ったりすることができるようになってしまうかもしれない。"\n },\n {\n "speaker": "Student",\n "text": "韓国みたいに、軍隊が政治に口を出してくることもあり得るんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "完全にあり得ないとは言えないからこそ、注意が必要なんだ。私たち国民は、自民党の改憲案が権力の濫用を防ぐための適切な制限を含んでいるのかをしっかり監視し、声を上げることが求められる。民主主義が損なわれるのを防ぐために、私たち一人ひとりが積極的に関心を持つことが大切なんだよ。"\n },\n {\n "speaker": "Student",\n "text": "ありがとうございます。とても良い勉強になりました。"\n }\n ]\n}',
|
|
16
|
+
prompt_taro3_json2: 'この件について、内容全てを高校生にも分かるように、太郎くん(Student)と先生(Teacher)の会話、という形の台本をArtifactとして作って。ただし要点はしっかりと押さえて。\n以下に別のトピックに関するサンプルを貼り付けます。このJSONフォーマットに従って。\n\n{\n "title": "韓国の戒厳令とその日本への影響",\n "description": "韓国で最近発令された戒厳令とその可能性のある影響について、また日本の憲法に関する考慮事項との類似点を含めた洞察に満ちた議論。",\n "tts": "nijivoice",\n "voices": [\n "afd7df65-0fdc-4d31-ae8b-a29f0f5eed62",\n "a7619e48-bf6a-4f9f-843f-40485651257f",\n "bc06c63f-fef6-43b6-92f7-67f919bd5dae"\n ],\n "charactors": ["春玲", "森野颯太", "ベン・カーター"],\n "speakers": ["Announcer", "Student", "Teacher"],\n "script": [\n {\n "speaker": "Student",\n "text": "先生、今日は韓国で起きた戒厳令のことを教えてもらえますか?"\n },\n {\n "speaker": "Teacher",\n "text": "もちろんだよ、太郎くん。韓国で最近、大統領が「戒厳令」っていうのを突然宣言したんだ。"\n },\n {\n "speaker": "Student",\n "text": "戒厳令ってなんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "簡単に言うと、国がすごく危ない状態にあるとき、軍隊を使って人々の自由を制限するためのものなんだ。たとえば、政治活動を禁止したり、人の集まりを取り締まったりするんだよ。"\n },\n {\n "speaker": "Student",\n "text": "それって怖いですね。なんでそんなことをしたんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "大統領は「国会がうまく機能していないから」と言っていたけど、実際には自分の立場を守るために使ったように見えるんだ。それで、軍隊が国会に突入して、議員たちを捕まえようとしたんだ。"\n },\n {\n "speaker": "Student",\n "text": "ええっ!?国会議員を捕まえようとするなんて、すごく危ないことじゃないですか。"\n },\n {\n "speaker": "Teacher",\n "text": "その通りだよ。もし軍隊が国会を占拠していたら、国会で戒厳令を解除することもできなかったかもしれない。つまり、大統領がずっと自分の好きなように国を支配できるようになってしまうんだ。"\n },\n {\n "speaker": "Student",\n "text": "韓国ではどうなったんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "幸い、野党の議員や市民たちが急いで集まって抗議して、6時間後に戒厳令は解除されたんだ。でも、ほんの少しの違いで、韓国の民主主義が大きく傷つけられるところだったんだよ。"\n },\n {\n "speaker": "Student",\n "text": "それは大変なことですね…。日本ではそんなこと起きないんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "実はね、今、日本でも似たような話があるんだよ。自民党が「緊急事態宣言」を憲法に追加しようとしているんだ。"\n },\n {\n "speaker": "Student",\n "text": "緊急事態宣言って、韓国の戒厳令と同じようなものなんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "似ている部分があるね。たとえば、総理大臣が「社会秩序の混乱の危険があるから」と言えば、特別な権限を使って国を動かすことができるんだ。法律と同じ力を持つ命令を出したり、地方自治体に指示を出したりすることができるんだよ。"\n },\n {\n "speaker": "Student",\n "text": "それって便利そうですけど、なんだか心配です。"\n },\n {\n 
"speaker": "Teacher",\n "text": "そうだね。もちろん、緊急時には素早い対応が必要だから便利な面もあるけど、その権限が濫用されると、とても危険なんだ。たとえば、総理大臣が自分に都合のいいように国を動かしたり、国民の自由を奪ったりすることができるようになってしまうかもしれない。"\n },\n {\n "speaker": "Student",\n "text": "韓国みたいに、軍隊が政治に口を出してくることもあり得るんですか?"\n },\n {\n "speaker": "Teacher",\n "text": "完全にあり得ないとは言えないからこそ、注意が必要なんだ。私たち国民は、自民党の改憲案が権力の濫用を防ぐための適切な制限を含んでいるのかをしっかり監視し、声を上げることが求められる。民主主義が損なわれるのを防ぐために、私たち一人ひとりが積極的に関心を持つことが大切なんだよ。"\n },\n {\n "speaker": "Student",\n "text": "ありがとうございます。とても良い勉強になりました。"\n },\n {\n "speaker": "Announcer",\n "text": "ご視聴、ありがとうございました。次回の放送もお楽しみに。"\n }\n ]\n}',
|
|
17
|
+
prompt_taro_json: 'この件について、内容全てを小学生にも分かるように、太郎くん(Guest)と先生(Host)の会話、という形の台本にしてください。以下に別のトピックに関するサンプルを貼り付けます。このJSONフォーマットに従ってください。\n\n{\n "title": "韓国の戒厳令とその日本への影響",\n "description": "韓国で最近発令された戒厳令とその可能性のある影響について、また日本の憲法に関する考慮事項との類似点を含めた洞察に満ちた議論。",\n "tts": "nijivoice",\n "script": [\n {\n "speaker": "Host",\n "text": "先生、今日は韓国で起きた戒厳令のことを教えてもらえますか?"\n },\n {\n "speaker": "Guest",\n "text": "もちろんだよ、太郎くん。韓国で最近、大統領が「戒厳令」っていうのを突然宣言したんだ。"\n },\n {\n "speaker": "Host",\n "text": "戒厳令ってなんですか?"\n },\n {\n "speaker": "Guest",\n "text": "簡単に言うと、国がすごく危ない状態にあるとき、軍隊を使って人々の自由を制限するためのものなんだ。たとえば、政治活動を禁止したり、人の集まりを取り締まったりするんだよ。"\n },\n {\n "speaker": "Host",\n "text": "それって怖いですね。なんでそんなことをしたんですか?"\n },\n {\n "speaker": "Guest",\n "text": "大統領は「国会がうまく機能していないから」と言っていたけど、実際には自分の立場を守るために使ったように見えるんだ。それで、軍隊が国会に突入して、議員たちを捕まえようとしたんだ。"\n },\n {\n "speaker": "Host",\n "text": "ええっ!?国会議員を捕まえようとするなんて、すごく危ないことじゃないですか。"\n },\n {\n "speaker": "Guest",\n "text": "その通りだよ。もし軍隊が国会を占拠していたら、国会で戒厳令を解除することもできなかったかもしれない。つまり、大統領がずっと自分の好きなように国を支配できるようになってしまうんだ。"\n },\n {\n "speaker": "Host",\n "text": "韓国ではどうなったんですか?"\n },\n {\n "speaker": "Guest",\n "text": "幸い、野党の議員や市民たちが急いで集まって抗議して、6時間後に戒厳令は解除されたんだ。でも、ほんの少しの違いで、韓国の民主主義が大きく傷つけられるところだったんだよ。"\n },\n {\n "speaker": "Host",\n "text": "それは大変なことですね…。日本ではそんなこと起きないんですか?"\n },\n {\n "speaker": "Guest",\n "text": "実はね、今、日本でも似たような話があるんだよ。自民党が「緊急事態宣言」を憲法に追加しようとしているんだ。"\n },\n {\n "speaker": "Host",\n "text": "緊急事態宣言って、韓国の戒厳令と同じようなものなんですか?"\n },\n {\n "speaker": "Guest",\n "text": "似ている部分があるね。たとえば、総理大臣が「社会秩序の混乱の危険があるから」と言えば、特別な権限を使って国を動かすことができるんだ。法律と同じ力を持つ命令を出したり、地方自治体に指示を出したりすることができるんだよ。"\n },\n {\n "speaker": "Host",\n "text": "それって便利そうですけど、なんだか心配です。"\n },\n {\n "speaker": "Guest",\n "text": "そうだね。もちろん、緊急時には素早い対応が必要だから便利な面もあるけど、その権限が濫用されると、とても危険なんだ。たとえば、総理大臣が自分に都合のいいように国を動かしたり、国民の自由を奪ったりすることができるようになってしまうかもしれない。"\n },\n {\n "speaker": "Host",\n "text": "韓国みたいに、軍隊が政治に口を出してくることもあり得るんですか?"\n },\n {\n "speaker": "Guest",\n "text": 
"完全にあり得ないとは言えないからこそ、注意が必要なんだ。私たち国民は、自民党の改憲案が権力の濫用を防ぐための適切な制限を含んでいるのかをしっかり監視し、声を上げることが求められる。民主主義が損なわれるのを防ぐために、私たち一人ひとりが積極的に関心を持つことが大切なんだよ。"\n }\n ]\n}',
|
|
18
|
+
summary: "We need to add a summary at the end of script, which summarizes this episode. Please come up with a few sentences for the announcer to read, enter them into this script, and present it as an artifact.",
|
|
19
|
+
};
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.ttsNijivoiceAgent = void 0;
|
|
4
|
+
const graphai_1 = require("graphai");
|
|
5
|
+
// Fallback API key read once from the environment; a per-call params.apiKey takes precedence.
const nijovoiceApiKey = process.env.NIJIVOICE_API_KEY ?? "";
// Guidance printed when no NijiVoice API key is available (newline-separated, four lines).
const errorMessage =
    "TTS NijiVoice: No API key. " +
    "\n" +
    "You have the following options:" +
    "\n" +
    "1. Obtain an API key from Niji Voice (https://platform.nijivoice.com/) and set it as the NIJIVOICE_API_KEY environment variable." +
    "\n" +
    '2. Use OpenAI\'s TTS instead of Niji Voice by changing speechParams.provider from "nijivoice" to "openai".';
|
|
12
|
+
/**
 * GraphAI agent that synthesizes speech through the NijiVoice REST API.
 *
 * params:
 *   - apiKey: overrides the NIJIVOICE_API_KEY environment variable when set.
 *   - voice: NijiVoice voice-actor id, interpolated into the endpoint URL.
 *   - speed / speed_global: per-line and global playback speed; the API expects strings.
 *   - suppressError: when truthy, failures are returned as `{ error }` instead of thrown.
 * namedInputs:
 *   - text: the script to synthesize.
 *
 * Returns `{ buffer, generatedVoice }` on success (mp3 bytes plus the API's
 * metadata), `{ error }` when suppressed, otherwise throws "TTS Nijivoice Error".
 */
const ttsNijivoiceAgent = async ({ params, namedInputs }) => {
    const { apiKey, suppressError, voice, speed, speed_global } = params;
    const { text } = namedInputs;
    // Fail fast when neither a per-call key nor the environment variable is present.
    (0, graphai_1.assert)(apiKey ?? nijovoiceApiKey, errorMessage);
    // Per-line speed wins over the global one; default is "1.0" (API takes strings).
    const effectiveSpeed = speed ? "" + speed : speed_global ? "" + speed_global : "1.0";
    const requestInit = {
        method: "POST",
        headers: {
            "x-api-key": apiKey ?? nijovoiceApiKey,
            accept: "application/json",
            "content-type": "application/json",
        },
        body: JSON.stringify({
            format: "mp3",
            speed: effectiveSpeed,
            script: text,
        }),
    };
    try {
        // First request: ask NijiVoice to generate the voice clip.
        const voiceRes = await fetch(`https://api.nijivoice.com/api/platform/v1/voice-actors/${voice}/generate-voice`, requestInit);
        const voiceJson = await voiceRes.json();
        const downloadUrl = voiceJson?.generatedVoice?.audioFileDownloadUrl;
        if (downloadUrl) {
            // Second request: download the generated mp3 bytes.
            const audioRes = await fetch(downloadUrl);
            const buffer = Buffer.from(await audioRes.arrayBuffer());
            return { buffer, generatedVoice: voiceJson.generatedVoice };
        }
        if (suppressError) {
            return { error: voiceJson };
        }
        console.error(voiceJson);
        // NOTE: this throw is caught by the catch below, which logs and rethrows —
        // matching the original double-log behavior on a malformed API response.
        throw new Error("TTS Nijivoice Error");
    }
    catch (e) {
        if (suppressError) {
            return { error: e };
        }
        console.error(e);
        throw new Error("TTS Nijivoice Error");
    }
};
|
|
56
|
+
exports.ttsNijivoiceAgent = ttsNijivoiceAgent;
// Agent metadata consumed by the GraphAI agent registry; the agent doubles as its own mock.
const ttsNijivoiceAgentInfo = {
    name: "ttsNijivoiceAgent",
    agent: ttsNijivoiceAgent,
    mock: ttsNijivoiceAgent,
    samples: [],
    description: "TTS nijivoice agent",
    category: ["tts"],
    author: "isamu arimoto",
    repository: "https://github.com/receptron/graphai/",
    license: "MIT",
};
exports.default = ttsNijivoiceAgentInfo;
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// TypeScript-emitted interop helper: wraps a CommonJS module so its value can be
// consumed through a `.default` property, mirroring ES-module default-import
// semantics. Left exactly as generated by the compiler.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.ttsOpenaiAgent = void 0;
|
|
7
|
+
const graphai_1 = require("graphai");
|
|
8
|
+
const openai_1 = __importDefault(require("openai"));
|
|
9
|
+
/**
 * GraphAI agent that synthesizes speech with OpenAI's text-to-speech endpoint.
 *
 * params:
 *   - apiKey: OpenAI API key passed straight to the SDK client.
 *   - model: TTS model; defaults to "gpt-4o-mini-tts".
 *   - voice: voice preset; defaults to "shimmer".
 *   - instructions: optional style instructions forwarded when provided.
 *   - suppressError: when truthy, failures are returned as `{ error }` instead of thrown.
 * namedInputs:
 *   - text: the text to speak.
 *
 * Returns `{ buffer }` with the audio bytes on success, `{ error }` when
 * suppressed, otherwise throws "TTS OpenAI Error".
 */
const ttsOpenaiAgent = async ({ namedInputs, params }) => {
    const { text } = namedInputs;
    const { apiKey, model, voice, suppressError, instructions } = params;
    const openai = new openai_1.default({ apiKey });
    try {
        const request = {
            model: model ?? "gpt-4o-mini-tts", // "tts-1",
            voice: voice ?? "shimmer",
            input: text,
        };
        // Only attach instructions when supplied; the API rejects some models otherwise.
        if (instructions) {
            request.instructions = instructions;
        }
        graphai_1.GraphAILogger.log("ttsOptions", request);
        const response = await openai.audio.speech.create(request);
        const buffer = Buffer.from(await response.arrayBuffer());
        return { buffer };
    }
    catch (e) {
        if (suppressError) {
            return { error: e };
        }
        console.error(e);
        throw new Error("TTS OpenAI Error");
    }
};
|
|
37
|
+
exports.ttsOpenaiAgent = ttsOpenaiAgent;
// Agent metadata consumed by the GraphAI agent registry; the agent doubles as its own mock.
const ttsOpenaiAgentInfo = {
    name: "ttsOpenaiAgent",
    agent: ttsOpenaiAgent,
    mock: ttsOpenaiAgent,
    samples: [],
    description: "OpenAI TTS agent",
    category: ["tts"],
    author: "Receptron Team",
    repository: "https://github.com/receptron/graphai-agents/tree/main/tts/tts-openai-agent",
    license: "MIT",
    environmentVariables: ["OPENAI_API_KEY"],
};
exports.default = ttsOpenaiAgentInfo;
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import type { AgentFunction, AgentFunctionInfo, DefaultConfigData } from "graphai";
import { MulmoScript } from "../types";
/** Named inputs accepted by the validation agent. */
interface ValidateMulmoScriptInputs {
    /** Raw JSON text to validate against the MulmoScript schema. */
    text: string;
}
/** Outcome of a validation attempt. */
interface ValidateMulmoScriptResponse {
    /** True when `text` parsed as JSON and satisfied the schema. */
    isValid: boolean;
    /** The parsed, schema-conformant script; set only on success. */
    data?: MulmoScript;
    /** Parse or schema error message; set only on failure. */
    error?: string;
}
/**
 * MulmoScript JSON validation agent
 * Validates if a JSON string conforms to the MulmoScript schema
 */
export declare const validateMulmoScriptAgent: AgentFunction<object, ValidateMulmoScriptResponse, ValidateMulmoScriptInputs, DefaultConfigData>;
declare const validateMulmoScriptAgentInfo: AgentFunctionInfo;
export default validateMulmoScriptAgentInfo;
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.validateMulmoScriptAgent = void 0;
|
|
4
|
+
const schema_1 = require("../types/schema");
|
|
5
|
+
/**
 * MulmoScript JSON validation agent.
 * Parses `namedInputs.text` as JSON and checks the result against the
 * MulmoScript zod schema. Never throws: both parse and schema failures are
 * reported through `{ isValid: false, error }`.
 */
const validateMulmoScriptAgent = async ({ namedInputs, }) => {
    const { text } = namedInputs;
    try {
        // zod's parse() throws on schema violation, JSON.parse on malformed input;
        // both land in the catch below.
        const parsed = schema_1.mulmoScriptSchema.parse(JSON.parse(text));
        return { isValid: true, data: parsed };
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        return { isValid: false, error: message };
    }
};
|
|
26
|
+
exports.validateMulmoScriptAgent = validateMulmoScriptAgent;
// Agent metadata consumed by the GraphAI agent registry; the agent doubles as its own mock.
const validateMulmoScriptAgentInfo = {
    name: "validateMulmoScriptAgent",
    agent: validateMulmoScriptAgent,
    mock: validateMulmoScriptAgent,
    samples: [],
    description: "Validates if a JSON string conforms to the MulmoScript schema",
    category: ["validation"],
    author: "Receptron Team",
    repository: "https://github.com/receptron/mulmocast-cli/tree/main/src/agents/validate_script_agent.ts",
    license: "MIT",
};
exports.default = validateMulmoScriptAgentInfo;
|
package/lib/cli/args.js
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// TypeScript-emitted interop helper: wraps a CommonJS module so its value can be
// consumed through a `.default` property, mirroring ES-module default-import
// semantics. Left exactly as generated by the compiler.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.args = void 0;
|
|
7
|
+
const yargs_1 = __importDefault(require("yargs"));
|
|
8
|
+
const helpers_1 = require("yargs/helpers");
|
|
9
|
+
const common_1 = require("./common");
|
|
10
|
+
// Parsed CLI arguments for the `mulmocast` command.
// Shape: mulmocast <action> <file> [-s scratchpaddir] [-i imagedir], plus the
// shared options contributed by commonOptions(). `.strict()` rejects unknown flags.
const parser = (0, common_1.commonOptions)((0, yargs_1.default)((0, helpers_1.hideBin)(process.argv)))
    .scriptName("mulmocast")
    .option("s", {
        alias: "scratchpaddir",
        description: "scratchpad dir",
        demandOption: false,
        type: "string",
    })
    .option("i", {
        alias: "imagedir",
        description: "image dir",
        demandOption: false,
        type: "string",
    })
    // Default command: both positionals are required by the "$0 <action> <file>" pattern.
    .command("$0 <action> <file>", "Run mulmocast", (yargs) => yargs
        .positional("action", {
            describe: "action to perform",
            choices: ["translate", "audio", "images", "movie", "preprocess"],
            type: "string",
        })
        .positional("file", {
            describe: "Mulmo Script File",
            type: "string",
        }))
    .strict()
    .help();
exports.args = parser.parseSync();
|
package/lib/cli/cli.d.ts
ADDED