@humeai/voice-embed-react 0.0.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +94 -0
- package/dist/index.d.mts +11 -0
- package/dist/index.d.ts +11 -0
- package/dist/index.js +85 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +62 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +53 -0
package/README.md
ADDED
@@ -0,0 +1,94 @@
<div align="center">
  <img src="https://storage.googleapis.com/hume-public-logos/hume/hume-banner.png">
  <h1>Hume AI Voice Embed</h1>
  <p>
    <strong>Integrate Hume's Empathic Voice Interface directly into your web application</strong>
  </p>
</div>

## Overview

This package provides a React widget component that encapsulates Hume's Empathic Voice Interface, making it easy to embed the interface in your web application. The component is designed to be embedded in your web page through an iframe, and it abstracts away the complexities of managing WebSocket connections, capturing user audio via the client's microphone, and handling playback of the interface's audio responses.

## Prerequisites

Before installing this package, please ensure your development environment meets the following requirement:

- Node.js (`v18.0.0` or higher).

To verify your Node.js version, run this command in your terminal:

```sh
node --version
```

If your Node.js version is below `18.0.0`, update it to meet the requirement. To update Node.js, visit the [official Node.js site](https://nodejs.org/) or use a version manager like nvm for a more seamless upgrade.

## Installation

Add `@humeai/voice-embed-react` to your project by running this command in your project directory:

```bash
npm install @humeai/voice-embed-react
```

This downloads the package and makes it ready for import and use within your React components:

```tsx
import { EmbeddedVoice } from '@humeai/voice-embed-react';
```

## Usage

### Quickstart

Here's a simple example to get you started with the `EmbeddedVoice` component:

```tsx
import React, { useState } from 'react';
import { EmbeddedVoice } from '@humeai/voice-embed-react';

function App() {
  const apiKey = process.env.HUME_API_KEY || '';
  const [isEmbedOpen, setIsEmbedOpen] = useState(false);

  return (
    <div>
      <button onClick={() => setIsEmbedOpen(true)}>Open Widget</button>
      <EmbeddedVoice
        auth={{ type: 'apiKey', value: apiKey }}
        systemPrompt={'Your system prompt goes here.'}
        onMessage={(msg) => console.log('Message received: ', msg)}
        onClose={() => setIsEmbedOpen(false)}
        isEmbedOpen={isEmbedOpen}
      />
    </div>
  );
}
```
**Note:** For integration within server components, instantiate `EmbeddedVoice` within a client component, as sketched below. For more information, see the [Next.js documentation on client components](https://nextjs.org/docs/app/building-your-application/rendering/client-components).
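For example, a minimal client-component wrapper in a Next.js App Router project might look like the following sketch (the file name and `apiKey` plumbing are illustrative, not part of this package):

```tsx
// app/components/VoiceWidget.tsx
'use client';

import { useState } from 'react';
import { EmbeddedVoice } from '@humeai/voice-embed-react';

export function VoiceWidget({ apiKey }: { apiKey: string }) {
  const [isEmbedOpen, setIsEmbedOpen] = useState(false);

  return (
    <>
      <button onClick={() => setIsEmbedOpen(true)}>Talk to us</button>
      <EmbeddedVoice
        auth={{ type: 'apiKey', value: apiKey }}
        isEmbedOpen={isEmbedOpen}
        onClose={() => setIsEmbedOpen(false)}
      />
    </>
  );
}
```

A server component can then render `<VoiceWidget apiKey={...} />` while the widget itself stays on the client.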
### Component Props

The table below outlines the props accepted by the `EmbeddedVoice` component:

| prop              | required | description |
| ----------------- | -------- | ----------- |
| auth              | yes      | Authentication strategy and corresponding value. Authentication is required to establish the WebSocket connection with Hume's Voice API. See our [documentation](https://dev.hume.ai/docs/quick-start#getting-your-api-key) on obtaining your `API key` or `access token`. |
| isEmbedOpen       | yes      | Determines the initial visibility of the widget. Pass `true` to render the widget open on initial load, `false` to start with it closed. While the widget's UI provides its own toggle, this prop also lets a parent component control the widget's visibility externally. |
| rendererUrl       | no       | URL of the webpage to inject the `EmbeddedVoice` widget into. |
| hostname          | no       | Hostname of the Hume API. Defaults to `"api.hume.ai"`. |
| channels          | no       | Number of channels in the input audio. |
| sampleRate        | no       | Sample rate of the input audio. |
| tts               | no       | Text-to-speech service. Defaults to `"hume_ai"`, Hume's own text-to-speech service. Other options: `"eleven_labs"` and `"play_ht"`. |
| speedRatio        | no       | Speed ratio of the TTS service. |
| reconnectAttempts | no       | Number of times to attempt to reconnect to the API. Defaults to `30`. |
| debug             | no       | Enable debug mode. Defaults to `false`. |
| systemPrompt      | no       | System prompt to use for the voice, with a limit of 100,000 characters. Defaults to the standard Hume system prompt. |
| onMessage         | no       | Callback invoked upon receiving a message through the WebSocket. |
| onClose           | no       | Callback invoked when the WebSocket connection closes. |
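For example, to authenticate with an access token rather than an API key, pass the alternative `auth` strategy (a sketch only; verify the exact `'accessToken'` discriminant against `EmbeddedVoiceConfig` in `@humeai/voice-embed`):

```tsx
<EmbeddedVoice
  auth={{ type: 'accessToken', value: accessToken }}
  isEmbedOpen={isEmbedOpen}
/>
```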
## Support

If you have questions or need help with this package, [reach out to us on Discord](https://discord.com/invite/WPRSugvAm6)!
package/dist/index.d.mts
ADDED
@@ -0,0 +1,11 @@
import { EmbeddedVoiceConfig, TranscriptMessageHandler, CloseHandler } from '@humeai/voice-embed';
export { AgentTranscriptMessage, COLLAPSE_WIDGET_ACTION, Config, EXPAND_WIDGET_ACTION, EmotionScores, FrameToClientAction, JSONMessage, LanguageModelOption, MINIMIZE_WIDGET_ACTION, RESIZE_FRAME_ACTION, TRANSCRIPT_MESSAGE_ACTION, UserTranscriptMessage, WIDGET_IFRAME_IS_READY_ACTION, WindowDimensions, parseClientToFrameAction } from '@humeai/voice-embed';

type EmbeddedVoiceProps = Partial<EmbeddedVoiceConfig> & NonNullable<Pick<EmbeddedVoiceConfig, 'auth'>> & {
    onMessage?: TranscriptMessageHandler;
    onClose?: CloseHandler;
    isEmbedOpen: boolean;
};
declare const EmbeddedVoice: (props: EmbeddedVoiceProps) => null;

export { EmbeddedVoice };
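The `EmbeddedVoiceProps` intersection above makes every `EmbeddedVoiceConfig` field optional and then re-adds `auth` as required, so `auth` and `isEmbedOpen` are the only mandatory props. A quick illustration (the key value is a placeholder):

```tsx
import { EmbeddedVoice } from '@humeai/voice-embed-react';

// `auth` and `isEmbedOpen` are the only required props; every other
// EmbeddedVoiceConfig field is optional. Omitting `auth` is a type error,
// since NonNullable<Pick<EmbeddedVoiceConfig, 'auth'>> re-requires it.
export function Demo() {
  return (
    <EmbeddedVoice
      auth={{ type: 'apiKey', value: 'YOUR_API_KEY' }} // placeholder key
      isEmbedOpen={false}
    />
  );
}
```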
package/dist/index.d.ts
ADDED
@@ -0,0 +1,11 @@
import { EmbeddedVoiceConfig, TranscriptMessageHandler, CloseHandler } from '@humeai/voice-embed';
export { AgentTranscriptMessage, COLLAPSE_WIDGET_ACTION, Config, EXPAND_WIDGET_ACTION, EmotionScores, FrameToClientAction, JSONMessage, LanguageModelOption, MINIMIZE_WIDGET_ACTION, RESIZE_FRAME_ACTION, TRANSCRIPT_MESSAGE_ACTION, UserTranscriptMessage, WIDGET_IFRAME_IS_READY_ACTION, WindowDimensions, parseClientToFrameAction } from '@humeai/voice-embed';

type EmbeddedVoiceProps = Partial<EmbeddedVoiceConfig> & NonNullable<Pick<EmbeddedVoiceConfig, 'auth'>> & {
    onMessage?: TranscriptMessageHandler;
    onClose?: CloseHandler;
    isEmbedOpen: boolean;
};
declare const EmbeddedVoice: (props: EmbeddedVoiceProps) => null;

export { EmbeddedVoice };
package/dist/index.js
ADDED
@@ -0,0 +1,85 @@
'use client';
"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
var src_exports = {};
__export(src_exports, {
  COLLAPSE_WIDGET_ACTION: () => import_voice_embed2.COLLAPSE_WIDGET_ACTION,
  EXPAND_WIDGET_ACTION: () => import_voice_embed2.EXPAND_WIDGET_ACTION,
  EmbeddedVoice: () => EmbeddedVoice,
  LanguageModelOption: () => import_voice_embed2.LanguageModelOption,
  MINIMIZE_WIDGET_ACTION: () => import_voice_embed2.MINIMIZE_WIDGET_ACTION,
  RESIZE_FRAME_ACTION: () => import_voice_embed2.RESIZE_FRAME_ACTION,
  TRANSCRIPT_MESSAGE_ACTION: () => import_voice_embed2.TRANSCRIPT_MESSAGE_ACTION,
  WIDGET_IFRAME_IS_READY_ACTION: () => import_voice_embed2.WIDGET_IFRAME_IS_READY_ACTION,
  parseClientToFrameAction: () => import_voice_embed2.parseClientToFrameAction
});
module.exports = __toCommonJS(src_exports);

// src/lib/EmbeddedVoice.ts
var import_voice_embed = require("@humeai/voice-embed");
var import_react = require("react");
var EmbeddedVoice = (props) => {
  const { onMessage, isEmbedOpen, onClose, ...config } = props;
  const embeddedVoice = (0, import_react.useRef)(null);
  const onMessageHandler = (0, import_react.useRef)();
  onMessageHandler.current = onMessage;
  const onCloseHandler = (0, import_react.useRef)();
  onCloseHandler.current = onClose;
  const stableConfig = (0, import_react.useRef)();
  stableConfig.current = config;
  (0, import_react.useEffect)(() => {
    let unmount;
    if (!embeddedVoice.current && stableConfig.current) {
      embeddedVoice.current = import_voice_embed.EmbeddedVoice.create({
        onMessage: onMessageHandler.current,
        onClose: onCloseHandler.current,
        ...stableConfig.current
      });
      unmount = embeddedVoice.current.mount();
    }
    return () => {
      unmount?.();
      embeddedVoice.current = null;
    };
  }, []);
  (0, import_react.useEffect)(() => {
    if (isEmbedOpen) {
      embeddedVoice.current?.openEmbed();
    }
  }, [isEmbedOpen]);
  return null;
};

// src/index.ts
var import_voice_embed2 = require("@humeai/voice-embed");
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  COLLAPSE_WIDGET_ACTION,
  EXPAND_WIDGET_ACTION,
  EmbeddedVoice,
  LanguageModelOption,
  MINIMIZE_WIDGET_ACTION,
  RESIZE_FRAME_ACTION,
  TRANSCRIPT_MESSAGE_ACTION,
  WIDGET_IFRAME_IS_READY_ACTION,
  parseClientToFrameAction
});
//# sourceMappingURL=index.js.map
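A note on the `(0, import_react.useRef)(...)` calls in the CommonJS build above: this is standard esbuild output, where the comma expression detaches the function from its namespace object before invoking it, preserving plain-function call semantics. A self-contained sketch of what the pattern does:

```ts
'use strict';

const ns = {
  greet(this: unknown) {
    return this; // whatever `this` the call site supplies
  },
};

ns.greet();      // `this` is `ns`: a normal method call
(0, ns.greet)(); // `this` is undefined in strict mode: the comma expression
                 // yields the bare function, detached from `ns`, before calling it
```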
package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/index.ts","../src/lib/EmbeddedVoice.ts"],"sourcesContent":["export * from './lib/EmbeddedVoice';\n\nexport {\n COLLAPSE_WIDGET_ACTION,\n EXPAND_WIDGET_ACTION,\n MINIMIZE_WIDGET_ACTION,\n RESIZE_FRAME_ACTION,\n TRANSCRIPT_MESSAGE_ACTION,\n WIDGET_IFRAME_IS_READY_ACTION,\n parseClientToFrameAction,\n LanguageModelOption,\n} from '@humeai/voice-embed';\n\nexport type {\n AgentTranscriptMessage,\n Config,\n FrameToClientAction,\n JSONMessage,\n UserTranscriptMessage,\n WindowDimensions,\n EmotionScores,\n} from '@humeai/voice-embed';\n","import {\n type CloseHandler,\n EmbeddedVoice as EA,\n type EmbeddedVoiceConfig,\n type TranscriptMessageHandler,\n} from '@humeai/voice-embed';\nimport { useEffect, useRef } from 'react';\n\ntype EmbeddedVoiceProps = Partial<EmbeddedVoiceConfig> &\n NonNullable<Pick<EmbeddedVoiceConfig, 'auth'>> & {\n onMessage?: TranscriptMessageHandler;\n onClose?: CloseHandler;\n isEmbedOpen: boolean;\n };\n\nexport const EmbeddedVoice = (props: EmbeddedVoiceProps) => {\n const { onMessage, isEmbedOpen, onClose, ...config } = props;\n const embeddedVoice = useRef<EA | null>(null);\n const onMessageHandler = useRef<TranscriptMessageHandler | undefined>();\n onMessageHandler.current = onMessage;\n\n const onCloseHandler = useRef<CloseHandler | undefined>();\n onCloseHandler.current = onClose;\n\n const stableConfig = useRef<\n Partial<EmbeddedVoiceConfig> &\n NonNullable<Pick<EmbeddedVoiceConfig, 'auth'>>\n >();\n stableConfig.current = config;\n\n useEffect(() => {\n let unmount: () => void;\n if (!embeddedVoice.current && stableConfig.current) {\n embeddedVoice.current = EA.create({\n onMessage: onMessageHandler.current,\n onClose: onCloseHandler.current,\n ...stableConfig.current,\n });\n unmount = embeddedVoice.current.mount();\n }\n\n return () => {\n unmount?.();\n embeddedVoice.current = null;\n };\n }, []);\n\n useEffect(() => {\n if (isEmbedOpen) {\n embeddedVoice.current?.openEmbed();\n }\n }, [isEmbedOpen]);\n\n return null;\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,yBAKO;AACP,mBAAkC;AAS3B,IAAM,gBAAgB,CAAC,UAA8B;AAC1D,QAAM,EAAE,WAAW,aAAa,SAAS,GAAG,OAAO,IAAI;AACvD,QAAM,oBAAgB,qBAAkB,IAAI;AAC5C,QAAM,uBAAmB,qBAA6C;AACtE,mBAAiB,UAAU;AAE3B,QAAM,qBAAiB,qBAAiC;AACxD,iBAAe,UAAU;AAEzB,QAAM,mBAAe,qBAGnB;AACF,eAAa,UAAU;AAEvB,8BAAU,MAAM;AACd,QAAI;AACJ,QAAI,CAAC,cAAc,WAAW,aAAa,SAAS;AAClD,oBAAc,UAAU,mBAAAA,cAAG,OAAO;AAAA,QAChC,WAAW,iBAAiB;AAAA,QAC5B,SAAS,eAAe;AAAA,QACxB,GAAG,aAAa;AAAA,MAClB,CAAC;AACD,gBAAU,cAAc,QAAQ,MAAM;AAAA,IACxC;AAEA,WAAO,MAAM;AACX,gBAAU;AACV,oBAAc,UAAU;AAAA,IAC1B;AAAA,EACF,GAAG,CAAC,CAAC;AAEL,8BAAU,MAAM;AACd,QAAI,aAAa;AACf,oBAAc,SAAS,UAAU;AAAA,IACnC;AAAA,EACF,GAAG,CAAC,WAAW,CAAC;AAEhB,SAAO;AACT;;;ADpDA,IAAAC,sBASO;","names":["EA","import_voice_embed"]}
package/dist/index.mjs
ADDED
@@ -0,0 +1,62 @@
'use client';

// src/lib/EmbeddedVoice.ts
import {
  EmbeddedVoice as EA
} from "@humeai/voice-embed";
import { useEffect, useRef } from "react";
var EmbeddedVoice = (props) => {
  const { onMessage, isEmbedOpen, onClose, ...config } = props;
  const embeddedVoice = useRef(null);
  const onMessageHandler = useRef();
  onMessageHandler.current = onMessage;
  const onCloseHandler = useRef();
  onCloseHandler.current = onClose;
  const stableConfig = useRef();
  stableConfig.current = config;
  useEffect(() => {
    let unmount;
    if (!embeddedVoice.current && stableConfig.current) {
      embeddedVoice.current = EA.create({
        onMessage: onMessageHandler.current,
        onClose: onCloseHandler.current,
        ...stableConfig.current
      });
      unmount = embeddedVoice.current.mount();
    }
    return () => {
      unmount?.();
      embeddedVoice.current = null;
    };
  }, []);
  useEffect(() => {
    if (isEmbedOpen) {
      embeddedVoice.current?.openEmbed();
    }
  }, [isEmbedOpen]);
  return null;
};

// src/index.ts
import {
  COLLAPSE_WIDGET_ACTION,
  EXPAND_WIDGET_ACTION,
  MINIMIZE_WIDGET_ACTION,
  RESIZE_FRAME_ACTION,
  TRANSCRIPT_MESSAGE_ACTION,
  WIDGET_IFRAME_IS_READY_ACTION,
  parseClientToFrameAction,
  LanguageModelOption
} from "@humeai/voice-embed";
export {
  COLLAPSE_WIDGET_ACTION,
  EXPAND_WIDGET_ACTION,
  EmbeddedVoice,
  LanguageModelOption,
  MINIMIZE_WIDGET_ACTION,
  RESIZE_FRAME_ACTION,
  TRANSCRIPT_MESSAGE_ACTION,
  WIDGET_IFRAME_IS_READY_ACTION,
  parseClientToFrameAction
};
//# sourceMappingURL=index.mjs.map
package/dist/index.mjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/lib/EmbeddedVoice.ts","../src/index.ts"],"sourcesContent":["import {\n type CloseHandler,\n EmbeddedVoice as EA,\n type EmbeddedVoiceConfig,\n type TranscriptMessageHandler,\n} from '@humeai/voice-embed';\nimport { useEffect, useRef } from 'react';\n\ntype EmbeddedVoiceProps = Partial<EmbeddedVoiceConfig> &\n NonNullable<Pick<EmbeddedVoiceConfig, 'auth'>> & {\n onMessage?: TranscriptMessageHandler;\n onClose?: CloseHandler;\n isEmbedOpen: boolean;\n };\n\nexport const EmbeddedVoice = (props: EmbeddedVoiceProps) => {\n const { onMessage, isEmbedOpen, onClose, ...config } = props;\n const embeddedVoice = useRef<EA | null>(null);\n const onMessageHandler = useRef<TranscriptMessageHandler | undefined>();\n onMessageHandler.current = onMessage;\n\n const onCloseHandler = useRef<CloseHandler | undefined>();\n onCloseHandler.current = onClose;\n\n const stableConfig = useRef<\n Partial<EmbeddedVoiceConfig> &\n NonNullable<Pick<EmbeddedVoiceConfig, 'auth'>>\n >();\n stableConfig.current = config;\n\n useEffect(() => {\n let unmount: () => void;\n if (!embeddedVoice.current && stableConfig.current) {\n embeddedVoice.current = EA.create({\n onMessage: onMessageHandler.current,\n onClose: onCloseHandler.current,\n ...stableConfig.current,\n });\n unmount = embeddedVoice.current.mount();\n }\n\n return () => {\n unmount?.();\n embeddedVoice.current = null;\n };\n }, []);\n\n useEffect(() => {\n if (isEmbedOpen) {\n embeddedVoice.current?.openEmbed();\n }\n }, [isEmbedOpen]);\n\n return null;\n};\n","export * from './lib/EmbeddedVoice';\n\nexport {\n COLLAPSE_WIDGET_ACTION,\n EXPAND_WIDGET_ACTION,\n MINIMIZE_WIDGET_ACTION,\n RESIZE_FRAME_ACTION,\n TRANSCRIPT_MESSAGE_ACTION,\n WIDGET_IFRAME_IS_READY_ACTION,\n parseClientToFrameAction,\n LanguageModelOption,\n} from '@humeai/voice-embed';\n\nexport type {\n AgentTranscriptMessage,\n Config,\n FrameToClientAction,\n JSONMessage,\n UserTranscriptMessage,\n WindowDimensions,\n EmotionScores,\n} from '@humeai/voice-embed';\n"],"mappings":";;;AAAA;AAAA,EAEE,iBAAiB;AAAA,OAGZ;AACP,SAAS,WAAW,cAAc;AAS3B,IAAM,gBAAgB,CAAC,UAA8B;AAC1D,QAAM,EAAE,WAAW,aAAa,SAAS,GAAG,OAAO,IAAI;AACvD,QAAM,gBAAgB,OAAkB,IAAI;AAC5C,QAAM,mBAAmB,OAA6C;AACtE,mBAAiB,UAAU;AAE3B,QAAM,iBAAiB,OAAiC;AACxD,iBAAe,UAAU;AAEzB,QAAM,eAAe,OAGnB;AACF,eAAa,UAAU;AAEvB,YAAU,MAAM;AACd,QAAI;AACJ,QAAI,CAAC,cAAc,WAAW,aAAa,SAAS;AAClD,oBAAc,UAAU,GAAG,OAAO;AAAA,QAChC,WAAW,iBAAiB;AAAA,QAC5B,SAAS,eAAe;AAAA,QACxB,GAAG,aAAa;AAAA,MAClB,CAAC;AACD,gBAAU,cAAc,QAAQ,MAAM;AAAA,IACxC;AAEA,WAAO,MAAM;AACX,gBAAU;AACV,oBAAc,UAAU;AAAA,IAC1B;AAAA,EACF,GAAG,CAAC,CAAC;AAEL,YAAU,MAAM;AACd,QAAI,aAAa;AACf,oBAAc,SAAS,UAAU;AAAA,IACnC;AAAA,EACF,GAAG,CAAC,WAAW,CAAC;AAEhB,SAAO;AACT;;;ACpDA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;","names":[]}
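Both builds are compiled from the same `src/lib/EmbeddedVoice.ts` (visible in the source maps), which uses the "latest ref" pattern: `onMessage`, `onClose`, and the remaining config are copied into refs on every render, so the mount effect can run exactly once (empty dependency array) while still reading the caller's freshest handlers. A distilled, self-contained sketch of that pattern (the hook name and the interval stand-in are illustrative, not part of the package API):

```ts
import { useEffect, useRef } from 'react';

function useWidget(onMessage?: (msg: unknown) => void) {
  const onMessageRef = useRef(onMessage);
  onMessageRef.current = onMessage; // refreshed each render, without re-running the effect

  useEffect(() => {
    // Mount once; read the freshest handler through the ref at call time.
    const id = setInterval(() => onMessageRef.current?.({ tick: Date.now() }), 1000);
    return () => clearInterval(id); // cleanup mirrors EmbeddedVoice's unmount()
  }, []); // empty deps: a new `onMessage` identity never remounts the widget
}
```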
package/package.json
ADDED
@@ -0,0 +1,53 @@
{
  "name": "@humeai/voice-embed-react",
  "version": "0.0.0-beta.1",
  "description": "",
  "main": "./dist/index.js",
  "module": "./dist/index.mjs",
  "types": "./dist/index.d.ts",
  "repository": {
    "type": "git",
    "url": "https://github.com/HumeAI/voice-sdk.git",
    "directory": "packages/react"
  },
  "files": [
    "package.json",
    "dist"
  ],
  "keywords": [],
  "author": "",
  "license": "ISC",
  "dependencies": {
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
    "@humeai/voice-embed": "0.0.0-beta.1"
  },
  "devDependencies": {
    "@testing-library/react": "^14.2.1",
    "@testing-library/react-hooks": "^8.0.1",
    "@types/eslint": "^8.56.2",
    "@types/node": "^20.11.19",
    "@types/react": "^18.2.56",
    "@types/react-dom": "^18.2.19",
    "@typescript-eslint/eslint-plugin": "^6.21.0",
    "@typescript-eslint/parser": "^6.21.0",
    "eslint": "^8.56.0",
    "eslint-plugin-react": "^7.33.2",
    "eslint-plugin-react-hooks": "^4.6.0",
    "jsdom": "^24.0.0",
    "tsup": "^8.0.2",
    "typescript": "^5.3.3",
    "vitest": "^1.3.0",
    "@humeai/eslint-config": "0.0.1",
    "@humeai/typescript-config": "0.0.0"
  },
  "browserslist": [
    "last 2 Chrome versions, last 2 iOS major versions, Firefox ESR, not dead"
  ],
  "scripts": {
    "build": "tsup",
    "dev": "tsup --watch",
    "lint": "eslint ./src",
    "pack": "npm pack"
  }
}
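The `main`/`module`/`types` triple publishes a dual-format build: CommonJS at `dist/index.js`, an ES module at `dist/index.mjs`, and shared type declarations. The `0 && (module.exports = {...})` annotation in the CJS bundle lets Node's cjs-module-lexer detect named exports when that file is `import`ed from ESM. How consumers resolve it, as a sketch (exact behavior depends on the bundler, since no `exports` map is declared):

```ts
// ESM consumers (bundlers honoring the "module" field) load dist/index.mjs:
import { EmbeddedVoice } from '@humeai/voice-embed-react';

// CommonJS consumers (Node resolving "main") load dist/index.js:
// const { EmbeddedVoice } = require('@humeai/voice-embed-react');
```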