@simfinity/constellation-client 1.0.9 → 1.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -33,6 +33,7 @@ const client = new WebClient({
33
33
  key: "my-key",
34
34
  llm: "openai",
35
35
  model: "gpt-4o-realtime-preview-2024-12-17",
36
+ voiceEnabled: true,
36
37
  });
37
38
 
38
39
  try {
@@ -83,6 +84,9 @@ export interface WebClientConfig {
83
84
  key: string;
84
85
  llm: LlmType;
85
86
  model: string;
87
+ voiceEnabled?: boolean;
88
+ voiceName?: string;
89
+ tools?: string[];
86
90
  }
87
91
  ```
88
92
 
package/dist/index.cjs CHANGED
@@ -44,8 +44,8 @@ var WebClient = class {
44
44
  * @exception
45
45
  * This method throws new Error(...) if unable to execute successfully for any reason.
46
46
  */
47
- async fetchAvailableTools() {
48
- const response = await fetch(`${this.config.sessionEndpoint}/available_tools`, {
47
+ async fetchSupportedTools() {
48
+ const response = await fetch(`${this.config.sessionEndpoint}/supported_tools`, {
49
49
  method: "GET",
50
50
  headers: {
51
51
  "Authorization": `Bearer ${this.config.key}`,
@@ -72,20 +72,16 @@ var WebClient = class {
72
72
  * @remarks
73
73
  * A session MUST exist first in order to connect the stream next.
74
74
  *
75
- * @param audioEnabled whether this session can receive & produce audio as well as text
76
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
77
- * @param tools a list of tool names to be included in this session. These must be matching the
78
- * names returned by 'fetchAvailableTools()'.
79
- *
80
75
  * @exception
81
76
  * This method throws new Error(...) if unable to execute successfully for any reason.
82
77
  */
83
- async startSession(audioEnabled, voiceName, tools = []) {
78
+ async startSession() {
79
+ var _a;
84
80
  const prepareBody = {
85
81
  llmProvider: this.config.llm,
86
- audioEnabled,
87
- voiceName,
88
- includeTools: tools
82
+ audioEnabled: (_a = this.config.voiceEnabled) != null ? _a : false,
83
+ voiceName: this.config.voiceName,
84
+ includeTools: this.config.tools
89
85
  };
90
86
  const response = await fetch(`${this.config.sessionEndpoint}/prepare_session`, {
91
87
  method: "POST",
package/dist/index.d.cts CHANGED
@@ -10,6 +10,10 @@ type LlmType = "openai";
10
10
  * @key : Simfinity API secret key granting access to the server API
11
11
  * @llm : which LLM service to connect to
12
12
  * @model : depends on the LLM service. This is the model name as defined by the LLM service
13
+ * @voiceEnabled whether this session can receive & produce audio as well as text. Default is false.
14
+ * @voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
15
+ * @tools a list of tool names to be included in this session. These must match the
16
+ * names returned by 'fetchSupportedTools()'.
13
17
  *
14
18
  * @example
15
19
  * ```TypeScript
@@ -18,7 +22,8 @@ type LlmType = "openai";
18
22
  * streamingEndpoint: "wss://simfinity.constellation.com:30003",
19
23
  * key: "some-secret-key"
20
24
  * llm: "openai",
21
- * model: "gpt-4o-realtime-preview-2024-12-17"
25
+ * model: "gpt-4o-realtime-preview-2024-12-17",
26
+ * voiceEnabled: false,
22
27
  * }
23
28
  * ```
24
29
  */
@@ -28,6 +33,9 @@ interface WebClientConfig {
28
33
  key: string;
29
34
  llm: LlmType;
30
35
  model: string;
36
+ voiceEnabled?: boolean;
37
+ voiceName?: string;
38
+ tools?: string[];
31
39
  }
32
40
  /**
33
41
  * System settings influencing the model behavior:
@@ -106,7 +114,7 @@ declare class WebClient {
106
114
  * @exception
107
115
  * This method throws new Error(...) if unable to execute successfully for any reason.
108
116
  */
109
- fetchAvailableTools(): Promise<ToolDescription[]>;
117
+ fetchSupportedTools(): Promise<ToolDescription[]>;
110
118
  /**
111
119
  * Start a persistent chat room on the server, allowing for re-connection,
112
120
  * when the streaming connection is lost. Once a session was started it must
@@ -115,15 +123,10 @@ declare class WebClient {
115
123
  * @remarks
116
124
  * A session MUST exist first in order to connect the stream next.
117
125
  *
118
- * @param audioEnabled whether this session can receive & produce audio as well as text
119
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
120
- * @param tools a list of tool names to be included in this session. These must be matching the
121
- * names returned by 'fetchAvailableTools()'.
122
- *
123
126
  * @exception
124
127
  * This method throws new Error(...) if unable to execute successfully for any reason.
125
128
  */
126
- startSession(audioEnabled: boolean, voiceName?: string, tools?: string[]): Promise<void>;
129
+ startSession(): Promise<void>;
127
130
  /**
128
131
  * Close an opened, persistent chat room, effectively killing the streaming as well if still opened.
129
132
  * If there is no active session, this method does nothing.
package/dist/index.d.ts CHANGED
@@ -10,6 +10,10 @@ type LlmType = "openai";
10
10
  * @key : Simfinity API secret key granting access to the server API
11
11
  * @llm : which LLM service to connect to
12
12
  * @model : depends on the LLM service. This is the model name as defined by the LLM service
13
+ * @voiceEnabled whether this session can receive & produce audio as well as text. Default is false.
14
+ * @voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
15
+ * @tools a list of tool names to be included in this session. These must match the
16
+ * names returned by 'fetchSupportedTools()'.
13
17
  *
14
18
  * @example
15
19
  * ```TypeScript
@@ -18,7 +22,8 @@ type LlmType = "openai";
18
22
  * streamingEndpoint: "wss://simfinity.constellation.com:30003",
19
23
  * key: "some-secret-key"
20
24
  * llm: "openai",
21
- * model: "gpt-4o-realtime-preview-2024-12-17"
25
+ * model: "gpt-4o-realtime-preview-2024-12-17",
26
+ * voiceEnabled: false,
22
27
  * }
23
28
  * ```
24
29
  */
@@ -28,6 +33,9 @@ interface WebClientConfig {
28
33
  key: string;
29
34
  llm: LlmType;
30
35
  model: string;
36
+ voiceEnabled?: boolean;
37
+ voiceName?: string;
38
+ tools?: string[];
31
39
  }
32
40
  /**
33
41
  * System settings influencing the model behavior:
@@ -106,7 +114,7 @@ declare class WebClient {
106
114
  * @exception
107
115
  * This method throws new Error(...) if unable to execute successfully for any reason.
108
116
  */
109
- fetchAvailableTools(): Promise<ToolDescription[]>;
117
+ fetchSupportedTools(): Promise<ToolDescription[]>;
110
118
  /**
111
119
  * Start a persistent chat room on the server, allowing for re-connection,
112
120
  * when the streaming connection is lost. Once a session was started it must
@@ -115,15 +123,10 @@ declare class WebClient {
115
123
  * @remarks
116
124
  * A session MUST exist first in order to connect the stream next.
117
125
  *
118
- * @param audioEnabled whether this session can receive & produce audio as well as text
119
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
120
- * @param tools a list of tool names to be included in this session. These must be matching the
121
- * names returned by 'fetchAvailableTools()'.
122
- *
123
126
  * @exception
124
127
  * This method throws new Error(...) if unable to execute successfully for any reason.
125
128
  */
126
- startSession(audioEnabled: boolean, voiceName?: string, tools?: string[]): Promise<void>;
129
+ startSession(): Promise<void>;
127
130
  /**
128
131
  * Close an opened, persistent chat room, effectively killing the streaming as well if still opened.
129
132
  * If there is no active session, this method does nothing.
package/dist/index.js CHANGED
@@ -18,8 +18,8 @@ var WebClient = class {
18
18
  * @exception
19
19
  * This method throws new Error(...) if unable to execute successfully for any reason.
20
20
  */
21
- async fetchAvailableTools() {
22
- const response = await fetch(`${this.config.sessionEndpoint}/available_tools`, {
21
+ async fetchSupportedTools() {
22
+ const response = await fetch(`${this.config.sessionEndpoint}/supported_tools`, {
23
23
  method: "GET",
24
24
  headers: {
25
25
  "Authorization": `Bearer ${this.config.key}`,
@@ -46,20 +46,16 @@ var WebClient = class {
46
46
  * @remarks
47
47
  * A session MUST exist first in order to connect the stream next.
48
48
  *
49
- * @param audioEnabled whether this session can receive & produce audio as well as text
50
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
51
- * @param tools a list of tool names to be included in this session. These must be matching the
52
- * names returned by 'fetchAvailableTools()'.
53
- *
54
49
  * @exception
55
50
  * This method throws new Error(...) if unable to execute successfully for any reason.
56
51
  */
57
- async startSession(audioEnabled, voiceName, tools = []) {
52
+ async startSession() {
53
+ var _a;
58
54
  const prepareBody = {
59
55
  llmProvider: this.config.llm,
60
- audioEnabled,
61
- voiceName,
62
- includeTools: tools
56
+ audioEnabled: (_a = this.config.voiceEnabled) != null ? _a : false,
57
+ voiceName: this.config.voiceName,
58
+ includeTools: this.config.tools
63
59
  };
64
60
  const response = await fetch(`${this.config.sessionEndpoint}/prepare_session`, {
65
61
  method: "POST",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@simfinity/constellation-client",
3
- "version": "1.0.9",
3
+ "version": "1.0.11",
4
4
  "type": "module",
5
5
  "exports": {
6
6
  ".": {