@simfinity/constellation-client 1.0.21 → 1.0.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -39,24 +39,20 @@ var WebClient = class {
39
39
  * @remarks
40
40
  * A session MUST exist first in order to connect the stream next.
41
41
  *
42
- * @param voiceEnabled whether this Model-session can receive & produce audio as well as text
43
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
44
- * @param behaviour model behaviour parameters. This is optional: default settings
45
- * will be used if omitted and can be changed mid-session with configureSession().
46
- * WARNING: some LLMs may not support mid-session updates, thus it is
47
- * advised and preferred to provide them here at startSession time.
42
+ * @param params the configuration options to start this new session
48
43
  *
49
44
  * @exception
50
45
  * This method throws new Error(...) if unable to execute successfully for any reason.
51
46
  */
52
- async startSession(voiceEnabled, voiceName, behaviour) {
47
+ async startSession(params) {
48
+ var _a, _b, _c;
53
49
  const prepareBody = {
54
- llmProvider: this.config.llm,
55
- audioEnabled: voiceEnabled,
56
- voiceName,
57
- temperature: behaviour == null ? void 0 : behaviour.temperature,
58
- instructions: behaviour == null ? void 0 : behaviour.instructions,
59
- maxResponseToken: behaviour == null ? void 0 : behaviour.maxResponseToken
50
+ llmProvider: params.llmProvider,
51
+ audioEnabled: params.voiceEnabled,
52
+ voiceName: params.voiceName,
53
+ temperature: (_a = params.behaviour) == null ? void 0 : _a.temperature,
54
+ instructions: (_b = params.behaviour) == null ? void 0 : _b.instructions,
55
+ maxResponseToken: (_c = params.behaviour) == null ? void 0 : _c.maxResponseToken
60
56
  };
61
57
  const response = await fetch(`${this.config.sessionEndpoint}/prepare_session`, {
62
58
  method: "POST",
package/dist/index.d.cts CHANGED
@@ -1,44 +1,81 @@
1
1
  /**
2
- * Available server-side LLM types
2
+ * Supported server-side LLM types
3
3
  */
4
- type LlmType = "openai" | "gemini";
4
+ type LlmName = "openai" | "gemini";
5
5
  /**
6
6
  * Configuration required to initiate a connection with the stream server:
7
7
  *
8
- * @sessionEndpoint : REST base URL to the constellation API for managing sessions
9
- * @streamingEndpoint : WebSocket endpoint to the constellation server
10
- * @key : Simfinity API secret key granting access to the server API
11
- * @llm : which LLM service to connect to
12
- * @model : depends on the LLM service. This is the model name as define by the LLM service
13
- *
14
8
  * @example
15
9
  * ```TypeScript
16
10
  * {
17
11
  * sessionEndpoint: "https://simfinity.constellation.com",
18
12
  * streamingEndpoint: "wss://simfinity.constellation.com:30003",
19
13
  * key: "some-secret-key"
20
- * llm: "openai",
21
- * model: "gpt-4o-realtime-preview-2024-12-17",
22
14
  * }
23
15
  * ```
24
16
  */
25
17
  interface WebClientConfig {
18
+ /**
19
+ * HTTP REST base URL to the constellation API for managing sessions.
20
+ */
26
21
  sessionEndpoint: string;
22
+ /**
23
+ * WebSocket endpoint to the constellation server.
24
+ */
27
25
  streamingEndpoint: string;
26
+ /**
27
+ * Simfinity API secret key granting access to the server API.
28
+ */
28
29
  key: string;
29
- llm: LlmType;
30
30
  }
31
31
  /**
32
- * System settings influencing the model behavior:
33
- * @temperature: LLM creativity factor in 0-1 range
34
- * @instructions: system instructions giving context, rules and directions to guide the LLM behavior
35
- * @maxResponseToken: 1-4096 value, maximum number of token used for a single response. Undefined means unlimited.
32
+ * System settings influencing how the model thinks and responds.
36
33
  */
37
34
  interface SessionConfig {
35
+ /**
36
+ * LLM creativity factor in 0-1 range
37
+ */
38
38
  temperature?: number;
39
+ /**
40
+ * System instructions giving context, rules and directions to drive the LLM behavior
41
+ */
39
42
  instructions?: string;
43
+ /**
44
+ * 1-4096 value, maximum number of tokens used for a single response. Undefined or 0 means unlimited.
45
+ */
40
46
  maxResponseToken?: number;
41
47
  }
48
+ /**
49
+ * Options when starting a new session. Some properties are immutable once
50
+ * the session has started, however the SessionConfig settings may, depending on
51
+ * the LLM provider chosen, be modified mid-session.
52
+ */
53
+ interface SessionStartParameters {
54
+ /**
55
+ * Name of one of the supported LLMs.
56
+ */
57
+ llmProvider: LlmName;
58
+ /**
59
+ * When voiceEnabled is set to true, the LLM session is configured to
60
+ * receive and send audio streams and produce a transcript of the audio conversation.
61
+ */
62
+ voiceEnabled: boolean;
63
+ /**
64
+ * Optional, the name of a voice supported by the LLM.
65
+ * Default values are 'Alloy' for OpenAI and 'Aoede' for Gemini.
66
+ * WARNING: constellation will accept any value; it is up to the client to choose
67
+ * a valid voice name for the LLM provider chosen.
68
+ */
69
+ voiceName?: string;
70
+ /**
71
+ * Behavioral options affecting how the model thinks and responds.
72
+ * These options may be modified mid-session with configureSession(), but only
73
+ * if the chosen LLM provider allows it!
74
+ * WARNING: some LLMs like Gemini may not support mid-session updates, thus it is
75
+ * advised and preferred to provide them here at startSession time.
76
+ */
77
+ behaviour?: SessionConfig;
78
+ }
42
79
  /**
43
80
  * Snapshot of the current settings run by the model.
44
81
  */
@@ -155,17 +192,12 @@ declare class WebClient {
155
192
  * @remarks
156
193
  * A session MUST exist first in order to connect the stream next.
157
194
  *
158
- * @param voiceEnabled whether this Model-session can receive & produce audio as well as text
159
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
160
- * @param behaviour model behaviour parameters. This is optional: default settings
161
- * will be used if omitted and can be changed mid-session with configureSession().
162
- * WARNING: some LLMs may not support mid-session updates, thus it is
163
- * advised and preferred to provide them here at startSession time.
195
+ * @param params the configuration options to start this new session
164
196
  *
165
197
  * @exception
166
198
  * This method throws new Error(...) if unable to execute successfully for any reason.
167
199
  */
168
- startSession(voiceEnabled: boolean, voiceName?: string, behaviour?: SessionConfig): Promise<void>;
200
+ startSession(params: SessionStartParameters): Promise<void>;
169
201
  /**
170
202
  * Close an opened, persistent chat room, effectively killing the streaming as well if still opened.
171
203
  * If there is no active session, this method does nothing.
@@ -274,4 +306,4 @@ declare class WebClient {
274
306
  private send;
275
307
  }
276
308
 
277
- export { type EventHandlers, type LlmType, type SessionConfig, WebClient, type WebClientConfig };
309
+ export { type EventHandlers, type LlmName, type SessionConfig, type SessionSettings, type SessionStartParameters, WebClient, type WebClientConfig };
package/dist/index.d.ts CHANGED
@@ -1,44 +1,81 @@
1
1
  /**
2
- * Available server-side LLM types
2
+ * Supported server-side LLM types
3
3
  */
4
- type LlmType = "openai" | "gemini";
4
+ type LlmName = "openai" | "gemini";
5
5
  /**
6
6
  * Configuration required to initiate a connection with the stream server:
7
7
  *
8
- * @sessionEndpoint : REST base URL to the constellation API for managing sessions
9
- * @streamingEndpoint : WebSocket endpoint to the constellation server
10
- * @key : Simfinity API secret key granting access to the server API
11
- * @llm : which LLM service to connect to
12
- * @model : depends on the LLM service. This is the model name as define by the LLM service
13
- *
14
8
  * @example
15
9
  * ```TypeScript
16
10
  * {
17
11
  * sessionEndpoint: "https://simfinity.constellation.com",
18
12
  * streamingEndpoint: "wss://simfinity.constellation.com:30003",
19
13
  * key: "some-secret-key"
20
- * llm: "openai",
21
- * model: "gpt-4o-realtime-preview-2024-12-17",
22
14
  * }
23
15
  * ```
24
16
  */
25
17
  interface WebClientConfig {
18
+ /**
19
+ * HTTP REST base URL to the constellation API for managing sessions.
20
+ */
26
21
  sessionEndpoint: string;
22
+ /**
23
+ * WebSocket endpoint to the constellation server.
24
+ */
27
25
  streamingEndpoint: string;
26
+ /**
27
+ * Simfinity API secret key granting access to the server API.
28
+ */
28
29
  key: string;
29
- llm: LlmType;
30
30
  }
31
31
  /**
32
- * System settings influencing the model behavior:
33
- * @temperature: LLM creativity factor in 0-1 range
34
- * @instructions: system instructions giving context, rules and directions to guide the LLM behavior
35
- * @maxResponseToken: 1-4096 value, maximum number of token used for a single response. Undefined means unlimited.
32
+ * System settings influencing how the model thinks and responds.
36
33
  */
37
34
  interface SessionConfig {
35
+ /**
36
+ * LLM creativity factor in 0-1 range
37
+ */
38
38
  temperature?: number;
39
+ /**
40
+ * System instructions giving context, rules and directions to drive the LLM behavior
41
+ */
39
42
  instructions?: string;
43
+ /**
44
+ * 1-4096 value, maximum number of tokens used for a single response. Undefined or 0 means unlimited.
45
+ */
40
46
  maxResponseToken?: number;
41
47
  }
48
+ /**
49
+ * Options when starting a new session. Some properties are immutable once
50
+ * the session has started, however the SessionConfig settings may, depending on
51
+ * the LLM provider chosen, be modified mid-session.
52
+ */
53
+ interface SessionStartParameters {
54
+ /**
55
+ * Name of one of the supported LLMs.
56
+ */
57
+ llmProvider: LlmName;
58
+ /**
59
+ * When voiceEnabled is set to true, the LLM session is configured to
60
+ * receive and send audio streams and produce a transcript of the audio conversation.
61
+ */
62
+ voiceEnabled: boolean;
63
+ /**
64
+ * Optional, the name of a voice supported by the LLM.
65
+ * Default values are 'Alloy' for OpenAI and 'Aoede' for Gemini.
66
+ * WARNING: constellation will accept any value; it is up to the client to choose
67
+ * a valid voice name for the LLM provider chosen.
68
+ */
69
+ voiceName?: string;
70
+ /**
71
+ * Behavioral options affecting how the model thinks and responds.
72
+ * These options may be modified mid-session with configureSession(), but only
73
+ * if the chosen LLM provider allows it!
74
+ * WARNING: some LLMs like Gemini may not support mid-session updates, thus it is
75
+ * advised and preferred to provide them here at startSession time.
76
+ */
77
+ behaviour?: SessionConfig;
78
+ }
42
79
  /**
43
80
  * Snapshot of the current settings run by the model.
44
81
  */
@@ -155,17 +192,12 @@ declare class WebClient {
155
192
  * @remarks
156
193
  * A session MUST exist first in order to connect the stream next.
157
194
  *
158
- * @param voiceEnabled whether this Model-session can receive & produce audio as well as text
159
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
160
- * @param behaviour model behaviour parameters. This is optional: default settings
161
- * will be used if omitted and can be changed mid-session with configureSession().
162
- * WARNING: some LLMs may not support mid-session updates, thus it is
163
- * advised and preferred to provide them here at startSession time.
195
+ * @param params the configuration options to start this new session
164
196
  *
165
197
  * @exception
166
198
  * This method throws new Error(...) if unable to execute successfully for any reason.
167
199
  */
168
- startSession(voiceEnabled: boolean, voiceName?: string, behaviour?: SessionConfig): Promise<void>;
200
+ startSession(params: SessionStartParameters): Promise<void>;
169
201
  /**
170
202
  * Close an opened, persistent chat room, effectively killing the streaming as well if still opened.
171
203
  * If there is no active session, this method does nothing.
@@ -274,4 +306,4 @@ declare class WebClient {
274
306
  private send;
275
307
  }
276
308
 
277
- export { type EventHandlers, type LlmType, type SessionConfig, WebClient, type WebClientConfig };
309
+ export { type EventHandlers, type LlmName, type SessionConfig, type SessionSettings, type SessionStartParameters, WebClient, type WebClientConfig };
package/dist/index.js CHANGED
@@ -13,24 +13,20 @@ var WebClient = class {
13
13
  * @remarks
14
14
  * A session MUST exist first in order to connect the stream next.
15
15
  *
16
- * @param voiceEnabled whether this Model-session can receive & produce audio as well as text
17
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
18
- * @param behaviour model behaviour parameters. This is optional: default settings
19
- * will be used if omitted and can be changed mid-session with configureSession().
20
- * WARNING: some LLMs may not support mid-session updates, thus it is
21
- * advised and preferred to provide them here at startSession time.
16
+ * @param params the configuration options to start this new session
22
17
  *
23
18
  * @exception
24
19
  * This method throws new Error(...) if unable to execute successfully for any reason.
25
20
  */
26
- async startSession(voiceEnabled, voiceName, behaviour) {
21
+ async startSession(params) {
22
+ var _a, _b, _c;
27
23
  const prepareBody = {
28
- llmProvider: this.config.llm,
29
- audioEnabled: voiceEnabled,
30
- voiceName,
31
- temperature: behaviour == null ? void 0 : behaviour.temperature,
32
- instructions: behaviour == null ? void 0 : behaviour.instructions,
33
- maxResponseToken: behaviour == null ? void 0 : behaviour.maxResponseToken
24
+ llmProvider: params.llmProvider,
25
+ audioEnabled: params.voiceEnabled,
26
+ voiceName: params.voiceName,
27
+ temperature: (_a = params.behaviour) == null ? void 0 : _a.temperature,
28
+ instructions: (_b = params.behaviour) == null ? void 0 : _b.instructions,
29
+ maxResponseToken: (_c = params.behaviour) == null ? void 0 : _c.maxResponseToken
34
30
  };
35
31
  const response = await fetch(`${this.config.sessionEndpoint}/prepare_session`, {
36
32
  method: "POST",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@simfinity/constellation-client",
3
- "version": "1.0.21",
3
+ "version": "1.0.22",
4
4
  "type": "module",
5
5
  "exports": {
6
6
  ".": {