retell-sync-cli 1.1.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,382 +0,0 @@
- import { afterEach, beforeEach, describe, expect, test } from "bun:test"
- import fs from "node:fs/promises"
- import os from "node:os"
- import path from "node:path"
- import { getLocalState } from "../src/lib/agents"
-
- describe("getLocalState", () => {
-   let tmpDir: string
-
-   beforeEach(async () => {
-     tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "retell-sync-test-"))
-   })
-
-   afterEach(async () => {
-     await fs.rm(tmpDir, { recursive: true, force: true })
-   })
-
-   test("hydrates retell-llm agent with file placeholders resolved", async () => {
-     const agentsDir = path.join(tmpDir, "agents")
-     const agentDir = path.join(agentsDir, "test_llm_agent_abc123")
-
-     await fs.mkdir(agentDir, { recursive: true })
-
-     await Bun.write(
-       path.join(agentDir, ".agent.json"),
-       JSON.stringify({
-         id: "agent_abc123456789",
-         version: 1,
-         responseEngineVersion: 1,
-       }),
-     )
-
-     await Bun.write(
-       path.join(agentDir, "config.json"),
-       JSON.stringify(
-         {
-           channel: "voice",
-           agent_name: "Test LLM Agent",
-           response_engine: {
-             type: "retell-llm",
-             llm_id: "llm_test123",
-             version: 1,
-           },
-           voice_id: "11labs-Chloe",
-         },
-         null,
-         2,
-       ),
-     )
-
-     await Bun.write(
-       path.join(agentDir, "llm.json"),
-       JSON.stringify(
-         {
-           model: "gpt-4.1",
-           general_prompt: "file://./general_prompt.md",
-           begin_message: "Hello!",
-         },
-         null,
-         2,
-       ),
-     )
-
-     await Bun.write(
-       path.join(agentDir, "general_prompt.md"),
-       `---
- some_meta: value
- ---
-
- You are a helpful assistant.
-
- Be friendly and professional.
- `,
-     )
-
-     const result = await getLocalState({ agentsDir })
-
-     expect(result.voiceAgents).toHaveLength(1)
-     expect(result.llms).toHaveLength(1)
-     expect(result.conversationFlows).toHaveLength(0)
-
-     const agent = result.voiceAgents[0]!
-     expect(agent.agent_name).toBe("Test LLM Agent")
-     expect(agent.response_engine.type).toBe("retell-llm")
-     expect(agent.voice_id).toBe("11labs-Chloe")
-
-     const llm = result.llms[0]!
-     expect(llm.model).toBe("gpt-4.1")
-     expect(llm.begin_message).toBe("Hello!")
-     // Verify file placeholder was resolved
-     expect(llm.general_prompt).toContain("You are a helpful assistant.")
-     expect(llm.general_prompt).toContain("Be friendly and professional.")
-     // Frontmatter should be stripped
-     expect(llm.general_prompt).not.toContain("some_meta")
-   })
-
-   test("hydrates conversation-flow agent with nested file placeholders", async () => {
-     const agentsDir = path.join(tmpDir, "agents")
-     const agentDir = path.join(agentsDir, "test_flow_agent_def456")
-     const nodesDir = path.join(agentDir, "nodes")
-
-     await fs.mkdir(nodesDir, { recursive: true })
-
-     await Bun.write(
-       path.join(agentDir, ".agent.json"),
-       JSON.stringify({
-         id: "agent_def456789012",
-         version: 2,
-         responseEngineVersion: 2,
-       }),
-     )
-
-     await Bun.write(
-       path.join(agentDir, "config.json"),
-       JSON.stringify(
-         {
-           channel: "voice",
-           agent_name: "Test Flow Agent",
-           response_engine: {
-             type: "conversation-flow",
-             conversation_flow_id: "flow_test456",
-             version: 2,
-           },
-           voice_id: "openai-Alloy",
-         },
-         null,
-         2,
-       ),
-     )
-
-     await Bun.write(
-       path.join(agentDir, "conversation-flow.json"),
-       JSON.stringify(
-         {
-           global_prompt: "file://./global_prompt.md",
-           nodes: [
-             {
-               id: "start-node",
-               name: "Welcome",
-               type: "conversation",
-               instruction: {
-                 type: "static_text",
-                 text: "Hello, welcome!",
-               },
-               edges: [],
-             },
-             {
-               id: "node-inquiry",
-               name: "Handle Inquiry",
-               type: "conversation",
-               instruction: {
-                 type: "prompt",
-                 text: "file://./nodes/inquiry_abc123.md",
-               },
-               edges: [],
-             },
-           ],
-         },
-         null,
-         2,
-       ),
-     )
-
-     await Bun.write(
-       path.join(agentDir, "global_prompt.md"),
-       `---
- version: 1
- ---
-
- You are a professional receptionist.
-
- Always be polite and helpful.
- `,
-     )
-
-     await Bun.write(
-       path.join(nodesDir, "inquiry_abc123.md"),
-       `---
- nodeId: "node-inquiry"
- name: "Handle Inquiry"
- ---
-
- Answer the caller's question to the best of your ability.
-
- If you don't know, offer to transfer them.
- `,
-     )
-
-     const result = await getLocalState({ agentsDir })
-
-     expect(result.voiceAgents).toHaveLength(1)
-     expect(result.llms).toHaveLength(0)
-     expect(result.conversationFlows).toHaveLength(1)
-
-     const agent = result.voiceAgents[0]!
-     expect(agent.agent_name).toBe("Test Flow Agent")
-     expect(agent.response_engine.type).toBe("conversation-flow")
-
-     const flow = result.conversationFlows[0]!
-     // Global prompt placeholder should be resolved
-     expect(flow.global_prompt).toContain("You are a professional receptionist.")
-     expect(flow.global_prompt).toContain("Always be polite and helpful.")
-     expect(flow.global_prompt).not.toContain("version: 1")
-
-     // Node instruction placeholder should be resolved
-     const inquiryNode = flow.nodes?.find((n) => n.id === "node-inquiry")
-     expect(inquiryNode).toBeDefined()
-     expect(
-       inquiryNode?.type === "conversation" && inquiryNode.instruction.text,
-     ).toContain("Answer the caller's question")
-     expect(
-       inquiryNode?.type === "conversation" && inquiryNode.instruction.text,
-     ).toContain("offer to transfer them")
-     // Frontmatter should be stripped from node
-     expect(
-       inquiryNode?.type === "conversation" && inquiryNode.instruction.text,
-     ).not.toContain("nodeId")
-   })
-
-   test("handles multiple agents of different types", async () => {
-     const agentsDir = path.join(tmpDir, "agents")
-
-     // Create LLM agent
-     const llmAgentDir = path.join(agentsDir, "llm_agent_111111")
-     await fs.mkdir(llmAgentDir, { recursive: true })
-     await Bun.write(
-       path.join(llmAgentDir, ".agent.json"),
-       JSON.stringify({
-         id: "agent_llm_1",
-         version: 1,
-         responseEngineVersion: 1,
-       }),
-     )
-     await Bun.write(
-       path.join(llmAgentDir, "config.json"),
-       JSON.stringify({
-         agent_name: "LLM Agent",
-         response_engine: { type: "retell-llm", llm_id: "llm_1", version: 1 },
-       }),
-     )
-     await Bun.write(
-       path.join(llmAgentDir, "llm.json"),
-       JSON.stringify({ model: "gpt-4" }),
-     )
-
-     // Create Flow agent
-     const flowAgentDir = path.join(agentsDir, "flow_agent_222222")
-     await fs.mkdir(flowAgentDir, { recursive: true })
-     await Bun.write(
-       path.join(flowAgentDir, ".agent.json"),
-       JSON.stringify({
-         id: "agent_flow_1",
-         version: 1,
-         responseEngineVersion: 1,
-       }),
-     )
-     await Bun.write(
-       path.join(flowAgentDir, "config.json"),
-       JSON.stringify({
-         agent_name: "Flow Agent",
-         response_engine: {
-           type: "conversation-flow",
-           conversation_flow_id: "flow_1",
-           version: 1,
-         },
-       }),
-     )
-     await Bun.write(
-       path.join(flowAgentDir, "conversation-flow.json"),
-       JSON.stringify({ nodes: [] }),
-     )
-
-     const result = await getLocalState({ agentsDir })
-
-     expect(result.voiceAgents).toHaveLength(2)
-     expect(result.llms).toHaveLength(1)
-     expect(result.conversationFlows).toHaveLength(1)
-
-     const agentNames = result.voiceAgents.map((a) => a.agent_name).sort()
-     expect(agentNames).toEqual(["Flow Agent", "LLM Agent"])
-   })
-
-   test("skips directories without .agent.json file", async () => {
-     const agentsDir = path.join(tmpDir, "agents")
-
-     // Create valid agent
-     const validDir = path.join(agentsDir, "valid_agent_aaa111")
-     await fs.mkdir(validDir, { recursive: true })
-     await Bun.write(
-       path.join(validDir, ".agent.json"),
-       JSON.stringify({
-         id: "agent_valid",
-         version: 1,
-         responseEngineVersion: 1,
-       }),
-     )
-     await Bun.write(
-       path.join(validDir, "config.json"),
-       JSON.stringify({
-         agent_name: "Valid Agent",
-         response_engine: { type: "retell-llm", llm_id: "llm_v", version: 1 },
-       }),
-     )
-     await Bun.write(
-       path.join(validDir, "llm.json"),
-       JSON.stringify({ model: "gpt-4" }),
-     )
-
-     // Create directory without .agent.json (should be skipped)
-     const invalidDir = path.join(agentsDir, "not_an_agent")
-     await fs.mkdir(invalidDir, { recursive: true })
-     await Bun.write(
-       path.join(invalidDir, "config.json"),
-       JSON.stringify({ agent_name: "Invalid" }),
-     )
-
-     const result = await getLocalState({ agentsDir })
-
-     expect(result.voiceAgents).toHaveLength(1)
-     expect(result.voiceAgents[0]!.agent_name).toBe("Valid Agent")
-   })
-
-   test("returns empty arrays when agentsDir is empty", async () => {
-     const agentsDir = path.join(tmpDir, "agents")
-     await fs.mkdir(agentsDir, { recursive: true })
-
-     const result = await getLocalState({ agentsDir })
-
-     expect(result.voiceAgents).toHaveLength(0)
-     expect(result.llms).toHaveLength(0)
-     expect(result.conversationFlows).toHaveLength(0)
-   })
-
-   test("reads current filesystem state", async () => {
-     const agentsDir = path.join(tmpDir, "agents")
-     const agentDir = path.join(agentsDir, "dynamic_agent_xyz789")
-
-     await fs.mkdir(agentDir, { recursive: true })
-     await Bun.write(
-       path.join(agentDir, ".agent.json"),
-       JSON.stringify({
-         id: "agent_dynamic",
-         version: 1,
-         responseEngineVersion: 1,
-       }),
-     )
-     await Bun.write(
-       path.join(agentDir, "config.json"),
-       JSON.stringify({
-         agent_name: "Initial Name",
-         response_engine: { type: "retell-llm", llm_id: "llm_d", version: 1 },
-       }),
-     )
-     await Bun.write(
-       path.join(agentDir, "llm.json"),
-       JSON.stringify({ model: "gpt-3.5" }),
-     )
-
-     // First read
-     const result1 = await getLocalState({ agentsDir })
-     expect(result1.voiceAgents[0]!.agent_name).toBe("Initial Name")
-     expect(result1.llms[0]!.model as string).toBe("gpt-3.5")
-
-     // Modify files
-     await Bun.write(
-       path.join(agentDir, "config.json"),
-       JSON.stringify({
-         agent_name: "Updated Name",
-         response_engine: { type: "retell-llm", llm_id: "llm_d", version: 1 },
-       }),
-     )
-     await Bun.write(
-       path.join(agentDir, "llm.json"),
-       JSON.stringify({ model: "gpt-4" }),
-     )
-
-     // Second read should reflect changes
-     const result2 = await getLocalState({ agentsDir })
-     expect(result2.voiceAgents[0]!.agent_name).toBe("Updated Name")
-     expect(result2.llms[0]!.model as string).toBe("gpt-4")
-   })
- })
@@ -1,196 +0,0 @@
- import { beforeEach, describe, expect, mock, test } from "bun:test"
- import type Retell from "retell-sdk"
- import { getRemoteState, retell } from "../src/lib/agents"
- import rawAgents from "./raw-agents-list-response.json"
- import rawConversationFlows from "./raw-conversation-flow-list-response.json"
- import rawLlms from "./raw-llms-list-response.json"
-
- describe("getRemoteState", () => {
-   beforeEach(() => {
-     // Mock the retell API methods - using Object.assign to avoid type issues with mocking
-     Object.assign(retell.agent, {
-       list: mock(() =>
-         Promise.resolve(rawAgents as Retell.Agent.AgentResponse[]),
-       ),
-     })
-     Object.assign(retell.llm, {
-       list: mock(() => Promise.resolve(rawLlms as Retell.Llm.LlmResponse[])),
-     })
-     Object.assign(retell.conversationFlow, {
-       list: mock(() =>
-         Promise.resolve(
-           rawConversationFlows as Retell.ConversationFlow.ConversationFlowResponse[],
-         ),
-       ),
-     })
-   })
-
-   test("returns all agents (including unpublished)", async () => {
-     const result = await getRemoteState()
-
-     // All unique agent_ids should be represented
-     const uniqueAgentIds = new Set(rawAgents.map((a) => a.agent_id))
-     const returnedIds = new Set(result.voiceAgents.map((a) => a._id))
-
-     expect(returnedIds.size).toBe(uniqueAgentIds.size)
-     for (const id of uniqueAgentIds) {
-       expect(returnedIds).toContain(id)
-     }
-   })
-
-   test("returns latest version of each unique agent", async () => {
-     const result = await getRemoteState()
-
-     // Group raw agents by agent_id and find expected latest versions
-     const agentGroups = new Map<string, (typeof rawAgents)[number][]>()
-     for (const agent of rawAgents) {
-       const existing = agentGroups.get(agent.agent_id) ?? []
-       existing.push(agent)
-       agentGroups.set(agent.agent_id, existing)
-     }
-
-     // For each agent_id, we should have exactly one result (the latest version)
-     const uniqueAgentIds = new Set(result.voiceAgents.map((a) => a._id))
-     expect(uniqueAgentIds.size).toBe(result.voiceAgents.length)
-
-     // Verify each returned agent is the highest version for its agent_id
-     for (const agent of result.voiceAgents) {
-       const allVersions = agentGroups.get(agent._id)
-       if (allVersions && allVersions.length > 1) {
-         const maxVersion = Math.max(...allVersions.map((a) => a.version ?? 0))
-         // The response_engine.version should match the max
-         const engineVersion =
-           "version" in agent.response_engine
-             ? agent.response_engine.version
-             : undefined
-         expect(engineVersion).toBe(maxVersion)
-       }
-     }
-   })
-
-   test("strips readonly fields from agents", async () => {
-     const result = await getRemoteState()
-
-     for (const agent of result.voiceAgents) {
-       // These fields should be stripped
-       expect(agent).not.toHaveProperty("last_modification_timestamp")
-       expect(agent).not.toHaveProperty("version")
-       expect(agent).not.toHaveProperty("is_published")
-       expect(agent).not.toHaveProperty("agent_id")
-
-       // _id should exist (renamed from agent_id)
-       expect(agent).toHaveProperty("_id")
-     }
-   })
-
-   test("returns only conversation flows referenced by agents", async () => {
-     const result = await getRemoteState()
-
-     // Get all conversation flow IDs referenced by returned agents
-     const referencedFlowIds = new Set<string>()
-     for (const agent of result.voiceAgents) {
-       if (agent.response_engine.type === "conversation-flow") {
-         referencedFlowIds.add(agent.response_engine.conversation_flow_id)
-       }
-     }
-
-     // Every returned flow should be referenced by an agent
-     for (const flow of result.conversationFlows) {
-       expect(referencedFlowIds).toContain(flow._id)
-     }
-
-     // The number of flows should match or be less than references (some agents may share flows)
-     expect(result.conversationFlows.length).toBeLessThanOrEqual(
-       referencedFlowIds.size,
-     )
-   })
-
-   test("returns only LLMs referenced by agents", async () => {
-     const result = await getRemoteState()
-
-     // Get all LLM IDs referenced by returned agents
-     const referencedLlmIds = new Set<string>()
-     for (const agent of result.voiceAgents) {
-       if (agent.response_engine.type === "retell-llm") {
-         referencedLlmIds.add(agent.response_engine.llm_id)
-       }
-     }
-
-     // Every returned LLM should be referenced by an agent
-     for (const llm of result.llms) {
-       expect(referencedLlmIds).toContain(llm._id)
-     }
-   })
-
-   test("strips readonly fields from conversation flows", async () => {
-     const result = await getRemoteState()
-
-     for (const flow of result.conversationFlows) {
-       expect(flow).not.toHaveProperty("version")
-       expect(flow).not.toHaveProperty("is_published")
-       expect(flow).not.toHaveProperty("conversation_flow_id")
-       expect(flow).toHaveProperty("_id")
-     }
-   })
-
-   test("strips readonly fields from LLMs", async () => {
-     const result = await getRemoteState()
-
-     for (const llm of result.llms) {
-       expect(llm).not.toHaveProperty("version")
-       expect(llm).not.toHaveProperty("is_published")
-       expect(llm).not.toHaveProperty("last_modification_timestamp")
-       expect(llm).not.toHaveProperty("llm_id")
-       expect(llm).toHaveProperty("_id")
-     }
-   })
-
-   test("matches conversation flow versions to agent references", async () => {
-     const result = await getRemoteState()
-
-     // Build a map of what flow version each agent expects
-     const expectedFlowVersions = new Map<string, number | null | undefined>()
-     for (const agent of result.voiceAgents) {
-       if (agent.response_engine.type === "conversation-flow") {
-         expectedFlowVersions.set(
-           agent.response_engine.conversation_flow_id,
-           agent.response_engine.version,
-         )
-       }
-     }
-
-     // Each returned flow should have the version that matches what the agent references
-     for (const flow of result.conversationFlows) {
-       const expectedVersion = expectedFlowVersions.get(flow._id)
-       const rawFlow = rawConversationFlows.find(
-         (f) =>
-           f.conversation_flow_id === flow._id && f.version === expectedVersion,
-       )
-       expect(rawFlow).toBeDefined()
-     }
-   })
-
-   test("matches LLM versions to agent references", async () => {
-     const result = await getRemoteState()
-
-     // Build a map of what LLM version each agent expects
-     const expectedLlmVersions = new Map<string, number | null | undefined>()
-     for (const agent of result.voiceAgents) {
-       if (agent.response_engine.type === "retell-llm") {
-         expectedLlmVersions.set(
-           agent.response_engine.llm_id,
-           agent.response_engine.version,
-         )
-       }
-     }
-
-     // Each returned LLM should have the version that matches what the agent references
-     for (const llm of result.llms) {
-       const expectedVersion = expectedLlmVersions.get(llm._id)
-       const rawLlm = rawLlms.find(
-         (l) => l.llm_id === llm._id && l.version === expectedVersion,
-       )
-       expect(rawLlm).toBeDefined()
-     }
-   })
- })