march-start-cli 0.1.0

@@ -0,0 +1,1092 @@
1
+ # Medical Q&A Agent Template (TypeScript)
2
+
3
+ A complete TypeScript template for building AI agents with the March Agent SDK. This template demonstrates a medical Q&A agent with conversation history, attachment handling, and LLM streaming.
4
+
5
+ ## Quick Start
6
+
7
+ ### Prerequisites
8
+
9
+ - Node.js 18+ and pnpm
10
+ - **March AI Platform running** (from the parent directory: `cd .. && docker-compose up`)
11
+ - OpenAI API key
12
+
13
+ ### Setup
14
+
15
+ 1. Copy environment variables:
16
+ ```bash
17
+ cp env.example .env
18
+ ```
19
+
20
+ 2. Edit `.env` with your values:
21
+ ```env
22
+ OPENAI_API_KEY=sk-your-openai-api-key
23
+ # Use agent-gateway:8080 instead if the agent itself runs inside docker-compose
+ GATEWAY_URL=localhost:8080
24
+ GATEWAY_API_KEY=your-gateway-api-key
25
+ ```
26
+
27
+ 3. Install dependencies:
28
+ ```bash
29
+ pnpm install
30
+ ```
31
+
32
+ 4. Start the agent:
33
+ ```bash
34
+ pnpm dev
35
+ ```
36
+
37
+ ### Local Development
38
+
39
+ ```bash
40
+ # Development with hot reload
41
+ pnpm dev
42
+
43
+ # Build for production
44
+ pnpm build
45
+
46
+ # Run production build
47
+ pnpm start
48
+
49
+ # Type check
50
+ pnpm typecheck
51
+ ```
52
+
53
+ ## Project Structure
54
+
55
+ ```
56
+ agent-ts-template/
57
+ ├── src/
58
+ │   └── agent.ts         # Main agent implementation
59
+ ├── env.example          # Environment variables template
60
+ ├── package.json
61
+ ├── tsconfig.json
62
+ └── README.md
63
+ ```
64
+
65
+ ---
66
+
67
+ ## March Agent SDK Reference
68
+
69
+ ### App Configuration
70
+
71
+ Create and configure the March Agent application:
72
+
73
+ ```typescript
74
+ import { config } from 'dotenv'
75
+ import { resolve } from 'path'
76
+ import { MarchAgentApp } from '@march/agent'
77
+
78
+ // Load environment variables (use override: true to ensure .env takes precedence)
79
+ config({ path: resolve(process.cwd(), '.env'), override: true })
80
+
81
+ const app = new MarchAgentApp({
82
+ gatewayUrl: process.env.GATEWAY_URL || 'localhost:8080',
83
+ apiKey: process.env.GATEWAY_API_KEY || 'your-api-key',
84
+ heartbeatInterval: parseInt(process.env.HEARTBEAT_INTERVAL || '10'),
85
+ secure: process.env.CONNECTION_SECURE === 'true',
86
+ })
87
+ ```
88
+
89
+ | Parameter | Description |
90
+ |-----------|-------------|
91
+ | `gatewayUrl` | Agent Gateway address (host:port) |
92
+ | `apiKey` | API key for gateway authentication |
93
+ | `heartbeatInterval` | Heartbeat interval in seconds (SDK default: 60; this template passes 10) |
94
+ | `secure` | Use TLS for connections (default: false) |
95
+
96
+ ---
97
+
98
+ ### Agent Registration
99
+
100
+ Register your agent with the platform:
101
+
102
+ ```typescript
103
+ const agent = await app.registerMe({
104
+ name: 'medical-qa-agent-ts',
105
+ about: 'A medical Q&A agent that provides health information',
106
+ document: `
107
+ Medical Q&A Agent provides accurate, evidence-based information
108
+ about general medical topics, symptoms, and health questions.
109
+ `,
110
+ representationName: 'Medical Assistant (TS)',
111
+ baseUrl: process.env.AGENT_BASE_URL || 'http://localhost:8060',
112
+ metadata: {
113
+ version: '1.0.0',
114
+ category: 'healthcare',
115
+ language: 'typescript',
116
+ },
117
+ })
118
+ ```
119
+
120
+ | Parameter | Description |
121
+ |-----------|-------------|
122
+ | `name` | Unique identifier for the agent (used for routing) |
123
+ | `about` | Short description for agent listings |
124
+ | `document` | Detailed documentation about capabilities |
125
+ | `representationName` | Display name in the UI (optional) |
126
+ | `baseUrl` | Base URL for iframe artifacts (optional) |
127
+ | `metadata` | Custom key-value data (optional) |
128
+
129
+ ---
130
+
131
+ ### Message Handling
132
+
133
+ Handle incoming messages with the `onMessage()` method:
134
+
135
+ ```typescript
136
+ import { Message } from '@march/agent'
137
+
138
+ agent.onMessage(async (message: Message, sender: string) => {
139
+ // sender is "user" or the name of another agent
140
+ console.log(`Message from ${sender}: ${message.content}`)
141
+
142
+ const streamer = agent.streamer(message)
143
+ streamer.stream(`You said: ${message.content}`)
144
+ await streamer.finish()
145
+ })
146
+ ```
147
+
148
+ #### Filtering by Sender
149
+
150
+ ```typescript
151
+ // Only handle messages from users
152
+ agent.onMessage(async (message, sender) => {
153
+ // This handler only receives messages where sender === 'user'
154
+ }, { senders: ['user'] })
155
+
156
+ // Exclude messages from a specific agent
157
+ agent.onMessage(async (message, sender) => {
158
+ // This handler receives all messages EXCEPT from 'other-agent'
159
+ }, { senders: ['~other-agent'] })
160
+
161
+ // Handle messages from specific agents
162
+ agent.onMessage(async (message, sender) => {
163
+ // Only from 'analyzer-agent' or 'summarizer-agent'
164
+ }, { senders: ['analyzer-agent', 'summarizer-agent'] })
165
+ ```
166
+
167
+ #### Message Properties
168
+
169
+ | Property | Type | Description |
170
+ |----------|------|-------------|
171
+ | `content` | `string` | The message text |
172
+ | `conversationId` | `string` | Unique conversation identifier |
173
+ | `userId` | `string` | User who sent the message |
174
+ | `id` | `string` | Unique message ID |
175
+ | `createdAt` | `string` | ISO timestamp |
176
+ | `metadata` | `Record<string, unknown> \| undefined` | Custom metadata from frontend |
177
+ | `schema` | `Record<string, unknown> \| undefined` | JSON schema for form responses |
178
+ | `attachment` | `AttachmentInfo \| undefined` | File attachment metadata |
179
+ | `conversation` | `Conversation \| undefined` | Helper for conversation history |
180
+ | `memory` | `Memory \| undefined` | Helper for long-term memory |
181
+
182
+ #### Accessing Frontend Metadata
183
+
184
+ The frontend can send custom metadata with messages:
185
+
186
+ ```typescript
187
+ agent.onMessage(async (message, sender) => {
188
+ if (message.metadata) {
189
+ const source = message.metadata.source as string || 'unknown'
190
+ const priority = message.metadata.priority as string || 'normal'
191
+ console.log(`Request from ${source} with priority ${priority}`)
192
+ }
193
+ })
194
+ ```
195
+
196
+ ---
197
+
198
+ ### Streaming Responses
199
+
200
+ Stream responses using the Streamer:
201
+
202
+ ```typescript
203
+ agent.onMessage(async (message, sender) => {
204
+ const streamer = agent.streamer(message)
205
+ streamer.stream('Hello ')
206
+ streamer.stream('World!')
207
+ await streamer.finish()
208
+ })
209
+ ```
210
+
211
+ #### With LLM Streaming (LangChain.js)
212
+
213
+ ```typescript
214
+ import { ChatOpenAI } from '@langchain/openai'
215
+ import { HumanMessage, SystemMessage, BaseMessage } from '@langchain/core/messages'
216
+
217
+ const llm = new ChatOpenAI({
218
+ model: process.env.OPENAI_MODEL || 'gpt-4o-mini',
219
+ streaming: true,
220
+ openAIApiKey: process.env.OPENAI_API_KEY,
221
+ })
222
+
223
+ agent.onMessage(async (message, sender) => {
224
+ const messages: BaseMessage[] = [
225
+ new SystemMessage('You are a helpful assistant.'),
226
+ new HumanMessage(message.content),
227
+ ]
228
+
229
+ const streamer = agent.streamer(message)
230
+ const stream = await llm.stream(messages)
231
+
232
+ for await (const chunk of stream) {
233
+ if (chunk.content) {
234
+ streamer.stream(chunk.content as string)
235
+ }
236
+ }
237
+
238
+ await streamer.finish()
239
+ })
240
+ ```
241
+
242
+ #### Response Metadata
243
+
244
+ Attach metadata to your response. This metadata is persisted with the message:
245
+
246
+ ```typescript
247
+ const streamer = agent.streamer(message)
248
+ streamer.setMessageMetadata({
249
+ model: 'gpt-4o-mini',
250
+ step: 'diagnosis',
251
+ confidence: 0.95,
252
+ tokens_used: 150,
253
+ })
254
+ streamer.stream('Here is my response...')
255
+ await streamer.finish()
256
+ ```
257
+
258
+ #### Event Types and Non-Persisted Content
259
+
260
+ Use event types to categorize stream content. Use `persist: false` for content that shouldn't be saved to history:
261
+
262
+ ```typescript
263
+ const streamer = agent.streamer(message)
264
+
265
+ // Thinking indicator (not saved to history)
266
+ streamer.stream('Analyzing your question...', { persist: false, eventType: 'thinking' })
267
+
268
+ // Main content (saved to history)
269
+ streamer.stream('Here is my response...')
270
+
271
+ await streamer.finish()
272
+ ```
273
+
274
+ #### Multi-Turn Conversations (Awaiting)
275
+
276
+ Use `awaiting: true` to ensure the next user response comes back to this agent:
277
+
278
+ ```typescript
279
+ agent.onMessage(async (message, sender) => {
280
+ const streamer = agent.streamer(message, { awaiting: true })
281
+ streamer.stream('What symptoms are you experiencing?')
282
+ await streamer.finish()
283
+ // The next message from the user will be routed back to this agent
284
+ })
285
+ ```
286
+
287
+ #### Artifacts
288
+
289
+ Attach files, documents, or iframes to your response:
290
+
291
+ ```typescript
292
+ const streamer = agent.streamer(message)
293
+
294
+ streamer.addArtifact({
295
+ url: `/api/reports/lab-results.pdf`,
296
+ type: 'document',
297
+ title: 'Lab Results',
298
+ description: 'Your recent blood work results',
299
+ metadata: { size: 1024000, mimeType: 'application/pdf' }
300
+ })
301
+
302
+ streamer.addArtifact({
303
+ url: `/dashboard/${message.conversationId}`,
304
+ type: 'iframe',
305
+ title: 'Patient Dashboard',
306
+ description: 'Interactive dashboard showing conversation statistics',
307
+ })
308
+
309
+ streamer.stream('Here are your results.')
310
+ await streamer.finish()
311
+ ```
312
+
313
+ | Artifact Type | Use Case |
314
+ |---------------|----------|
315
+ | `document` | PDFs, reports, guides |
316
+ | `image` | Charts, diagrams, photos |
317
+ | `iframe` | Interactive content, dashboards |
318
+ | `video` | Tutorials, demos |
319
+ | `audio` | Voice messages |
320
+ | `code` | Source code snippets |
321
+ | `link` | External URLs |
322
+ | `file` | Generic downloads |
323
+
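+ The other types follow the same shape as the examples above; for instance, an `image` artifact (the URL path and labels here are illustrative):
+
+ ```typescript
+ // Attach a chart image alongside the streamed text
+ streamer.addArtifact({
+   url: '/charts/symptom-trend.png',
+   type: 'image',
+   title: 'Symptom Trend',
+   description: 'Severity scores over the last 7 days',
+ })
+ ```
+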
324
+ ---
325
+
326
+ ### Response Schema (Forms)
327
+
328
+ Render forms in the frontend using JSON Schema. The platform validates responses automatically.
329
+
330
+ #### Requesting Form Input
331
+
332
+ ```typescript
333
+ agent.onMessage(async (message, sender) => {
334
+ const streamer = agent.streamer(message, { awaiting: true })
335
+
336
+ streamer.setResponseSchema({
337
+ type: 'object',
338
+ title: 'Symptom Assessment',
339
+ properties: {
340
+ symptom: {
341
+ type: 'string',
342
+ title: 'Primary Symptom'
343
+ },
344
+ duration: {
345
+ type: 'string',
346
+ title: 'Duration',
347
+ enum: ['Less than 24 hours', '1-3 days', 'More than 3 days']
348
+ },
349
+ severity: {
350
+ type: 'integer',
351
+ title: 'Severity (1-10)',
352
+ minimum: 1,
353
+ maximum: 10
354
+ }
355
+ },
356
+ required: ['symptom', 'severity']
357
+ })
358
+
359
+ streamer.stream('Please describe your symptoms:')
360
+ await streamer.finish()
361
+ })
362
+ ```
363
+
364
+ #### Handling Form Responses
365
+
366
+ When a user submits a form, `message.schema` contains the schema and `message.content` contains the JSON response:
367
+
368
+ ```typescript
369
+ agent.onMessage(async (message, sender) => {
370
+ if (message.schema) {
371
+ // This is a form response - parse the JSON
372
+ const data = JSON.parse(message.content)
373
+ const symptom = data.symptom as string
374
+ const severity = data.severity as number
375
+
376
+ const streamer = agent.streamer(message)
377
+ streamer.stream(`You reported ${symptom} with severity ${severity}/10.`)
378
+ await streamer.finish()
379
+ } else {
380
+ // Regular text message - show the form
381
+ const streamer = agent.streamer(message, { awaiting: true })
382
+ streamer.setResponseSchema({ /* ... */ })
383
+ streamer.stream('Please fill out the symptom form:')
384
+ await streamer.finish()
385
+ }
386
+ })
387
+ ```
388
+
389
+ ---
390
+
391
+ ### Attachment Handling
392
+
393
+ Handle file attachments (images, PDFs) uploaded by users.
394
+
395
+ #### Checking for Attachments
396
+
397
+ ```typescript
398
+ agent.onMessage(async (message, sender) => {
399
+ if (message.hasAttachment() && message.attachment) {
400
+ const attachment = message.attachment
401
+ console.log(`Received: ${attachment.filename}`)
402
+ console.log(`Type: ${attachment.contentType}`)
403
+ console.log(`Size: ${attachment.size} bytes`)
404
+
405
+ // Check type by contentType
406
+ if (attachment.contentType?.startsWith('image/')) {
407
+ console.log('This is an image')
408
+ } else if (attachment.contentType === 'application/pdf') {
409
+ console.log('This is a PDF')
410
+ }
411
+ }
412
+ })
413
+ ```
414
+
415
+ #### Attachment Metadata
416
+
417
+ | Property | Type | Description |
418
+ |----------|------|-------------|
419
+ | `url` | `string` | Download URL path |
420
+ | `filename` | `string` | Original filename |
421
+ | `contentType` | `string` | MIME type (e.g., `image/jpeg`, `application/pdf`) |
422
+ | `size` | `number \| undefined` | File size in bytes |
423
+ | `fileType` | `string \| undefined` | Short type name as plain text (e.g., `png`, `pdf`) |
424
+
425
+ #### Downloading Attachments
426
+
427
+ Download methods make an API call, so use them only when you actually need the file content:
428
+
429
+ ```typescript
430
+ agent.onMessage(async (message, sender) => {
431
+ if (message.hasAttachment()) {
432
+ // Download as Buffer
433
+ const content = await message.getAttachmentBytes()
434
+
435
+ // Download as base64 (for LLM vision APIs)
436
+ const base64Data = await message.getAttachmentBase64()
437
+ }
438
+ })
439
+ ```
440
+
441
+ #### Image Processing with Vision LLM
442
+
443
+ ```typescript
444
+ import { ChatOpenAI } from '@langchain/openai'
445
+ import { HumanMessage } from '@langchain/core/messages'
446
+
447
+ const visionLlm = new ChatOpenAI({
448
+ model: 'gpt-4o',
449
+ streaming: true,
450
+ openAIApiKey: process.env.OPENAI_API_KEY,
451
+ })
452
+
453
+ agent.onMessage(async (message, sender) => {
454
+ if (message.hasAttachment() && message.attachment?.contentType?.startsWith('image/')) {
455
+ const base64Image = await message.getAttachmentBase64()
456
+ const contentType = message.attachment.contentType
457
+
458
+ const userMessage = new HumanMessage({
459
+ content: [
460
+ {
461
+ type: 'text',
462
+ text: message.content || 'Describe this image',
463
+ },
464
+ {
465
+ type: 'image_url',
466
+ image_url: {
467
+ url: `data:${contentType};base64,${base64Image}`,
468
+ },
469
+ },
470
+ ],
471
+ })
472
+
473
+ const streamer = agent.streamer(message)
474
+ const stream = await visionLlm.stream([userMessage])
475
+
476
+ for await (const chunk of stream) {
477
+ if (chunk.content) {
478
+ streamer.stream(chunk.content as string)
479
+ }
480
+ }
481
+
482
+ await streamer.finish()
483
+ }
484
+ })
485
+ ```
486
+
487
+ #### PDF Processing
488
+
489
+ ```typescript
490
+ // Note: PDF text extraction requires additional libraries like pdf-parse
491
+ // This example shows how to acknowledge PDF attachments
492
+
493
+ agent.onMessage(async (message, sender) => {
494
+ if (message.hasAttachment() && message.attachment?.contentType === 'application/pdf') {
495
+ const attachment = message.attachment
496
+ const streamer = agent.streamer(message)
497
+
498
+ streamer.stream(`I received your PDF document: ${attachment.filename}`)
499
+ streamer.stream(`\nFile size: ${Math.round((attachment.size || 0) / 1024)} KB`)
500
+
501
+ // To actually process PDF content, you would:
502
+ // 1. Install pdf-parse: pnpm add pdf-parse
503
+ // 2. Download the file: const buffer = await message.getAttachmentBytes()
504
+ // 3. Extract text: const pdfData = await pdfParse(buffer)
505
+ // 4. Use pdfData.text for LLM processing
506
+
507
+ await streamer.finish()
508
+ }
509
+ })
510
+ ```
511
+
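+ If you do install `pdf-parse`, a minimal sketch of steps 1-4 above, reusing the `agent` and `llm` instances set up earlier (the summarization prompt and the 4,000-character cutoff are illustrative):
+
+ ```typescript
+ import pdfParse from 'pdf-parse'
+ import { HumanMessage, SystemMessage } from '@langchain/core/messages'
+
+ agent.onMessage(async (message, sender) => {
+   if (message.hasAttachment() && message.attachment?.contentType === 'application/pdf') {
+     // Download the PDF and extract its text
+     const buffer = await message.getAttachmentBytes()
+     const pdfData = await pdfParse(buffer)
+
+     // Truncate so the prompt stays small
+     const excerpt = pdfData.text.slice(0, 4000)
+
+     const streamer = agent.streamer(message)
+     const stream = await llm.stream([
+       new SystemMessage('You summarize documents for the user.'),
+       new HumanMessage(`${message.content}\n\nDocument text:\n${excerpt}`),
+     ])
+
+     for await (const chunk of stream) {
+       if (chunk.content) {
+         streamer.stream(chunk.content as string)
+       }
+     }
+
+     await streamer.finish()
+   }
+ })
+ ```
+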
512
+ ---
513
+
514
+ ### Conversation History
515
+
516
+ Access conversation history for context:
517
+
518
+ ```typescript
519
+ agent.onMessage(async (message, sender) => {
520
+ if (message.conversation) {
521
+ // Get all messages in this conversation
522
+ const history = await message.conversation.getHistory({ limit: 10 })
523
+
524
+ // Get messages involving current agent (default)
525
+ const agentHistory = await message.conversation.getAgentHistory({ limit: 10 })
526
+
527
+ // Get messages involving a specific agent
528
+ const otherHistory = await message.conversation.getAgentHistory({
529
+ agentName: 'other-agent',
530
+ limit: 10,
531
+ })
532
+
533
+ for (const msg of history) {
534
+ console.log(`${msg.role}: ${msg.content}`)
535
+ }
536
+ }
537
+ })
538
+ ```
539
+
540
+ | Method | Description |
541
+ |--------|-------------|
542
+ | `getHistory({ limit })` | All messages in the conversation |
543
+ | `getAgentHistory({ limit })` | Messages sent to/from current agent |
544
+ | `getAgentHistory({ agentName, limit })` | Messages sent to/from a specific agent |
545
+
546
+ #### Using History with LLMs
547
+
548
+ ```typescript
549
+ import { HumanMessage, AIMessage, SystemMessage, BaseMessage } from '@langchain/core/messages'
550
+
551
+ const SYSTEM_PROMPT = 'You are a helpful medical assistant.'
552
+
553
+ agent.onMessage(async (message, sender) => {
554
+ const messages: BaseMessage[] = [new SystemMessage(SYSTEM_PROMPT)]
555
+
556
+ // Load conversation history
557
+ if (message.conversation) {
558
+ const history = await message.conversation.getAgentHistory({ limit: 20 })
559
+ for (const msg of history) {
560
+ if (msg.role === 'user') {
561
+ messages.push(new HumanMessage(msg.content))
562
+ } else {
563
+ messages.push(new AIMessage(msg.content))
564
+ }
565
+ }
566
+ }
567
+
568
+ // Add current message
569
+ messages.push(new HumanMessage(message.content))
570
+
571
+ // Stream response
572
+ const streamer = agent.streamer(message)
573
+ const stream = await llm.stream(messages)
574
+
575
+ for await (const chunk of stream) {
576
+ if (chunk.content) {
577
+ streamer.stream(chunk.content as string)
578
+ }
579
+ }
580
+
581
+ await streamer.finish()
582
+ })
583
+ ```
584
+
585
+ ---
586
+
587
+ ### AI Memory (Long-term)
588
+
589
+ Search across all user conversations using semantic search:
590
+
591
+ ```typescript
592
+ agent.onMessage(async (message, sender) => {
593
+ if (message.memory) {
594
+ // Search all user's conversations
595
+ const results = await message.memory.queryAboutUser({
596
+ query: message.content,
597
+ limit: 5,
598
+ threshold: 70,
599
+ })
600
+
601
+ for (const result of results) {
602
+ console.log(`Score: ${result.similarity}`)
603
+ console.log(`Content: ${result.content}`)
604
+ }
605
+
606
+ // Search only current conversation
607
+ const convResults = await message.memory.queryAboutConversation({
608
+ query: 'previous symptoms',
609
+ limit: 3,
610
+ })
611
+
612
+ // Get user summary
613
+ const summary = await message.memory.getUserSummary()
614
+ if (summary) {
615
+ console.log(`User summary: ${summary.summary}`)
616
+ }
617
+ }
618
+ })
619
+ ```
620
+
621
+ | Method | Description |
622
+ |--------|-------------|
623
+ | `queryAboutUser({ query, limit, threshold })` | Search all user conversations |
624
+ | `queryAboutConversation({ query, limit, threshold })` | Search current conversation |
625
+ | `getUserSummary()` | Get aggregated user summary |
626
+
627
+ | Parameter | Default | Description |
628
+ |-----------|---------|-------------|
629
+ | `query` | required | Semantic search query |
630
+ | `limit` | 10 | Maximum results (1-100) |
631
+ | `threshold` | 70 | Minimum similarity score (0-100) |
632
+
633
+ ---
634
+
635
+ ### Extensions
636
+
637
+ #### LangGraph Integration
638
+
639
+ Use `HTTPCheckpointSaver` to persist LangGraph state:
640
+
641
+ ```typescript
642
+ import { MarchAgentApp, Message } from '@march/agent'
643
+ import { HTTPCheckpointSaver } from '@march/agent/extensions/langgraph'
644
+ import { StateGraph, START, END, Annotation } from '@langchain/langgraph'
645
+ import { ChatOpenAI } from '@langchain/openai'
646
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages'
647
+
648
+ // Initialize app
649
+ const app = new MarchAgentApp({
650
+ gatewayUrl: process.env.GATEWAY_URL!,
651
+ apiKey: process.env.GATEWAY_API_KEY!,
652
+ })
653
+
654
+ // Create checkpointer for state persistence
655
+ const checkpointer = new HTTPCheckpointSaver(app)
656
+
657
+ // Define state with message accumulation
658
+ const StateAnnotation = Annotation.Root({
659
+ messages: Annotation<BaseMessage[]>({
660
+ reducer: (x, y) => x.concat(y),
661
+ default: () => [],
+ }),
662
+ })
663
+
664
+ // Create LLM
665
+ const llm = new ChatOpenAI({
666
+ model: 'gpt-4o-mini',
667
+ streaming: true,
668
+ openAIApiKey: process.env.OPENAI_API_KEY,
669
+ })
670
+
671
+ // Define respond node
672
+ async function respond(state: typeof StateAnnotation.State) {
673
+ const response = await llm.invoke(state.messages)
674
+ return { messages: [response] }
675
+ }
676
+
677
+ // Build graph
678
+ const graph = new StateGraph(StateAnnotation)
679
+ .addNode('respond', respond)
680
+ .addEdge(START, 'respond')
681
+ .addEdge('respond', END)
682
+
683
+ const compiledGraph = graph.compile({ checkpointer })
684
+
685
+ async function main() {
686
+ const agent = await app.registerMe({
687
+ name: 'langgraph-agent-ts',
688
+ about: 'Agent using LangGraph for stateful conversations',
689
+ document: 'Maintains conversation state using LangGraph checkpointing.',
690
+ })
691
+
692
+ agent.onMessage(async (message: Message, sender: string) => {
693
+ // Use conversation ID as thread ID for state isolation; the checkpointer
694
+ // restores prior turns and the reducer appends the new message
695
+ const config = { configurable: { thread_id: message.conversationId } }
696
+
697
+ const streamer = agent.streamer(message)
698
+
699
+ // Pass only the new message as input; existing state (if any) is loaded
700
+ // from the checkpoint, so no manual getState/updateState bookkeeping is needed
701
+ for await (const event of compiledGraph.streamEvents(
702
+ { messages: [new HumanMessage(message.content)] },
703
+ { ...config, version: 'v2' },
704
+ )) {
705
+ if (event.event === 'on_chat_model_stream') {
706
+ const content = event.data.chunk?.content
707
+ if (content) {
708
+ streamer.stream(content as string)
709
+ }
710
+ }
711
+ }
712
+
713
+ await streamer.finish()
730
+ })
731
+
732
+ await app.run()
733
+ }
734
+
735
+ main().catch(console.error)
736
+ ```
737
+
738
+ #### Vercel AI SDK Integration
739
+
740
+ Use `VercelAIMessageStore` to persist message history:
741
+
742
+ ```typescript
743
+ import { MarchAgentApp, Message } from '@march/agent'
744
+ import { VercelAIMessageStore } from '@march/agent/extensions/vercel-ai'
745
+ import { streamText, CoreMessage } from 'ai'
746
+ import { openai } from '@ai-sdk/openai'
747
+
748
+ const app = new MarchAgentApp({
749
+ gatewayUrl: process.env.GATEWAY_URL!,
750
+ apiKey: process.env.GATEWAY_API_KEY!,
751
+ })
752
+
753
+ // Create message store for persistence
754
+ const messageStore = new VercelAIMessageStore(app)
755
+
756
+ const SYSTEM_PROMPT = 'You are a helpful medical assistant.'
757
+
758
+ async function main() {
759
+ const agent = await app.registerMe({
760
+ name: 'vercel-ai-agent-ts',
761
+ about: 'Agent using Vercel AI SDK',
762
+ document: 'Uses Vercel AI SDK for LLM interactions with message persistence.',
763
+ })
764
+
765
+ agent.onMessage(async (message: Message, sender: string) => {
766
+ // Load existing message history
767
+ const history = await messageStore.load(message.conversationId)
768
+
769
+ // Build messages array
770
+ const messages: CoreMessage[] = [
771
+ { role: 'system', content: SYSTEM_PROMPT },
772
+ ...history,
773
+ { role: 'user', content: message.content }
774
+ ]
775
+
776
+ const streamer = agent.streamer(message)
777
+ let fullResponse = ''
778
+
779
+ // Stream the response with the Vercel AI SDK; iterate textStream so the
780
+ // generation is actually consumed (the result streams are pull-based)
781
+ const result = await streamText({
782
+ model: openai('gpt-4o'),
783
+ messages,
784
+ })
785
+
786
+ for await (const textDelta of result.textStream) {
787
+ streamer.stream(textDelta)
788
+ fullResponse += textDelta
789
+ }
790
+
791
+ await streamer.finish()
792
+
793
+ // Save updated history (without system message)
794
+ await messageStore.save(message.conversationId, [
795
+ ...history,
796
+ { role: 'user', content: message.content },
797
+ { role: 'assistant', content: fullResponse }
798
+ ])
799
+ })
800
+
801
+ await app.run()
802
+ }
803
+
804
+ main().catch(console.error)
805
+ ```
806
+
807
+ ##### VercelAIMessageStore Methods
808
+
809
+ | Method | Description |
810
+ |--------|-------------|
811
+ | `load(conversationId)` | Load message history for a conversation |
812
+ | `save(conversationId, messages)` | Save message history |
813
+ | `clear(conversationId)` | Clear message history |
814
+ | `append(conversationId, messages)` | Append messages to existing history |
815
+ | `getLastMessages(conversationId, count)` | Get last N messages |
816
+
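+ For incremental updates, `append` and `getLastMessages` can replace the load-modify-save pattern above; a brief sketch, assuming the signatures in the table and reusing `message` and `fullResponse` from the example:
+
+ ```typescript
+ // Append only the new turn instead of rewriting the whole history
+ await messageStore.append(message.conversationId, [
+   { role: 'user', content: message.content },
+   { role: 'assistant', content: fullResponse },
+ ])
+
+ // Keep prompts small by loading only the most recent messages
+ const recent = await messageStore.getLastMessages(message.conversationId, 10)
+ ```
+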
817
+ ---
818
+
819
+ ## Environment Variables
820
+
821
+ | Variable | Description | Default |
822
+ |----------|-------------|---------|
823
+ | `GATEWAY_URL` | Agent Gateway address (host:port) | Required |
824
+ | `GATEWAY_API_KEY` | API key for gateway authentication | Required |
825
+ | `CONNECTION_SECURE` | Use TLS for connections | `false` |
826
+ | `HEARTBEAT_INTERVAL` | Heartbeat interval in seconds | `10` |
827
+ | `AGENT_BASE_URL` | Base URL for artifacts | `http://localhost:8060` |
828
+ | `OPENAI_API_KEY` | OpenAI API key | Required |
829
+ | `OPENAI_MODEL` | OpenAI model for text | `gpt-4o-mini` |
830
+ | `OPENAI_VISION_MODEL` | OpenAI model for images | `gpt-4o` |
831
+ | `DEBUG` | Enable debug logging | `false` |
832
+
833
+ ---
834
+
835
+ ## Running the Agent
836
+
837
+ ### Development Mode
838
+
839
+ ```bash
840
+ # With hot reload
841
+ pnpm dev
842
+ ```
843
+
844
+ This uses `tsx watch` to automatically restart when files change.
845
+
846
+ ### Production Mode
847
+
848
+ ```bash
849
+ # Build
850
+ pnpm build
851
+
852
+ # Run
853
+ pnpm start
854
+ ```
855
+
856
+ ### With Docker
857
+
858
+ Create a `Dockerfile`:
859
+
860
+ ```dockerfile
861
+ FROM node:18-alpine
862
+
863
+ WORKDIR /app
864
+
865
+ COPY package*.json pnpm-lock.yaml ./
866
+ RUN npm install -g pnpm && pnpm install --frozen-lockfile
867
+
868
+ COPY . .
869
+ RUN pnpm build
870
+
871
+ CMD ["pnpm", "start"]
872
+ ```
873
+
874
+ Build and run:
875
+
876
+ ```bash
877
+ docker build -t my-agent .
878
+ docker run --env-file .env my-agent
879
+ ```
880
+
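+ Note that inside a container on the same docker-compose network as the platform, `GATEWAY_URL` should point at the gateway's service name (e.g., `agent-gateway:8080`); `localhost` would refer to the agent container itself.
+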
881
+ ---
882
+
883
+ ## Dependencies
884
+
885
+ | Package | Purpose |
886
+ |---------|---------|
887
+ | `@march/agent` | March Agent SDK (or local link) |
888
+ | `@langchain/openai` | LangChain OpenAI integration |
889
+ | `@langchain/core` | LangChain core types |
890
+ | `dotenv` | Environment variable loading |
891
+
892
+ ### Optional Dependencies
893
+
894
+ | Package | Purpose |
895
+ |---------|---------|
896
+ | `@langchain/langgraph` | LangGraph state management |
897
+ | `@langchain/langgraph-checkpoint` | LangGraph checkpointing |
898
+ | `ai` | Vercel AI SDK |
899
+ | `@ai-sdk/openai` | OpenAI provider for Vercel AI |
900
+ | `pdf-parse` | PDF text extraction |
901
+
902
+ ---
903
+
904
+ ## Troubleshooting
905
+
906
+ ### Environment Variables Not Loading
907
+
908
+ If environment variables aren't being picked up:
909
+
910
+ ```typescript
911
+ import { config } from 'dotenv'
912
+ import { resolve } from 'path'
913
+
914
+ // Use explicit path and override: true
915
+ config({ path: resolve(process.cwd(), '.env'), override: true })
916
+
917
+ // Verify loading
918
+ console.log('OPENAI_API_KEY loaded:', !!process.env.OPENAI_API_KEY)
919
+ ```
920
+
921
+ ### Connection Issues
922
+
923
+ If the agent can't connect to the gateway:
924
+
925
+ 1. Verify `GATEWAY_URL` is correct (`localhost:8080` from the host, `agent-gateway:8080` from inside the docker-compose network)
926
+ 2. Ensure the gateway is running
927
+ 3. Check `CONNECTION_SECURE` matches the gateway configuration
928
+
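+ A quick reachability test from the machine the agent runs on (substitute the host and port from your `GATEWAY_URL`):
+
+ ```bash
+ # Exits successfully only if the gateway port accepts TCP connections
+ nc -zv localhost 8080
+ ```
+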
929
+ ### Attachment Download Failures
930
+
931
+ If attachments fail to download with a 404:
932
+
933
+ 1. Verify the attachment service is running
934
+ 2. Check the attachment URL format in the logs
935
+ 3. Ensure the gateway is proxying `/s/attachment/*` correctly
936
+
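+ As a sanity check, you can request an attachment path taken from the logs directly through the gateway (the path segment below is illustrative; substitute the one your logs show):
+
+ ```bash
+ # A working proxy returns the file; a 404 here points at the gateway or the attachment service
+ curl -i "http://localhost:8080/s/attachment/<id-from-logs>"
+ ```
+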
937
+ ### TypeScript Errors
938
+
939
+ If you see type errors with the SDK:
940
+
941
+ ```bash
942
+ # Ensure you have the latest types
943
+ pnpm add -D @types/node
944
+
945
+ # Type check
946
+ pnpm typecheck
947
+ ```
948
+
949
+ ---
950
+
951
+ ## Complete Example
952
+
953
+ Here's a complete agent implementation with all features:
954
+
955
+ ```typescript
956
+ import { config } from 'dotenv'
957
+ import { resolve } from 'path'
958
+ import { MarchAgentApp, Message } from '@march/agent'
959
+ import { ChatOpenAI } from '@langchain/openai'
960
+ import { HumanMessage, AIMessage, SystemMessage, BaseMessage } from '@langchain/core/messages'
961
+
962
+ // Load environment
963
+ config({ path: resolve(process.cwd(), '.env'), override: true })
964
+
965
+ const SYSTEM_PROMPT = `You are a helpful assistant. Be concise and helpful.`
966
+
967
+ // Initialize LLMs
968
+ const llm = new ChatOpenAI({
969
+ model: process.env.OPENAI_MODEL || 'gpt-4o-mini',
970
+ streaming: true,
971
+ openAIApiKey: process.env.OPENAI_API_KEY,
972
+ })
973
+
974
+ const visionLlm = new ChatOpenAI({
975
+ model: process.env.OPENAI_VISION_MODEL || 'gpt-4o',
976
+ streaming: true,
977
+ openAIApiKey: process.env.OPENAI_API_KEY,
978
+ })
979
+
980
+ // Initialize app
981
+ const app = new MarchAgentApp({
982
+ gatewayUrl: process.env.GATEWAY_URL || 'agent-gateway:8080',
983
+ apiKey: process.env.GATEWAY_API_KEY || 'agent-key',
984
+ heartbeatInterval: parseInt(process.env.HEARTBEAT_INTERVAL || '10'),
985
+ secure: process.env.CONNECTION_SECURE === 'true',
986
+ })
987
+
988
+ async function main() {
989
+ // Register agent
990
+ const agent = await app.registerMe({
991
+ name: 'complete-agent-ts',
992
+ about: 'A complete TypeScript agent example',
993
+ document: 'Demonstrates all SDK features including history, attachments, and streaming.',
994
+ representationName: 'Complete Agent (TS)',
995
+ baseUrl: process.env.AGENT_BASE_URL || 'http://localhost:8060',
996
+ metadata: { version: '1.0.0' },
997
+ })
998
+
999
+ // Handle all messages
1000
+ agent.onMessage(async (message: Message, sender: string) => {
1001
+ const messages: BaseMessage[] = [new SystemMessage(SYSTEM_PROMPT)]
1002
+
1003
+ // Load conversation history
1004
+ if (message.conversation) {
1005
+ const history = await message.conversation.getAgentHistory({ limit: 20 })
1006
+ for (const msg of history) {
1007
+ if (msg.role === 'user') {
1008
+ messages.push(new HumanMessage(msg.content))
1009
+ } else {
1010
+ messages.push(new AIMessage(msg.content))
1011
+ }
1012
+ }
1013
+ }
1014
+
1015
+ // Handle attachments
1016
+ let selectedLlm = llm
1017
+ if (message.hasAttachment() && message.attachment) {
1018
+ const attachment = message.attachment
1019
+
1020
+ if (attachment.contentType?.startsWith('image/')) {
1021
+ // Image attachment - use vision model
1022
+ selectedLlm = visionLlm
1023
+ try {
1024
+ const base64Image = await message.getAttachmentBase64()
1025
+ messages.push(new HumanMessage({
1026
+ content: [
1027
+ { type: 'text', text: message.content || 'Describe this image' },
1028
+ {
1029
+ type: 'image_url',
1030
+ image_url: { url: `data:${attachment.contentType};base64,${base64Image}` },
1031
+ },
1032
+ ],
1033
+ }))
1034
+ } catch (e) {
1035
+ messages.push(new HumanMessage(
1036
+ `${message.content}\n\n[Image could not be processed: ${attachment.filename}]`
1037
+ ))
1038
+ }
1039
+ } else if (attachment.contentType === 'application/pdf') {
1040
+ // PDF attachment
1041
+ messages.push(new HumanMessage(
1042
+ `${message.content}\n\nA PDF was attached: ${attachment.filename} (${Math.round((attachment.size || 0) / 1024)} KB)`
1043
+ ))
1044
+ } else {
1045
+ // Other attachment
1046
+ messages.push(new HumanMessage(
1047
+ `${message.content}\n\nFile attached: ${attachment.filename}`
1048
+ ))
1049
+ }
1050
+ } else {
1051
+ // Text-only message
1052
+ messages.push(new HumanMessage(message.content))
1053
+ }
1054
+
1055
+ // Create streamer with metadata
1056
+ const streamer = agent.streamer(message)
1057
+ streamer.setMessageMetadata({
1058
+ model: selectedLlm === visionLlm ? (process.env.OPENAI_VISION_MODEL || 'gpt-4o') : (process.env.OPENAI_MODEL || 'gpt-4o-mini'),
1059
+ hasAttachment: message.hasAttachment(),
1060
+ })
1061
+
1062
+ // Stream response
1063
+ try {
1064
+ const stream = await selectedLlm.stream(messages)
1065
+ for await (const chunk of stream) {
1066
+ if (chunk.content) {
1067
+ streamer.stream(chunk.content as string)
1068
+ }
1069
+ }
1070
+ } catch (e) {
1071
+ streamer.stream('I encountered an error. Please try again.')
1072
+ }
1073
+
1074
+ await streamer.finish()
1075
+ })
1076
+
1077
+ // Start agent
1078
+ console.log('Starting agent...')
1079
+ await app.run()
1080
+ }
1081
+
1082
+ main().catch((error) => {
1083
+ console.error('Failed to start agent:', error)
1084
+ process.exit(1)
1085
+ })
1086
+ ```
1087
+
1088
+ ---
1089
+
1090
+ ## License
1091
+
1092
+ MIT