concevent-ai-agent-sdk 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +240 -0
- package/package.json +1 -1
package/README.md
CHANGED
@@ -84,6 +84,7 @@ console.log(result.message);
 - [Tool Definitions](#tool-definitions)
 - [Callbacks & Events](#callbacks--events)
 - [Types](#types)
+- [Streaming](#streaming)
 - [Advanced Usage](#advanced-usage)
 - [Conversation Summarization](#conversation-summarization)
 - [Error Handling](#error-handling)
@@ -134,6 +135,7 @@ const agent = createAgent(config: AgentConfig): Agent;
 | `temperature` | `number` | ❌ | `0.1` | Sampling temperature (0-2) |
 | `reasoningEffort` | `'low' \| 'medium' \| 'high'` | ❌ | `'high'` | Reasoning effort level for supported models |
 | `maxIterations` | `number` | ❌ | `20` | Maximum tool execution iterations per chat |
+| `stream` | `boolean` | ❌ | `true` | Enable streaming responses with delta callbacks |
 | `summarization` | `SummarizationConfig` | ❌ | `{ enabled: true }` | Summarization settings |
 | `errorMessages` | `ErrorMessages` | ❌ | Default messages | Custom error messages |

@@ -408,6 +410,8 @@ interface AgentCallbacks {
       tokenCount?: number;
     }
   ) => void;
+  onMessageDelta?: (delta: string) => void;
+  onReasoningDelta?: (detail: ReasoningDetail) => void;
   onToolCallStart?: (calls: ToolCallStartData[]) => void;
   onToolResult?: (result: ToolResultData) => void;
   onUsageUpdate?: (usage: UsageMetadata) => void;
@@ -424,6 +428,10 @@ interface AgentCallbacks {
 }
 ```

+> ⚠️ **Security Note for Browser/Client-Side Usage**
+>
+> When using callbacks in browser environments, be careful not to expose sensitive information. Callbacks like `onToolResult`, `onThinking`, `onSummarizationEnd`, and `onComplete` may contain internal data, tool execution details, or conversation content that should not be logged to the browser console or sent to client-side analytics in production. The SDK does not restrict this by design, as server-side integrations may safely log such data. It is the consumer's responsibility to sanitize or filter callback data before exposing it in client-side contexts.
+
 #### Callback Examples

 ```typescript
@@ -441,6 +449,18 @@ await agent.chat("Help me with my task", context, {
     }
   },

+  // Called for each chunk of streaming message content
+  onMessageDelta: (delta) => {
+    process.stdout.write(delta); // Real-time output
+  },
+
+  // Called for each chunk of streaming reasoning (for reasoning models)
+  onReasoningDelta: (detail) => {
+    if (detail.text) {
+      process.stdout.write(detail.text);
+    }
+  },
+
   // Called before tool execution starts
   onToolCallStart: (calls) => {
     calls.forEach((call) => {
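Building on the security note above: in a browser, a conservative wiring surfaces only user-visible text and tool names while keeping raw tool results and reasoning out of console logs and analytics. The sketch below is illustrative only; `userInput`, `context`, `updateUI`, `showToolIndicator`, and `hideToolIndicator` are placeholders in the spirit of the README's own examples, not SDK APIs.

```typescript
// Illustrative browser-side wiring: only client-safe data leaves the callbacks.
let messageBuffer = "";

await agent.chat(userInput, context, {
  // User-visible message text is safe to render incrementally
  onMessageDelta: (delta) => {
    messageBuffer += delta;
    updateUI(messageBuffer);
  },

  // Show tool names only; do not log raw arguments or results client-side
  onToolCallStart: (calls) => showToolIndicator(calls.map((c) => c.name)),
  onToolResult: (result) => {
    hideToolIndicator(result.functionName);
    // Avoid: console.log(result) or forwarding `result` to client-side analytics
  },

  // Reasoning/thinking output is intentionally not rendered or persisted here
});
```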
@@ -586,6 +606,154 @@ interface ToolResultData {

 ---

+## Streaming
+
+The SDK supports streaming responses by default, providing real-time updates as the model generates content.
+
+### Enabling/Disabling Streaming
+
+Streaming is **enabled by default**. You can disable it in the agent configuration:
+
+```typescript
+const agent = createAgent({
+  apiKey: process.env.OPENROUTER_API_KEY!,
+  model: "anthropic/claude-3.5-sonnet",
+  systemPrompts: ["You are a helpful assistant."],
+  tools: [],
+  stream: false, // Disable streaming (default: true)
+});
+```
+
+### Streaming Callbacks
+
+When streaming is enabled, you can use delta callbacks to receive real-time updates:
+
+#### onMessageDelta
+
+Called whenever a new chunk of the message content is received:
+
+```typescript
+await agent.chat("Tell me a story", context, {
+  onMessageDelta: (delta) => {
+    // Append each chunk to your UI in real-time
+    process.stdout.write(delta);
+  },
+  onMessage: (fullMessage) => {
+    // Called when the complete message is ready
+    console.log("\n\nFull message:", fullMessage);
+  },
+});
+```
+
+#### onReasoningDelta
+
+Called whenever a new chunk of model reasoning is received (for models that support reasoning):
+
+```typescript
+await agent.chat("Solve this problem step by step", context, {
+  onReasoningDelta: (detail) => {
+    if (detail.type === "reasoning.text" && detail.text) {
+      // Stream the reasoning/thinking output
+      process.stdout.write(detail.text);
+    }
+  },
+  onThinking: (fullThinking, details) => {
+    // Called when reasoning is complete
+    console.log("\n\nFull reasoning:", fullThinking);
+  },
+});
+```
+
+### Complete Streaming Example
+
+```typescript
+import { createAgent } from "concevent-ai-agent-sdk";
+
+const agent = createAgent({
+  apiKey: process.env.OPENROUTER_API_KEY!,
+  model: "anthropic/claude-3.5-sonnet",
+  systemPrompts: ["You are a helpful assistant."],
+  tools: myTools,
+  stream: true, // Enabled by default
+});
+
+let messageBuffer = "";
+let reasoningBuffer = "";
+
+const result = await agent.chat(
+  "Explain quantum computing",
+  { userId: "user-123", timezone: "UTC" },
+  {
+    // Real-time message chunks
+    onMessageDelta: (delta) => {
+      messageBuffer += delta;
+      updateUI(messageBuffer); // Update your UI with partial content
+    },
+
+    // Real-time reasoning chunks (for reasoning models)
+    onReasoningDelta: (detail) => {
+      if (detail.text) {
+        reasoningBuffer += detail.text;
+        updateReasoningUI(reasoningBuffer);
+      }
+    },
+
+    // Tool execution still works with streaming
+    onToolCallStart: (calls) => {
+      showToolIndicator(calls.map((c) => c.name));
+    },
+
+    onToolResult: (result) => {
+      hideToolIndicator(result.functionName);
+    },
+
+    // Final complete message
+    onMessage: (message, reasoning) => {
+      console.log("Complete message received");
+    },
+  }
+);
+```
+
+### Streaming Event Types
+
+The SDK exports event types for building event-driven streaming systems:
+
+```typescript
+import { createEvent } from "concevent-ai-agent-sdk";
+import type {
+  MessageDeltaEventData,
+  ReasoningDeltaEventData,
+} from "concevent-ai-agent-sdk";
+
+// Create typed streaming events
+const messageDeltaEvent = createEvent("message_delta", {
+  delta: "Hello, ",
+});
+
+const reasoningDeltaEvent = createEvent("reasoning_delta", {
+  detail: {
+    type: "reasoning.text",
+    text: "Let me think about this...",
+    format: "text",
+    index: 0,
+  },
+});
+```
+
+### Streaming vs Non-Streaming
+
+| Feature             | Streaming (`stream: true`)              | Non-Streaming (`stream: false`) |
+| ------------------- | --------------------------------------- | ------------------------------- |
+| Message delivery    | Real-time chunks via `onMessageDelta`   | Complete message only           |
+| Reasoning output    | Real-time via `onReasoningDelta`        | Complete reasoning only         |
+| Perceived latency   | Lower (immediate feedback)              | Higher (wait for completion)    |
+| Tool calls          | Fully supported                         | Fully supported                 |
+| Token usage         | Included in final chunk                 | Included in response            |
+| Default             | ✅ Enabled                              | Must explicitly disable         |
+
+---
+
 ## Advanced Usage

 ### Conversation Summarization
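One small addition to the table above: token usage reaches your code through the documented `onUsageUpdate` callback, and with streaming it is included with the final chunk. A minimal sketch, assuming only the callbacks documented in this README (the exact `UsageMetadata` fields are defined in the Types section):

```typescript
// Minimal sketch: combine streamed text with usage reporting.
await agent.chat("Summarize this document", context, {
  // Stream user-visible text as it is generated
  onMessageDelta: (delta) => process.stdout.write(delta),

  // Token usage is reported via onUsageUpdate; with streaming it is
  // delivered once the final chunk has been received.
  onUsageUpdate: (usage) => {
    console.log("\nUsage:", usage); // UsageMetadata (see Types)
  },
});
```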
@@ -726,6 +894,76 @@ agent.chat("Hello", {
 abortController.abort();
 ```

+### Serverless / Stateless Deployments
+
+When deploying in serverless environments (e.g., AWS Lambda, Vercel, Cloudflare Workers) or stateless API routes (e.g., Next.js API routes), the agent instance is created fresh for each request. To maintain conversation continuity, **the client must store and forward the conversation history with each request**.
+
+#### Pattern
+
+1. **Client** maintains `conversationHistory` state
+2. **Client** sends the history with each chat request
+3. **Server** creates a fresh agent, restores history via `setHistory()`, processes the message
+4. **Server** returns the result including `conversationHistory`
+5. **Client** updates its local history from the response
+
+#### Server-Side Example (Next.js API Route)
+
+```typescript
+import { createAgent } from "concevent-ai-agent-sdk";
+import type { ChatMessage } from "concevent-ai-agent-sdk";
+
+export async function POST(request: Request) {
+  const { message, conversationHistory = [] } = await request.json();
+
+  const agent = createAgent({
+    apiKey: process.env.API_KEY!,
+    model: "anthropic/claude-3.5-sonnet",
+    systemPrompts: ["You are a helpful assistant."],
+    tools: myTools,
+  });
+
+  // Restore conversation history from the client
+  if (conversationHistory.length > 0) {
+    agent.setHistory(conversationHistory);
+  }
+
+  const result = await agent.chat(message, {
+    userId: "user-123",
+    timezone: "UTC",
+  });
+
+  // Return result - client should use result.conversationHistory for next request
+  return Response.json({
+    message: result.message,
+    conversationHistory: result.conversationHistory,
+  });
+}
+```
+
+#### Client-Side Example
+
+```typescript
+const [conversationHistory, setConversationHistory] = useState<ChatMessage[]>(
+  []
+);
+
+async function sendMessage(message: string) {
+  const response = await fetch("/api/chat", {
+    method: "POST",
+    body: JSON.stringify({ message, conversationHistory }),
+  });

+  const result = await response.json();
+
+  // Update local history for the next request
+  setConversationHistory(result.conversationHistory);
+
+  return result.message;
+}
+```
+
+> **Note:** The SDK handles summarization automatically when context limits are approached. The summarized history is included in `result.conversationHistory`, so clients always receive the properly managed history state.
+
 ---

 ## Exports Summary
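The serverless route above returns the complete message in one JSON response. If streaming is also wanted from a stateless route, the documented `onMessageDelta` callback can be bridged to a standard web `ReadableStream`. The sketch below is one possible arrangement under that assumption (error handling and returning the updated history are omitted); it is not an API provided by the SDK itself.

```typescript
import { createAgent } from "concevent-ai-agent-sdk";

export async function POST(request: Request) {
  const { message, conversationHistory = [] } = await request.json();

  const agent = createAgent({
    apiKey: process.env.API_KEY!,
    model: "anthropic/claude-3.5-sonnet",
    systemPrompts: ["You are a helpful assistant."],
    tools: [],
  });

  // Restore history exactly as in the non-streaming route above
  if (conversationHistory.length > 0) {
    agent.setHistory(conversationHistory);
  }

  const encoder = new TextEncoder();
  const stream = new ReadableStream<Uint8Array>({
    start(controller) {
      agent
        .chat(message, { userId: "user-123", timezone: "UTC" }, {
          // Forward each streamed chunk to the browser as it arrives
          onMessageDelta: (delta) => controller.enqueue(encoder.encode(delta)),
        })
        .then(() => controller.close())
        .catch((err) => controller.error(err));
    },
  });

  // Note: the updated conversationHistory is not returned on this path;
  // send it separately (e.g., a trailing event or a follow-up request).
  return new Response(stream, {
    headers: { "Content-Type": "text/plain; charset=utf-8" },
  });
}
```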
@@ -759,6 +997,8 @@ export type {
   UsageUpdateEventData,
   ErrorEventData,
   CompleteEventData,
+  MessageDeltaEventData,
+  ReasoningDeltaEventData,
   AgentEvent,
 } from "./types";
 ```
package/package.json
CHANGED