praisonai 1.0.19 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/context.d.ts +68 -0
- package/dist/agent/context.js +119 -0
- package/dist/agent/enhanced.d.ts +92 -0
- package/dist/agent/enhanced.js +267 -0
- package/dist/agent/handoff.d.ts +82 -0
- package/dist/agent/handoff.js +124 -0
- package/dist/agent/image.d.ts +51 -0
- package/dist/agent/image.js +93 -0
- package/dist/agent/prompt-expander.d.ts +40 -0
- package/dist/agent/prompt-expander.js +84 -0
- package/dist/agent/query-rewriter.d.ts +38 -0
- package/dist/agent/query-rewriter.js +79 -0
- package/dist/agent/research.d.ts +52 -0
- package/dist/agent/research.js +118 -0
- package/dist/agent/router.d.ts +77 -0
- package/dist/agent/router.js +113 -0
- package/dist/agent/simple.js +1 -1
- package/dist/agent/types.js +2 -2
- package/dist/auto/index.d.ts +56 -0
- package/dist/auto/index.js +142 -0
- package/dist/cli/index.d.ts +20 -0
- package/dist/cli/index.js +150 -0
- package/dist/db/index.d.ts +23 -0
- package/dist/db/index.js +72 -0
- package/dist/db/memory-adapter.d.ts +42 -0
- package/dist/db/memory-adapter.js +146 -0
- package/dist/db/types.d.ts +113 -0
- package/dist/db/types.js +5 -0
- package/dist/eval/index.d.ts +61 -0
- package/dist/eval/index.js +157 -0
- package/dist/guardrails/index.d.ts +82 -0
- package/dist/guardrails/index.js +202 -0
- package/dist/guardrails/llm-guardrail.d.ts +40 -0
- package/dist/guardrails/llm-guardrail.js +91 -0
- package/dist/index.d.ts +26 -1
- package/dist/index.js +122 -1
- package/dist/knowledge/chunking.d.ts +55 -0
- package/dist/knowledge/chunking.js +157 -0
- package/dist/knowledge/rag.d.ts +80 -0
- package/dist/knowledge/rag.js +147 -0
- package/dist/llm/openai.js +1 -1
- package/dist/llm/providers/anthropic.d.ts +33 -0
- package/dist/llm/providers/anthropic.js +291 -0
- package/dist/llm/providers/base.d.ts +25 -0
- package/dist/llm/providers/base.js +43 -0
- package/dist/llm/providers/google.d.ts +27 -0
- package/dist/llm/providers/google.js +275 -0
- package/dist/llm/providers/index.d.ts +43 -0
- package/dist/llm/providers/index.js +116 -0
- package/dist/llm/providers/openai.d.ts +18 -0
- package/dist/llm/providers/openai.js +203 -0
- package/dist/llm/providers/types.d.ts +94 -0
- package/dist/llm/providers/types.js +5 -0
- package/dist/memory/memory.d.ts +92 -0
- package/dist/memory/memory.js +169 -0
- package/dist/observability/index.d.ts +86 -0
- package/dist/observability/index.js +166 -0
- package/dist/planning/index.d.ts +133 -0
- package/dist/planning/index.js +228 -0
- package/dist/session/index.d.ts +111 -0
- package/dist/session/index.js +250 -0
- package/dist/skills/index.d.ts +70 -0
- package/dist/skills/index.js +233 -0
- package/dist/telemetry/index.d.ts +102 -0
- package/dist/telemetry/index.js +187 -0
- package/dist/tools/decorator.d.ts +91 -0
- package/dist/tools/decorator.js +165 -0
- package/dist/tools/index.d.ts +2 -0
- package/dist/tools/index.js +3 -0
- package/dist/tools/mcpSse.d.ts +41 -0
- package/dist/tools/mcpSse.js +108 -0
- package/dist/workflows/index.d.ts +97 -0
- package/dist/workflows/index.js +216 -0
- package/package.json +5 -2
package/dist/knowledge/chunking.js
CHANGED
@@ -1 +1,158 @@
 "use strict";
+/**
+ * Chunking - Text chunking utilities for RAG
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.Chunking = void 0;
+exports.createChunking = createChunking;
+/**
+ * Chunking class for splitting text into chunks
+ */
+class Chunking {
+    constructor(config = {}) {
+        this.chunkSize = config.chunkSize ?? 500;
+        this.overlap = config.overlap ?? 50;
+        this.strategy = config.strategy ?? 'size';
+        this.separator = config.separator ?? ' ';
+    }
+    /**
+     * Chunk text based on configured strategy
+     */
+    chunk(text) {
+        switch (this.strategy) {
+            case 'sentence':
+                return this.chunkBySentence(text);
+            case 'paragraph':
+                return this.chunkByParagraph(text);
+            case 'semantic':
+                return this.chunkBySemantic(text);
+            default:
+                return this.chunkBySize(text);
+        }
+    }
+    /**
+     * Chunk by fixed size with overlap
+     */
+    chunkBySize(text) {
+        const chunks = [];
+        let startOffset = 0;
+        let index = 0;
+        while (startOffset < text.length) {
+            const endOffset = Math.min(startOffset + this.chunkSize, text.length);
+            const content = text.slice(startOffset, endOffset);
+            chunks.push({
+                content,
+                index,
+                startOffset,
+                endOffset
+            });
+            startOffset = endOffset - this.overlap;
+            if (startOffset >= text.length - this.overlap)
+                break;
+            index++;
+        }
+        return chunks;
+    }
+    /**
+     * Chunk by sentences
+     */
+    chunkBySentence(text) {
+        const sentences = text.match(/[^.!?]+[.!?]+/g) || [text];
+        return sentences.map((content, index) => {
+            const startOffset = text.indexOf(content);
+            return {
+                content: content.trim(),
+                index,
+                startOffset,
+                endOffset: startOffset + content.length
+            };
+        });
+    }
+    /**
+     * Chunk by paragraphs
+     */
+    chunkByParagraph(text) {
+        const paragraphs = text.split(/\n\n+/).filter(p => p.trim().length > 0);
+        let offset = 0;
+        return paragraphs.map((content, index) => {
+            const startOffset = text.indexOf(content, offset);
+            offset = startOffset + content.length;
+            return {
+                content: content.trim(),
+                index,
+                startOffset,
+                endOffset: offset
+            };
+        });
+    }
+    /**
+     * Chunk by semantic boundaries (simplified)
+     */
+    chunkBySemantic(text) {
+        // Simplified semantic chunking - split on headers, lists, etc.
+        const patterns = [
+            /^#{1,6}\s+.+$/gm, // Headers
+            /^\s*[-*]\s+/gm, // List items
+            /^\d+\.\s+/gm // Numbered lists
+        ];
+        let chunks = [];
+        let lastEnd = 0;
+        let index = 0;
+        // Find semantic boundaries
+        const boundaries = [0];
+        for (const pattern of patterns) {
+            let match;
+            while ((match = pattern.exec(text)) !== null) {
+                boundaries.push(match.index);
+            }
+        }
+        boundaries.push(text.length);
+        boundaries.sort((a, b) => a - b);
+        // Create chunks from boundaries
+        for (let i = 0; i < boundaries.length - 1; i++) {
+            const start = boundaries[i];
+            const end = boundaries[i + 1];
+            const content = text.slice(start, end).trim();
+            if (content.length > 0) {
+                chunks.push({
+                    content,
+                    index: index++,
+                    startOffset: start,
+                    endOffset: end
+                });
+            }
+        }
+        return chunks.length > 0 ? chunks : this.chunkBySize(text);
+    }
+    /**
+     * Merge small chunks
+     */
+    mergeSmallChunks(chunks, minSize = 100) {
+        const merged = [];
+        let current = null;
+        for (const chunk of chunks) {
+            if (!current) {
+                current = { ...chunk };
+            }
+            else if (current.content.length < minSize) {
+                current.content += '\n' + chunk.content;
+                current.endOffset = chunk.endOffset;
+            }
+            else {
+                merged.push(current);
+                current = { ...chunk };
+            }
+        }
+        if (current) {
+            merged.push(current);
+        }
+        return merged.map((c, i) => ({ ...c, index: i }));
+    }
+}
+exports.Chunking = Chunking;
+/**
+ * Create a Chunking instance
+ */
+function createChunking(config) {
+    return new Chunking(config);
+}
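The new chunking module has a small surface: build a Chunking (directly or via createChunking), pick a strategy ('size', 'sentence', 'paragraph', or 'semantic'), and call chunk(text) to get records with content, index, startOffset, and endOffset. A minimal usage sketch in TypeScript; the import path is an assumption (the actual public re-exports live in package/dist/index.js, which this excerpt only lists):

// Usage sketch, not part of the diff. The import path is an assumption; the
// package entry point may re-export Chunking from package/dist/index.js instead.
import { createChunking } from 'praisonai/dist/knowledge/chunking';

const text = 'First paragraph about retrieval.\n\nSecond paragraph about chunk overlap.';
const chunker = createChunking({ strategy: 'paragraph' });
for (const chunk of chunker.chunk(text)) {
  // Offsets let callers map each chunk back to its position in the source text.
  console.log(chunk.index, chunk.startOffset, chunk.endOffset, chunk.content);
}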
package/dist/knowledge/rag.d.ts
ADDED
@@ -0,0 +1,80 @@
+/**
+ * Knowledge Base (RAG) - Retrieval Augmented Generation
+ */
+export interface Document {
+    id: string;
+    content: string;
+    metadata?: Record<string, any>;
+    embedding?: number[];
+}
+export interface SearchResult {
+    document: Document;
+    score: number;
+}
+export interface EmbeddingProvider {
+    embed(text: string): Promise<number[]>;
+    embedBatch(texts: string[]): Promise<number[][]>;
+}
+export interface KnowledgeBaseConfig {
+    embeddingProvider?: EmbeddingProvider;
+    similarityThreshold?: number;
+    maxResults?: number;
+}
+/**
+ * Simple in-memory vector store for RAG
+ */
+export declare class KnowledgeBase {
+    private documents;
+    private embeddingProvider?;
+    private similarityThreshold;
+    private maxResults;
+    constructor(config?: KnowledgeBaseConfig);
+    /**
+     * Add a document to the knowledge base
+     */
+    add(doc: Omit<Document, 'embedding'>): Promise<Document>;
+    /**
+     * Add multiple documents
+     */
+    addBatch(docs: Array<Omit<Document, 'embedding'>>): Promise<Document[]>;
+    /**
+     * Get a document by ID
+     */
+    get(id: string): Document | undefined;
+    /**
+     * Delete a document
+     */
+    delete(id: string): boolean;
+    /**
+     * Search for similar documents
+     */
+    search(query: string, limit?: number): Promise<SearchResult[]>;
+    /**
+     * Simple text-based search fallback
+     */
+    private textSearch;
+    /**
+     * Calculate cosine similarity between two vectors
+     */
+    private cosineSimilarity;
+    /**
+     * Get all documents
+     */
+    list(): Document[];
+    /**
+     * Clear all documents
+     */
+    clear(): void;
+    /**
+     * Get document count
+     */
+    get size(): number;
+    /**
+     * Build context from search results for RAG
+     */
+    buildContext(results: SearchResult[]): string;
+}
+/**
+ * Create a knowledge base
+ */
+export declare function createKnowledgeBase(config?: KnowledgeBaseConfig): KnowledgeBase;
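The EmbeddingProvider interface above is the knowledge base's only extension point: anything that maps a text to a numeric vector (and a batch of texts to a batch of vectors) satisfies it. A deliberately naive sketch for local testing, with the import path assumed from the dist layout above; a real setup would call an embedding model instead:

// Toy embedder for tests: a fixed-length letter-frequency vector per text.
// Not a real embedding model; it only demonstrates the interface shape.
// The import path is an assumption based on the dist layout above.
import type { EmbeddingProvider } from 'praisonai/dist/knowledge/rag';

class CharFrequencyEmbedder implements EmbeddingProvider {
  async embed(text: string): Promise<number[]> {
    const vec = new Array(26).fill(0);
    for (const ch of text.toLowerCase()) {
      const i = ch.charCodeAt(0) - 97; // 'a' => 0 ... 'z' => 25
      if (i >= 0 && i < 26) vec[i] += 1;
    }
    const norm = Math.sqrt(vec.reduce((s, v) => s + v * v, 0)) || 1;
    return vec.map(v => v / norm); // unit length, so cosine similarity behaves sensibly
  }
  async embedBatch(texts: string[]): Promise<number[][]> {
    return Promise.all(texts.map(t => this.embed(t)));
  }
}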
package/dist/knowledge/rag.js
ADDED
@@ -0,0 +1,147 @@
+"use strict";
+/**
+ * Knowledge Base (RAG) - Retrieval Augmented Generation
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.KnowledgeBase = void 0;
+exports.createKnowledgeBase = createKnowledgeBase;
+/**
+ * Simple in-memory vector store for RAG
+ */
+class KnowledgeBase {
+    constructor(config = {}) {
+        this.documents = new Map();
+        this.embeddingProvider = config.embeddingProvider;
+        this.similarityThreshold = config.similarityThreshold ?? 0.7;
+        this.maxResults = config.maxResults ?? 5;
+    }
+    /**
+     * Add a document to the knowledge base
+     */
+    async add(doc) {
+        const document = { ...doc };
+        if (this.embeddingProvider) {
+            document.embedding = await this.embeddingProvider.embed(doc.content);
+        }
+        this.documents.set(doc.id, document);
+        return document;
+    }
+    /**
+     * Add multiple documents
+     */
+    async addBatch(docs) {
+        if (this.embeddingProvider && docs.length > 0) {
+            const embeddings = await this.embeddingProvider.embedBatch(docs.map(d => d.content));
+            return Promise.all(docs.map(async (doc, i) => {
+                const document = { ...doc, embedding: embeddings[i] };
+                this.documents.set(doc.id, document);
+                return document;
+            }));
+        }
+        return Promise.all(docs.map(doc => this.add(doc)));
+    }
+    /**
+     * Get a document by ID
+     */
+    get(id) {
+        return this.documents.get(id);
+    }
+    /**
+     * Delete a document
+     */
+    delete(id) {
+        return this.documents.delete(id);
+    }
+    /**
+     * Search for similar documents
+     */
+    async search(query, limit) {
+        const maxResults = limit ?? this.maxResults;
+        if (!this.embeddingProvider) {
+            // Fallback to simple text matching
+            return this.textSearch(query, maxResults);
+        }
+        const queryEmbedding = await this.embeddingProvider.embed(query);
+        const results = [];
+        for (const doc of this.documents.values()) {
+            if (doc.embedding) {
+                const score = this.cosineSimilarity(queryEmbedding, doc.embedding);
+                if (score >= this.similarityThreshold) {
+                    results.push({ document: doc, score });
+                }
+            }
+        }
+        return results
+            .sort((a, b) => b.score - a.score)
+            .slice(0, maxResults);
+    }
+    /**
+     * Simple text-based search fallback
+     */
+    textSearch(query, limit) {
+        const queryLower = query.toLowerCase();
+        const results = [];
+        for (const doc of this.documents.values()) {
+            const contentLower = doc.content.toLowerCase();
+            if (contentLower.includes(queryLower)) {
+                const score = queryLower.length / contentLower.length;
+                results.push({ document: doc, score: Math.min(score * 10, 1) });
+            }
+        }
+        return results
+            .sort((a, b) => b.score - a.score)
+            .slice(0, limit);
+    }
+    /**
+     * Calculate cosine similarity between two vectors
+     */
+    cosineSimilarity(a, b) {
+        if (a.length !== b.length)
+            return 0;
+        let dotProduct = 0;
+        let normA = 0;
+        let normB = 0;
+        for (let i = 0; i < a.length; i++) {
+            dotProduct += a[i] * b[i];
+            normA += a[i] * a[i];
+            normB += b[i] * b[i];
+        }
+        const denominator = Math.sqrt(normA) * Math.sqrt(normB);
+        return denominator === 0 ? 0 : dotProduct / denominator;
+    }
+    /**
+     * Get all documents
+     */
+    list() {
+        return Array.from(this.documents.values());
+    }
+    /**
+     * Clear all documents
+     */
+    clear() {
+        this.documents.clear();
+    }
+    /**
+     * Get document count
+     */
+    get size() {
+        return this.documents.size;
+    }
+    /**
+     * Build context from search results for RAG
+     */
+    buildContext(results) {
+        if (results.length === 0)
+            return '';
+        return results
+            .map((r, i) => `[${i + 1}] ${r.document.content}`)
+            .join('\n\n');
+    }
+}
+exports.KnowledgeBase = KnowledgeBase;
+/**
+ * Create a knowledge base
+ */
+function createKnowledgeBase(config) {
+    return new KnowledgeBase(config);
+}
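Taken together with the chunking module, the intended flow is add (or addBatch), search, then buildContext to produce a numbered context block for a prompt. A sketch with no embedding provider configured, so search() uses the substring-based textSearch fallback shown above; the import path is again an assumption:

// Usage sketch, not part of the diff.
import { createKnowledgeBase } from 'praisonai/dist/knowledge/rag';

async function demo() {
  const kb = createKnowledgeBase({ maxResults: 3 }); // no embeddingProvider: text-search fallback
  await kb.addBatch([
    { id: 'doc-1', content: 'Chunking splits documents before they are embedded.' },
    { id: 'doc-2', content: 'Cosine similarity ranks retrieved documents by relevance.' },
  ]);
  const results = await kb.search('cosine similarity');
  // buildContext yields the numbered context block to prepend to an LLM prompt.
  console.log(kb.buildContext(results));
}
demo();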
package/dist/llm/openai.js
CHANGED
@@ -73,7 +73,7 @@ async function getOpenAIClient() {
     return openAIInstance;
 }
 class OpenAIService {
-    constructor(model = 'gpt-
+    constructor(model = 'gpt-5-nano') {
         this.client = null;
         this.model = model;
         logger_1.Logger.debug(`OpenAIService initialized with model: ${model}`);
package/dist/llm/providers/anthropic.d.ts
ADDED
@@ -0,0 +1,33 @@
+/**
+ * Anthropic Provider - Implementation for Anthropic Claude API
+ */
+import { BaseProvider } from './base';
+import type { ProviderConfig, GenerateTextOptions, GenerateTextResult, StreamTextOptions, StreamChunk, GenerateObjectOptions, GenerateObjectResult, Message, ToolDefinition } from './types';
+interface AnthropicMessage {
+    role: 'user' | 'assistant';
+    content: string | Array<{
+        type: string;
+        text?: string;
+        tool_use_id?: string;
+        content?: string;
+    }>;
+}
+interface AnthropicTool {
+    name: string;
+    description: string;
+    input_schema: Record<string, any>;
+}
+export declare class AnthropicProvider extends BaseProvider {
+    readonly providerId = "anthropic";
+    private apiKey;
+    private baseUrl;
+    constructor(modelId: string, config?: ProviderConfig);
+    generateText(options: GenerateTextOptions): Promise<GenerateTextResult>;
+    streamText(options: StreamTextOptions): Promise<AsyncIterable<StreamChunk>>;
+    generateObject<T = any>(options: GenerateObjectOptions<T>): Promise<GenerateObjectResult<T>>;
+    private extractSystemPrompt;
+    protected formatMessages(messages: Message[]): AnthropicMessage[];
+    protected formatTools(tools: ToolDefinition[]): AnthropicTool[];
+    private mapStopReason;
+}
+export {};
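This declaration fixes only the provider surface (generateText, streamText, generateObject); the field layout of ProviderConfig, GenerateTextOptions, and GenerateTextResult lives in providers/types.d.ts, which is listed but not shown here. A hypothetical call, with the option and result field names below assumed rather than confirmed by this diff:

// Hypothetical sketch; import path and field names are assumptions.
import { AnthropicProvider } from 'praisonai/dist/llm/providers/anthropic';

async function demo() {
  // Assumed fields: ProviderConfig.apiKey, GenerateTextOptions.messages, GenerateTextResult.text.
  const provider = new AnthropicProvider('claude-sonnet-4-20250514', {
    apiKey: process.env.ANTHROPIC_API_KEY,
  });
  const result = await provider.generateText({
    messages: [{ role: 'user', content: 'Summarize retrieval-augmented generation in one sentence.' }],
  });
  console.log(result.text);
}
demo();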