langfn 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +32 -0
- package/dist/chunk-6NDYO7WC.js +131 -0
- package/dist/chunk-IDYTII3W.js +47 -0
- package/dist/chunk-LIUWQ4NY.js +49 -0
- package/dist/chunk-MHMMFGVC.js +60 -0
- package/dist/chunk-MLKGABMK.js +9 -0
- package/dist/index.d.ts +666 -0
- package/dist/index.js +1174 -0
- package/dist/openai-4W5RU3CU.js +7 -0
- package/dist/openai-LHMGJO6V.js +7 -0
- package/dist/react-EKLNOUM4.js +7 -0
- package/dist/sse-4Y3LCWWO.js +13 -0
- package/dist/tool_agent-OA4BZHA6.js +7 -0
- package/package.json +63 -0
package/README.md
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# LangFn (TypeScript)
|
|
2
|
+
|
|
3
|
+
Comprehensive AI Development SDK for LLM-based workflows.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Model Abstraction**: Unified interface for OpenAI, Anthropic, Ollama, and more.
|
|
8
|
+
- **Orchestration**: Sequential, Parallel, and Map-Reduce chains.
|
|
9
|
+
- **Graph Workflows**: Stateful workflows with `StateGraph`.
|
|
10
|
+
- **Agents**: Functional `ToolAgent` and `ReActAgent`.
|
|
11
|
+
- **Structured Output**: Type-safe output parsing with Zod.
|
|
12
|
+
- **RAG**: Embeddings and Vector Store support.
|
|
13
|
+
- **Observability**: Built-in tracing.
|
|
14
|
+
|
|
15
|
+
## Usage
|
|
16
|
+
|
|
17
|
+
```typescript
|
|
18
|
+
import { langfn, models } from "langfn";
|
|
19
|
+
|
|
20
|
+
const lang = langfn({
|
|
21
|
+
model: new models.OpenAIChatModel({
|
|
22
|
+
apiKey: process.env.OPENAI_API_KEY
|
|
23
|
+
})
|
|
24
|
+
});
|
|
25
|
+
|
|
26
|
+
const response = await lang.complete("Hello world");
|
|
27
|
+
console.log(response.content);
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
## Documentation
|
|
31
|
+
|
|
32
|
+
See the [main documentation](https://docs.superfunctions.dev/langfn) for more details.
|
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
// src/models/base.ts
// Abstract marker base class for chat-capable model adapters; concrete
// providers (e.g. OpenAIChatModel below) implement complete/chat/stream.
var ChatModel = class {
};

// src/models/openai.ts
// Thin adapter over the OpenAI Chat Completions HTTP API.
var OpenAIChatModel = class extends ChatModel {
  provider = "openai";
  model;
  apiKey;
  baseUrl;
  organization;
  project;
  /**
   * @param {object} config
   * @param {string} config.apiKey - OpenAI API key (sent as a Bearer token).
   * @param {string} [config.model="gpt-4-turbo-preview"] - Model identifier.
   * @param {string} [config.baseUrl="https://api.openai.com/v1"] - API root URL.
   * @param {string} [config.organization] - Optional OpenAI-Organization header value.
   * @param {string} [config.project] - Optional OpenAI-Project header value.
   */
  constructor(config) {
    super();
    this.apiKey = config.apiKey;
    this.model = config.model || "gpt-4-turbo-preview";
    this.baseUrl = config.baseUrl || "https://api.openai.com/v1";
    this.organization = config.organization;
    this.project = config.project;
  }
  /**
   * Performs an authenticated request against the OpenAI API.
   * @param {string} path - Path appended to baseUrl (e.g. "/chat/completions").
   * @param {RequestInit} options - Fetch options; options.headers override the defaults.
   * @returns {Promise<Response>} The raw fetch Response (status already verified ok).
   * @throws {Error} "OpenAI API Error <status>: <body>" on a non-ok HTTP status.
   */
  async fetch(path, options) {
    const headers = {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${this.apiKey}`,
      ...this.organization ? { "OpenAI-Organization": this.organization } : {},
      ...this.project ? { "OpenAI-Project": this.project } : {}
    };
    const response = await fetch(`${this.baseUrl}${path}`, {
      ...options,
      headers: {
        ...headers,
        ...options.headers
      }
    });
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenAI API Error ${response.status}: ${error}`);
    }
    return response;
  }
  /**
   * Single-prompt completion, implemented as a one-message chat() call.
   * @param {{ prompt: string, metadata?: object }} request
   * @returns {Promise<{ content, usage, raw, trace_id }>}
   */
  async complete(request) {
    const chatResp = await this.chat({
      messages: [{ role: "user", content: request.prompt }],
      metadata: request.metadata
    });
    return {
      content: chatResp.message.content,
      usage: chatResp.usage,
      raw: chatResp.raw,
      trace_id: chatResp.trace_id
    };
  }
  /**
   * Non-streaming chat completion.
   * @param {{ messages: Array, tools?: Array, tool_choice?: any }} request
   * @returns {Promise<{ message, tool_calls, usage, raw }>} Normalized response;
   *   tool call arguments are JSON.parsed, usage is undefined when absent.
   */
  async chat(request) {
    const body = {
      model: this.model,
      messages: request.messages,
      tools: request.tools,
      tool_choice: request.tool_choice
    };
    const response = await this.fetch("/chat/completions", {
      method: "POST",
      body: JSON.stringify(body)
    });
    const data = await response.json();
    // Only the first choice is surfaced; n > 1 is not requested.
    const choice = data.choices[0];
    const message = choice.message;
    const tool_calls = message.tool_calls?.map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      arguments: JSON.parse(tc.function.arguments)
    }));
    const usage = data.usage ? {
      prompt_tokens: data.usage.prompt_tokens,
      completion_tokens: data.usage.completion_tokens,
      total_tokens: data.usage.total_tokens
    } : void 0;
    return {
      message: {
        role: message.role,
        content: message.content || ""
      },
      tool_calls,
      usage,
      raw: data
    };
  }
  /**
   * Streams a single-prompt completion as parsed SSE events.
   * Yields { type: "content", content, delta } for each content delta,
   * followed by a final { type: "end", finish_reason: "stop" } event.
   * @param {{ prompt: string }} request
   * @throws {Error} "No response body" when the fetch response has no body.
   */
  async *stream(request) {
    const response = await this.fetch("/chat/completions", {
      method: "POST",
      body: JSON.stringify({
        model: this.model,
        messages: [{ role: "user", content: request.prompt }],
        stream: true
      })
    });
    if (!response.body) throw new Error("No response body");
    // Extracts the content delta from one SSE line, or undefined for blank
    // lines, the "[DONE]" sentinel, non-data lines, or malformed JSON.
    const extractDelta = (line) => {
      if (line.trim() === "" || line.trim() === "data: [DONE]") return void 0;
      if (!line.startsWith("data: ")) return void 0;
      try {
        const data = JSON.parse(line.slice(6));
        return data.choices[0]?.delta?.content;
      } catch (e) {
        // Malformed chunks are skipped rather than aborting the whole stream.
        console.warn("Failed to parse stream chunk", e);
        return void 0;
      }
    };
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep the last (possibly partial) line for the next read.
        buffer = lines.pop() || "";
        for (const line of lines) {
          const delta = extractDelta(line);
          if (delta) {
            yield {
              type: "content",
              content: delta,
              delta
            };
          }
        }
      }
      // FIX: flush the decoder and process any final event left in the buffer.
      // Previously a trailing "data:" line without a terminating newline was
      // silently dropped when the stream closed.
      buffer += decoder.decode();
      const tail = extractDelta(buffer);
      if (tail) {
        yield {
          type: "content",
          content: tail,
          delta: tail
        };
      }
    } finally {
      // FIX: always release the reader lock, even when a read throws or the
      // consumer abandons the generator, so the stream can be canceled/GC'd.
      reader.releaseLock();
    }
    yield { type: "end", finish_reason: "stop" };
  }
};

export {
  ChatModel,
  OpenAIChatModel
};
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
// src/agents/tool_agent.ts
// A simple tool-calling loop: ask the model, execute any requested tools,
// feed the results back, and stop once the model answers without tool calls.
var ToolAgent = class {
  /**
   * @param {{ lang: object, tools: Array, max_iterations?: number }} options
   */
  constructor(options) {
    this.options = options;
  }
  /**
   * Runs the agent loop for `prompt`.
   * @param {string} prompt - The user's request.
   * @param {{ system?: string }} [options] - Optional system prompt.
   * @returns {Promise<{ messages: Array, output: string }>} Full transcript and final answer.
   * @throws {Error} "Unknown tool: <name>" when the model requests an
   *   unregistered tool, or "Max iterations exceeded" when the loop never
   *   reaches a tool-free answer.
   */
  async run(prompt, options = {}) {
    const limit = this.options.max_iterations ?? 5;
    const transcript = [];
    if (options.system) {
      transcript.push({ role: "system", content: options.system });
    }
    transcript.push({ role: "user", content: prompt });
    // Index tools by name for O(1) lookup inside the loop.
    const registry = new Map(this.options.tools.map((tool) => [tool.name, tool]));
    let step = 0;
    while (step < limit) {
      step += 1;
      const resp = await this.options.lang.chat(transcript, {
        tools: this.options.tools
      });
      const reply = resp.message;
      const calls = resp.tool_calls;
      if (calls) {
        // Re-attach the calls in the OpenAI wire format expected on replay.
        reply.tool_calls = calls.map((call) => ({
          id: call.id,
          type: "function",
          function: { name: call.name, arguments: JSON.stringify(call.arguments) }
        }));
      }
      transcript.push(reply);
      if (!calls || calls.length === 0) {
        // No tool requests: the model has produced its final answer.
        return { messages: transcript, output: reply.content };
      }
      for (const call of calls) {
        const tool = registry.get(call.name);
        if (!tool) throw new Error(`Unknown tool: ${call.name}`);
        const result = await tool.run(call.arguments);
        transcript.push({
          role: "tool",
          tool_call_id: call.id,
          content: JSON.stringify(result)
        });
      }
    }
    throw new Error("Max iterations exceeded");
  }
};

export {
  ToolAgent
};
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
// src/rag/base.ts
// Abstract marker classes for the RAG layer; concrete implementations extend these.
var Embeddings = class {
};
var VectorStore = class {
};
var Retriever = class {
};

// src/rag/openai.ts
// Embeddings implementation backed by the OpenAI /embeddings endpoint.
var OpenAIEmbeddings = class extends Embeddings {
  apiKey;
  model;
  baseUrl;
  /**
   * @param {object} config
   * @param {string} config.apiKey - OpenAI API key.
   * @param {string} [config.model="text-embedding-3-small"] - Embedding model name.
   * @param {string} [config.baseUrl="https://api.openai.com/v1"] - API root URL.
   */
  constructor(config) {
    super();
    const { apiKey, model, baseUrl } = config;
    this.apiKey = apiKey;
    this.model = model || "text-embedding-3-small";
    this.baseUrl = baseUrl || "https://api.openai.com/v1";
  }
  /**
   * Embeds a single query string.
   * @param {string} text
   * @returns {Promise<number[]>} The embedding vector for `text`.
   */
  async embedQuery(text) {
    const [vector] = await this.embedDocuments([text]);
    return vector;
  }
  /**
   * Embeds a batch of documents in a single API call.
   * @param {string[]} texts
   * @returns {Promise<number[][]>} Vectors in the same order as `texts`.
   * @throws {Error} When the API responds with a non-ok status.
   */
  async embedDocuments(texts) {
    const response = await fetch(`${this.baseUrl}/embeddings`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${this.apiKey}`
      },
      body: JSON.stringify({
        input: texts,
        model: this.model
      })
    });
    if (!response.ok) {
      throw new Error(`OpenAI Embeddings Error: ${await response.text()}`);
    }
    const data = await response.json();
    // The API may return items out of order; restore input order via `index`.
    const ordered = [...data.data].sort((a, b) => a.index - b.index);
    return ordered.map((item) => item.embedding);
  }
};

export {
  Embeddings,
  VectorStore,
  Retriever,
  OpenAIEmbeddings
};
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
// src/agents/react.ts
// ReAct-style agent: alternates model reasoning with tool execution until the
// model answers without tool calls or the iteration budget is exhausted.
var ReActAgent = class {
  /**
   * @param {{ model: object, tools: Array, max_iterations?: number, system_prompt?: string }} options
   */
  constructor(options) {
    this.options = options;
  }
  /**
   * Runs the reason/act loop for `prompt`.
   * @param {string} prompt - The user's request.
   * @returns {Promise<{ messages: Array, output: string }>} Full transcript and
   *   final answer; on budget exhaustion `output` is an apologetic fallback
   *   rather than a thrown error.
   */
  async run(prompt) {
    const budget = this.options.max_iterations ?? 10;
    const systemPrompt = this.options.system_prompt ?? "You are a helpful assistant with access to tools. Use them to answer the user's request.";
    const transcript = [
      { role: "system", content: systemPrompt },
      { role: "user", content: prompt }
    ];
    // Index tools by name for lookup during the loop.
    const byName = new Map(this.options.tools.map((tool) => [tool.name, tool]));
    for (let step = 0; step < budget; step += 1) {
      const resp = await this.options.model.chat(transcript, {
        tools: this.options.tools,
        tool_choice: "auto"
      });
      const reply = resp.message;
      const calls = resp.tool_calls;
      if (calls) {
        // Re-attach the calls in the OpenAI wire format expected on replay.
        reply.tool_calls = calls.map((call) => ({
          id: call.id,
          type: "function",
          function: { name: call.name, arguments: JSON.stringify(call.arguments) }
        }));
      }
      transcript.push(reply);
      if (!calls || calls.length === 0) {
        // No tool requests: the model has produced its final answer.
        return { messages: transcript, output: reply.content };
      }
      for (const call of calls) {
        transcript.push({
          role: "tool",
          tool_call_id: call.id,
          content: await this.#execute(byName, call)
        });
      }
    }
    // Budget exhausted: surface a graceful failure instead of throwing.
    return {
      messages: transcript,
      output: "I could not complete the task within the maximum number of steps."
    };
  }
  // Runs one tool call, serializing failures as JSON error payloads so the
  // model can observe and recover from them instead of crashing the loop.
  async #execute(byName, call) {
    const tool = byName.get(call.name);
    if (!tool) {
      return JSON.stringify({ error: `Tool ${call.name} not found` });
    }
    try {
      const result = await tool.run(call.arguments);
      return typeof result === "string" ? result : JSON.stringify(result);
    } catch (e) {
      return JSON.stringify({ error: e.message });
    }
  }
};

export {
  ReActAgent
};
|