ai-pipeline-orchestrator 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +32 -0
- package/CONTRIBUTING.md +77 -0
- package/LICENSE +21 -0
- package/README.md +466 -0
- package/dist/context/index.cjs +75 -0
- package/dist/context/index.cjs.map +1 -0
- package/dist/context/index.d.cts +38 -0
- package/dist/context/index.d.ts +38 -0
- package/dist/context/index.mjs +48 -0
- package/dist/context/index.mjs.map +1 -0
- package/dist/core/index.cjs +203 -0
- package/dist/core/index.cjs.map +1 -0
- package/dist/core/index.d.cts +36 -0
- package/dist/core/index.d.ts +36 -0
- package/dist/core/index.mjs +175 -0
- package/dist/core/index.mjs.map +1 -0
- package/dist/handlers/index.cjs +564 -0
- package/dist/handlers/index.cjs.map +1 -0
- package/dist/handlers/index.d.cts +105 -0
- package/dist/handlers/index.d.ts +105 -0
- package/dist/handlers/index.mjs +522 -0
- package/dist/handlers/index.mjs.map +1 -0
- package/dist/index.cjs +971 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +8 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.mjs +920 -0
- package/dist/index.mjs.map +1 -0
- package/dist/intent/index.cjs +265 -0
- package/dist/intent/index.cjs.map +1 -0
- package/dist/intent/index.d.cts +26 -0
- package/dist/intent/index.d.ts +26 -0
- package/dist/intent/index.mjs +226 -0
- package/dist/intent/index.mjs.map +1 -0
- package/dist/llm-classifier-Cq-QHJc4.d.ts +59 -0
- package/dist/llm-classifier-pOp7OO-V.d.cts +59 -0
- package/dist/providers/index.cjs +64 -0
- package/dist/providers/index.cjs.map +1 -0
- package/dist/providers/index.d.cts +13 -0
- package/dist/providers/index.d.ts +13 -0
- package/dist/providers/index.mjs +29 -0
- package/dist/providers/index.mjs.map +1 -0
- package/dist/types-DGSj206n.d.cts +42 -0
- package/dist/types-DGSj206n.d.ts +42 -0
- package/dist/utils/index.cjs +74 -0
- package/dist/utils/index.cjs.map +1 -0
- package/dist/utils/index.d.cts +10 -0
- package/dist/utils/index.d.ts +10 -0
- package/dist/utils/index.mjs +46 -0
- package/dist/utils/index.mjs.map +1 -0
- package/package.json +147 -0
package/CHANGELOG.md
ADDED
@@ -0,0 +1,32 @@
## 0.1.0 (2026-01-10)

* Add AI generation capabilities with new AI handler, example implementation, and updates to package d ([e6ce00d](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/e6ce00d))
* Add configuration files for editor, npm, and nvm; create CI and publish workflows; update package.js ([c2361c1](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/c2361c1))
* Add content moderation, rate limiting, and context handling modules with respective configurations a ([f207aab](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/f207aab))
* Add context management module with context building and optimization functionality ([023ec93](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/023ec93))
* Add core orchestration module with orchestrator implementation and type definitions ([32b6400](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/32b6400))
* Add example implementation for a basic chatbot using intent classification, context optimization, an ([61a4edb](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/61a4edb))
* Add initial project structure with essential files ([6ddb8aa](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/6ddb8aa))
* Add intent classification module with detectIntent and LLM classification functionality ([dbc8738](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/dbc8738))
* add interactive chat CLI. ([a50e4f4](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/a50e4f4))
* Add Prettier and ESLint configuration files, update package.json for linting and formatting scripts, ([d573b5d](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/d573b5d))
* Add README.md with project overview, features, installation instructions, quick start guide, core co ([636e34b](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/636e34b))
* Add streaming AI generation support ([a522b95](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/a522b95))
* Add support for multiple AI providers by introducing a new provider configuration interface and mode ([e6d659e](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/e6d659e))
* adjust package dependencies for ai-sdk-ollama. ([5dc6622](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/5dc6622))
* adjust package.json for module exports and dependencies. ([e5e348e](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/e5e348e))
* Cast model parameters to string in all handler examples for type consistency ([c10c6c3](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/c10c6c3))
* Enhance documentation across various handlers and classifiers, adding descriptive comments for conte ([08c3a72](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/08c3a72))
* Enhance intent detection documentation by detailing the keyword matching algorithm and scoring syste ([316f319](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/316f319))
* Enhance support for DeepSeek provider by updating configuration instructions in .env.example, README ([73355e8](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/73355e8))
* Introduce utility file in example folder ([5af9aa9](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/5af9aa9))
* Refactor context and intent modules by removing unused exports and enhancing documentation. Update c ([fd14466](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/fd14466))
* Refactor provider types to use AIProvider type across the codebase ([781f3a2](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/781f3a2))
* Update .env.example and all-handlers.ts to improve environment variable configuration instructions a ([7db6923](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/7db6923))
* Update .env.example and README to enhance provider configuration instructions; add dotenv dependency ([57a4193](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/57a4193))
* Update .env.example, README, and examples to streamline provider configuration instructions and enha ([bec25c1](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/bec25c1))
* Update dependencies for ollama-ai-provider and related packages to version 1.2.0. Improve documentat ([2d71dfe](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/2d71dfe))
* Update README with comprehensive handler configuration guide ([cf76caf](https://github.com/emmanuel-adu/ai-pipeline-orchestrator/commit/cf76caf))
package/CONTRIBUTING.md
ADDED
@@ -0,0 +1,77 @@
# Contributing to ai-pipeline-orchestrator

## Conventional Commits

This project uses [Conventional Commits](https://www.conventionalcommits.org/) to automate changelog generation and semantic versioning.

### Commit Message Format

Each commit message consists of a **type**, optional **scope**, and **subject**:

```
<type>(<scope>): <subject>
```

### Types

- **feat**: A new feature
- **fix**: A bug fix
- **docs**: Documentation only changes
- **style**: Changes that don't affect code meaning (formatting, semicolons, etc.)
- **refactor**: Code refactoring (neither fixes a bug nor adds a feature)
- **perf**: Performance improvements
- **test**: Adding or updating tests
- **chore**: Maintenance tasks (dependencies, build config, etc.)
- **ci**: CI/CD configuration changes
- **build**: Build system or external dependency changes

### Examples

```bash
# Adding a new feature
git commit -m "feat(intent): add support for multi-language classification"

# Fixing a bug
git commit -m "fix(streaming): resolve token counting issue with Ollama"

# Documentation update
git commit -m "docs(readme): add Ollama setup instructions"

# Breaking change (adds BREAKING CHANGE footer)
git commit -m "feat(handlers)!: change handler signature to async

BREAKING CHANGE: All handlers must now return Promises"

# Chore task
git commit -m "chore(deps): upgrade ai-sdk to v6.1.0"
```

### Scopes

Common scopes:

- `core` - orchestration pipeline
- `intent` - intent classification
- `context` - context optimization
- `handlers` - handler functions
- `providers` - AI provider integration
- `examples` - example code
- `deps` - dependencies
- `ci` - CI/CD

### Commit Linting

Commits are automatically validated using commitlint. Invalid commits will be rejected.
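For reference, a minimal commitlint config for this convention could look like the sketch below. This assumes the project extends the standard `@commitlint/config-conventional` preset (the usual companion to Conventional Commits); check the repository for the actual config.

```typescript
// commitlint.config.ts (illustrative sketch; not necessarily the project's actual config)
// Assumes @commitlint/cli, @commitlint/config-conventional, and @commitlint/types
// are installed as devDependencies.
import type { UserConfig } from '@commitlint/types'

const config: UserConfig = {
  // Enforces the <type>(<scope>): <subject> format described above
  extends: ['@commitlint/config-conventional'],
}

export default config
```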
### Changelog Generation

The CHANGELOG is auto-generated from commits:

```bash
# Generate changelog for new commits since last release
npm run changelog

# Regenerate entire changelog
npm run changelog:init
```

The changelog is automatically updated during the publish process.
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 Emmanuel Adu

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,466 @@
<div align="center">

# AI Pipeline Orchestrator

Build production-ready AI chatbots with composable handler pipelines. Handles intent detection, context optimization, token management, rate limiting, and moderation out of the box.

[](https://www.npmjs.com/package/ai-pipeline-orchestrator)
[](https://opensource.org/licenses/MIT)
[](https://www.typescriptlang.org/)

</div>

---

<p align="center">
  <img src="./assets/demo.gif" alt="AI Pipeline Orchestrator Demo" width="900">
</p>

<p align="center">
  <em>Interactive CLI demo showcasing hybrid intent detection, context optimization, and real-time streaming</em><br>
  <a href="#demo-screenshots">View detailed screenshot</a>
</p>

---

## Features

| Feature | Description |
|---------|-------------|
| **Sequential Pipelines** | Compose handlers that run in order with automatic error handling |
| **Hybrid Intent Detection** | Keyword matching (fast, free) → LLM fallback (accurate, paid) |
| **Context Optimization** | Load only relevant context based on intent (30-50% token savings) |
| **Multi-Provider** | Works with Anthropic, OpenAI, or Ollama (local/cloud) |
| **Production Ready** | Rate limiting, moderation, logging, error handling built-in |
| **TypeScript** | Full type safety with minimal dependencies (just Zod) |

## Installation

```bash
npm install ai-pipeline-orchestrator
```

Install a provider SDK:

```bash
# Anthropic (recommended)
npm install @ai-sdk/anthropic ai

# OpenAI
npm install @ai-sdk/openai ai

# Ollama (free, local/cloud)
npm install ai-sdk-ollama ai
```

## Quick Start

```typescript
import { executeOrchestration, createAIHandler } from 'ai-pipeline-orchestrator'

const result = await executeOrchestration(
  {
    request: {
      messages: [{ role: 'user', content: 'Tell me a joke' }],
    },
  },
  [
    {
      name: 'ai',
      handler: createAIHandler({
        provider: 'anthropic',
        model: 'claude-3-5-haiku-20241022',
        apiKey: process.env.ANTHROPIC_API_KEY,
        getSystemPrompt: () => 'You are a helpful assistant.',
      }),
    },
  ]
)

console.log(result.context.aiResponse.text)
```

## Providers

### Supported Providers

| Provider | Package | Models | API Key | Best For |
|----------|---------|--------|---------|----------|
| **Anthropic** | `@ai-sdk/anthropic` | `claude-3-5-haiku-20241022`<br/>`claude-3-5-sonnet-20241022` | [Get key](https://console.anthropic.com) | Production, high-quality responses |
| **OpenAI** | `@ai-sdk/openai` | `gpt-4o-mini`<br/>`gpt-4o` | [Get key](https://platform.openai.com) | Production, wide model selection |
| **Ollama** | `ai-sdk-ollama` | `llama3.2`, `deepseek-r1`, `qwen2.5`<br/>100+ more | Optional ([Cloud](https://ollama.com)) | Development, cost savings, offline |

### Provider Setup

<details>
<summary><strong>Anthropic / OpenAI</strong></summary>

```bash
# .env
AI_PROVIDER=anthropic
AI_MODEL=claude-3-5-haiku-20241022
ANTHROPIC_API_KEY=your-key-here
```

</details>

<details>
<summary><strong>Ollama Local</strong></summary>

```bash
# Install and run
curl -fsSL https://ollama.com/install.sh | sh
ollama serve
ollama pull llama3.2

# .env
AI_PROVIDER=ollama
AI_MODEL=llama3.2:latest
OLLAMA_BASE_URL=http://localhost:11434
```

</details>

<details>
<summary><strong>Ollama Cloud (Free Tier)</strong></summary>

```bash
# .env
AI_PROVIDER=ollama
AI_MODEL=llama3.2:latest
OLLAMA_BASE_URL=https://ollama.com
OLLAMA_API_KEY=your-key-here # Get from https://ollama.com
```

</details>

<details>
<summary><strong>💡 Hybrid Setup (Recommended)</strong></summary>

Use a cloud provider for chat (best quality) + Ollama for intent classification (free):

```bash
# .env
AI_PROVIDER=anthropic
AI_MODEL=claude-3-5-haiku-20241022
ANTHROPIC_API_KEY=your-key-here

INTENT_PROVIDER=ollama
INTENT_MODEL=deepseek-r1:latest
OLLAMA_BASE_URL=http://localhost:11434
```

This gives you high-quality chat responses with zero-cost intent classification.
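Mapped onto code, this split becomes two separately configured pieces: an LLM intent classifier pointed at Ollama and a chat handler pointed at Anthropic. A rough sketch only (full option shapes appear under Usage below; the `categories` values are placeholders, and this assumes the classifier accepts `'ollama'` as a provider, per the `INTENT_PROVIDER` variable above):

```typescript
import {
  LLMIntentClassifier,
  createIntentHandler,
  createAIHandler,
} from 'ai-pipeline-orchestrator'

// Free intent classification on local Ollama (INTENT_PROVIDER / INTENT_MODEL)
const intentHandler = createIntentHandler({
  classifier: new LLMIntentClassifier({
    provider: 'ollama',
    model: 'deepseek-r1:latest',
    categories: ['greeting', 'help', 'question'], // placeholder categories
  }),
})

// High-quality chat generation on Anthropic (AI_PROVIDER / AI_MODEL)
const aiHandler = createAIHandler({
  provider: 'anthropic',
  model: 'claude-3-5-haiku-20241022',
  apiKey: process.env.ANTHROPIC_API_KEY,
})
```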
</details>

## Usage

### Pipeline Orchestration

Handlers run sequentially, passing context between them:

```typescript
import {
  executeOrchestration,
  createModerationHandler,
  createIntentHandler,
  createContextHandler,
  createAIHandler,
} from 'ai-pipeline-orchestrator'

const result = await executeOrchestration(
  context,
  [
    { name: 'moderation', handler: createModerationHandler() },
    { name: 'intent', handler: createIntentHandler({ classifier }) },
    { name: 'context', handler: createContextHandler({ optimizer }) },
    { name: 'ai', handler: createAIHandler({ provider, model, apiKey }) },
  ],
  {
    logger: myLogger,
    onStepComplete: (step, duration) => console.log(`${step}: ${duration}ms`),
  }
)
```

If any handler sets `context.error` or throws, the pipeline stops.
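After the run, the caller can branch on that error field. A quick sketch, with `context` and `steps` as in the example above (the `error` shape matches the Custom Handlers example below):

```typescript
const result = await executeOrchestration(context, steps)

if (result.context.error) {
  // A handler halted the pipeline and recorded why
  const { message, statusCode, step } = result.context.error
  console.error(`Pipeline stopped at '${step}' (${statusCode}): ${message}`)
} else {
  console.log(result.context.aiResponse.text)
}
```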
### Handlers

#### Content Moderation

```typescript
import { createModerationHandler } from 'ai-pipeline-orchestrator'

const handler = createModerationHandler({
  spamPatterns: ['buy now', 'click here'],
  customRules: [{ pattern: /badword/i, reason: 'Profanity detected' }],
})
```

#### Rate Limiting

Bring your own rate limiter (Upstash, Redis, etc.):

```typescript
import { createRateLimitHandler } from 'ai-pipeline-orchestrator'

const handler = createRateLimitHandler({
  limiter: {
    check: async (id) => ({
      allowed: await checkLimit(id),
      retryAfter: 60, // seconds
    }),
  },
  identifierKey: 'userId',
})
```
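The `check(id)` contract is small enough to satisfy without external infrastructure. Below is a minimal single-process, fixed-window limiter as a sketch (the return shape matches the example above; use Redis/Upstash or similar in production):

```typescript
import { createRateLimitHandler } from 'ai-pipeline-orchestrator'

// Hypothetical in-memory limiter: allows `limit` requests per `windowMs`
// per identifier. Single-process only; state is lost on restart.
function createMemoryLimiter(limit = 10, windowMs = 60_000) {
  const windows = new Map<string, { count: number; resetAt: number }>()
  return {
    check: async (id: string) => {
      const now = Date.now()
      const win = windows.get(id)
      if (!win || now >= win.resetAt) {
        // New window for this identifier
        windows.set(id, { count: 1, resetAt: now + windowMs })
        return { allowed: true, retryAfter: 0 }
      }
      win.count++
      return {
        allowed: win.count <= limit,
        retryAfter: Math.ceil((win.resetAt - now) / 1000), // seconds
      }
    },
  }
}

const handler = createRateLimitHandler({
  limiter: createMemoryLimiter(),
  identifierKey: 'userId',
})
```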
#### Intent Detection

Three modes: keyword-only, LLM-only, or hybrid.

<details>
<summary><strong>Keyword-based</strong> (fast, free)</summary>

```typescript
import { IntentClassifier, createIntentHandler } from 'ai-pipeline-orchestrator'

const classifier = new IntentClassifier({
  patterns: [
    { category: 'greeting', keywords: ['hello', 'hi', 'hey'] },
    { category: 'help', keywords: ['help', 'support'] },
  ],
})

const handler = createIntentHandler({ classifier })
```

</details>

<details>
<summary><strong>LLM-based</strong> (accurate, paid)</summary>

```typescript
import { LLMIntentClassifier, createIntentHandler } from 'ai-pipeline-orchestrator'

const classifier = new LLMIntentClassifier({
  provider: 'anthropic',
  model: 'claude-3-5-haiku-20241022',
  apiKey: process.env.ANTHROPIC_API_KEY,
  categories: ['greeting', 'help', 'question'],
  categoryDescriptions: {
    greeting: 'User says hello',
    help: 'User needs help',
    question: 'User asks a question',
  },
})

const handler = createIntentHandler({ classifier })
```

</details>

<details>
<summary><strong>Hybrid</strong> (best of both)</summary>

```typescript
const handler = createIntentHandler({
  classifier: keywordClassifier,
  llmFallback: {
    enabled: true,
    classifier: llmClassifier,
    confidenceThreshold: 0.5, // use LLM if keyword confidence < 0.5
  },
})
```

</details>

#### Context Optimization

Load only relevant context based on intent:

```typescript
import { ContextOptimizer, createContextHandler } from 'ai-pipeline-orchestrator'

const optimizer = new ContextOptimizer({
  sections: [
    {
      id: 'core',
      content: 'You are a helpful assistant.',
      alwaysInclude: true,
    },
    {
      id: 'help',
      content: 'Help documentation...',
      topics: ['help', 'support'],
    },
  ],
  strategy: {
    firstMessage: 'full',
    followUp: 'selective',
  },
})

const handler = createContextHandler({
  optimizer,
  getTopics: (ctx) => [ctx.intent?.intent],
})
```
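You can also call the optimizer directly to see what a given message would load. A small sketch continuing the example above; `build(topics, isFirstMessage)` and the `tokenEstimate`/`maxTokenEstimate` fields it returns can be seen in the compiled `dist/context` source later in this diff:

```typescript
// What would a follow-up message about 'help' load?
const snapshot = optimizer.build(['help'], false)

console.log(snapshot.sectionsIncluded) // ['core', 'help'] ('core' is alwaysInclude)

// Estimated savings versus loading every section
const saved = 1 - snapshot.tokenEstimate / snapshot.maxTokenEstimate
console.log(`~${Math.round(saved * 100)}% of system-prompt tokens saved`)
```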
#### AI Generation

<details>
<summary><strong>Non-streaming</strong></summary>

```typescript
import { createAIHandler } from 'ai-pipeline-orchestrator'

const handler = createAIHandler({
  provider: 'anthropic',
  model: 'claude-3-5-sonnet-20241022',
  apiKey: process.env.ANTHROPIC_API_KEY,
  temperature: 0.7,
  maxTokens: 1024,
  getSystemPrompt: (ctx) => ctx.promptContext?.systemPrompt || 'You are a helpful assistant.',
})
```

</details>

<details>
<summary><strong>Streaming</strong></summary>

```typescript
import { createStreamingAIHandler } from 'ai-pipeline-orchestrator'

const handler = createStreamingAIHandler({
  provider: 'anthropic',
  model: 'claude-3-5-sonnet-20241022',
  apiKey: process.env.ANTHROPIC_API_KEY,
  onChunk: (chunk) => sendToClient(chunk),
})

// Full text still available after streaming
console.log(result.context.aiResponse.text)
```

</details>

#### Custom Handlers

```typescript
import { OrchestrationHandler } from 'ai-pipeline-orchestrator'

const authHandler: OrchestrationHandler = async (context) => {
  const userId = context.request.metadata?.userId

  if (!userId) {
    return {
      ...context,
      error: {
        message: 'Authentication required',
        statusCode: 401,
        step: 'auth',
      },
    }
  }

  return { ...context, user: await fetchUser(userId) }
}
```
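A custom handler then slots into the pipeline like any built-in step; for example (with `aiHandler` standing in for a configured AI handler):

```typescript
const result = await executeOrchestration(context, [
  { name: 'auth', handler: authHandler }, // runs first; a 401 stops the pipeline
  { name: 'ai', handler: aiHandler },
])
```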
## Try the Interactive Demo

```bash
git clone https://github.com/emmanuel-adu/ai-pipeline-orchestrator.git
cd ai-pipeline-orchestrator
npm install

# Configure (copy .env.example to .env and add your API key)
cp .env.example .env

# Run the demo
npm run example:chat
```

The demo showcases all features in real-time:

- Content moderation
- Rate limiting
- Hybrid intent classification
- Context optimization (30-50% token savings)
- Real-time streaming
- Token usage breakdown

## API Reference

### Core

| Export | Description |
|--------|-------------|
| `executeOrchestration(context, steps, config?)` | Run the pipeline |
| `Orchestrator` | Class-based version for stateful pipelines |
| `OrchestrationHandler` | Handler function type |
| `OrchestrationContext` | Context object passed between handlers |

### Intent

| Export | Description |
|--------|-------------|
| `IntentClassifier` | Keyword-based detection |
| `LLMIntentClassifier` | LLM-based detection with structured output |
| `createIntentHandler(config)` | Creates intent handler |

### Context

| Export | Description |
|--------|-------------|
| `ContextOptimizer` | Smart context selection |
| `createContextHandler(config)` | Creates context handler |

### AI

| Export | Description |
|--------|-------------|
| `createAIHandler(config)` | Text generation |
| `createStreamingAIHandler(config)` | Streaming generation |

### Utilities

| Export | Description |
|--------|-------------|
| `createRateLimitHandler(config)` | Rate limiting |
| `createModerationHandler(config)` | Content moderation |

## Examples

Check out the [examples](./examples) directory:

- [`basic-chatbot.ts`](./examples/basic-chatbot.ts) - Minimal working example
- [`complete-chatbot.ts`](./examples/complete-chatbot.ts) - All features combined
- [`streaming-chatbot.ts`](./examples/streaming-chatbot.ts) - Streaming responses
- [`chat-cli.ts`](./examples/chat-cli.ts) - Interactive CLI demo

### Demo Screenshots

<details>
<summary>📸 Click to see detailed CLI output</summary>

<p align="center">
  <img src="./assets/demo_image.png" alt="CLI Demo Screenshot" width="800">
</p>

*The interactive CLI displays real-time metadata including moderation status, rate limits, intent classification, context optimization savings, and token usage for full transparency.*

</details>

## Contributing

Contributions are welcome! Please read [CONTRIBUTING.md](./CONTRIBUTING.md) for guidelines.

## License

MIT © [Emmanuel Adu](https://github.com/emmanuel-adu)
package/dist/context/index.cjs
ADDED
@@ -0,0 +1,75 @@
"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/context/index.ts
var context_exports = {};
__export(context_exports, {
  ContextOptimizer: () => ContextOptimizer
});
module.exports = __toCommonJS(context_exports);

// src/context/optimizer.ts
function buildContext(topics, isFirstMessage, config) {
  const { sections, strategy } = config;
  const useFullContext = isFirstMessage && strategy?.firstMessage !== "selective" || !isFirstMessage && strategy?.followUp === "full";
  let selectedSections;
  if (useFullContext) {
    selectedSections = sections;
  } else {
    selectedSections = sections.filter((section) => {
      if (section.alwaysInclude) return true;
      if (!section.topics || section.topics.length === 0) return false;
      return section.topics.some((topic) => topics.includes(topic));
    });
    selectedSections.sort((a, b) => (b.priority || 0) - (a.priority || 0));
  }
  const systemPrompt = selectedSections.map((section) => section.content).join("\n\n");
  const tokenEstimate = Math.ceil(systemPrompt.length / 4);
  const allSectionsPrompt = sections.map((section) => section.content).join("\n\n");
  const maxTokenEstimate = Math.ceil(allSectionsPrompt.length / 4);
  return {
    systemPrompt,
    sectionsIncluded: selectedSections.map((s) => s.id),
    totalSections: sections.length,
    tokenEstimate,
    maxTokenEstimate
  };
}
var ContextOptimizer = class {
  constructor(config) {
    this.config = config;
  }
  build(topics, isFirstMessage) {
    return buildContext(topics, isFirstMessage, this.config);
  }
  addSection(section) {
    this.config.sections.push(section);
  }
  removeSection(id) {
    this.config.sections = this.config.sections.filter((s) => s.id !== id);
  }
  getSections() {
    return [...this.config.sections];
  }
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  ContextOptimizer
});
//# sourceMappingURL=index.cjs.map
package/dist/context/index.cjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../../src/context/index.ts","../../src/context/optimizer.ts"],"sourcesContent":["export { ContextOptimizer } from './optimizer'\nexport type { ContextSection, ContextStrategy, ContextConfig, ContextResult } from './types'\n","import type { ContextConfig, ContextResult, ContextSection } from './types'\n\nfunction buildContext(\n topics: string[],\n isFirstMessage: boolean,\n config: ContextConfig\n): ContextResult {\n const { sections, strategy } = config\n\n const useFullContext =\n (isFirstMessage && strategy?.firstMessage !== 'selective') ||\n (!isFirstMessage && strategy?.followUp === 'full')\n\n let selectedSections: ContextSection[]\n\n if (useFullContext) {\n selectedSections = sections\n } else {\n selectedSections = sections.filter(section => {\n if (section.alwaysInclude) return true\n\n if (!section.topics || section.topics.length === 0) return false\n\n return section.topics.some(topic => topics.includes(topic))\n })\n\n selectedSections.sort((a, b) => (b.priority || 0) - (a.priority || 0))\n }\n\n const systemPrompt = selectedSections.map(section => section.content).join('\\n\\n')\n\n // Calculate token estimates\n const tokenEstimate = Math.ceil(systemPrompt.length / 4)\n\n // Calculate what tokens WOULD be if all sections were loaded (for savings calculation)\n const allSectionsPrompt = sections.map(section => section.content).join('\\n\\n')\n const maxTokenEstimate = Math.ceil(allSectionsPrompt.length / 4)\n\n return {\n systemPrompt,\n sectionsIncluded: selectedSections.map(s => s.id),\n totalSections: sections.length,\n tokenEstimate,\n maxTokenEstimate,\n }\n}\n\n/**\n * Dynamic context optimizer for token reduction.\n * Selectively loads context sections based on topics and message position.\n */\nexport class ContextOptimizer {\n constructor(private config: ContextConfig) {}\n\n build(topics: string[], isFirstMessage: boolean): ContextResult {\n return buildContext(topics, isFirstMessage, this.config)\n }\n\n addSection(section: ContextSection): void {\n this.config.sections.push(section)\n }\n\n removeSection(id: string): void {\n this.config.sections = this.config.sections.filter(s => s.id !== id)\n }\n\n getSections(): ContextSection[] {\n return [...this.config.sections]\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACEA,SAAS,aACP,QACA,gBACA,QACe;AACf,QAAM,EAAE,UAAU,SAAS,IAAI;AAE/B,QAAM,iBACH,kBAAkB,UAAU,iBAAiB,eAC7C,CAAC,kBAAkB,UAAU,aAAa;AAE7C,MAAI;AAEJ,MAAI,gBAAgB;AAClB,uBAAmB;AAAA,EACrB,OAAO;AACL,uBAAmB,SAAS,OAAO,aAAW;AAC5C,UAAI,QAAQ,cAAe,QAAO;AAElC,UAAI,CAAC,QAAQ,UAAU,QAAQ,OAAO,WAAW,EAAG,QAAO;AAE3D,aAAO,QAAQ,OAAO,KAAK,WAAS,OAAO,SAAS,KAAK,CAAC;AAAA,IAC5D,CAAC;AAED,qBAAiB,KAAK,CAAC,GAAG,OAAO,EAAE,YAAY,MAAM,EAAE,YAAY,EAAE;AAAA,EACvE;AAEA,QAAM,eAAe,iBAAiB,IAAI,aAAW,QAAQ,OAAO,EAAE,KAAK,MAAM;AAGjF,QAAM,gBAAgB,KAAK,KAAK,aAAa,SAAS,CAAC;AAGvD,QAAM,oBAAoB,SAAS,IAAI,aAAW,QAAQ,OAAO,EAAE,KAAK,MAAM;AAC9E,QAAM,mBAAmB,KAAK,KAAK,kBAAkB,SAAS,CAAC;AAE/D,SAAO;AAAA,IACL;AAAA,IACA,kBAAkB,iBAAiB,IAAI,OAAK,EAAE,EAAE;AAAA,IAChD,eAAe,SAAS;AAAA,IACxB;AAAA,IACA;AAAA,EACF;AACF;AAMO,IAAM,mBAAN,MAAuB;AAAA,EAC5B,YAAoB,QAAuB;AAAvB;AAAA,EAAwB;AAAA,EAE5C,MAAM,QAAkB,gBAAwC;AAC9D,WAAO,aAAa,QAAQ,gBAAgB,KAAK,MAAM;AAAA,EACzD;AAAA,EAEA,WAAW,SAA+B;AACxC,SAAK,OAAO,SAAS,KAAK,OAAO;AAAA,EACnC;AAAA,EAEA,cAAc,IAAkB;AAC9B,SAAK,OAAO,WAAW,KAAK,OAAO,SAAS,OAAO,OAAK,EAAE,OAAO,EAAE;AAAA,EACrE;AAAA,EAEA,cAAgC;AAC9B,WAAO,CAAC,GAAG,KAAK,OAAO,QAAQ;AAAA,EACjC;AACF;","names":[]}