chat-nest-server 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,95 @@
+ # chat-nest-server
+
+ > Streaming AI backend server for Chat Nest with built-in cost protection and cancellation propagation.
+
+ This package exposes an Express-compatible request handler that:
+ - Streams AI responses
+ - Enforces rate limits and budgets
+ - Supports abort propagation
+ - Protects against runaway usage
+
+ ---
+
+ ## ✨ Features
+
+ - Streaming responses over HTTP
+ - End-to-end cancellation support
+ - Daily token budget enforcement
+ - Rate limiting
+ - Message trimming
+ - Safe retry semantics
+ - OpenAI adapter included
+
+ ---
+
+ ## 📦 Installation
+
+ ```bash
+ npm install chat-nest-server
+ ```
30
+
31
+ ## 🚀 Usage
32
+ Express Integration
33
+
34
+ ```
35
+ import express from "express";
36
+ import cors from "cors";
37
+ import { createChatHandler } from "chat-nest-server";
38
+
39
+ const app = express();
40
+
41
+ app.use(cors());
42
+ app.use(express.json());
43
+
44
+ app.post(
45
+ "/api/chat",
46
+ createChatHandler({
47
+ apiKey: process.env.OPENAI_API_KEY!,
48
+ })
49
+ );
50
+
51
+ app.listen(3001, () => {
52
+ console.log("API running on http://localhost:3001");
53
+ });
54
+ ```
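The handler writes the model's output to the response as plain-text chunks and, when the client disconnects, aborts the upstream OpenAI request. A minimal client-side sketch of consuming that stream with `fetch` (not part of this package; the URL and function name are illustrative):

```ts
// Reads the streamed plain-text response token by token.
// Passing an AbortSignal lets the caller cancel; closing the request
// triggers the server's abort propagation to OpenAI.
async function streamChat(
  messages: { role: string; content: string }[],
  onToken: (token: string) => void,
  signal?: AbortSignal
): Promise<void> {
  const res = await fetch("http://localhost:3001/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ messages }),
    signal,
  });
  if (!res.ok || !res.body) {
    throw new Error(`Chat request failed with status ${res.status}`);
  }
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    onToken(decoder.decode(value, { stream: true }));
  }
}
```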
+
+ ---
+
+ ## 🔐 Environment Variables
+
+ | Variable         | Description    |
+ | ---------------- | -------------- |
+ | `OPENAI_API_KEY` | OpenAI API key |
+
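The package itself reads no environment variables; the consuming app passes the key to `createChatHandler`, which throws if it is missing. A sketch that loads it from a `.env` file (assumes `dotenv`, which is not a dependency of this package):

```ts
// .env (not committed):
//   OPENAI_API_KEY=sk-...
import "dotenv/config";
import { createChatHandler } from "chat-nest-server";

// Throws "OPENAI_API_KEY is missing" if the key is absent.
const handler = createChatHandler({
  apiKey: process.env.OPENAI_API_KEY!,
});
```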
+ ## 💰 Cost Controls
+
+ The server enforces:
+
+ - Maximum tokens per request
+ - Daily token budget
+ - Request rate limiting
+ - Prompt size trimming
+ - Retry classification
+
+ This prevents accidental overspending and abuse; a simplified sketch of the guard logic follows.
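The budget guard combines a rough character-based token estimate with a fixed reservation for the response. A simplified, self-contained sketch of the checks bundled into `dist/index.js` (function and parameter names here are illustrative; the real values live in `src/config/`):

```ts
// Heuristic used by the package: roughly 4 characters per token.
function estimateTokens(messages: { content: string }[]): number {
  const text = messages.map((m) => m.content).join(" ");
  return Math.ceil(text.length / 4);
}

// A request is allowed only if the estimated input plus the reserved output
// fits both the per-request cap and what is left of the daily budget.
function isWithinBudget(
  messages: { content: string }[],
  tokensUsedToday: number,
  limits: { maxOutputTokens: number; maxTokensPerRequest: number; dailyTokenLimit: number }
): boolean {
  const estimatedTotal = estimateTokens(messages) + limits.maxOutputTokens;
  return (
    estimatedTotal <= limits.maxTokensPerRequest &&
    tokensUsedToday + estimatedTotal <= limits.dailyTokenLimit
  );
}
```

When either check fails, the handler responds with HTTP 429.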
+
+ ---
+
+ ## ⚙ Configuration
+
+ Limits can be customized in:
+
+ - `src/config/aiLimits.ts`
+ - `src/config/budget.ts`
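These files are compiled into the published bundle, so changing them requires a rebuild. For reference, the defaults shipped in `dist/index.js` are:

```ts
const AI_MODEL = "gpt-4o-mini";

const AI_LIMITS = {
  maxOutputTokens: 300,      // max_tokens passed to each OpenAI request
  maxMessages: 6,            // only the most recent 6 messages are forwarded
  rateLimitWindowMs: 60_000, // fixed rate-limit window (1 minute)
  maxRequestsPerWindow: 30,  // requests allowed per window
};

const BUDGET = {
  dailyTokenLimit: 70_000,   // estimated tokens per day (~safe for $5 / 3 months)
  maxTokensPerRequest: 600,  // estimated input + reserved output per request
};
```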
+
+ ---
+
+ ## 📄 License
+
+ ISC
package/dist/index.d.ts ADDED
@@ -0,0 +1,8 @@
+ import { Request, Response } from 'express';
+
+ type ChatHandlerConfig = {
+     apiKey: string;
+ };
+ declare function createChatHandler(config: ChatHandlerConfig): (req: Request, res: Response) => Promise<void>;
+
+ export { type ChatHandlerConfig, createChatHandler };
package/dist/index.js ADDED
@@ -0,0 +1,142 @@
+ // src/createChatHandler.ts
+ import OpenAI from "openai";
+
+ // src/config/aiLimits.ts
+ var AI_MODEL = "gpt-4o-mini";
+ var AI_LIMITS = {
+   maxOutputTokens: 300,
+   maxMessages: 6,
+   rateLimitWindowMs: 6e4,
+   maxRequestsPerWindow: 30
+ };
+
+ // src/config/budget.ts
+ var BUDGET = {
+   dailyTokenLimit: 7e4,
+   // ~safe for $5 / 3 months
+   maxTokensPerRequest: 600
+   // input + output guard
+ };
+
+ // src/utils/tokenEstimator.ts
+ function estimateTokens(messages) {
+   const text = messages.map((m) => m.content).join(" ");
+   return Math.ceil(text.length / 4);
+ }
+
+ // src/utils/tokenBudget.ts
+ var currentDay = (/* @__PURE__ */ new Date()).toDateString();
+ var tokensUsedToday = 0;
+ function canSpendTokens(estimated, limit) {
+   const today = (/* @__PURE__ */ new Date()).toDateString();
+   if (today !== currentDay) {
+     currentDay = today;
+     tokensUsedToday = 0;
+   }
+   return tokensUsedToday + estimated <= limit;
+ }
+ function recordTokenUsage(used) {
+   tokensUsedToday += used;
+ }
+
+ // src/createChatHandler.ts
+ var requestCount = 0;
+ var windowStart = Date.now();
+ function isRateLimited() {
+   const now = Date.now();
+   if (now - windowStart > AI_LIMITS.rateLimitWindowMs) {
+     windowStart = now;
+     requestCount = 0;
+   }
+   requestCount++;
+   return requestCount > AI_LIMITS.maxRequestsPerWindow;
+ }
+ function trimMessages(messages) {
+   return messages.slice(-AI_LIMITS.maxMessages);
+ }
+ function createChatHandler(config) {
+   if (!config.apiKey) {
+     throw new Error("OPENAI_API_KEY is missing");
+   }
+   const client = new OpenAI({
+     apiKey: config.apiKey
+   });
+   return async function handler(req, res) {
+     const abortController = new AbortController();
+     let streamStarted = false;
+     // Abort the upstream OpenAI request if the client disconnects mid-stream.
+     res.on("close", () => {
+       if (streamStarted && !abortController.signal.aborted) {
+         abortController.abort();
+       }
+     });
+     try {
+       const body = req.body;
+       if (!Array.isArray(body.messages)) {
+         res.status(400).json({ error: "Invalid messages payload" });
+         return;
+       }
+       if (isRateLimited()) {
+         res.status(429).json({
+           error: "Rate limit exceeded. Please slow down."
+         });
+         return;
+       }
+       const trimmedMessages = trimMessages(body.messages);
+       const estimatedInputTokens = estimateTokens(trimmedMessages);
+       const estimatedTotalTokens = estimatedInputTokens + AI_LIMITS.maxOutputTokens;
+       if (estimatedTotalTokens > BUDGET.maxTokensPerRequest || !canSpendTokens(estimatedTotalTokens, BUDGET.dailyTokenLimit)) {
+         res.status(429).json({
+           error: "Daily AI budget exceeded. Try again tomorrow."
+         });
+         return;
+       }
+       res.setHeader("Content-Type", "text/plain");
+       res.setHeader("Transfer-Encoding", "chunked");
+       const stream = await client.chat.completions.create(
+         {
+           model: AI_MODEL,
+           stream: true,
+           temperature: 0.7,
+           max_tokens: AI_LIMITS.maxOutputTokens,
+           messages: trimmedMessages.map((m) => ({
+             role: m.role,
+             content: m.content
+           }))
+         },
+         {
+           signal: abortController.signal
+         }
+       );
+       streamStarted = true;
+       try {
+         for await (const chunk of stream) {
+           if (abortController.signal.aborted) {
+             break;
+           }
+           const token = chunk.choices[0]?.delta?.content;
+           if (token) {
+             res.write(token);
+           }
+         }
+       } catch (error) {
+         if (abortController.signal.aborted) {
+           console.log("Stream aborted by client");
+         } else {
+           throw error;
+         }
+       } finally {
+         // The estimate (not provider-reported usage) is charged to the daily budget.
+         recordTokenUsage(estimatedTotalTokens);
+         res.end();
+       }
+     } catch (error) {
+       if (error?.name === "AbortError") {
+         return;
+       }
+       console.error("AI error:", error);
+       // Once streaming has begun the headers are already sent, so a JSON
+       // error response is only possible before that point.
+       if (!res.headersSent) {
+         res.status(500).json({ error: "AI request failed" });
+       }
+     }
+   };
+ }
+ export {
+   createChatHandler
+ };
package/package.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "name": "chat-nest-server",
+   "version": "1.0.0",
+   "type": "module",
+   "main": "dist/index.js",
+   "license": "ISC",
+   "author": "Shivam Shukla",
+   "types": "dist/index.d.ts",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/shivams10/chat-nest"
+   },
+   "homepage": "https://github.com/shivams10/chat-nest",
+   "bugs": {
+     "url": "https://github.com/shivams10/chat-nest/issues"
+   },
+   "exports": {
+     ".": {
+       "import": "./dist/index.js",
+       "types": "./dist/index.d.ts"
+     }
+   },
+   "files": [
+     "dist"
+   ],
+   "scripts": {
+     "build": "tsup src/index.ts --format esm --dts",
+     "dev": "tsup src/index.ts --format esm --watch"
+   },
+   "dependencies": {
+     "chat-nest-core": "*",
+     "express": "^5.2.1",
+     "openai": "^6.16.0"
+   },
+   "devDependencies": {
+     "@types/express": "^5.0.6",
+     "tsup": "^8.5.1",
+     "typescript": "^5.9.3"
+   }
+ }