@ngvuhuy/promptglass 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. package/README.md +39 -0
  2. package/cli.js +35 -0
  3. package/client/dist/assets/geist-cyrillic-wght-normal-CHSlOQsW.woff2 +0 -0
  4. package/client/dist/assets/geist-latin-ext-wght-normal-DMtmJ5ZE.woff2 +0 -0
  5. package/client/dist/assets/geist-latin-wght-normal-Dm3htQBi.woff2 +0 -0
  6. package/client/dist/assets/index-CcUIhnxD.js +7768 -0
  7. package/client/dist/assets/index-DHZWNzeC.css +2 -0
  8. package/client/dist/index.html +14 -0
  9. package/client/package.json +43 -0
  10. package/package.json +36 -0
  11. package/server/dist/server/index.d.ts +4 -0
  12. package/server/dist/server/index.d.ts.map +1 -0
  13. package/server/dist/server/index.js +54 -0
  14. package/server/dist/server/index.js.map +1 -0
  15. package/server/dist/server/routes/dashboard.d.ts +4 -0
  16. package/server/dist/server/routes/dashboard.d.ts.map +1 -0
  17. package/server/dist/server/routes/dashboard.js +102 -0
  18. package/server/dist/server/routes/dashboard.js.map +1 -0
  19. package/server/dist/server/routes/proxy.d.ts +4 -0
  20. package/server/dist/server/routes/proxy.d.ts.map +1 -0
  21. package/server/dist/server/routes/proxy.js +130 -0
  22. package/server/dist/server/routes/proxy.js.map +1 -0
  23. package/server/dist/server/services/storage.d.ts +13 -0
  24. package/server/dist/server/services/storage.d.ts.map +1 -0
  25. package/server/dist/server/services/storage.js +135 -0
  26. package/server/dist/server/services/storage.js.map +1 -0
  27. package/server/dist/server/services/stream.d.ts +13 -0
  28. package/server/dist/server/services/stream.d.ts.map +1 -0
  29. package/server/dist/server/services/stream.js +152 -0
  30. package/server/dist/server/services/stream.js.map +1 -0
  31. package/server/dist/shared/types.d.ts +24 -0
  32. package/server/dist/shared/types.d.ts.map +1 -0
  33. package/server/dist/shared/types.js +3 -0
  34. package/server/dist/shared/types.js.map +1 -0
  35. package/server/package.json +33 -0
  36. package/shared/types.d.ts +24 -0
  37. package/shared/types.d.ts.map +1 -0
  38. package/shared/types.js +3 -0
  39. package/shared/types.js.map +1 -0
  40. package/shared/types.ts +26 -0
package/README.md ADDED
@@ -0,0 +1,39 @@
1
+ # Promptglass
2
+
3
+ An LLM Observability tool - see what your program sends to the LLM, when they do it, and how long each request takes.
4
+
5
+ Promptglass acts as an OpenAI-compatible proxy that sits between your program and the upstream LLM provider.
6
+ Simply point the LLM provider URL in your program to `http://localhost:3001/` and Promptglass will transparently proxy
7
+ and record the requests/responses.
8
+
9
+ ## Features
10
+ - View the LLM requests and responses, both formatted and in raw JSON format.
11
+ - View the metrics for each request (time to first token, total latency, prompt processing and token generation speed)
12
+ - Compare the text and metrics across different calls, to detect prompt changes that might affect caching
13
+ - Benchmark the LLM inference with input-heavy and output-heavy benchmarks.
14
+
15
+ ## Installation
16
+ ```bash
17
+ npx @ngvuhuy/promptglass
18
+ ```
19
+ or
20
+ ```bash
21
+ git clone https://github.com/ngvuhuy/promptglass.git
22
+ cd promptglass
23
+ npm install
24
+ npm run dev
25
+ ```
26
+ This will start a frontend listening on `localhost:5173` and the proxy on `localhost:3001`.
27
+
28
+ Then, point the OpenAI URL of your project to `localhost:3001`. Go to the frontend, click on Settings, and put the original upstream LLM provider API URL there
29
+ (e.g. `https://api.openai.com/v1/`). Optionally, you can set the API key (if blank, the proxy will keep forwarding the API key already inside your request).
30
+ ## Screenshots
31
+
32
+ ## Example use cases
33
+ - Your program makes multiple LLM calls, but some seem to respond slower than others, stalling your program.
34
+ You use Promptglass to diagnose, and find out that those calls run sequentially instead of in parallel like others.
35
+ - Using Promptglass, you found out that your program slightly changes an early part of the prompt in subsequent calls,
36
+ invalidating the prompt caching and making later LLM calls cost more.
37
+ - Using Promptglass, you found out that subsequent calls put irrelevant information in context, causing the result quality to suffer.
38
+ - You deploy a local LLM model, and use Promptglass's benchmark mode to measure the metrics to optimize the performance of the model.
39
+
package/cli.js ADDED
@@ -0,0 +1,35 @@
#!/usr/bin/env node
import { spawn } from "child_process";
import { fileURLToPath } from "url";
import { dirname, join } from "path";
import fs from "fs";

const __dirname = dirname(fileURLToPath(import.meta.url));

/**
 * Launch the pre-built PromptGlass server as a child process.
 *
 * Exits this CLI with code 1 if the production build is missing,
 * and otherwise mirrors the child's exit code when it terminates.
 */
const startServer = () => {
  const serverPath = join(__dirname, "server", "dist", "server", "index.js");

  if (!fs.existsSync(serverPath)) {
    console.error("Error: Production build not found. Please run 'npm run build' first.");
    process.exit(1);
  }

  console.log("Starting PromptGlass...");

  // Spawn with the exact Node binary running this CLI (process.execPath)
  // rather than whatever "node" resolves to on PATH — PATH may lack node
  // entirely or point at a different version than the one executing us.
  const proc = spawn(process.execPath, [serverPath], {
    cwd: __dirname,
    stdio: "inherit",
    env: {
      ...process.env,
      NODE_ENV: "production",
    },
  });

  // Propagate the server's exit status to our caller.
  proc.on("close", (code) => process.exit(code ?? 0));

  // Forward termination signals so the child can shut down cleanly.
  process.on("SIGINT", () => proc.kill("SIGINT"));
  process.on("SIGTERM", () => proc.kill("SIGTERM"));
};

startServer();