@yetter/client 0.0.2 → 0.0.3

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
package/dist/client.js CHANGED
@@ -4,11 +4,11 @@ import { EventSourcePolyfill } from 'event-source-polyfill';
 export class yetter {
     static async subscribe(model, options) {
         var _b;
-        if (!process.env.YTR_API_KEY) {
-            throw new Error("YTR_API_KEY is not set");
+        if (!process.env.YTR_API_KEY && !process.env.REACT_APP_YTR_API_KEY) {
+            throw new Error("YTR_API_KEY and REACT_APP_YTR_API_KEY are not set");
         }
         const client = new YetterImageClient({
-            apiKey: process.env.YTR_API_KEY,
+            apiKey: process.env.YTR_API_KEY || process.env.REACT_APP_YTR_API_KEY || "",
         });
         const generateResponse = await client.generateImage({
             model: model,
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@yetter/client",
-  "version": "0.0.2",
+  "version": "0.0.3",
   "type": "module",
   "scripts": {
     "build": "tsc",
package/src/client.ts CHANGED
@@ -20,11 +20,11 @@ export class yetter {
     model: string,
     options: SubscribeOptions
   ): Promise<GetResponseResponse> {
-    if (!process.env.YTR_API_KEY) {
-      throw new Error("YTR_API_KEY is not set");
+    if (!process.env.YTR_API_KEY && !process.env.REACT_APP_YTR_API_KEY) {
+      throw new Error("YTR_API_KEY and REACT_APP_YTR_API_KEY are not set");
     }
     const client = new YetterImageClient({
-      apiKey: process.env.YTR_API_KEY,
+      apiKey: process.env.YTR_API_KEY || process.env.REACT_APP_YTR_API_KEY || "",
     });
 
     const generateResponse = await client.generateImage({
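
Both the compiled client.js and the TypeScript source now fall back to the Create React App-style REACT_APP_YTR_API_KEY variable when YTR_API_KEY is unset. As a minimal sketch of that resolution order, the hypothetical resolveApiKey helper below (not part of the package) reproduces the guard-and-fallback logic from the diff:

// Hypothetical helper, not shipped with @yetter/client: it only mirrors the
// 0.0.3 guard-and-fallback logic shown above. Requires Node typings for
// NodeJS.ProcessEnv.
function resolveApiKey(env: NodeJS.ProcessEnv = process.env): string {
  if (!env.YTR_API_KEY && !env.REACT_APP_YTR_API_KEY) {
    throw new Error("YTR_API_KEY and REACT_APP_YTR_API_KEY are not set");
  }
  // YTR_API_KEY takes precedence when both variables are defined.
  return env.YTR_API_KEY || env.REACT_APP_YTR_API_KEY || "";
}

Exporting either variable before calling yetter.subscribe(...) therefore satisfies the new check.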
package/stress.ts ADDED
@@ -0,0 +1,135 @@
+import { yetter } from "../src/client.js";
+
+const CONCURRENCY_LEVEL = 5; // Number of streams to run in parallel
+const MODEL_NAME = "ytr-ai/flux/dev"; // Model to use for streaming
+
+let streamCounter = 0;
+const activePromises: Promise<void>[] = [];
+let shuttingDown = false;
+
+/**
+ * Runs a single stream, measures latency, and logs results.
+ * @param id - A unique identifier for the stream.
+ */
+async function runStream(id: number): Promise<void> {
+  const startTime = Date.now();
+  let streamRequestId = "";
+
+  console.log(`[${new Date().toLocaleTimeString()}] [Stream ${id}] Starting...`);
+
+  try {
+    const streamInstance = await yetter.stream(MODEL_NAME, {
+      input: {
+        prompt: `A beautiful landscape painting, style of Van Gogh, stream ${id}`,
+        // You can add other parameters like seed or num_inference_steps here
+      },
+    });
+    streamRequestId = streamInstance.getRequestId();
+    console.log(`[${new Date().toLocaleTimeString()}] [Stream ${id}] Initiated. Request ID: ${streamRequestId}`);
+
+    // We are primarily interested in the latency of done(), so event iteration can be minimal or skipped.
+    // for await (const event of streamInstance) {
+    //   console.log(`[${new Date().toLocaleTimeString()}][STREAM EVENT - ${streamRequestId}] Status: ${event.status}, QPos: ${event.queue_position}`);
+    // }
+
+    // Wait for the final result from the done() method
+    await streamInstance.done();
+    const endTime = Date.now();
+    const latency = endTime - startTime;
+    console.log(`[${new Date().toLocaleTimeString()}] [Stream ${id}] Finished. Request ID: ${streamRequestId}. Latency: ${latency}ms`);
+
+  } catch (err: any) {
+    const endTime = Date.now();
+    const latency = endTime - startTime; // Still record latency up to the point of failure
+    console.error(`[${new Date().toLocaleTimeString()}] [Stream ${id}] Failed. Request ID: ${streamRequestId || 'UNKNOWN'}. Latency: ${latency}ms. Error: ${err.message || err}`);
+    // Optionally, rethrow or handle more specifically if needed
+  }
+}
+
+/**
+ * Launches a new stream if concurrency limit is not reached and not shutting down.
+ * Manages the activePromises array.
+ */
+async function launchStreamIfNotBusy(): Promise<void> {
+  if (shuttingDown || activePromises.length >= CONCURRENCY_LEVEL) {
+    return;
+  }
+
+  streamCounter++;
+  const currentStreamId = streamCounter;
+
+  console.log(`[${new Date().toLocaleTimeString()}] Launching stream ${currentStreamId}. Active: ${activePromises.length}/${CONCURRENCY_LEVEL}`);
+
+  const streamTask = runStream(currentStreamId);
+  activePromises.push(streamTask);
+
+  try {
+    await streamTask;
+  } catch (e) {
+    // Errors are logged within runStream
+  } finally {
+    const index = activePromises.indexOf(streamTask);
+    if (index > -1) {
+      activePromises.splice(index, 1);
+    }
+    console.log(`[${new Date().toLocaleTimeString()}] Stream ${currentStreamId} completed. Active: ${activePromises.length}/${CONCURRENCY_LEVEL}`);
+    // After a stream finishes, try to fill its slot immediately
+    if (!shuttingDown) {
+      launchStreamIfNotBusy(); // Non-blocking call to fill the slot
+    }
+  }
+}
+
+/**
+ * Fills available slots up to the CONCURRENCY_LEVEL.
+ */
+function fillSlots() {
+  while (!shuttingDown && activePromises.length < CONCURRENCY_LEVEL) {
+    // launchStreamIfNotBusy is async but we don't await it here
+    // as we want to launch multiple streams in parallel.
+    // It handles its own addition to activePromises.
+    launchStreamIfNotBusy();
+  }
+}
+
+/**
+ * Main function to run the stress test.
+ */
+async function mainStressTest() {
+  console.log("--- Starting Yetter Stream Stress Test ---");
+  console.log(`Concurrency Level (N): ${CONCURRENCY_LEVEL}`);
+  console.log(`Target Model: ${MODEL_NAME}`);
+  console.log("Press Ctrl+C to stop gracefully (will finish active streams).");
+
+  process.on('SIGINT', async () => {
+    console.log('\nSIGINT received. Gracefully shutting down...');
+    shuttingDown = true;
+    console.log(`No new streams will be launched. Waiting for ${activePromises.length} active stream(s) to complete...`);
+    // The main loop will exit once activePromises is empty after shuttingDown is true.
+  });
+
+  // Initial fill
+  fillSlots();
+
+  // Keep the script running and responsive to shutdown, also periodically check to fill slots
+  // in case some mechanism external to stream completion is needed (though launchStreamIfNotBusy's finally block should cover it)
+  while (true) {
+    if (shuttingDown && activePromises.length === 0) {
+      break; // Exit condition: shutting down and all streams are done.
+    }
+    // Periodically try to fill slots, mainly as a fallback or if initial fills didn't max out.
+    // The primary mechanism for refilling is within launchStreamIfNotBusy's finally block.
+    if (!shuttingDown) {
+      fillSlots();
+    }
+    await new Promise(resolve => setTimeout(resolve, 200)); // Interval for the main loop check
+  }
+
+  console.log('--- Stress Test Finished ---');
+  console.log(`Total streams launched during the session: ${streamCounter}`);
+}
+
+mainStressTest().catch(err => {
+  console.error("Unhandled error in mainStressTest:", err);
+  process.exit(1);
+});
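
The new stress.ts exercises the streaming API end to end. Stripped of the concurrency bookkeeping and logging, the core call pattern it measures looks roughly like the sketch below; the import specifier is an assumption (the script itself imports from ../src/client.js), while yetter.stream, getRequestId, and done are used exactly as in the script:

// Minimal sketch of the call pattern stress.ts times, assuming YTR_API_KEY
// (or REACT_APP_YTR_API_KEY) is set and that the package entry point
// re-exports the yetter class.
import { yetter } from "@yetter/client";

async function timeOneStream(): Promise<number> {
  const start = Date.now();
  const stream = await yetter.stream("ytr-ai/flux/dev", {
    input: { prompt: "A beautiful landscape painting" },
  });
  console.log("Request ID:", stream.getRequestId());
  await stream.done(); // resolves once generation has finished
  return Date.now() - start; // latency in ms, the metric stress.ts reports
}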