@t8n/ui 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,314 @@
+ use std::thread;
+ use crossbeam::channel::{bounded, Sender};
+ use tokio::sync::oneshot;
+ use bytes::Bytes;
+ use smallvec::SmallVec;
+ use crate::extensions::{self, TitanRuntime, WorkerAsyncResult};
+ use std::sync::{Arc, Mutex};
+
+ // ----------------------------------------------------------------------------
+ // TITANVM: HIGH-PERFORMANCE WORKER POOL (SCHEDULER V2)
+ // ----------------------------------------------------------------------------
+
+ pub enum WorkerCommand {
+     Request(RequestTask),
+     Resume {
+         isolate_id: usize,
+         drift_id: u32,
+         result: WorkerAsyncResult,
+     },
+ }
+
+ pub struct RequestTask {
+     pub action_name: String,
+     pub body: Option<Bytes>,
+     pub method: String,
+     pub path: String,
+     pub headers: SmallVec<[(String, String); 8]>,
+     pub params: SmallVec<[(String, String); 4]>,
+     pub query: SmallVec<[(String, String); 4]>,
+     pub response_tx: oneshot::Sender<WorkerResult>,
+ }
+
+ pub struct WorkerResult {
+     pub json: serde_json::Value,
+     pub timings: Vec<(String, f64)>,
+ }
+
+ pub struct RuntimeManager {
+     // Normal Priority (New Requests)
+     request_tx: Sender<WorkerCommand>,
+     // High Priority (Drift Resumes)
+     _resume_tx: Sender<WorkerCommand>,
+
+     _workers: Vec<thread::JoinHandle<()>>,
+ }
+
+ impl RuntimeManager {
+     pub fn new(project_root: std::path::PathBuf, num_threads: usize) -> Self {
+         // Use 4x isolates per thread so drift concurrency can exceed the thread count
+         let num_isolates = std::cmp::max(num_threads * 4, 16);
+
+         // Priority Queues
+         let (req_tx, req_rx) = bounded::<WorkerCommand>(10000);
+         let (res_tx, res_rx) = bounded::<WorkerCommand>(10000);
+
+         let tokio_handle = tokio::runtime::Handle::current();
+         let (async_tx, mut async_rx) = tokio::sync::mpsc::channel::<extensions::AsyncOpRequest>(10000);
+
+         // Global Async Executor (Tokio)
+         tokio_handle.spawn(async move {
+             println!("\x1b[38;5;39m[Titan]\x1b[0m Tokio Async Executor Online");
+             while let Some(req) = async_rx.recv().await {
+                 let drift_id = req.drift_id;
+                 let respond_tx = req.respond_tx;
+                 tokio::spawn(async move {
+                     let start = std::time::Instant::now();
+                     let result = extensions::builtin::run_async_operation(req.op).await;
+                     let duration_ms = start.elapsed().as_secs_f64() * 1000.0;
+                     let _ = respond_tx.send(WorkerAsyncResult {
+                         drift_id,
+                         result,
+                         duration_ms,
+                     });
+                 });
+             }
+         });
+
+         // Registry of Isolates
+         // Since V8 Isolates are NOT thread-safe, we must use a Mutex or 1-thread-at-a-time lock.
+         // We use a Vec of Mutexes. The threads will 'pick' an isolate and lock it while working.
+         let mut isolates = Vec::new();
+         for i in 0..num_isolates {
+             let rt = extensions::init_runtime_worker(
+                 i,
+                 project_root.clone(),
+                 res_tx.clone(), // Resumes go back to the high-priority queue
+                 tokio_handle.clone(),
+                 async_tx.clone(),
+             );
+             isolates.push(Arc::new(Mutex::new(rt)));
+         }
+
+         // Isolate State Registry
+         let free_isolates = bounded::<usize>(num_isolates);
+         for i in 0..num_isolates {
+             // Set Isolate ID data for the native bridge to use
+             let mut rt = isolates[i].lock().unwrap();
+             let ptr = &mut *rt as *mut extensions::TitanRuntime as *mut std::ffi::c_void;
+             rt.isolate.set_data(0, ptr);
+             free_isolates.0.send(i).unwrap();
+         }
+
+         let isolates = Arc::new(isolates);
+         let free_isolates_rx = free_isolates.1;
+         let free_isolates_tx = free_isolates.0;
+
+         let mut workers = Vec::new();
+         for i in 0..num_threads {
+             let req_rx_c = req_rx.clone();
+             let res_rx_c = res_rx.clone();
+             let isolates_c = isolates.clone();
+             let free_isolates_tx_c = free_isolates_tx.clone();
+             let free_isolates_rx_c = free_isolates_rx.clone();
+
+             let handle = thread::Builder::new()
+                 .name(format!("titan-worker-{}", i))
+                 .spawn(move || {
+                     loop {
+                         // 1. Check for HIGHEST PRIORITY: Resumes
+                         // Resumes are pinned to specific isolates, but ANY thread can pick them up.
+                         if let Ok(cmd) = res_rx_c.try_recv() {
+                             handle_resume(cmd, &isolates_c);
+                             continue;
+                         }
+
+                         // 2. Regular Requests
+                         // If no resumes, pick a new request IF an isolate is free.
+                         crossbeam::select! {
+                             recv(res_rx_c) -> cmd => {
+                                 if let Ok(cmd) = cmd {
+                                     handle_resume(cmd, &isolates_c);
+                                 }
+                             }
+                             recv(req_rx_c) -> cmd => {
+                                 if let Ok(cmd) = cmd {
+                                     // We need an isolate to handle a new request
+                                     if let Ok(iso_id) = free_isolates_rx_c.recv() {
+                                         handle_new_request(cmd, iso_id, &isolates_c, &free_isolates_tx_c);
+                                     }
+                                 } else {
+                                     break; // Channel closed
+                                 }
+                             }
+                         }
+                     }
+                 })
+                 .expect("Failed to spawn worker thread");
+             workers.push(handle);
+         }
+
+         Self {
+             request_tx: req_tx,
+             _resume_tx: res_tx,
+             _workers: workers,
+         }
+     }
+
+     pub async fn execute(
+         &self,
+         action: String,
+         method: String,
+         path: String,
+         body: Option<Bytes>,
+         headers: SmallVec<[(String, String); 8]>,
+         params: SmallVec<[(String, String); 4]>,
+         query: SmallVec<[(String, String); 4]>,
+     ) -> Result<(serde_json::Value, Vec<(String, f64)>), String> {
+         let (tx, rx) = oneshot::channel();
+         let task = RequestTask {
+             action_name: action,
+             body,
+             method,
+             path,
+             headers,
+             params,
+             query,
+             response_tx: tx,
+         };
+         self.request_tx.send(WorkerCommand::Request(task)).map_err(|e| e.to_string())?;
+         match rx.await {
+             Ok(res) => Ok((res.json, res.timings)),
+             Err(_) => Err("Worker channel closed".to_string()),
+         }
+     }
+ }
+
+ fn handle_new_request(
+     cmd: WorkerCommand,
+     iso_id: usize,
+     isolates: &[Arc<Mutex<TitanRuntime>>],
+     free_tx: &Sender<usize>
+ ) {
+     if let WorkerCommand::Request(task) = cmd {
+         let mut rt = isolates[iso_id].lock().unwrap();
+         rt.request_counter += 1;
+         let request_id = rt.request_counter;
+         rt.pending_requests.insert(request_id, task.response_tx);
+
+         // Store request data for potential replay
+         let req_data = extensions::RequestData {
+             action_name: task.action_name.clone(),
+             body: task.body.clone(),
+             method: task.method.clone(),
+             path: task.path.clone(),
+             headers: task.headers.iter().map(|(k,v)| (k.clone(), v.clone())).collect(),
+             params: task.params.iter().map(|(k,v)| (k.clone(), v.clone())).collect(),
+             query: task.query.iter().map(|(k,v)| (k.clone(), v.clone())).collect(),
+         };
+         rt.active_requests.insert(request_id, req_data);
+         let drift_count = rt.drift_counter;
+         rt.request_start_counters.insert(request_id, drift_count);
+
+         extensions::execute_action_optimized(
+             &mut rt,
+             request_id,
+             &task.action_name,
+             task.body,
+             &task.method,
+             &task.path,
+             &task.headers,
+             &task.params,
+             &task.query
+         );
+
+         // After execution, check if finished or suspended.
+         // If finished, pending_requests will not have the key (removed by t._finish_request).
+         // If suspended, it will still have the key.
+
+         if !rt.pending_requests.contains_key(&request_id) {
+             rt.active_requests.remove(&request_id);
+             rt.request_start_counters.remove(&request_id);
+         }
+
+         // ALWAYS free the isolate for other work
+         free_tx.send(iso_id).unwrap();
+     }
+ }
+
+ fn handle_resume(
+     cmd: WorkerCommand,
+     isolates: &[Arc<Mutex<TitanRuntime>>]
+ ) {
+     if let WorkerCommand::Resume { isolate_id, drift_id, result } = cmd {
+         let mut rt = isolates[isolate_id].lock().unwrap();
+
+         // 1. Identify which request this drift belongs to
+         let req_id = rt.drift_to_request.get(&drift_id).copied().unwrap_or(0);
+
+         // 2. Perform Timing
+         let timing_type = if result.result.get("error").is_some() { "drift_error" } else { "drift" };
+         rt.request_timings.entry(req_id).or_default().push((timing_type.to_string(), result.duration_ms));
+
+         // 3. Store Result for Replay
+         rt.completed_drifts.insert(drift_id, result.result);
+
+         // 4. Drift ID determinism.
+         //    On replay the action calls drift() again, which increments `drift_counter` again.
+         //    The counter is a global monotonic value on the runtime, so without a reset the
+         //    second run would mint NEW drift IDs and never find the stored results. The replay
+         //    must regenerate the SAME IDs, so the counter has to be restored to the value it
+         //    had when this request started (recorded in `request_start_counters` by
+         //    handle_new_request). A per-request counter would also work; resetting the global
+         //    one is sufficient here.
+
+         // 5. Trigger Replay (If we have the data)
+         // Check if we have active request data
+         if let Some(req_data) = rt.active_requests.get(&req_id).cloned() {
+             // Reset the counter so the replayed action regenerates the same drift IDs and can
+             // look up its completed results. Requests are not interleaved on a runtime (the
+             // isolate is freed after the request), so restoring the value recorded at the
+             // start of this request is enough.
+
+             let start_counter = rt.request_start_counters.get(&req_id).copied().unwrap_or(0);
+             rt.drift_counter = start_counter;
+
+             extensions::execute_action_optimized(
+                 &mut rt,
+                 req_id,
+                 &req_data.action_name,
+                 req_data.body,
+                 &req_data.method,
+                 &req_data.path,
+                 &req_data.headers,
+                 &req_data.params,
+                 &req_data.query
+             );
+         }
+
+         // 6. Check if finished
+         if req_id != 0 && !rt.pending_requests.contains_key(&req_id) {
+             // Clean up request data
+             rt.active_requests.remove(&req_id);
+             rt.request_start_counters.remove(&req_id);
+         }
+     }
+ }
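
The replay strategy above hinges on drift IDs being deterministic: when a suspended action is re-executed, it must mint the same IDs so it can find the results already stored in completed_drifts. The following minimal sketch is not part of the published package (MiniRuntime, fake_action, and the sample values are invented for illustration); it only shows why resetting the counter to the value recorded at the start of the request makes the second run line up with the stored result, mirroring what handle_resume does with request_start_counters.

    use std::collections::HashMap;

    // Hypothetical, stripped-down stand-in for the runtime's drift bookkeeping.
    struct MiniRuntime {
        drift_counter: u32,
        completed_drifts: HashMap<u32, &'static str>,
    }

    impl MiniRuntime {
        // drift() mints the next ID; it returns a previously completed result on
        // replay, or None when the action has to suspend and wait for the async op.
        fn drift(&mut self) -> Option<&'static str> {
            self.drift_counter += 1;
            let id = self.drift_counter;
            self.completed_drifts.get(&id).copied()
        }
    }

    // A fake action that performs exactly one async step via drift().
    fn fake_action(rt: &mut MiniRuntime) -> Option<&'static str> {
        rt.drift()
    }

    fn main() {
        let mut rt = MiniRuntime { drift_counter: 0, completed_drifts: HashMap::new() };

        // First run: the request starts at counter 0; drift #1 has no result yet, so it suspends.
        let start_counter = rt.drift_counter;
        assert_eq!(fake_action(&mut rt), None);

        // The async executor later completes drift #1...
        rt.completed_drifts.insert(1, "db row");

        // ...and the replay resets the counter to the request's start value, so the
        // action regenerates drift ID 1 and finds the stored result.
        rt.drift_counter = start_counter;
        assert_eq!(fake_action(&mut rt), Some("db row"));
        println!("replay found the completed drift");
    }
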
@@ -0,0 +1,33 @@
+ pub fn blue(s: &str) -> String {
+     format!("\x1b[38;5;39m{}\x1b[0m", s)
+ }
+ pub fn white(s: &str) -> String {
+     format!("\x1b[39m{}\x1b[0m", s)
+ }
+ pub fn yellow(s: &str) -> String {
+     format!("\x1b[33m{}\x1b[0m", s)
+ }
+ pub fn green(s: &str) -> String {
+     format!("\x1b[32m{}\x1b[0m", s)
+ }
+ pub fn gray(s: &str) -> String {
+     format!("\x1b[90m{}\x1b[0m", s)
+ }
+ pub fn red(s: &str) -> String {
+     format!("\x1b[31m{}\x1b[0m", s)
+ }
+
+ pub fn parse_expires_in(value: &str) -> Option<u64> {
+     // Guard against an empty string, where `value.len() - 1` would underflow and panic.
+     if value.is_empty() {
+         return None;
+     }
+     let (num, unit) = value.split_at(value.len() - 1);
+     let n: u64 = num.parse().ok()?;
+
+     match unit {
+         "s" => Some(n),
+         "m" => Some(n * 60),
+         "h" => Some(n * 60 * 60),
+         "d" => Some(n * 60 * 60 * 24),
+         _ => None,
+     }
+ }
+
+
@@ -0,0 +1,5 @@
+ {
+   "css:../app/static/styles.css": "body{\r\n background-color: black;\r\n height: 100vh;\r\n width: 100%;\r\n}\r\n\r\nh1{\r\n color: rgb(32, 215, 215);\r\n}",
+   "tpl:../app/static/app.html": "<!DOCTYPE html>\r\n<html lang=\"en\">\r\n<head>\r\n <meta charset=\"UTF-8\">\r\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\r\n <title>Document</title>\r\n</head>\r\n<body>\r\n <h1>tpl{{ name }}</h1>\r\n\r\n</body>\r\n</html>",
+   "html:../app/static/app.html": "<!DOCTYPE html>\r\n<html>\r\n<head>\r\n tpl{{ css }}\r\n</head>\r\n<body>\r\n <h1>tpl{{ name }}</h1>\r\n</body>\r\n</html>\r\n"
+ }
@@ -0,0 +1,264 @@
+ /**
+  * Bundle.js
+  * Handles esbuild bundling with comprehensive error reporting
+  * RULE: This file handles ALL esbuild errors and prints error boxes directly
+  */
+
+ import esbuild from 'esbuild';
+ import path from 'path';
+ import fs from 'fs';
+ import { fileURLToPath } from 'url';
+ import { createRequire } from 'module';
+ import { renderErrorBox, parseEsbuildError } from './error-box.js';
+
+ const __filename = fileURLToPath(import.meta.url);
+ const __dirname = path.dirname(__filename);
+
+ /**
+  * Get Titan version for error branding
+  */
+ function getTitanVersion() {
+   try {
+     const require = createRequire(import.meta.url);
+     const pkgPath = require.resolve("@ezetgalaxy/titan/package.json");
+     return JSON.parse(fs.readFileSync(pkgPath, "utf-8")).version;
+   } catch (e) {
+     return "0.1.0";
+   }
+ }
+
+ /**
+  * Custom error class for bundle errors
+  */
+ export class BundleError extends Error {
+   constructor(message, errors = [], warnings = []) {
+     super(message);
+     this.name = 'BundleError';
+     this.errors = errors;
+     this.warnings = warnings;
+     this.isBundleError = true;
+   }
+ }
+
+ /**
+  * Validates that the entry point exists and is readable
+  * @param {string} entryPoint - Entry file path
+  * @throws {BundleError} If file doesn't exist or isn't readable
+  */
+ async function validateEntryPoint(entryPoint) {
+   const absPath = path.resolve(entryPoint);
+
+   if (!fs.existsSync(absPath)) {
+     throw new BundleError(
+       `Entry point does not exist: ${entryPoint}`,
+       [{
+         text: `Cannot find file: ${absPath}`,
+         location: { file: entryPoint }
+       }]
+     );
+   }
+
+   try {
+     await fs.promises.access(absPath, fs.constants.R_OK);
+   } catch (err) {
+     throw new BundleError(
+       `Entry point is not readable: ${entryPoint}`,
+       [{
+         text: `Cannot read file: ${absPath}`,
+         location: { file: entryPoint }
+       }]
+     );
+   }
+ }
+
+ /**
+  * Bundles a single JavaScript/TypeScript file using esbuild
+  * @param {Object} options - Bundle options
+  * @returns {Promise<void>}
+  * @throws {BundleError} If bundling fails
+  */
+ export async function bundleFile(options) {
+   const {
+     entryPoint,
+     outfile,
+     format = 'iife',
+     minify = false,
+     sourcemap = false,
+     platform = 'neutral',
+     globalName = '__titan_exports',
+     target = 'es2020',
+     banner = {},
+     footer = {}
+   } = options;
+
+   // Validate entry point exists
+   await validateEntryPoint(entryPoint);
+
+   // Ensure output directory exists
+   const outDir = path.dirname(outfile);
+   await fs.promises.mkdir(outDir, { recursive: true });
+
+   try {
+     // Run esbuild with error logging enabled
+     const result = await esbuild.build({
+       entryPoints: [entryPoint],
+       bundle: true,
+       outfile,
+       format,
+       globalName,
+       platform,
+       target,
+       banner,
+       footer,
+       minify,
+       sourcemap,
+       logLevel: 'silent', // We handle all errors ourselves
+       logLimit: 0,
+       color: false,
+       write: true,
+       metafile: false,
+     });
+
+     // Check for errors in the result
+     if (result.errors && result.errors.length > 0) {
+       throw new BundleError(
+         `Build failed with ${result.errors.length} error(s)`,
+         result.errors,
+         result.warnings || []
+       );
+     }
+
+   } catch (err) {
+     if (err.errors && err.errors.length > 0) {
+       // This is an esbuild error with detailed error information
+       throw new BundleError(
+         `Build failed with ${err.errors.length} error(s)`,
+         err.errors,
+         err.warnings || []
+       );
+     }
+
+     // Other unexpected errors
+     throw new BundleError(
+       `Unexpected build error: ${err.message}`,
+       [{
+         text: err.message,
+         location: { file: entryPoint }
+       }]
+     );
+   }
+ }
+
+ /**
+  * Main bundle function - scans app/actions and bundles all files
+  * RULE: This function handles ALL esbuild errors and prints error boxes directly
+  * RULE: After printing error box, throws Error("__TITAN_BUNDLE_FAILED__")
+  * @returns {Promise<void>}
+  */
+ export async function bundle() {
+   const root = process.cwd();
+   const actionsDir = path.join(root, 'app', 'actions');
+   const bundleDir = path.join(root, 'server', 'actions');
+
+   // Ensure bundle directory exists and is clean
+   if (fs.existsSync(bundleDir)) {
+     fs.rmSync(bundleDir, { recursive: true, force: true });
+   }
+   await fs.promises.mkdir(bundleDir, { recursive: true });
+
+   // Check if actions directory exists
+   if (!fs.existsSync(actionsDir)) {
+     return; // No actions to bundle
+   }
+
+   // Get all JS/TS files in actions directory
+   const files = fs.readdirSync(actionsDir).filter(f =>
+     (f.endsWith('.js') || f.endsWith('.ts')) && !f.endsWith('.d.ts')
+   );
+
+   if (files.length === 0) {
+     return; // No action files
+   }
+
+   // Bundle each action file
+   for (const file of files) {
+     const actionName = path.basename(file, path.extname(file));
+     const entryPoint = path.join(actionsDir, file);
+     const outfile = path.join(bundleDir, actionName + ".jsbundle");
+
+     try {
+       await bundleFile({
+         entryPoint,
+         outfile,
+         format: 'iife',
+         globalName: '__titan_exports',
+         platform: 'neutral',
+         target: 'es2020',
+         minify: false,
+         sourcemap: false,
+         banner: {
+           js: "const defineAction = (fn) => fn; const Titan = t;"
+         },
+         footer: {
+           js: `
+             (function () {
+               const fn =
+                 __titan_exports["${actionName}"] ||
+                 __titan_exports.default;
+
+               if (typeof fn !== "function") {
+                 throw new Error("[Titan] Action '${actionName}' not found or not a function");
+               }
+
+               globalThis["${actionName}"] = globalThis.defineAction(fn);
+             })();
+           `
+         }
+       });
+     } catch (error) {
+       // RULE: Handle esbuild errors HERE and print error boxes
+       if (error.isBundleError && error.errors && error.errors.length > 0) {
+         // Print error box for each esbuild error
+         console.error(); // Empty line for spacing
+
+         const titanVersion = getTitanVersion();
+
+         for (let i = 0; i < error.errors.length; i++) {
+           const esbuildError = error.errors[i];
+           const errorInfo = parseEsbuildError(esbuildError);
+
+           // Add error number to title if multiple errors
+           if (error.errors.length > 1) {
+             errorInfo.title = `Build Error ${i + 1}/${error.errors.length}`;
+           }
+
+           // Add Titan version
+           errorInfo.titanVersion = titanVersion;
+
+           // Print the error box
+           console.error(renderErrorBox(errorInfo));
+
+           if (i < error.errors.length - 1) {
+             console.error(); // Empty line between errors
+           }
+         }
+
+         console.error(); // Empty line after all errors
+       } else {
+         // Other errors
+         console.error();
+         const errorInfo = {
+           title: 'Build Error',
+           file: entryPoint,
+           message: error.message || 'Unknown error',
+           titanVersion: getTitanVersion()
+         };
+         console.error(renderErrorBox(errorInfo));
+         console.error();
+       }
+
+       // RULE: Throw special error to signal bundle failure
+       throw new Error('__TITAN_BUNDLE_FAILED__');
+     }
+   }
+ }