@stacksjs/zig-dtsx 0.9.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +21 -0
- package/README.md +73 -0
- package/build.zig +79 -0
- package/build.zig.zon +11 -0
- package/package.json +23 -0
- package/src/char_utils.zig +158 -0
- package/src/emitter.zig +1045 -0
- package/src/extractors.zig +2464 -0
- package/src/index.ts +222 -0
- package/src/lib.zig +254 -0
- package/src/main.zig +532 -0
- package/src/scan_loop.zig +330 -0
- package/src/scanner.zig +908 -0
- package/src/type_inference.zig +1564 -0
- package/src/types.zig +105 -0
- package/test/benchmark.ts +343 -0
- package/test/fixtures/output/variable.d.ts +157 -0
- package/test/zig-dtsx.test.ts +1386 -0
- package/zig-out/bin/zig-dtsx +0 -0
- package/zig-out/bin/zig-dtsx.exe +0 -0
package/src/index.ts
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TypeScript wrapper for the Zig DTS emitter using Bun FFI.
|
|
3
|
+
*/
|
|
4
|
+
import { dlopen, FFIType, type Pointer, ptr, suffix, toArrayBuffer } from 'bun:ffi'
|
|
5
|
+
import { join } from 'node:path'
|
|
6
|
+
|
|
7
|
+
const LIB_NAME = `libzig-dtsx.${suffix}`
|
|
8
|
+
const encoder = new TextEncoder()
|
|
9
|
+
const decoder = new TextDecoder()
|
|
10
|
+
const outLenBuffer = new BigUint64Array(1)
|
|
11
|
+
const outLenBufferIsolated = new BigUint64Array(1)
|
|
12
|
+
|
|
13
|
+
// Pre-allocated input buffer for encodeInto(); grows only when needed.
|
|
14
|
+
// Avoids per-call Uint8Array allocation from encoder.encode().
|
|
15
|
+
let inputBuf = new Uint8Array(4 * 1024 * 1024) // 4 MB initial
|
|
16
|
+
|
|
17
|
+
// Try to find the shared library
|
|
18
|
+
const libPaths = [
|
|
19
|
+
join(import.meta.dir, '..', 'zig-out', 'lib', LIB_NAME),
|
|
20
|
+
join(import.meta.dir, '..', `zig-out/lib/${LIB_NAME}`),
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
let lib: ReturnType<typeof dlopen> | null = null
|
|
24
|
+
for (const libPath of libPaths) {
|
|
25
|
+
try {
|
|
26
|
+
lib = dlopen(libPath, {
|
|
27
|
+
process_source: {
|
|
28
|
+
args: [FFIType.ptr, FFIType.u64, FFIType.bool],
|
|
29
|
+
returns: FFIType.ptr,
|
|
30
|
+
},
|
|
31
|
+
process_source_with_len: {
|
|
32
|
+
args: [FFIType.ptr, FFIType.u64, FFIType.bool, FFIType.ptr],
|
|
33
|
+
returns: FFIType.ptr,
|
|
34
|
+
},
|
|
35
|
+
process_source_with_options: {
|
|
36
|
+
args: [FFIType.ptr, FFIType.u64, FFIType.bool, FFIType.bool],
|
|
37
|
+
returns: FFIType.ptr,
|
|
38
|
+
},
|
|
39
|
+
process_source_with_options_len: {
|
|
40
|
+
args: [FFIType.ptr, FFIType.u64, FFIType.bool, FFIType.bool, FFIType.ptr],
|
|
41
|
+
returns: FFIType.ptr,
|
|
42
|
+
},
|
|
43
|
+
result_length: {
|
|
44
|
+
args: [FFIType.ptr],
|
|
45
|
+
returns: FFIType.u64,
|
|
46
|
+
},
|
|
47
|
+
free_result: {
|
|
48
|
+
args: [FFIType.ptr, FFIType.u64],
|
|
49
|
+
returns: FFIType.void,
|
|
50
|
+
},
|
|
51
|
+
process_batch: {
|
|
52
|
+
args: [FFIType.ptr, FFIType.ptr, FFIType.u32, FFIType.bool, FFIType.ptr, FFIType.ptr, FFIType.u32],
|
|
53
|
+
returns: FFIType.void,
|
|
54
|
+
},
|
|
55
|
+
free_batch_results: {
|
|
56
|
+
args: [FFIType.ptr, FFIType.ptr, FFIType.u32],
|
|
57
|
+
returns: FFIType.void,
|
|
58
|
+
},
|
|
59
|
+
})
|
|
60
|
+
break
|
|
61
|
+
}
|
|
62
|
+
catch {
|
|
63
|
+
continue
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
const symbols = lib?.symbols as any
|
|
68
|
+
const {
|
|
69
|
+
process_source,
|
|
70
|
+
process_source_with_len,
|
|
71
|
+
process_source_with_options,
|
|
72
|
+
process_source_with_options_len,
|
|
73
|
+
result_length,
|
|
74
|
+
free_result,
|
|
75
|
+
process_batch: _process_batch,
|
|
76
|
+
free_batch_results: _free_batch_results,
|
|
77
|
+
} = symbols ?? {}
|
|
78
|
+
|
|
79
|
+
function readResult(resultPtr: ReturnType<typeof process_source>, knownLen?: number): string {
|
|
80
|
+
if (!resultPtr) {
|
|
81
|
+
return ''
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
const len = knownLen ?? Number(result_length(resultPtr))
|
|
85
|
+
if (len === 0) {
|
|
86
|
+
free_result(resultPtr, 0)
|
|
87
|
+
return ''
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
const buf = toArrayBuffer(resultPtr, 0, len)
|
|
91
|
+
const result = decoder.decode(buf)
|
|
92
|
+
|
|
93
|
+
free_result(resultPtr, len)
|
|
94
|
+
|
|
95
|
+
return result
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
/**
|
|
99
|
+
* Process TypeScript source code and generate .d.ts declarations.
|
|
100
|
+
*
|
|
101
|
+
* @param sourceCode - The TypeScript source code to process
|
|
102
|
+
* @param keepComments - Whether to preserve comments in output (default: true)
|
|
103
|
+
* @param isolatedDeclarations - Skip initializer parsing when explicit type annotations exist (default: false)
|
|
104
|
+
* @returns The generated .d.ts declaration content
|
|
105
|
+
*/
|
|
106
|
+
export const ZIG_AVAILABLE: boolean = !!lib
|
|
107
|
+
|
|
108
|
+
export function processSource(sourceCode: string, keepComments: boolean = true, isolatedDeclarations: boolean = false): string {
|
|
109
|
+
if (!lib) {
|
|
110
|
+
throw new Error(
|
|
111
|
+
`zig-dtsx shared library not found. Run 'zig build -Doptimize=ReleaseFast' first.`,
|
|
112
|
+
)
|
|
113
|
+
}
|
|
114
|
+
if (!sourceCode || sourceCode.length === 0) {
|
|
115
|
+
return ''
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
// Encode into pre-allocated buffer (grow if needed). Avoids a new Uint8Array per call.
|
|
119
|
+
const maxBytes = sourceCode.length * 3 // UTF-8 worst case
|
|
120
|
+
if (maxBytes > inputBuf.length) {
|
|
121
|
+
inputBuf = new Uint8Array(maxBytes)
|
|
122
|
+
}
|
|
123
|
+
const { written } = encoder.encodeInto(sourceCode, inputBuf)
|
|
124
|
+
|
|
125
|
+
if (isolatedDeclarations) {
|
|
126
|
+
if (process_source_with_options_len) {
|
|
127
|
+
const resultPtr = process_source_with_options_len(ptr(inputBuf), written, keepComments, true, ptr(outLenBufferIsolated))
|
|
128
|
+
return readResult(resultPtr, Number(outLenBufferIsolated[0]))
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
const resultPtr = process_source_with_options(ptr(inputBuf), written, keepComments, true)
|
|
132
|
+
return readResult(resultPtr)
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
if (process_source_with_len) {
|
|
136
|
+
const resultPtr = process_source_with_len(ptr(inputBuf), written, keepComments, ptr(outLenBuffer))
|
|
137
|
+
return readResult(resultPtr, Number(outLenBuffer[0]))
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
const resultPtr = process_source(ptr(inputBuf), written, keepComments)
|
|
141
|
+
return readResult(resultPtr)
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
/**
|
|
145
|
+
* Process multiple TypeScript source files in parallel.
|
|
146
|
+
* Uses Zig threads for true parallelism — faster than sequential processSource calls.
|
|
147
|
+
*
|
|
148
|
+
* @param sources - Array of TypeScript source code strings
|
|
149
|
+
* @param keepComments - Whether to preserve comments in output (default: true)
|
|
150
|
+
* @param threadCount - Number of worker threads (0 = auto-detect CPU count)
|
|
151
|
+
* @returns Array of generated .d.ts declaration strings
|
|
152
|
+
*/
|
|
153
|
+
export function processBatch(sources: string[], keepComments: boolean = true, threadCount: number = 0): string[] {
|
|
154
|
+
if (!lib) {
|
|
155
|
+
throw new Error(
|
|
156
|
+
`zig-dtsx shared library not found. Run 'zig build -Doptimize=ReleaseFast' first.`,
|
|
157
|
+
)
|
|
158
|
+
}
|
|
159
|
+
const count = sources.length
|
|
160
|
+
if (count === 0) return []
|
|
161
|
+
|
|
162
|
+
// Encode all sources into separate buffers and collect pointers
|
|
163
|
+
const encodedBuffers: Uint8Array[] = new Array(count)
|
|
164
|
+
const encodedLens: number[] = new Array(count)
|
|
165
|
+
for (let i = 0; i < count; i++) {
|
|
166
|
+
const src = sources[i]
|
|
167
|
+
if (!src || src.length === 0) {
|
|
168
|
+
encodedBuffers[i] = new Uint8Array(1)
|
|
169
|
+
encodedLens[i] = 0
|
|
170
|
+
}
|
|
171
|
+
else {
|
|
172
|
+
const buf = new Uint8Array(src.length * 3)
|
|
173
|
+
const { written } = encoder.encodeInto(src, buf)
|
|
174
|
+
encodedBuffers[i] = buf
|
|
175
|
+
encodedLens[i] = written
|
|
176
|
+
}
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
// Build pointer array (array of pointers to input buffers)
|
|
180
|
+
const inputPtrsArray = new BigUint64Array(count)
|
|
181
|
+
const inputLensArray = new BigUint64Array(count)
|
|
182
|
+
for (let i = 0; i < count; i++) {
|
|
183
|
+
inputPtrsArray[i] = BigInt(ptr(encodedBuffers[i]))
|
|
184
|
+
inputLensArray[i] = BigInt(encodedLens[i])
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
// Allocate output arrays
|
|
188
|
+
const outPtrsArray = new BigUint64Array(count)
|
|
189
|
+
const outLensArray = new BigUint64Array(count)
|
|
190
|
+
|
|
191
|
+
// Call batch API
|
|
192
|
+
_process_batch(
|
|
193
|
+
ptr(inputPtrsArray),
|
|
194
|
+
ptr(inputLensArray),
|
|
195
|
+
count,
|
|
196
|
+
keepComments,
|
|
197
|
+
ptr(outPtrsArray),
|
|
198
|
+
ptr(outLensArray),
|
|
199
|
+
threadCount,
|
|
200
|
+
)
|
|
201
|
+
|
|
202
|
+
// Read results
|
|
203
|
+
const results: string[] = new Array(count)
|
|
204
|
+
for (let i = 0; i < count; i++) {
|
|
205
|
+
const resultPtr = Number(outPtrsArray[i])
|
|
206
|
+
const resultLen = Number(outLensArray[i])
|
|
207
|
+
if (resultLen === 0) {
|
|
208
|
+
results[i] = ''
|
|
209
|
+
}
|
|
210
|
+
else {
|
|
211
|
+
const buf = toArrayBuffer(resultPtr as Pointer, 0, resultLen)
|
|
212
|
+
results[i] = decoder.decode(buf)
|
|
213
|
+
}
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
// Free all results at once
|
|
217
|
+
_free_batch_results(ptr(outPtrsArray), ptr(outLensArray), count)
|
|
218
|
+
|
|
219
|
+
return results
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
// The object literal already infers exactly this shape, so the former
// `as { processSource: …, processBatch: … }` assertion was redundant and has
// been dropped (prefer declarations over assertions). Named exports above are
// preferred; the default export is kept for backward compatibility.
export default { processSource, processBatch }
|
package/src/lib.zig
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
1
|
+
/// C ABI exports for Bun FFI integration.
|
|
2
|
+
/// Provides process_source(+len variants), result_length, free_result.
|
|
3
|
+
const std = @import("std");
|
|
4
|
+
const Scanner = @import("scanner.zig").Scanner;
|
|
5
|
+
const emitter = @import("emitter.zig");
|
|
6
|
+
|
|
7
|
+
/// Internal (pointer, length) pair for a c_allocator-owned result buffer.
/// `len` excludes the trailing null terminator; free_result() frees `len + 1`
/// bytes, so every producer allocates at least one byte (see emptyResult()).
const ProcessResult = struct {
    // First byte of the heap buffer handed across the FFI boundary.
    ptr: [*]const u8,
    // Content length in bytes, not counting the '\0' sentinel.
    len: usize,
};
|
|
11
|
+
|
|
12
|
+
/// Allocate a 1-byte, null-terminated buffer representing the empty result.
/// A real allocation (rather than a pointer to static memory) is required so
/// that free_result(), which unconditionally frees `len + 1` bytes, stays safe.
fn emptyResult() ProcessResult {
    const buf = std.heap.c_allocator.alloc(u8, 1) catch @panic("OOM");
    buf[0] = 0;
    return ProcessResult{ .ptr = buf.ptr, .len = 0 };
}
|
|
17
|
+
|
|
18
|
+
/// Process TypeScript source → .d.ts output.
/// Returns a pointer to the result string (null-terminated).
/// Caller must call free_result() when done.
export fn process_source(
    input: [*]const u8,
    len: usize,
    keep_comments: bool,
) [*]const u8 {
    if (processSourceInternal(input, len, keep_comments, false)) |result| {
        return result.ptr;
    } else |_| {
        // Error path: hand back a c_allocator-allocated empty string so the
        // caller's unconditional free_result() remains valid.
        return emptyResult().ptr;
    }
}
|
|
32
|
+
|
|
33
|
+
/// Process source and return output length through out_len.
/// This avoids an extra pass in result_length() on the JS side.
export fn process_source_with_len(
    input: [*]const u8,
    len: usize,
    keep_comments: bool,
    out_len: *u64,
) [*]const u8 {
    if (processSourceInternal(input, len, keep_comments, false)) |result| {
        out_len.* = @intCast(result.len);
        return result.ptr;
    } else |_| {
        // Error path: report zero length and return a freeable empty buffer.
        out_len.* = 0;
        return emptyResult().ptr;
    }
}
|
|
48
|
+
|
|
49
|
+
/// Thread-local arena: reuse across calls to avoid repeated mmap/munmap syscalls.
/// reset(.retain_capacity) keeps the backing memory warm in CPU cache.
threadlocal var tls_arena: ?std.heap.ArenaAllocator = null;

/// Return the calling thread's arena, lazily initializing it on first use.
fn getOrInitArena() *std.heap.ArenaAllocator {
    if (tls_arena) |*arena| {
        return arena;
    }
    tls_arena = std.heap.ArenaAllocator.init(std.heap.c_allocator);
    return &(tls_arena.?);
}
|
|
59
|
+
|
|
60
|
+
/// Core pipeline shared by every exported process_source* variant:
/// scan `input[0..len]`, emit .d.ts text, and return a c_allocator-owned,
/// null-terminated buffer (ownership transfers to the caller / free_result()).
/// Scratch allocations live in the thread-local arena and are recycled on the
/// next call; only the returned buffer uses c_allocator and outlives the call.
fn processSourceInternal(
    input: [*]const u8,
    len: usize,
    keep_comments: bool,
    isolated_declarations: bool,
) !ProcessResult {
    // Handle empty input: allocate a lone '\0' byte so free_result(ptr, 0),
    // which frees len + 1 bytes, is still valid.
    if (len == 0) {
        const result = try std.heap.c_allocator.alloc(u8, 1);
        result[0] = 0;
        return .{ .ptr = result.ptr, .len = 0 };
    }

    const source = input[0..len];

    // Reuse thread-local arena — reset frees all allocations but keeps backing pages
    const arena = getOrInitArena();
    _ = arena.reset(.retain_capacity);
    const arena_alloc = arena.allocator();

    // Scan (declarations accumulate in scanner.declarations, arena-backed)
    var scanner = Scanner.init(arena_alloc, source, keep_comments, isolated_declarations);
    _ = try scanner.scan();

    // Emit — result buffer uses c_allocator directly so it survives arena.reset().
    // processDeclarations appends '\0' before toOwnedSlice, so the result is
    // already null-terminated with no extra copy needed.
    const default_import_order = [_][]const u8{"bun"};
    const dts_output = try emitter.processDeclarations(
        arena_alloc,
        std.heap.c_allocator,
        scanner.declarations.items,
        source,
        keep_comments,
        &default_import_order,
    );

    // dts_output[0..len] is content, dts_output.ptr[len] == 0 (null terminator)
    return .{ .ptr = dts_output.ptr, .len = dts_output.len };
}
|
|
100
|
+
|
|
101
|
+
/// Get the length of a result string (without null terminator).
/// Plain strlen over the null-terminated buffer returned by process_source.
export fn result_length(ptr: [*]const u8) usize {
    var n: usize = 0;
    while (ptr[n] != 0) : (n += 1) {}
    return n;
}
|
|
107
|
+
|
|
108
|
+
/// Free a result string previously returned by process_source.
/// The allocation spans `len + 1` bytes (content plus null terminator), and the
/// allocator must be given that exact span back.
export fn free_result(ptr: [*]const u8, len: usize) void {
    const base: [*]u8 = @constCast(ptr);
    std.heap.c_allocator.free(base[0 .. len + 1]);
}
|
|
113
|
+
|
|
114
|
+
/// Process source with isolatedDeclarations option
export fn process_source_with_options(
    input: [*]const u8,
    len: usize,
    keep_comments: bool,
    isolated_declarations: bool,
) [*]const u8 {
    if (processSourceInternal(input, len, keep_comments, isolated_declarations)) |result| {
        return result.ptr;
    } else |_| {
        // Error path: return a freeable empty string rather than null.
        return emptyResult().ptr;
    }
}
|
|
126
|
+
|
|
127
|
+
/// Same as process_source_with_options but returns length through out_len.
export fn process_source_with_options_len(
    input: [*]const u8,
    len: usize,
    keep_comments: bool,
    isolated_declarations: bool,
    out_len: *u64,
) [*]const u8 {
    if (processSourceInternal(input, len, keep_comments, isolated_declarations)) |result| {
        out_len.* = @intCast(result.len);
        return result.ptr;
    } else |_| {
        // Error path: zero length plus a freeable empty buffer.
        out_len.* = 0;
        return emptyResult().ptr;
    }
}
|
|
142
|
+
|
|
143
|
+
// ---------------------------------------------------------------------------
|
|
144
|
+
// Batch API — process multiple files in parallel from a single FFI call
|
|
145
|
+
// ---------------------------------------------------------------------------
|
|
146
|
+
|
|
147
|
+
/// One unit of work for batchWorker(): a single source buffer plus the slots in
/// the caller-provided output arrays where this file's result must be written.
const BatchTask = struct {
    // Source bytes — borrowed from the FFI caller, never freed here.
    input: [*]const u8,
    input_len: usize,
    keep_comments: bool,
    out_ptr: *usize, // where to write result pointer
    out_len: *u64, // where to write result length
};
|
|
154
|
+
|
|
155
|
+
/// Sequentially process a slice of tasks, writing each result (or an allocated
/// empty string on error) into the task's output slots. Runs on one thread;
/// process_batch() hands disjoint slices to separate threads.
fn batchWorker(tasks: []const BatchTask) void {
    for (tasks) |task| {
        if (processSourceInternal(task.input, task.input_len, task.keep_comments, false)) |result| {
            task.out_ptr.* = @intFromPtr(result.ptr);
            task.out_len.* = @intCast(result.len);
        } else |_| {
            // Error path: still publish a freeable buffer so free_batch_results
            // can unconditionally free every slot.
            task.out_ptr.* = @intFromPtr(emptyResult().ptr);
            task.out_len.* = 0;
        }
    }
}
|
|
167
|
+
|
|
168
|
+
/// Process multiple files in parallel.
/// inputs: array of pointers to source buffers
/// input_lens: array of source lengths
/// count: number of files
/// keep_comments: whether to preserve comments
/// out_ptrs: pre-allocated array where result pointers are written (as usize)
/// out_lens: pre-allocated array where result lengths are written
/// thread_count: number of worker threads (0 = auto-detect)
export fn process_batch(
    inputs: [*]const [*]const u8,
    input_lens: [*]const u64,
    count: u32,
    keep_comments: bool,
    out_ptrs: [*]usize,
    out_lens: [*]u64,
    thread_count: u32,
) void {
    const n: usize = @intCast(count);
    if (n == 0) return;

    // Build task list
    const tasks = std.heap.c_allocator.alloc(BatchTask, n) catch return;
    defer std.heap.c_allocator.free(tasks);

    for (0..n) |i| {
        tasks[i] = .{
            .input = inputs[i],
            .input_len = @intCast(input_lens[i]),
            .keep_comments = keep_comments,
            .out_ptr = &out_ptrs[i],
            .out_len = &out_lens[i],
        };
    }

    // Tracks which worker threads actually started, so only those are joined.
    var thread_spawned: [64]bool = .{false} ** 64;

    // Determine thread count.
    // FIX: num_threads must be clamped to thread_spawned.len (64). The original
    // code used @min(max_threads, n) unclamped, so a caller passing
    // thread_count > 64 (or a machine with > 64 CPUs and enough files) indexed
    // the fixed-size thread_spawned array out of bounds.
    const max_threads: usize = if (thread_count > 0)
        @intCast(thread_count)
    else
        @intCast(std.Thread.getCpuCount() catch 4);
    const num_threads = @min(@min(max_threads, n), thread_spawned.len);

    if (num_threads <= 1) {
        // Single-threaded: process all sequentially
        batchWorker(tasks);
        return;
    }

    // Distribute tasks across threads
    const threads = std.heap.c_allocator.alloc(std.Thread, num_threads) catch {
        // Fallback to single-threaded
        batchWorker(tasks);
        return;
    };
    defer std.heap.c_allocator.free(threads);

    // Contiguous chunks: thread t handles tasks[t*chunk .. min((t+1)*chunk, n)]
    const chunk_size = (n + num_threads - 1) / num_threads;

    for (0..num_threads) |t| {
        const start = t * chunk_size;
        if (start >= n) break;
        const end = @min(start + chunk_size, n);
        threads[t] = std.Thread.spawn(.{}, batchWorker, .{tasks[start..end]}) catch {
            // If spawn fails, process this chunk on the main thread immediately
            batchWorker(tasks[start..end]);
            continue;
        };
        thread_spawned[t] = true;
    }

    // Join all successfully spawned threads
    for (0..num_threads) |t| {
        if (thread_spawned[t]) {
            threads[t].join();
        }
    }
}
|
|
245
|
+
|
|
246
|
+
/// Free multiple results from a batch call.
/// Every slot written by process_batch holds a live allocation (even empty
/// results get a 1-byte buffer), so all `count` entries are freed.
export fn free_batch_results(ptrs: [*]const usize, lens: [*]const u64, count: u32) void {
    const total: usize = @intCast(count);
    var i: usize = 0;
    while (i < total) : (i += 1) {
        const buf: [*]u8 = @ptrFromInt(ptrs[i]);
        const content_len: usize = @intCast(lens[i]);
        // +1 covers the null terminator included in every allocation.
        std.heap.c_allocator.free(buf[0 .. content_len + 1]);
    }
}
|