opencode-skills-antigravity 1.0.10 → 1.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25) hide show
  1. package/bundled-skills/cpp-pro/references/build-tooling.md +440 -0
  2. package/bundled-skills/cpp-pro/references/concurrency.md +437 -0
  3. package/bundled-skills/cpp-pro/references/memory-performance.md +397 -0
  4. package/bundled-skills/cpp-pro/references/modern-cpp.md +304 -0
  5. package/bundled-skills/cpp-pro/references/templates.md +357 -0
  6. package/bundled-skills/cpp-pro/resources/implementation-playbook.md +43 -0
  7. package/bundled-skills/docs/integrations/jetski-cortex.md +3 -3
  8. package/bundled-skills/docs/integrations/jetski-gemini-loader/README.md +4 -4
  9. package/bundled-skills/docs/integrations/jetski-gemini-loader/{loader.ts → loader.mjs} +38 -50
  10. package/bundled-skills/docs/maintainers/repo-growth-seo.md +3 -3
  11. package/bundled-skills/docs/maintainers/security-findings-triage-2026-03-15.csv +1 -1
  12. package/bundled-skills/docs/maintainers/security-findings-triage-2026-03-15.md +1 -1
  13. package/bundled-skills/docs/maintainers/security-findings-triage-2026-03-18-addendum.md +1 -1
  14. package/bundled-skills/docs/maintainers/skills-update-guide.md +1 -1
  15. package/bundled-skills/docs/users/bundles.md +1 -1
  16. package/bundled-skills/docs/users/claude-code-skills.md +1 -1
  17. package/bundled-skills/docs/users/gemini-cli-skills.md +1 -1
  18. package/bundled-skills/docs/users/getting-started.md +1 -1
  19. package/bundled-skills/docs/users/kiro-integration.md +1 -1
  20. package/bundled-skills/docs/users/usage.md +4 -4
  21. package/bundled-skills/docs/users/visual-guide.md +4 -4
  22. package/bundled-skills/jobgpt/SKILL.md +100 -0
  23. package/bundled-skills/moyu/SKILL.md +267 -0
  24. package/bundled-skills/windows-shell-reliability/SKILL.md +107 -0
  25. package/package.json +1 -1
@@ -0,0 +1,437 @@
1
+ # Concurrency and Parallel Programming
2
+
3
+ ## Atomics and Memory Ordering
4
+
5
+ ```cpp
6
+ #include <atomic>
7
+ #include <thread>
8
+
9
+ // Basic atomics
10
+ std::atomic<int> counter{0};
11
+ std::atomic<bool> flag{false};
12
+
13
+ // Memory ordering
14
+ void producer(std::atomic<int>& data, std::atomic<bool>& ready) {
15
+ data.store(42, std::memory_order_relaxed);
16
+ ready.store(true, std::memory_order_release); // Release barrier
17
+ }
18
+
19
+ void consumer(std::atomic<int>& data, std::atomic<bool>& ready) {
20
+ while (!ready.load(std::memory_order_acquire)) { // Acquire barrier
21
+ std::this_thread::yield();
22
+ }
23
+ int value = data.load(std::memory_order_relaxed);
24
+ }
25
+
26
+ // Compare-and-swap
27
+ bool try_acquire_lock(std::atomic<bool>& lock) {
28
+ bool expected = false;
29
+ return lock.compare_exchange_strong(expected, true,
30
+ std::memory_order_acquire,
31
+ std::memory_order_relaxed);
32
+ }
33
+
34
+ // Fetch-and-add
35
+ int increment_counter(std::atomic<int>& counter) {
36
+ return counter.fetch_add(1, std::memory_order_relaxed);
37
+ }
38
+ ```
39
+
40
+ ## Lock-Free Data Structures
41
+
42
+ ```cpp
43
+ #include <atomic>
44
+ #include <memory>
45
+
46
+ // Lock-free stack
47
+ template<typename T>
48
+ class LockFreeStack {
49
+ struct Node {
50
+ T data;
51
+ Node* next;
52
+ Node(const T& value) : data(value), next(nullptr) {}
53
+ };
54
+
55
+ std::atomic<Node*> head_{nullptr};
56
+
57
+ public:
58
+ void push(const T& value) {
59
+ Node* new_node = new Node(value);
60
+ new_node->next = head_.load(std::memory_order_relaxed);
61
+
62
+ while (!head_.compare_exchange_weak(new_node->next, new_node,
63
+ std::memory_order_release,
64
+ std::memory_order_relaxed)) {
65
+ // Retry with updated head
66
+ }
67
+ }
68
+
69
+ bool pop(T& result) {
70
+ Node* old_head = head_.load(std::memory_order_relaxed);
71
+
72
+ while (old_head &&
73
+ !head_.compare_exchange_weak(old_head, old_head->next,
74
+ std::memory_order_acquire,
75
+ std::memory_order_relaxed)) {
76
+ // Retry
77
+ }
78
+
79
+ if (old_head) {
80
+ result = old_head->data;
81
+ delete old_head; // UNSAFE in general: the CAS loop above reads old_head->next after another thread may have popped and freed old_head (use-after-free), and ABA can corrupt the stack. Production code needs safe reclamation (hazard pointers, epochs) or std::atomic<std::shared_ptr>.
82
+ return true;
83
+ }
84
+ return false;
85
+ }
86
+ };
87
+
88
+ // Lock-free queue (single producer, single consumer)
89
+ template<typename T, size_t Size>
90
+ class SPSCQueue {
91
+ std::array<T, Size> buffer_;
92
+ alignas(64) std::atomic<size_t> head_{0};
93
+ alignas(64) std::atomic<size_t> tail_{0};
94
+
95
+ public:
96
+ bool push(const T& item) {
97
+ size_t head = head_.load(std::memory_order_relaxed);
98
+ size_t next_head = (head + 1) % Size;
99
+
100
+ if (next_head == tail_.load(std::memory_order_acquire)) {
101
+ return false; // Queue full
102
+ }
103
+
104
+ buffer_[head] = item;
105
+ head_.store(next_head, std::memory_order_release);
106
+ return true;
107
+ }
108
+
109
+ bool pop(T& item) {
110
+ size_t tail = tail_.load(std::memory_order_relaxed);
111
+
112
+ if (tail == head_.load(std::memory_order_acquire)) {
113
+ return false; // Queue empty
114
+ }
115
+
116
+ item = buffer_[tail];
117
+ tail_.store((tail + 1) % Size, std::memory_order_release);
118
+ return true;
119
+ }
120
+ };
121
+ ```
122
+
123
+ ## Thread Pool
124
+
125
+ ```cpp
126
+ #include <thread>
127
+ #include <queue>
128
+ #include <mutex>
129
+ #include <condition_variable>
130
+ #include <functional>
131
+ #include <future>
132
+
133
+ class ThreadPool {
134
+ std::vector<std::thread> workers_;
135
+ std::queue<std::function<void()>> tasks_;
136
+ std::mutex queue_mutex_;
137
+ std::condition_variable condition_;
138
+ bool stop_ = false;
139
+
140
+ public:
141
+ ThreadPool(size_t num_threads) {
142
+ for (size_t i = 0; i < num_threads; ++i) {
143
+ workers_.emplace_back([this] {
144
+ while (true) {
145
+ std::function<void()> task;
146
+
147
+ {
148
+ std::unique_lock<std::mutex> lock(queue_mutex_);
149
+ condition_.wait(lock, [this] {
150
+ return stop_ || !tasks_.empty();
151
+ });
152
+
153
+ if (stop_ && tasks_.empty()) {
154
+ return;
155
+ }
156
+
157
+ task = std::move(tasks_.front());
158
+ tasks_.pop();
159
+ }
160
+
161
+ task();
162
+ }
163
+ });
164
+ }
165
+ }
166
+
167
+ ~ThreadPool() {
168
+ {
169
+ std::unique_lock<std::mutex> lock(queue_mutex_);
170
+ stop_ = true;
171
+ }
172
+ condition_.notify_all();
173
+ for (auto& worker : workers_) {
174
+ worker.join();
175
+ }
176
+ }
177
+
178
+ template<typename F, typename... Args>
179
+ auto enqueue(F&& f, Args&&... args)
180
+ -> std::future<typename std::invoke_result_t<F, Args...>> {
181
+
182
+ using return_type = typename std::invoke_result_t<F, Args...>;
183
+
184
+ auto task = std::make_shared<std::packaged_task<return_type()>>(
185
+ std::bind(std::forward<F>(f), std::forward<Args>(args)...)
186
+ );
187
+
188
+ std::future<return_type> result = task->get_future();
189
+
190
+ {
191
+ std::unique_lock<std::mutex> lock(queue_mutex_);
192
+ if (stop_) {
193
+ throw std::runtime_error("enqueue on stopped ThreadPool");
194
+ }
195
+ tasks_.emplace([task]() { (*task)(); });
196
+ }
197
+
198
+ condition_.notify_one();
199
+ return result;
200
+ }
201
+ };
202
+ ```
203
+
204
+ ## Parallel STL Algorithms
205
+
206
+ ```cpp
207
+ #include <algorithm>
208
+ #include <execution>
209
+ #include <vector>
210
+ #include <numeric>
211
+
212
+ void parallel_algorithms_demo() {
213
+ std::vector<int> vec(1'000'000);
214
+ std::iota(vec.begin(), vec.end(), 0);
215
+
216
+ // Parallel sort
217
+ std::sort(std::execution::par, vec.begin(), vec.end());
218
+
219
+ // Parallel for_each
220
+ std::for_each(std::execution::par_unseq, vec.begin(), vec.end(),
221
+ [](int& x) { x *= 2; });
222
+
223
+ // Parallel transform
224
+ std::vector<int> result(vec.size());
225
+ std::transform(std::execution::par, vec.begin(), vec.end(),
226
+ result.begin(), [](int x) { return x * x; });
227
+
228
+ // Parallel reduce
229
+ int sum = std::reduce(std::execution::par, vec.begin(), vec.end());
230
+
231
+ // Parallel transform_reduce (map-reduce)
232
+ int sum_of_squares = std::transform_reduce(
233
+ std::execution::par,
234
+ vec.begin(), vec.end(),
235
+ 0,
236
+ std::plus<>(),
237
+ [](int x) { return x * x; }
238
+ );
239
+ }
240
+ ```
241
+
242
+ ## Synchronization Primitives
243
+
244
+ ```cpp
245
+ #include <mutex>
246
+ #include <shared_mutex>
247
+ #include <condition_variable>
+ #include <queue>  // required by the Queue example below
248
+
249
+ // Mutex types
250
+ std::mutex mtx;
251
+ std::recursive_mutex rec_mtx;
252
+ std::timed_mutex timed_mtx;
253
+ std::shared_mutex shared_mtx;
254
+
255
+ // RAII locks
256
+ void exclusive_access() {
257
+ std::lock_guard<std::mutex> lock(mtx);
258
+ // Critical section
259
+ }
260
+
261
+ void unique_lock_example() {
262
+ std::unique_lock<std::mutex> lock(mtx);
263
+ // Can unlock and relock
264
+ lock.unlock();
265
+ // Do some work
266
+ lock.lock();
267
+ }
268
+
269
+ // Reader-writer lock
270
+ class SharedData {
271
+ mutable std::shared_mutex mutex_;
272
+ std::string data_;
273
+
274
+ public:
275
+ std::string read() const {
276
+ std::shared_lock<std::shared_mutex> lock(mutex_);
277
+ return data_;
278
+ }
279
+
280
+ void write(std::string new_data) {
281
+ std::unique_lock<std::shared_mutex> lock(mutex_);
282
+ data_ = std::move(new_data);
283
+ }
284
+ };
285
+
286
+ // Condition variable
287
+ class Queue {
288
+ std::queue<int> queue_;
289
+ std::mutex mutex_;
290
+ std::condition_variable cv_;
291
+
292
+ public:
293
+ void push(int value) {
294
+ {
295
+ std::lock_guard<std::mutex> lock(mutex_);
296
+ queue_.push(value);
297
+ }
298
+ cv_.notify_one();
299
+ }
300
+
301
+ int pop() {
302
+ std::unique_lock<std::mutex> lock(mutex_);
303
+ cv_.wait(lock, [this] { return !queue_.empty(); });
304
+ int value = queue_.front();
305
+ queue_.pop();
306
+ return value;
307
+ }
308
+ };
309
+
310
+ // std::scoped_lock - multiple mutexes
311
+ std::mutex mtx1, mtx2;
312
+
313
+ void transfer(Account& from, Account& to, int amount) {
314
+ std::scoped_lock lock(from.mutex, to.mutex); // Deadlock-free
315
+ from.balance -= amount;
316
+ to.balance += amount;
317
+ }
318
+ ```
319
+
320
+ ## Async and Futures
321
+
322
+ ```cpp
323
+ #include <future>
324
+
325
+ // std::async
326
+ auto future = std::async(std::launch::async, []() {
327
+ return expensive_computation();
328
+ });
329
+
330
+ // Get result (blocks until ready)
331
+ auto result = future.get();
332
+
333
+ // Promise and future
334
+ void producer(std::promise<int> promise) {
335
+ int value = compute_value();
336
+ promise.set_value(value);
337
+ }
338
+
339
+ void consumer(std::future<int> future) {
340
+ int value = future.get();
341
+ }
342
+
343
+ std::promise<int> promise;
344
+ std::future<int> future = promise.get_future();
345
+
346
+ std::thread producer_thread(producer, std::move(promise));
347
+ std::thread consumer_thread(consumer, std::move(future));
348
+
349
+ // Packaged task
350
+ std::packaged_task<int(int, int)> task([](int a, int b) {
351
+ return a + b;
352
+ });
353
+
354
+ std::future<int> task_future = task.get_future();
355
+ std::thread task_thread(std::move(task), 5, 3);
356
+
357
+ int sum = task_future.get(); // 8
358
+ task_thread.join();
359
+ ```
360
+
361
+ ## Coroutine-Based Concurrency
362
+
363
+ ```cpp
364
+ #include <coroutine>
365
+ #include <optional>
366
+
367
+ // Async task coroutine
368
+ template<typename T>
369
+ struct AsyncTask {
370
+ struct promise_type {
371
+ std::optional<T> value;
372
+ std::exception_ptr exception;
373
+
374
+ AsyncTask get_return_object() {
375
+ return AsyncTask{
376
+ std::coroutine_handle<promise_type>::from_promise(*this)
377
+ };
378
+ }
379
+
380
+ std::suspend_never initial_suspend() { return {}; }
381
+ std::suspend_always final_suspend() noexcept { return {}; }
382
+
383
+ void return_value(T v) {
384
+ value = std::move(v);
385
+ }
386
+
387
+ void unhandled_exception() {
388
+ exception = std::current_exception();
389
+ }
390
+ };
391
+
392
+ std::coroutine_handle<promise_type> handle;
393
+
394
+ AsyncTask(std::coroutine_handle<promise_type> h) : handle(h) {}
395
+ ~AsyncTask() { if (handle) handle.destroy(); }
396
+
397
+ T get() {
398
+ if (!handle.done()) {
399
+ handle.resume();
400
+ }
401
+
402
+ if (handle.promise().exception) {
403
+ std::rethrow_exception(handle.promise().exception);
404
+ }
405
+
406
+ return *handle.promise().value;
407
+ }
408
+ };
409
+
410
+ // Usage
411
+ AsyncTask<int> async_compute() {
412
+ co_return 42;
413
+ }
414
+ ```
415
+
416
+ ## Quick Reference
417
+
418
+ | Primitive | Use Case | Performance |
419
+ |-----------|----------|-------------|
420
+ | std::atomic | Simple shared state | Usually lock-free (verify with is_lock_free()) |
421
+ | std::mutex | Exclusive access | Syscall only under contention (futex-based) |
422
+ | std::shared_mutex | Read-heavy workload | Better than mutex |
423
+ | Lock-free structures | High contention | Best throughput |
424
+ | Thread pool | Task parallelism | Avoid thread overhead |
425
+ | Parallel STL | Data parallelism | Automatic scaling |
426
+ | std::async | Simple async tasks | Implementation-defined (often one thread per call, not a pool) |
427
+ | Coroutines | Async I/O | Minimal overhead |
428
+
429
+ ## Memory Ordering Guide
430
+
431
+ | Ordering | Guarantees | Use Case |
432
+ |----------|-----------|----------|
433
+ | relaxed | No synchronization | Counters |
434
+ | acquire | Load barrier | Consumer |
435
+ | release | Store barrier | Producer |
436
+ | acq_rel | Both | RMW operations |
437
+ | seq_cst | Total order | Default |