io-event 1.6.6 → 1.7.0
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +10 -4
- data/ext/io/event/event.c +2 -8
- data/ext/io/event/interrupt.c +7 -0
- data/ext/io/event/selector/array.h +4 -0
- data/ext/io/event/selector/epoll.c +4 -7
- data/ext/io/event/selector/kqueue.c +4 -7
- data/ext/io/event/selector/selector.c +13 -1
- data/ext/io/event/selector/selector.h +9 -6
- data/ext/io/event/selector/uring.c +4 -7
- data/lib/io/event/version.rb +1 -1
- data/license.md +2 -0
- data.tar.gz.sig +0 -0
- metadata +2 -2
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 542a0e0f15dd7d59246ac8b03a3579ff9fe8e85618009277572b04a1d1fdbfc9
+  data.tar.gz: e440cf4f7f2a33a7e7c0b995f4b7829d4d66a44acb3f3d3ca00683b838062fcb
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: cd577bac5f3b70b70af3ca48f012179ec8d1b1774fc89cd46982fed601fe3a163a9ae9742b58ded385764c5233abc90cbf63f3b2563a114d232233085ba1c5ef
+  data.tar.gz: ff23dd077067823bd748d89482d39bb1b49cee26e87b6733561d2c3c09ffa3711015d0e8da514ac4851eccd81b84b0e7b66c2d5d7815ade30c551052e0dda536
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/extconf.rb
CHANGED
@@ -17,11 +17,9 @@ extension_name = "IO_Event"
 $CFLAGS << " -Wall -Wno-unknown-pragmas -std=c99"
 
 if ENV.key?("RUBY_DEBUG")
-	$
+	$stderr.puts "Enabling debug mode..."
 	
-	
-	$CFLAGS << " -fsanitize=undefined -fno-omit-frame-pointer"
-	$LDFLAGS << " -fsanitize=undefined"
+	$CFLAGS << " -DRUBY_DEBUG -O0"
 end
 
 $srcs = ["io/event/event.c", "io/event/selector/selector.c"]

@@ -56,6 +54,14 @@ have_func("epoll_pwait2")
 
 have_header("ruby/io/buffer.h")
 
+if ENV.key?("RUBY_SANITIZE")
+	$stderr.puts "Enabling sanitizers..."
+	
+	# Add address and undefined behaviour sanitizers:
+	$CFLAGS << " -fsanitize=address -fsanitize=undefined -fno-omit-frame-pointer"
+	$LDFLAGS << " -fsanitize=address -fsanitize=undefined"
+end
+
 create_header
 
 # Generate the makefile to compile the native binary into `lib`:
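The build script now separates the two concerns: `RUBY_DEBUG` only turns on `-DRUBY_DEBUG -O0`, while the new `RUBY_SANITIZE` switch adds AddressSanitizer and UndefinedBehaviourSanitizer to both the compile and link flags. As a generic illustration only (this snippet is not from io-event), the kind of defect an ASan-instrumented build is intended to surface looks like this:

```c
// Hypothetical stand-alone example: built with -fsanitize=address,
// the read below aborts with a heap-use-after-free report (including
// allocation and free stacks) instead of silently returning garbage.
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int *values = calloc(4, sizeof(int));
	values[0] = 42;
	
	free(values);
	
	// Invalid read after free; AddressSanitizer terminates the process here.
	printf("%d\n", values[0]);
	
	return 0;
}
```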
data/ext/io/event/event.c
CHANGED
@@ -21,20 +21,14 @@
 #include "event.h"
 #include "selector/selector.h"
 
-VALUE IO_Event = Qnil;
-VALUE IO_Event_Selector = Qnil;
-
 void Init_IO_Event(void)
 {
 #ifdef HAVE_RB_EXT_RACTOR_SAFE
 	rb_ext_ractor_safe(true);
 #endif
 	
-	IO_Event = rb_define_module_under(rb_cIO, "Event");
-	
-	
-	IO_Event_Selector = rb_define_module_under(IO_Event, "Selector");
-	rb_gc_register_mark_object(IO_Event_Selector);
+	VALUE IO_Event = rb_define_module_under(rb_cIO, "Event");
+	VALUE IO_Event_Selector = rb_define_module_under(IO_Event, "Selector");
 	
 	Init_IO_Event_Selector(IO_Event_Selector);
 
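The file-scope globals and explicit pinning are no longer needed here: once `rb_define_module_under`/`rb_define_class_under` stores the new module or class in its parent's constant table, the GC already keeps it alive, so a local `VALUE` during initialization suffices. A minimal sketch of the same pattern, with illustrative names that are not part of this gem:

```c
// Sketch: defining a nested class without a file-scope global or
// rb_gc_register_mark_object(). The class remains reachable through
// the Example::Widget constant, so no extra pinning is required.
#include <ruby.h>

static VALUE Example_Widget_ping(VALUE self)
{
	return rb_str_new_cstr("pong");
}

void Init_example(void)
{
	VALUE Example = rb_define_module("Example");
	VALUE Example_Widget = rb_define_class_under(Example, "Widget", rb_cObject);
	
	rb_define_method(Example_Widget, "ping", Example_Widget_ping, 0);
}
```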
data/ext/io/event/interrupt.c
CHANGED
@@ -25,6 +25,13 @@
 
 #include "selector/selector.h"
 
+#ifdef HAVE_RUBY_WIN32_H
+#include <ruby/win32.h>
+#if !defined(HAVE_PIPE) && !defined(pipe)
+#define pipe(p) rb_w32_pipe(p)
+#endif
+#endif
+
 #ifdef HAVE_SYS_EVENTFD_H
 #include <sys/eventfd.h>
 
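The new guard makes `pipe()` available on Windows builds by mapping it to `rb_w32_pipe` when `ruby/win32.h` is present and no native `pipe` exists. Independently of how this gem implements its interrupt, the underlying self-pipe idea is sketched below (illustrative code, not taken from `interrupt.c`): the selector blocks on the read end, and another thread wakes it by writing one byte, which the selector later drains.

```c
// Generic self-pipe sketch (names are illustrative): interrupt_signal()
// wakes up whatever is blocked waiting for descriptor[0] to become
// readable; interrupt_clear() drains any pending wakeup bytes.
#include <fcntl.h>
#include <unistd.h>

struct Interrupt {
	int descriptor[2]; // [0] = read end, [1] = write end.
};

static int interrupt_open(struct Interrupt *interrupt)
{
	if (pipe(interrupt->descriptor) == -1) return -1;
	
	// Make the read end non-blocking so draining never stalls:
	int flags = fcntl(interrupt->descriptor[0], F_GETFL);
	fcntl(interrupt->descriptor[0], F_SETFL, flags | O_NONBLOCK);
	
	return 0;
}

static void interrupt_signal(struct Interrupt *interrupt)
{
	char byte = 0;
	(void)write(interrupt->descriptor[1], &byte, 1);
}

static void interrupt_clear(struct Interrupt *interrupt)
{
	char buffer[64];
	while (read(interrupt->descriptor[0], buffer, sizeof(buffer)) > 0) {
		// Keep reading until the pipe is empty.
	}
}
```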
data/ext/io/event/selector/array.h
CHANGED
@@ -29,6 +29,8 @@ inline static void IO_Event_Array_allocate(struct IO_Event_Array *array, size_t
 {
 	if (count) {
 		array->base = (void**)calloc(count, sizeof(void*));
+		assert(array->base);
+		
 		array->count = count;
 	} else {
 		array->base = NULL;

@@ -51,6 +53,7 @@ inline static void IO_Event_Array_free(struct IO_Event_Array *array)
 		void *element = array->base[i];
 		if (element) {
 			array->element_free(element);
+			
 			free(element);
 		}
 	}

@@ -107,6 +110,7 @@ inline static void* IO_Event_Array_lookup(struct IO_Event_Array *array, size_t i
 	// Allocate the element if it doesn't exist:
 	if (*element == NULL) {
 		*element = malloc(array->element_size);
+		assert(*element);
 		
 		if (array->element_initialize) {
 			array->element_initialize(*element);
data/ext/io/event/selector/epoll.c
CHANGED
@@ -34,8 +34,6 @@ enum {
 	DEBUG = 0,
 };
 
-static VALUE IO_Event_Selector_EPoll = Qnil;
-
 enum {EPOLL_MAX_EVENTS = 64};
 
 // This represents an actual fiber waiting for a specific event.

@@ -184,7 +182,7 @@ static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
 		.dsize = IO_Event_Selector_EPoll_Type_size,
 	},
 	.data = NULL,
-	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 inline static

@@ -333,7 +331,7 @@ VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
 	struct IO_Event_Selector_EPoll *selector = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 	
-	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	IO_Event_Selector_initialize(&selector->backend, self, Qnil);
 	selector->descriptor = -1;
 	selector->blocked = 0;
 	

@@ -363,7 +361,7 @@ VALUE IO_Event_Selector_EPoll_initialize(VALUE self, VALUE loop) {
 	struct IO_Event_Selector_EPoll *selector = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 	
-	IO_Event_Selector_initialize(&selector->backend, loop);
+	IO_Event_Selector_initialize(&selector->backend, self, loop);
 	int result = epoll_create1(EPOLL_CLOEXEC);
 	
 	if (result == -1) {

@@ -1037,8 +1035,7 @@ VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
 }
 
 void Init_IO_Event_Selector_EPoll(VALUE IO_Event_Selector) {
-	IO_Event_Selector_EPoll = rb_define_class_under(IO_Event_Selector, "EPoll", rb_cObject);
-	rb_gc_register_mark_object(IO_Event_Selector_EPoll);
+	VALUE IO_Event_Selector_EPoll = rb_define_class_under(IO_Event_Selector, "EPoll", rb_cObject);
 	
 	rb_define_alloc_func(IO_Event_Selector_EPoll, IO_Event_Selector_EPoll_allocate);
 	rb_define_method(IO_Event_Selector_EPoll, "initialize", IO_Event_Selector_EPoll_initialize, 1);
data/ext/io/event/selector/kqueue.c
CHANGED
@@ -43,8 +43,6 @@ enum {
 #define IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
 #endif
 
-static VALUE IO_Event_Selector_KQueue = Qnil;
-
 enum {KQUEUE_MAX_EVENTS = 64};
 
 // This represents an actual fiber waiting for a specific event.

@@ -183,7 +181,7 @@ static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
 		.dsize = IO_Event_Selector_KQueue_Type_size,
 	},
 	.data = NULL,
-	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 inline static

@@ -307,7 +305,7 @@ VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
 	struct IO_Event_Selector_KQueue *selector = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 	
-	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	IO_Event_Selector_initialize(&selector->backend, self, Qnil);
 	selector->descriptor = -1;
 	selector->blocked = 0;
 	

@@ -340,7 +338,7 @@ VALUE IO_Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
 	struct IO_Event_Selector_KQueue *selector = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 	
-	IO_Event_Selector_initialize(&selector->backend, loop);
+	IO_Event_Selector_initialize(&selector->backend, self, loop);
 	int result = kqueue();
 	
 	if (result == -1) {

@@ -1052,8 +1050,7 @@ VALUE IO_Event_Selector_KQueue_wakeup(VALUE self) {
 }
 
 void Init_IO_Event_Selector_KQueue(VALUE IO_Event_Selector) {
-	IO_Event_Selector_KQueue = rb_define_class_under(IO_Event_Selector, "KQueue", rb_cObject);
-	rb_gc_register_mark_object(IO_Event_Selector_KQueue);
+	VALUE IO_Event_Selector_KQueue = rb_define_class_under(IO_Event_Selector, "KQueue", rb_cObject);
 	
 	rb_define_alloc_func(IO_Event_Selector_KQueue, IO_Event_Selector_KQueue_allocate);
 	rb_define_method(IO_Event_Selector_KQueue, "initialize", IO_Event_Selector_KQueue_initialize, 1);
data/ext/io/event/selector/selector.c
CHANGED
@@ -172,12 +172,14 @@ static void queue_pop(struct IO_Event_Selector *backend, struct IO_Event_Selecto
 	if (waiting->head) {
 		waiting->head->tail = waiting->tail;
 	} else {
+		// We must have been at the head of the queue:
 		backend->waiting = waiting->tail;
 	}
 	
 	if (waiting->tail) {
 		waiting->tail->head = waiting->head;
 	} else {
+		// We must have been at the tail of the queue:
 		backend->ready = waiting->head;
 	}
 	

@@ -190,12 +192,15 @@ static void queue_push(struct IO_Event_Selector *backend, struct IO_Event_Select
 	assert(waiting->tail == NULL);
 	
 	if (backend->waiting) {
+		// If there was an item in the queue already, we shift it along:
 		backend->waiting->head = waiting;
 		waiting->tail = backend->waiting;
 	} else {
+		// If the queue was empty, we update the tail too:
 		backend->ready = waiting;
 	}
 	
+	// We always push to the front/head:
 	backend->waiting = waiting;
 }
 

@@ -276,11 +281,14 @@ VALUE IO_Event_Selector_raise(struct IO_Event_Selector *backend, int argc, VALUE
 void IO_Event_Selector_queue_push(struct IO_Event_Selector *backend, VALUE fiber)
 {
 	struct IO_Event_Selector_Queue *waiting = malloc(sizeof(struct IO_Event_Selector_Queue));
+	assert(waiting);
 	
 	waiting->head = NULL;
 	waiting->tail = NULL;
 	waiting->flags = IO_EVENT_SELECTOR_QUEUE_INTERNAL;
+	
 	waiting->fiber = fiber;
+	RB_OBJ_WRITTEN(backend->self, Qundef, fiber);
 	
 	queue_push(backend, waiting);
 }

@@ -292,7 +300,7 @@ void IO_Event_Selector_queue_pop(struct IO_Event_Selector *backend, struct IO_Ev
 	
 	if (ready->flags & IO_EVENT_SELECTOR_QUEUE_FIBER) {
 		IO_Event_Selector_fiber_transfer(ready->fiber, 0, NULL);
-	} else {
+	} else if (ready->flags & IO_EVENT_SELECTOR_QUEUE_INTERNAL) {
 		VALUE fiber = ready->fiber;
 		queue_pop(backend, ready);
 		free(ready);

@@ -300,6 +308,8 @@ void IO_Event_Selector_queue_pop(struct IO_Event_Selector *backend, struct IO_Ev
 		if (RTEST(rb_funcall(fiber, id_alive_p, 0))) {
 			rb_funcall(fiber, id_transfer, 0);
 		}
+	} else {
+		rb_raise(rb_eRuntimeError, "Unknown queue type!");
 	}
 }
 

@@ -307,6 +317,8 @@ int IO_Event_Selector_queue_flush(struct IO_Event_Selector *backend)
 {
 	int count = 0;
 	
+	// During iteration of the queue, the same item may be re-queued. If we don't handle this correctly, we may end up in an infinite loop. So, to avoid this situation, we keep note of the current head of the queue and break the loop if we reach the same item again.
+	
 	// Get the current tail and head of the queue:
 	struct IO_Event_Selector_Queue *waiting = backend->waiting;
 	if (DEBUG) fprintf(stderr, "IO_Event_Selector_queue_flush waiting = %p\n", waiting);
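The long comment added to `IO_Event_Selector_queue_flush` explains the guard against items that re-queue themselves mid-flush. A simplified sketch of that guard is shown below; it reuses the field and function names from this diff but is not the gem's exact loop body:

```c
// Simplified sketch of the re-queue guard: remember which item was at the
// head of the queue when the flush started, process from the tail (ready)
// towards the head, and stop once that original head has been handled.
// Anything queued after it was re-queued during this flush and will be
// picked up by a later call, so the loop cannot spin forever.
static int queue_flush_sketch(struct IO_Event_Selector *backend)
{
	int count = 0;
	
	// The head of the queue at the start of the flush:
	struct IO_Event_Selector_Queue *waiting = backend->waiting;
	
	while (backend->ready) {
		struct IO_Event_Selector_Queue *ready = backend->ready;
		int final = (ready == waiting);
		
		IO_Event_Selector_queue_pop(backend, ready);
		count += 1;
		
		if (final) break;
	}
	
	return count;
}
```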
data/ext/io/event/selector/selector.h
CHANGED
@@ -95,27 +95,29 @@ struct IO_Event_Selector_Queue {
 };
 
 struct IO_Event_Selector {
+	VALUE self;
 	VALUE loop;
 	
-	
-	
-	// Append to waiting.
+	// Append to waiting (front/head of queue).
 	struct IO_Event_Selector_Queue *waiting;
-	// Process from ready.
+	// Process from ready (back/tail of queue).
 	struct IO_Event_Selector_Queue *ready;
 };
 
 static inline
-void IO_Event_Selector_initialize(struct IO_Event_Selector *backend, VALUE loop) {
-	backend->
+void IO_Event_Selector_initialize(struct IO_Event_Selector *backend, VALUE self, VALUE loop) {
+	RB_OBJ_WRITE(self, &backend->self, self);
+	RB_OBJ_WRITE(self, &backend->loop, loop);
 	backend->waiting = NULL;
 	backend->ready = NULL;
 }
 
 static inline
 void IO_Event_Selector_mark(struct IO_Event_Selector *backend) {
+	rb_gc_mark_movable(backend->self);
 	rb_gc_mark_movable(backend->loop);
 	
+	// Walk backwards through the ready queue:
 	struct IO_Event_Selector_Queue *ready = backend->ready;
 	while (ready) {
 		rb_gc_mark_movable(ready->fiber);

@@ -125,6 +127,7 @@ void IO_Event_Selector_mark(struct IO_Event_Selector *backend) {
 
 static inline
 void IO_Event_Selector_compact(struct IO_Event_Selector *backend) {
+	backend->self = rb_gc_location(backend->self);
 	backend->loop = rb_gc_location(backend->loop);
 	
 	struct IO_Event_Selector_Queue *ready = backend->ready;
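Storing `self` in the backend is what makes the selectors write-barrier friendly: with `RUBY_TYPED_WB_PROTECTED` set on the data types, every `VALUE` written into the C struct must go through `RB_OBJ_WRITE` (or be announced with `RB_OBJ_WRITTEN`, as `selector.c` now does for queued fibers), so the GC can track references from the old-generation selector object. A minimal sketch of the same pattern for an unrelated, illustrative TypedData class:

```c
// Sketch of a write-barrier-protected TypedData object (illustrative names):
// the struct holds a VALUE, marks it as movable, updates it during
// compaction, and routes every store through RB_OBJ_WRITE.
#include <ruby.h>

struct Example {
	VALUE target;
};

static void Example_mark(void *data)
{
	struct Example *example = data;
	rb_gc_mark_movable(example->target);
}

static void Example_compact(void *data)
{
	struct Example *example = data;
	example->target = rb_gc_location(example->target);
}

static const rb_data_type_t Example_Type = {
	.wrap_struct_name = "Example",
	.function = {
		.dmark = Example_mark,
		.dcompact = Example_compact,
		.dfree = RUBY_TYPED_DEFAULT_FREE,
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
};

static VALUE Example_allocate(VALUE klass)
{
	struct Example *example = NULL;
	VALUE instance = TypedData_Make_Struct(klass, struct Example, &Example_Type, example);
	
	example->target = Qnil; // Immediate values don't need a write barrier.
	
	return instance;
}

static VALUE Example_target_set(VALUE self, VALUE target)
{
	struct Example *example = NULL;
	TypedData_Get_Struct(self, struct Example, &Example_Type, example);
	
	// The write barrier records that `self` now references `target`:
	RB_OBJ_WRITE(self, &example->target, target);
	
	return target;
}

void Init_example(void)
{
	VALUE Example = rb_define_class("Example", rb_cObject);
	
	rb_define_alloc_func(Example, Example_allocate);
	rb_define_method(Example, "target=", Example_target_set, 1);
}
```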
data/ext/io/event/selector/uring.c
CHANGED
@@ -37,8 +37,6 @@ enum {
 	DEBUG_COMPLETION = 0,
 };
 
-static VALUE IO_Event_Selector_URing = Qnil;
-
 enum {URING_ENTRIES = 64};
 
 #pragma mark - Data Type

@@ -152,7 +150,7 @@ static const rb_data_type_t IO_Event_Selector_URing_Type = {
 		.dsize = IO_Event_Selector_URing_Type_size,
 	},
 	.data = NULL,
-	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 inline static

@@ -228,7 +226,7 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
 	struct IO_Event_Selector_URing *selector = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 	
-	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	IO_Event_Selector_initialize(&selector->backend, self, Qnil);
 	selector->ring.ring_fd = -1;
 	
 	selector->pending = 0;

@@ -249,7 +247,7 @@ VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
 	struct IO_Event_Selector_URing *selector = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 	
-	IO_Event_Selector_initialize(&selector->backend, loop);
+	IO_Event_Selector_initialize(&selector->backend, self, loop);
 	int result = io_uring_queue_init(URING_ENTRIES, &selector->ring, 0);
 	
 	if (result < 0) {

@@ -1094,8 +1092,7 @@ VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
 #pragma mark - Native Methods
 
 void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
-	IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
-	rb_gc_register_mark_object(IO_Event_Selector_URing);
+	VALUE IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
 	
 	rb_define_alloc_func(IO_Event_Selector_URing, IO_Event_Selector_URing_allocate);
 	rb_define_method(IO_Event_Selector_URing, "initialize", IO_Event_Selector_URing_initialize, 1);
data/lib/io/event/version.rb
CHANGED
data/license.md
CHANGED
@@ -9,6 +9,8 @@ Copyright, 2022, by Bruno Sutic.
 Copyright, 2023, by Math Ieu.
 Copyright, 2024, by Pavel Rosický.
 Copyright, 2024, by Anthony Ross.
+Copyright, 2024, by Shizuo Fujita.
+Copyright, 2024, by Jean Boussier.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.
+  version: 1.7.0
 platform: ruby
 authors:
 - Samuel Williams

@@ -45,7 +45,7 @@ cert_chain:
   Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
   voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
   -----END CERTIFICATE-----
-date: 2024-10-
+date: 2024-10-04 00:00:00.000000000 Z
 dependencies: []
 description:
 email:
metadata.gz.sig
CHANGED
Binary file