renet 0.1.14 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/README +6 -16
- data/ext/renet/callbacks.c +53 -0
- data/ext/renet/compress.c +654 -0
- data/ext/renet/enet/callbacks.h +27 -0
- data/ext/renet/enet/enet.h +592 -0
- data/ext/renet/enet/list.h +43 -0
- data/ext/renet/enet/protocol.h +198 -0
- data/ext/renet/enet/time.h +18 -0
- data/ext/renet/enet/types.h +13 -0
- data/ext/renet/enet/unix.h +47 -0
- data/ext/renet/enet/utility.h +12 -0
- data/ext/renet/enet/win32.h +57 -0
- data/ext/renet/extconf.rb +5 -16
- data/ext/renet/host.c +492 -0
- data/ext/renet/list.c +75 -0
- data/ext/renet/packet.c +165 -0
- data/ext/renet/peer.c +1004 -0
- data/ext/renet/protocol.c +1913 -0
- data/ext/renet/renet.c +1 -0
- data/ext/renet/renet_connection.c +383 -208
- data/ext/renet/renet_connection.h +3 -3
- data/ext/renet/renet_server.c +263 -159
- data/ext/renet/renet_server.h +5 -5
- data/ext/renet/unix.c +557 -0
- data/ext/renet/win32.c +478 -0
- data/lib/renet.rb +1 -16
- metadata +33 -16
data/ext/renet/renet_connection.h
CHANGED
@@ -27,7 +27,7 @@ typedef struct {
   ENetEvent* event;
   ENetAddress* address;
   int channels;
-  int
+  int online;
 } Connection;
 
 void init_renet_connection();
@@ -48,10 +48,10 @@ void renet_connection_execute_on_connection();
 
 VALUE renet_connection_on_packet_receive(VALUE self, VALUE method);
 /*VALUE renet_connection_on_packet_receive(int argc, VALUE *argv, VALUE self);*/
-void renet_connection_execute_on_packet_receive(
+void renet_connection_execute_on_packet_receive(VALUE self, ENetPacket * const packet, enet_uint8 channelID);
 
 VALUE renet_connection_on_disconnection(VALUE self, VALUE method);
-void renet_connection_execute_on_disconnection();
+void renet_connection_execute_on_disconnection(VALUE self);
 
 VALUE renet_connection_online(VALUE self);
 
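The two execute_* callbacks in renet_connection.h now receive the connection object itself, and the packet callback gets the raw ENetPacket plus its channel id. The matching implementation in data/ext/renet/renet_connection.c is not shown in this excerpt; as a rough sketch, it presumably mirrors renet_server_execute_on_packet_receive from the renet_server.c diff below (copy the payload into a Ruby string, destroy the packet, then invoke the stored handler). The handler arity and the omission of any @lock handling here are assumptions, not taken from the gem:

#include <ruby.h>
#include "enet/enet.h"

/* Hypothetical sketch only; modelled on renet_server_execute_on_packet_receive
   shown later in this diff. */
void renet_connection_execute_on_packet_receive(VALUE self, ENetPacket * const packet, enet_uint8 channelID)
{
  VALUE method = rb_iv_get(self, "@on_packet_receive");
  /* copy the payload before destroying the packet so the Ruby string
     stays valid once ENet reclaims the buffer */
  VALUE data = rb_str_new((char const *)packet->data, packet->dataLength);
  enet_packet_destroy(packet);

  if (method != Qnil)
  {
    /* calling the handler with (data, channel) is an assumption; the real code
       may also juggle the connection's mutex the way the server code does */
    rb_funcall(method, rb_intern("call"), 2, data, UINT2NUM(channelID));
  }
}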
data/ext/renet/renet_server.c
CHANGED
@@ -18,11 +18,10 @@
 
 #include "renet_server.h"
 
-VALUE cENetServer;
 
 void init_renet_server()
 {
-  cENetServer = rb_define_class_under(mENet, "Server", rb_cObject);
+  VALUE cENetServer = rb_define_class_under(mENet, "Server", rb_cObject);
   rb_define_alloc_func(cENetServer, renet_server_allocate);
 
   rb_define_method(cENetServer, "initialize", renet_server_initialize, 5);
@@ -40,6 +39,11 @@ void init_renet_server()
 
   rb_define_method(cENetServer, "max_clients", renet_server_max_clients, 0);
   rb_define_method(cENetServer, "clients_count", renet_server_clients_count, 0);
+
+  rb_define_attr(cENetServer, "total_sent_data", 1, 1);
+  rb_define_attr(cENetServer, "total_received_data", 1, 1);
+  rb_define_attr(cENetServer, "total_sent_packets", 1, 1);
+  rb_define_attr(cENetServer, "total_received_packets", 1, 1);
 }
 
 VALUE renet_server_allocate(VALUE self)
@@ -54,227 +58,327 @@ VALUE renet_server_allocate(VALUE self)
 
 void renet_server_deallocate(void* server)
 {
+  free(((Server*)server)->event);
+  free(((Server*)server)->address);
+  enet_host_destroy(((Server*)server)->host);
+  free((Server*)server);
 }
 
 VALUE renet_server_initialize(VALUE self, VALUE port, VALUE n_peers, VALUE channels, VALUE download, VALUE upload)
 {
+  rb_funcall(mENet, rb_intern("initialize"), 0);
+  Server* server;
+  Data_Get_Struct(self, Server, server);
+
+  VALUE lock = rb_mutex_new();
+  rb_iv_set(self, "@lock", lock);
+  rb_mutex_lock(lock);
+
+  server->address->host = ENET_HOST_ANY;
+  server->address->port = NUM2UINT(port);
+  server->channels = NUM2UINT(channels);
+  server->host = enet_host_create(server->address, NUM2UINT(n_peers), server->channels, NUM2UINT(download), NUM2UINT(upload));
+  if (server->host == NULL)
   {
-  rb_define_attr(cENetServer, "total_received_packets", 1, 1);
-  return self;
+    rb_raise(rb_eStandardError, "Cannot create server");
+  }
+  rb_iv_set(self, "@total_sent_data", INT2FIX(0));
+  rb_iv_set(self, "@total_received_data", INT2FIX(0));
+  rb_iv_set(self, "@total_sent_packets", INT2FIX(0));
+  rb_iv_set(self, "@total_received_packets", INT2FIX(0));
+
+  rb_mutex_unlock(lock);
+
+  return self;
 }
 
 VALUE renet_server_disconnect_client(VALUE self, VALUE peer_id)
 {
+  Server* server;
+  Data_Get_Struct(self, Server, server);
+  VALUE lock = rb_iv_get(self, "@lock");
+  rb_mutex_lock(lock);
+  enet_peer_disconnect_now(&(server->host->peers[NUM2UINT(peer_id)]), 0);
+  renet_server_execute_on_disconnection(self, peer_id);
+  rb_mutex_unlock(lock);
+  return Qtrue;
 }
 
 VALUE renet_server_send_packet(VALUE self, VALUE peer_id, VALUE data, VALUE flag, VALUE channel)
 {
+  Server* server;
+  Data_Get_Struct(self, Server, server);
+  VALUE lock = rb_iv_get(self, "@lock");
+  rb_mutex_lock(lock);
+  Check_Type(data, T_STRING);
+  char* cdata = StringValuePtr(data);
+  ENetPacket* packet;
+  if (flag == Qtrue)
+  {
+    packet = enet_packet_create(cdata, RSTRING_LEN(data) + 1, ENET_PACKET_FLAG_RELIABLE);
+  }
+  else
+  {
+    packet = enet_packet_create(cdata, RSTRING_LEN(data) + 1, 0);
+  }
   enet_peer_send(&(server->host->peers[NUM2UINT(peer_id)]), NUM2UINT(channel), packet);
+  rb_mutex_unlock(lock);
+  return Qnil;
 }
 
 VALUE renet_server_broadcast_packet(VALUE self, VALUE data, VALUE flag, VALUE channel)
 {
+  Server* server;
+  Data_Get_Struct(self, Server, server);
+  VALUE lock = rb_iv_get(self, "@lock");
+  rb_mutex_lock(lock);
+  Check_Type(data, T_STRING);
+  char* cdata = StringValuePtr(data);
+  ENetPacket* packet;
+  if (flag == Qtrue)
+  {
+    packet = enet_packet_create(cdata, RSTRING_LEN(data) + 1, ENET_PACKET_FLAG_RELIABLE);
+  }
+  else
+  {
+    packet = enet_packet_create(cdata, RSTRING_LEN(data) + 1, 0);
+  }
   enet_host_broadcast(server->host, NUM2UINT(channel), packet);
+  rb_mutex_unlock(lock);
+  return Qnil;
 }
 
 VALUE renet_server_send_queued_packets(VALUE self)
 {
+  Server* server;
+  Data_Get_Struct(self, Server, server);
+  VALUE lock = rb_iv_get(self, "@lock");
+  rb_mutex_lock(lock);
   enet_host_flush(server->host);
+  rb_mutex_unlock(lock);
   return Qnil;
 }
 
+/* These let us release the global interpreter lock if while we're waiting for
+   enet_host_service to finish:
+
+   CallbackData
+   do_service
+   service
+*/
+typedef struct
+{
+  Server * server;
+  enet_uint32 timeout;
+} CallbackData;
+
+static VALUE do_service(void *data)
+{
+  CallbackData* temp_data = data;
+  int result = enet_host_service(temp_data->server->host, temp_data->server->event, temp_data->timeout);
+  // this will do weird things with the negative numbers but we'll undo it on the other side
+  return (unsigned int)result;
+}
+
+static int service(VALUE self, Server* server, enet_uint32 timeout)
+{
+  CallbackData data = {server, timeout};
+  VALUE result;
+  if (timeout > 0)
+  {
+    result = rb_thread_blocking_region(do_service, &data, RUBY_UBF_IO, 0);
+  }
+  else
+  {
+    result = do_service(&data);
+  }
+  // undo our cast to VALUE in a way that will properly restore negative numbers
+  unsigned int fix_negatives = (unsigned int)result;
+  return (int)fix_negatives;
+}
+
 VALUE renet_server_update(VALUE self, VALUE timeout)
 {
+  Server* server;
+  Data_Get_Struct(self, Server, server);
+  VALUE lock = rb_iv_get(self, "@lock");
+  rb_mutex_lock(lock);
+  int peer_id;
+
+  /* wait up to timeout milliseconds for a packet */
+  if (service(self, server, NUM2UINT(timeout)) > 0)
+  {
+    do
     {
+      switch (server->event->type)
+      {
+        case ENET_EVENT_TYPE_NONE:
+          break;
+
+        case ENET_EVENT_TYPE_CONNECT:
+          server->n_clients += 1;
+          enet_address_get_host_ip(&(server->event->peer->address), server->conn_ip, 20);
+          peer_id = (int)(server->event->peer - server->host->peers);
+          renet_server_execute_on_connection(self, INT2NUM(peer_id), rb_str_new2(server->conn_ip));
+          break;
+
+        case ENET_EVENT_TYPE_RECEIVE:
+          peer_id = (int)(server->event->peer - server->host->peers);
+          renet_server_execute_on_packet_receive(self, INT2NUM(peer_id), server->event->packet, server->event->channelID);
+          break;
+
+        case ENET_EVENT_TYPE_DISCONNECT:
+          server->n_clients -= 1;
+          peer_id = (int)(server->event->peer - server->host->peers);
+          server->event->peer->data = NULL;
+          renet_server_execute_on_disconnection(self, INT2NUM(peer_id));
+          break;
+      }
     }
+    while (service(self, server, 0) > 0);
+  }
+
+  /* we are unlocking now because it's important to unlock before going
+     back into ruby land (which rb_funcall will do). If we don't then an
+     exception can leave the locks in an inconsistent state */
+  rb_mutex_unlock(lock);
+
+  {
+    VALUE total = rb_iv_get(self, "@total_sent_data");
+    VALUE result = rb_funcall( total
+                             , rb_intern("+")
+                             , 1
+                             , UINT2NUM(server->host->totalSentData));
+    rb_iv_set(self, "@total_sent_data", result);
+    server->host->totalSentData = 0;
+  }
+
+  {
+    VALUE total = rb_iv_get(self, "@total_received_data");
+    VALUE result = rb_funcall( total
+                             , rb_intern("+")
+                             , 1
+                             , UINT2NUM(server->host->totalReceivedData));
+    rb_iv_set(self, "@total_received_data", result);
+    server->host->totalReceivedData = 0;
+  }
 
-  return Qtrue;
+  {
+    VALUE total = rb_iv_get(self, "@total_sent_packets");
+    VALUE result = rb_funcall( total
+                             , rb_intern("+")
+                             , 1
+                             , UINT2NUM(server->host->totalSentPackets));
+    rb_iv_set(self, "@total_sent_packets", result);
+    server->host->totalSentPackets = 0;
+  }
+
+  {
+    VALUE total = rb_iv_get(self, "@total_received_packets");
+    VALUE result = rb_funcall( total
+                             , rb_intern("+")
+                             , 1
+                             , UINT2NUM(server->host->totalReceivedPackets));
+    rb_iv_set(self, "@total_received_packets", result);
+    server->host->totalReceivedPackets = 0;
+  }
+
+  return Qtrue;
 }
 
 VALUE renet_server_use_compression(VALUE self, VALUE flag)
 {
+  Server* server;
+  Data_Get_Struct(self, Server, server);
+  VALUE lock = rb_iv_get(self, "@lock");
+  rb_mutex_lock(lock);
+  if (flag == Qtrue)
+  {
+    enet_host_compress_with_range_coder(server->host);
+  }
+  else
+  {
+    enet_host_compress(server->host, NULL);
+  }
+  rb_mutex_unlock(lock);
+  return Qnil;
 }
 
 VALUE renet_server_on_connection(VALUE self, VALUE method)
 {
+  /*VALUE method = rb_funcall(rb_cObject, rb_intern("method"), 1, symbol);*/
+  rb_iv_set(self, "@on_connection", method);
+  return Qnil;
 }
 
-void renet_server_execute_on_connection(VALUE peer_id, VALUE ip)
+void renet_server_execute_on_connection(VALUE self, VALUE peer_id, VALUE ip)
 {
+  VALUE method = rb_iv_get(self, "@on_connection");
+  if (method != Qnil)
+  {
+    VALUE lock = rb_iv_get(self, "@lock");
+    rb_mutex_unlock(lock);
+    rb_funcall(method, rb_intern("call"), 2, peer_id, ip);
+    rb_mutex_lock(lock);
+  }
 }
 
 VALUE renet_server_on_packet_receive(VALUE self, VALUE method)
 {
+
+  /*VALUE method = rb_funcall(rb_cObject, rb_intern("method"), 1, symbol);*/
+  rb_iv_set(self, "@on_packet_receive", method);
+  return Qnil;
 }
 
-void renet_server_execute_on_packet_receive(VALUE peer_id,
+void renet_server_execute_on_packet_receive(VALUE self, VALUE peer_id, ENetPacket * const packet, enet_uint8 channelID)
 {
+  VALUE method = rb_iv_get(self, "@on_packet_receive");
+  VALUE data = rb_str_new((char const *)packet->data, packet->dataLength);
+  /* marshal data and then destroy packet
+     if we don't do this now the packet might become invalid before we get
+     back and we'd get a segfault when we attempt to destroy */
+  enet_packet_destroy(packet);
+
+  if (method != Qnil)
+  {
+    VALUE lock = rb_iv_get(self, "@lock");
+    rb_mutex_unlock(lock);
+    rb_funcall(method, rb_intern("call"), 3, peer_id, data, UINT2NUM(channelID));
+    rb_mutex_lock(lock);
+  }
 }
 
 VALUE renet_server_on_disconnection(VALUE self, VALUE method)
 {
+  /*VALUE method = rb_funcall(rb_cObject, rb_intern("method"), 1, symbol);*/
+  rb_iv_set(self, "@on_disconnection", method);
+  return Qnil;
 }
 
-void renet_server_execute_on_disconnection(VALUE peer_id)
+void renet_server_execute_on_disconnection(VALUE self, VALUE peer_id)
 {
+  VALUE method = rb_iv_get(self, "@on_disconnection");
+  if (method != Qnil)
+  {
+    VALUE lock = rb_iv_get(self, "@lock");
+    rb_mutex_unlock(lock);
+    rb_funcall(method, rb_intern("call"), 1, peer_id);
+    rb_mutex_lock(lock);
+  }
 }
 
 VALUE renet_server_max_clients(VALUE self)
 {
+  Server* server;
+  Data_Get_Struct(self, Server, server);
+  return UINT2NUM(server->host->peerCount);
 }
 
 VALUE renet_server_clients_count(VALUE self)
 {
+  Server* server;
+  Data_Get_Struct(self, Server, server);
+  return UINT2NUM(server->n_clients);
 }
 
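The negative-number comments in do_service and service above describe a real wrinkle: enet_host_service returns a negative int on failure, but rb_thread_blocking_region can only hand back a VALUE, so the result is round-tripped through unsigned casts. A standalone sketch of that round-trip, assuming the usual 32-bit two's-complement int and modelling VALUE as uintptr_t (which matches common Ruby builds), is:

#include <assert.h>
#include <stdint.h>

int main(void)
{
  int result = -1;                            /* enet_host_service's failure return */
  uintptr_t as_value = (unsigned int)result;  /* widened through the VALUE return type: 0xFFFFFFFF */
  unsigned int fix_negatives = (unsigned int)as_value;
  int restored = (int)fix_negatives;          /* back to -1 on two's-complement platforms */
  assert(restored == -1);
  return 0;
}

Nothing is lost in the narrowing: in practice enet_host_service returns only small event counts, 0, or -1, and VALUE is at least as wide as unsigned int.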