ffi-tox 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/ProjectTox-Core/CMakeLists.txt +28 -0
- data/ProjectTox-Core/COPYING +674 -0
- data/ProjectTox-Core/INSTALL.md +91 -0
- data/ProjectTox-Core/README.md +54 -0
- data/ProjectTox-Core/core/CMakeLists.txt +17 -0
- data/ProjectTox-Core/core/DHT.c +1104 -0
- data/ProjectTox-Core/core/DHT.h +111 -0
- data/ProjectTox-Core/core/LAN_discovery.c +79 -0
- data/ProjectTox-Core/core/LAN_discovery.h +50 -0
- data/ProjectTox-Core/core/Lossless_UDP.c +749 -0
- data/ProjectTox-Core/core/Lossless_UDP.h +106 -0
- data/ProjectTox-Core/core/Messenger.c +581 -0
- data/ProjectTox-Core/core/Messenger.h +157 -0
- data/ProjectTox-Core/core/friend_requests.c +131 -0
- data/ProjectTox-Core/core/friend_requests.h +51 -0
- data/ProjectTox-Core/core/net_crypto.c +564 -0
- data/ProjectTox-Core/core/net_crypto.h +134 -0
- data/ProjectTox-Core/core/network.c +188 -0
- data/ProjectTox-Core/core/network.h +134 -0
- data/ProjectTox-Core/other/CMakeLists.txt +9 -0
- data/ProjectTox-Core/other/DHT_bootstrap.c +139 -0
- data/ProjectTox-Core/testing/CMakeLists.txt +18 -0
- data/ProjectTox-Core/testing/DHT_cryptosendfiletest.c +228 -0
- data/ProjectTox-Core/testing/DHT_sendfiletest.c +176 -0
- data/ProjectTox-Core/testing/DHT_test.c +182 -0
- data/ProjectTox-Core/testing/Lossless_UDP_testclient.c +214 -0
- data/ProjectTox-Core/testing/Lossless_UDP_testserver.c +201 -0
- data/ProjectTox-Core/testing/Messenger_test.c +145 -0
- data/ProjectTox-Core/testing/misc_tools.c +40 -0
- data/ProjectTox-Core/testing/misc_tools.h +29 -0
- data/ProjectTox-Core/testing/nTox.c +381 -0
- data/ProjectTox-Core/testing/nTox.h +50 -0
- data/ProjectTox-Core/testing/nTox_win32.c +323 -0
- data/ProjectTox-Core/testing/nTox_win32.h +31 -0
- data/ProjectTox-Core/testing/rect.py +45 -0
- data/ext/ffi-tox/extconf.rb +16 -0
- data/interfaces/libtox.i +18 -0
- data/lib/ffi-tox/libtox.rb +37 -0
- data/lib/ffi-tox.rb +1 -0
- metadata +116 -0
data/ProjectTox-Core/core/Lossless_UDP.c
@@ -0,0 +1,749 @@
/* Lossless_UDP.c
 *
 * An implementation of the Lossless_UDP protocol as seen in docs/Lossless_UDP.txt
 *
 * Copyright (C) 2013 Tox project All Rights Reserved.
 *
 * This file is part of Tox.
 *
 * Tox is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Tox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Tox. If not, see <http://www.gnu.org/licenses/>.
 *
 */

/* TODO: clean this file a bit.
   There are a couple of useless variables to get rid of. */
#include "Lossless_UDP.h"

/* maximum number of data packets in the sent and receive queues. */
#define MAX_QUEUE_NUM 16

/* maximum length of the data in the data packets */
/* #define MAX_DATA_SIZE 1024 */ /* defined in Lossless_UDP.h */

/* maximum number of data packets in the buffer */
#define BUFFER_PACKET_NUM (16-1)

/* Lossless UDP connection timeout.
   The timeout per connection is randomly set between CONNEXION_TIMEOUT and 2*CONNEXION_TIMEOUT. */
#define CONNEXION_TIMEOUT 5

/* initial number of sync/handshake packets to send per second. */
#define SYNC_RATE 2

/* initial send rate of data packets. */
#define DATA_SYNC_RATE 30
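
/* How these constants are used (explanatory note, derived from the code further below):
   current_time() is in microseconds, so a rate of R packets per second becomes an interval
   of 1000000UL / R microseconds between sends in doSYNC()/doData(). For example, SYNC_RATE 2
   means roughly one SYNC packet every 500000 us while idle, and DATA_SYNC_RATE 30 means one
   data packet roughly every 33333 us right after the handshake, before adjustRates() starts
   tuning the rates. The per-connection timeout ends up between 5 and 9 seconds. */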

typedef struct {
    uint8_t  data[MAX_DATA_SIZE];
    uint16_t size;
} Data;

typedef struct {
    IP_Port ip_port;
    uint8_t status; /* 0 if connection is dead, 1 if attempting handshake,
                       2 if handshake is done (we start sending SYNC packets),
                       3 if we are sending SYNC packets and can send data,
                       4 if the connection has timed out. */

    uint8_t inbound; /* 1 or 2 if connection was initiated by someone else, 0 if not.
                        2 if incoming_connection() has not returned it yet, 1 if it has. */

    uint16_t SYNC_rate;     /* current SYNC packet send rate, packets per second. */
    uint16_t data_rate;     /* current data packet send rate, packets per second. */
    uint64_t last_SYNC;     /* time at which our last SYNC packet was sent. */
    uint64_t last_sent;     /* time at which our last data or handshake packet was sent. */
    uint64_t last_recvSYNC; /* time at which we last received a SYNC packet from the other */
    uint64_t last_recvdata; /* time at which we last received a DATA packet from the other */
    uint64_t killat;        /* time at which to kill the connection */
    Data sendbuffer[MAX_QUEUE_NUM]; /* packet send buffer. */
    Data recvbuffer[MAX_QUEUE_NUM]; /* packet receive buffer. */
    uint32_t handshake_id1;
    uint32_t handshake_id2;
    uint32_t recv_packetnum;  /* number of data packets received (also used as handshake_id1) */
    uint32_t orecv_packetnum; /* number of packets received by the other peer */
    uint32_t sent_packetnum;  /* number of data packets sent */
    uint32_t osent_packetnum; /* number of packets sent by the other peer. */
    uint32_t sendbuff_packetnum; /* number of latest packet written onto the sendbuffer */
    uint32_t successful_sent;    /* we know all packets before that number were successfully sent */
    uint32_t successful_read;    /* packet number of last packet read with the read_packet function */
    uint32_t req_packets[BUFFER_PACKET_NUM]; /* list of currently requested packet numbers (by the other person) */
    uint16_t num_req_paquets;                /* total number of currently requested packets (by the other person) */
    uint8_t recv_counter;
    uint8_t send_counter;
    uint8_t timeout; /* connection timeout in seconds. */
} Connection;

static Connection *connections;

static uint32_t connections_length; /* Length of connections array */
static uint32_t connections_number; /* Number of connections in connections array */

#define MAX_CONNECTIONS connections_length
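
/* Note on the buffer sizes above (explanatory, derived from how the buffers are used below):
   packet numbers index the 16-slot sendbuffer/recvbuffer modulo MAX_QUEUE_NUM, and
   BUFFER_PACKET_NUM (15) keeps the number of packets in flight strictly below the number of
   slots, so an unread slot is never overwritten. For example, with recv_packetnum ==
   successful_read == 100 the receiver only accepts packets 100..114, which map to 15
   distinct slots (100 % 16 == 4 ... 114 % 16 == 2). */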

/* Functions */

/* get connection id from IP_Port.
   return -1 if there is no connection like the one we are looking for.
   return the id if it was found. */
int getconnection_id(IP_Port ip_port)
{
    uint32_t i;
    for (i = 0; i < MAX_CONNECTIONS; ++i) {
        if (connections[i].ip_port.ip.i == ip_port.ip.i &&
            connections[i].ip_port.port == ip_port.port && connections[i].status > 0)
            return i;
    }
    return -1;
}

/* table of random numbers used below. */
static uint32_t randtable[6][256];

/* generate a handshake_id which depends on the ip_port.
   this function will always give one unique handshake_id per ip_port.
   TODO: make this better */
uint32_t handshake_id(IP_Port source)
{
    uint32_t id = 0, i;
    for (i = 0; i < 6; ++i) {
        if (randtable[i][((uint8_t *)&source)[i]] == 0)
            randtable[i][((uint8_t *)&source)[i]] = random_int();
        id ^= randtable[i][((uint8_t *)&source)[i]];
    }
    if (id == 0) /* id can't be zero */
        id = 1;
    return id;
}

/* change the handshake id associated with that ip_port.
   TODO: make this better */
void change_handshake(IP_Port source)
{
    uint8_t rand = random_int() % 4;
    randtable[rand][((uint8_t *)&source)[rand]] = random_int();
}

/* initialize a new connection to ip_port.
   returns an integer corresponding to the connection id.
   return -1 if it could not initialize the connection.
   if there already was an existing connection to that ip_port return its number. */
int new_connection(IP_Port ip_port)
{
    int connect = getconnection_id(ip_port);
    if (connect != -1)
        return connect;

    if (connections_number == connections_length) {
        Connection *temp;
        temp = realloc(connections, sizeof(Connection) * (connections_length + 1));
        if (temp == NULL)
            return -1;
        memset(&temp[connections_length], 0, sizeof(Connection));
        ++connections_length;
        connections = temp;
    }

    uint32_t i;
    for (i = 0; i < MAX_CONNECTIONS; ++i) {
        if (connections[i].status == 0) {
            memset(&connections[i], 0, sizeof(Connection));
            connections[i].ip_port = ip_port;
            connections[i].status = 1;
            connections[i].inbound = 0;
            connections[i].handshake_id1 = handshake_id(ip_port);
            connections[i].sent_packetnum = connections[i].handshake_id1;
            connections[i].sendbuff_packetnum = connections[i].handshake_id1;
            connections[i].successful_sent = connections[i].handshake_id1;
            connections[i].SYNC_rate = SYNC_RATE;
            connections[i].data_rate = DATA_SYNC_RATE;
            connections[i].last_recvSYNC = current_time();
            connections[i].last_sent = current_time();
            connections[i].killat = ~0;
            connections[i].send_counter = 0;
            /* add randomness to timeout to prevent connections getting stuck in a loop. */
            connections[i].timeout = CONNEXION_TIMEOUT + rand() % CONNEXION_TIMEOUT;
            ++connections_number;
            return i;
        }
    }
    return -1;
}
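
/* Illustrative usage sketch (not part of the original file): how a caller could drive an
   outbound connection with the functions in this file. It assumes the prototypes from
   Lossless_UDP.h, that the network layer has already set up the socket, and that incoming
   UDP packets are being fed to LosslessUDP_handlepacket() elsewhere (otherwise the
   handshake can never complete). A real program would sleep between iterations instead of
   spinning. */
static void example_outbound_send(IP_Port remote, uint8_t *message, uint32_t message_length)
{
    int connection_id = new_connection(remote); /* reuses the connection if one already exists */
    if (connection_id == -1)
        return;

    /* status 1/2 means the handshake is still in progress; doLossless_UDP() keeps
       (re)sending handshake and SYNC packets until the peer answers or the connection
       times out (status 4). */
    while (is_connected(connection_id) == 1 || is_connected(connection_id) == 2)
        doLossless_UDP();

    if (is_connected(connection_id) == 3)                     /* fully connected */
        write_packet(connection_id, message, message_length); /* queued; doLossless_UDP() sends it */
    else
        kill_connection(connection_id);                       /* timed out or dead */
}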

/* initialize a new inbound connection from ip_port.
   returns an integer corresponding to the connection id.
   return -1 if it could not initialize the connection. */
int new_inconnection(IP_Port ip_port)
{
    if (getconnection_id(ip_port) != -1)
        return -1;

    if (connections_number == connections_length) {
        Connection *temp;
        temp = realloc(connections, sizeof(Connection) * (connections_length + 1));
        if (temp == NULL)
            return -1;
        memset(&temp[connections_length], 0, sizeof(Connection));
        ++connections_length;
        connections = temp;
    }

    uint32_t i;
    for (i = 0; i < MAX_CONNECTIONS; ++i) {
        if (connections[i].status == 0) {
            memset(&connections[i], 0, sizeof(Connection));
            connections[i].ip_port = ip_port;
            connections[i].status = 2;
            connections[i].inbound = 2;
            connections[i].SYNC_rate = SYNC_RATE;
            connections[i].data_rate = DATA_SYNC_RATE;
            connections[i].last_recvSYNC = current_time();
            connections[i].last_sent = current_time();
            /* add randomness to timeout to prevent connections getting stuck in a loop. */
            connections[i].timeout = CONNEXION_TIMEOUT + rand() % CONNEXION_TIMEOUT;
            /* if this connection isn't handled within the timeout, kill it. */
            connections[i].killat = current_time() + 1000000UL * connections[i].timeout;
            connections[i].send_counter = 127;
            ++connections_number;
            return i;
        }
    }
    return -1;
}

/* returns an integer corresponding to the next connection in our incoming connection list.
   return -1 if there are no new incoming connections in the list. */
int incoming_connection()
{
    uint32_t i;
    for (i = 0; i < MAX_CONNECTIONS; ++i) {
        if (connections[i].inbound == 2) {
            connections[i].inbound = 1;
            return i;
        }
    }
    return -1;
}
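
/* Illustrative usage sketch (not part of the original file): the accepting side, assuming
   the prototypes from Lossless_UDP.h. A server polls incoming_connection() from its main
   loop; each new inbound connection is returned exactly once (inbound goes from 2 to 1). */
static void example_accept_incoming(void)
{
    int connection_id = incoming_connection(); /* -1 if nothing new arrived */
    if (connection_id == -1)
        return;
    /* give the new peer a deadline instead of keeping it around forever; the application
       would normally remember connection_id somewhere and read from it later. */
    kill_connection_in(connection_id, 10);
}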

/* Try to free some memory from the connections array. */
static void free_connections()
{
    uint32_t i;
    for (i = connections_length; i != 0; --i)
        if (connections[i - 1].status != 0)
            break;

    if (connections_length == i)
        return;
    Connection *temp;
    temp = realloc(connections, sizeof(Connection) * i);
    if (temp == NULL && i != 0)
        return;
    connections = temp;
    connections_length = i;
}

/* return -1 if it could not kill the connection.
   return 0 if it was killed successfully. */
int kill_connection(int connection_id)
{
    if (connection_id >= 0 && connection_id < MAX_CONNECTIONS) {
        if (connections[connection_id].status > 0) {
            connections[connection_id].status = 0;
            change_handshake(connections[connection_id].ip_port);
            --connections_number;
            free_connections();
            return 0;
        }
    }
    return -1;
}

/* kill the connection after the given number of seconds.
   return -1 if it can not kill the connection.
   return 0 if it will kill it. */
int kill_connection_in(int connection_id, uint32_t seconds)
{
    if (connection_id >= 0 && connection_id < MAX_CONNECTIONS) {
        if (connections[connection_id].status > 0) {
            connections[connection_id].killat = current_time() + 1000000UL * seconds;
            return 0;
        }
    }
    return -1;
}

/* check if the connection is connected.
   return 0 if it is not.
   return 1 if attempting handshake.
   return 2 if handshake is done.
   return 3 if fully connected.
   return 4 if timed out and waiting to be killed. */
int is_connected(int connection_id)
{
    if (connection_id >= 0 && connection_id < MAX_CONNECTIONS)
        return connections[connection_id].status;
    return 0;
}

/* returns the ip_port of the corresponding connection. */
IP_Port connection_ip(int connection_id)
{
    if (connection_id >= 0 && connection_id < MAX_CONNECTIONS)
        return connections[connection_id].ip_port;
    IP_Port zero = {{{0}}, 0};
    return zero;
}

/* returns the number of packets in the queue waiting to be successfully sent. */
uint32_t sendqueue(int connection_id)
{
    return connections[connection_id].sendbuff_packetnum - connections[connection_id].successful_sent;
}

/* returns the number of packets in the queue waiting to be successfully read with read_packet(...) */
uint32_t recvqueue(int connection_id)
{
    return connections[connection_id].recv_packetnum - connections[connection_id].successful_read;
}

/* returns the id of the next packet in the queue.
   return -1 if there is no packet in the queue. */
char id_packet(int connection_id)
{
    if (recvqueue(connection_id) != 0 && connections[connection_id].status != 0)
        return connections[connection_id].recvbuffer[connections[connection_id].successful_read % MAX_QUEUE_NUM].data[0];
    return -1;
}

/* return 0 if there is no received data in the buffer.
   return length of received packet if successful */
int read_packet(int connection_id, uint8_t *data)
{
    if (recvqueue(connection_id) != 0) {
        uint16_t index = connections[connection_id].successful_read % MAX_QUEUE_NUM;
        uint16_t size = connections[connection_id].recvbuffer[index].size;
        memcpy(data, connections[connection_id].recvbuffer[index].data, size);
        ++connections[connection_id].successful_read;
        connections[connection_id].recvbuffer[index].size = 0;
        return size;
    }
    return 0;
}

/* return 0 if data could not be put in packet queue
   return 1 if data was put into the queue */
int write_packet(int connection_id, uint8_t *data, uint32_t length)
{
    if (length > MAX_DATA_SIZE)
        return 0;
    if (length == 0)
        return 0;
    if (sendqueue(connection_id) < BUFFER_PACKET_NUM) {
        uint32_t index = connections[connection_id].sendbuff_packetnum % MAX_QUEUE_NUM;
        memcpy(connections[connection_id].sendbuffer[index].data, data, length);
        connections[connection_id].sendbuffer[index].size = length;
        connections[connection_id].sendbuff_packetnum++;
        return 1;
    }
    return 0;
}
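
/* Illustrative usage sketch (not part of the original file): draining the receive queue.
   Assumes the prototypes from Lossless_UDP.h and that doLossless_UDP() plus the packet
   handler are being called elsewhere so the queue actually fills up. The buffer must be
   MAX_DATA_SIZE bytes because read_packet() copies a whole stored packet. */
static void example_drain_receive_queue(int connection_id)
{
    uint8_t data[MAX_DATA_SIZE];
    while (recvqueue(connection_id) != 0) {
        int length = read_packet(connection_id, data);
        if (length == 0)
            break;
        /* hand data and length to the layer above. */
    }
}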

/* put the packet numbers that we are missing in requested and return the number. */
uint32_t missing_packets(int connection_id, uint32_t *requested)
{
    uint32_t number = 0;
    uint32_t i;
    uint32_t temp;
    if (recvqueue(connection_id) >= (BUFFER_PACKET_NUM - 1)) /* don't request packets if the buffer is full. */
        return 0;
    for (i = connections[connection_id].recv_packetnum; i != connections[connection_id].osent_packetnum; i++) {
        if (connections[connection_id].recvbuffer[i % MAX_QUEUE_NUM].size == 0) {
            temp = htonl(i);
            memcpy(requested + number, &temp, 4);
            ++number;
        }
    }
    if (number == 0)
        connections[connection_id].recv_packetnum = connections[connection_id].osent_packetnum;
    return number;
}

/* Packet sending functions.
   One per packet type.
   see docs/Lossless_UDP.txt for more information. */
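
/* Wire formats produced by the three send functions below (descriptive summary of the code
   that follows; docs/Lossless_UDP.txt is the authoritative reference):

     handshake (id 16): [ 16 ][ handshake_id1 (4 bytes) ][ handshake_id2 (4 bytes) ]
     SYNC      (id 17): [ 17 ][ counter (1 byte) ][ recv_packetnum (4) ][ sent_packetnum (4) ]
                        [ requested packet numbers, 4 bytes each, 0..BUFFER_PACKET_NUM of them ]
     data      (id 18): [ 18 ][ packet number (4 bytes) ][ 1..MAX_DATA_SIZE bytes of data ]

   All multi-byte fields are sent in network byte order (htonl/ntohl). */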

int send_handshake(IP_Port ip_port, uint32_t handshake_id1, uint32_t handshake_id2)
{
    uint8_t packet[1 + 4 + 4];
    uint32_t temp;

    packet[0] = 16;
    temp = htonl(handshake_id1);
    memcpy(packet + 1, &temp, 4);
    temp = htonl(handshake_id2);
    memcpy(packet + 5, &temp, 4);
    return sendpacket(ip_port, packet, sizeof(packet));
}

int send_SYNC(uint32_t connection_id)
{
    uint8_t packet[(BUFFER_PACKET_NUM * 4 + 4 + 4 + 2)];
    uint16_t index = 0;

    IP_Port ip_port = connections[connection_id].ip_port;
    uint8_t counter = connections[connection_id].send_counter;
    uint32_t recv_packetnum = htonl(connections[connection_id].recv_packetnum);
    uint32_t sent_packetnum = htonl(connections[connection_id].sent_packetnum);
    uint32_t requested[BUFFER_PACKET_NUM];
    uint32_t number = missing_packets(connection_id, requested);

    packet[0] = 17;
    index += 1;
    memcpy(packet + index, &counter, 1);
    index += 1;
    memcpy(packet + index, &recv_packetnum, 4);
    index += 4;
    memcpy(packet + index, &sent_packetnum, 4);
    index += 4;
    memcpy(packet + index, requested, 4 * number);

    return sendpacket(ip_port, packet, (number * 4 + 4 + 4 + 2));
}

int send_data_packet(uint32_t connection_id, uint32_t packet_num)
{
    uint32_t index = packet_num % MAX_QUEUE_NUM;
    uint32_t temp;
    uint8_t packet[1 + 4 + MAX_DATA_SIZE];
    packet[0] = 18;
    temp = htonl(packet_num);
    memcpy(packet + 1, &temp, 4);
    memcpy(packet + 5, connections[connection_id].sendbuffer[index].data,
           connections[connection_id].sendbuffer[index].size);
    return sendpacket(connections[connection_id].ip_port, packet,
                      1 + 4 + connections[connection_id].sendbuffer[index].size);
}

/* sends 1 data packet */
int send_DATA(uint32_t connection_id)
{
    int ret;
    uint32_t buffer[BUFFER_PACKET_NUM];
    if (connections[connection_id].num_req_paquets > 0) {
        ret = send_data_packet(connection_id, connections[connection_id].req_packets[0]);
        connections[connection_id].num_req_paquets--;
        memcpy(buffer, connections[connection_id].req_packets + 1, connections[connection_id].num_req_paquets * 4);
        memcpy(connections[connection_id].req_packets, buffer, connections[connection_id].num_req_paquets * 4);
        return ret;
    }
    if (connections[connection_id].sendbuff_packetnum != connections[connection_id].sent_packetnum) {
        ret = send_data_packet(connection_id, connections[connection_id].sent_packetnum);
        connections[connection_id].sent_packetnum++;
        return ret;
    }
    return 0;
}

/* END of packet sending functions */

/* Packet handling functions.
   One to handle each type of packet we receive.
   return 0 if handled correctly, 1 if the packet is bad. */
int handle_handshake(uint8_t *packet, uint32_t length, IP_Port source)
{
    if (length != (1 + 4 + 4))
        return 1;
    uint32_t temp;
    uint32_t handshake_id1, handshake_id2;
    int connection = getconnection_id(source);
    memcpy(&temp, packet + 1, 4);
    handshake_id1 = ntohl(temp);
    memcpy(&temp, packet + 5, 4);
    handshake_id2 = ntohl(temp);

    if (handshake_id2 == 0 && is_connected(connection) < 3) {
        send_handshake(source, handshake_id(source), handshake_id1);
        return 0;
    }
    if (is_connected(connection) != 1)
        return 1;
    if (handshake_id2 == connections[connection].handshake_id1) { /* if handshake_id2 is what we sent previously as handshake_id1 */
        connections[connection].status = 2;
        /* NOTE: is this necessary?
           connections[connection].handshake_id2 = handshake_id1; */
        connections[connection].orecv_packetnum = handshake_id2;
        connections[connection].osent_packetnum = handshake_id1;
        connections[connection].recv_packetnum = handshake_id1;
        connections[connection].successful_read = handshake_id1;
    }
    return 0;
}

/* returns 1 if the sync packet is valid, 0 if not. */
int SYNC_valid(uint32_t length)
{
    if (length < 4 + 4 + 2)
        return 0;
    if (length > (BUFFER_PACKET_NUM * 4 + 4 + 4 + 2) ||
        ((length - 4 - 4 - 2) % 4) != 0)
        return 0;
    return 1;
}

/* case 1: */
int handle_SYNC1(IP_Port source, uint32_t recv_packetnum, uint32_t sent_packetnum)
{
    if (handshake_id(source) == recv_packetnum) {
        int x = new_inconnection(source);
        if (x != -1) {
            connections[x].orecv_packetnum = recv_packetnum;
            connections[x].sent_packetnum = recv_packetnum;
            connections[x].sendbuff_packetnum = recv_packetnum;
            connections[x].successful_sent = recv_packetnum;
            connections[x].osent_packetnum = sent_packetnum;
            connections[x].recv_packetnum = sent_packetnum;
            connections[x].successful_read = sent_packetnum;

            return x;
        }
    }
    return -1;
}

/* case 2: */
int handle_SYNC2(int connection_id, uint8_t counter, uint32_t recv_packetnum, uint32_t sent_packetnum)
{
    if (recv_packetnum == connections[connection_id].orecv_packetnum) {
        /* && sent_packetnum == connections[connection_id].osent_packetnum) */
        connections[connection_id].status = 3;
        connections[connection_id].recv_counter = counter;
        ++connections[connection_id].send_counter;
        send_SYNC(connection_id);
        return 0;
    }
    return 1;
}

/* case 3: */
int handle_SYNC3(int connection_id, uint8_t counter, uint32_t recv_packetnum, uint32_t sent_packetnum, uint32_t *req_packets,
                 uint16_t number)
{
    uint8_t comp_counter = (counter - connections[connection_id].recv_counter);
    uint32_t i, temp;
    /* uint32_t comp_1 = (recv_packetnum - connections[connection_id].successful_sent);
       uint32_t comp_2 = (sent_packetnum - connections[connection_id].successful_read); */
    uint32_t comp_1 = (recv_packetnum - connections[connection_id].orecv_packetnum);
    uint32_t comp_2 = (sent_packetnum - connections[connection_id].osent_packetnum);
    if (comp_1 <= BUFFER_PACKET_NUM && comp_2 <= BUFFER_PACKET_NUM && comp_counter < 10 && comp_counter != 0) { /* packet valid */
        connections[connection_id].orecv_packetnum = recv_packetnum;
        connections[connection_id].osent_packetnum = sent_packetnum;
        connections[connection_id].successful_sent = recv_packetnum;
        connections[connection_id].last_recvSYNC = current_time();
        connections[connection_id].recv_counter = counter;
        ++connections[connection_id].send_counter;
        for (i = 0; i < number; ++i) {
            temp = ntohl(req_packets[i]);
            memcpy(connections[connection_id].req_packets + i, &temp, 4); /* one 4-byte packet number per entry */
        }
        connections[connection_id].num_req_paquets = number;
        return 0;
    }
    return 1;
}
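
/* Note on the wrap-around checks in handle_SYNC3 (descriptive, not part of the original file):
   the differences above are computed in unsigned arithmetic, so they stay correct when a
   counter wraps. comp_counter is modulo 256: with recv_counter == 250 and counter == 3 it
   evaluates to 9, which passes the (0 < comp_counter < 10) check. comp_1/comp_2 are modulo
   2^32: a SYNC that moves at most BUFFER_PACKET_NUM packets beyond what we already know
   about is accepted, anything further away (stale or bogus numbers) is rejected. */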

int handle_SYNC(uint8_t *packet, uint32_t length, IP_Port source)
{
    if (!SYNC_valid(length))
        return 1;
    int connection = getconnection_id(source);
    uint8_t counter;
    uint32_t temp;
    uint32_t recv_packetnum, sent_packetnum;
    uint32_t req_packets[BUFFER_PACKET_NUM];
    uint16_t number = (length - 4 - 4 - 2) / 4;

    memcpy(&counter, packet + 1, 1);
    memcpy(&temp, packet + 2, 4);
    recv_packetnum = ntohl(temp);
    memcpy(&temp, packet + 6, 4);
    sent_packetnum = ntohl(temp);
    if (number != 0)
        memcpy(req_packets, packet + 10, 4 * number);
    if (connection == -1)
        return handle_SYNC1(source, recv_packetnum, sent_packetnum);
    if (connections[connection].status == 2)
        return handle_SYNC2(connection, counter, recv_packetnum, sent_packetnum);
    if (connections[connection].status == 3)
        return handle_SYNC3(connection, counter, recv_packetnum, sent_packetnum, req_packets, number);
    return 0;
}

/* add a packet to the received buffer and set the recv_packetnum of the connection to its proper value.
   return 1 if data was too big, 0 if not. */
int add_recv(int connection_id, uint32_t data_num, uint8_t *data, uint16_t size)
{
    if (size > MAX_DATA_SIZE)
        return 1;

    uint32_t i;
    uint32_t maxnum = connections[connection_id].successful_read + BUFFER_PACKET_NUM;
    uint32_t sent_packet = data_num - connections[connection_id].osent_packetnum;
    for (i = connections[connection_id].recv_packetnum; i != maxnum; ++i) {
        if (i == data_num) {
            memcpy(connections[connection_id].recvbuffer[i % MAX_QUEUE_NUM].data, data, size);
            connections[connection_id].recvbuffer[i % MAX_QUEUE_NUM].size = size;
            connections[connection_id].last_recvdata = current_time();
            if (sent_packet < BUFFER_PACKET_NUM)
                connections[connection_id].osent_packetnum = data_num;
            break;
        }
    }
    for (i = connections[connection_id].recv_packetnum; i != maxnum; ++i) {
        if (connections[connection_id].recvbuffer[i % MAX_QUEUE_NUM].size != 0)
            connections[connection_id].recv_packetnum = i;
        else
            break;
    }

    return 0;
}

int handle_data(uint8_t *packet, uint32_t length, IP_Port source)
{
    int connection = getconnection_id(source);

    if (connection == -1)
        return 1;

    if (connections[connection].status != 3) /* Drop the data packet if connection is not connected. */
        return 1;

    if (length > 1 + 4 + MAX_DATA_SIZE || length < 1 + 4 + 1)
        return 1;
    uint32_t temp;
    uint32_t number;
    uint16_t size = length - 1 - 4;

    memcpy(&temp, packet + 1, 4);
    number = ntohl(temp);
    return add_recv(connection, number, packet + 5, size);
}

/* END of packet handling functions */

int LosslessUDP_handlepacket(uint8_t *packet, uint32_t length, IP_Port source)
{
    switch (packet[0]) { //TODO: check if no break statement is correct???
        case 16:
            return handle_handshake(packet, length, source);

        case 17:
            return handle_SYNC(packet, length, source);

        case 18:
            return handle_data(packet, length, source);

        default:
            return 1;
    }

    return 0;
}
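
/* Illustrative glue sketch (not part of the original file): what the network layer would
   call for every UDP datagram it receives. How the datagram is obtained (presumably a
   recvfrom() wrapper in network.c) is assumed here and not shown. */
static void example_on_udp_datagram(uint8_t *data, uint32_t length, IP_Port source)
{
    if (length == 0)
        return;
    /* packet ids 16, 17 and 18 belong to Lossless_UDP; the handler returns 1 for anything
       it does not accept, which a caller could route to other subsystems instead. */
    LosslessUDP_handlepacket(data, length, source);
}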

/* Send handshake requests.
   handshake packets are sent at the same rate as SYNC packets */
void doNew()
{
    uint32_t i;
    uint64_t temp_time = current_time();
    for (i = 0; i < MAX_CONNECTIONS; ++i) {
        if (connections[i].status == 1)
            if ((connections[i].last_sent + (1000000UL / connections[i].SYNC_rate)) <= temp_time) {
                send_handshake(connections[i].ip_port, connections[i].handshake_id1, 0);
                connections[i].last_sent = temp_time;
            }

        /* kill all timed out connections */
        if (connections[i].status > 0 && (connections[i].last_recvSYNC + connections[i].timeout * 1000000UL) < temp_time &&
            connections[i].status != 4)
            /* kill_connection(i); */
            connections[i].status = 4;
        if (connections[i].status > 0 && connections[i].killat < temp_time)
            kill_connection(i);
    }
}

void doSYNC()
{
    uint32_t i;
    uint64_t temp_time = current_time();
    for (i = 0; i < MAX_CONNECTIONS; ++i) {
        if (connections[i].status == 2 || connections[i].status == 3)
            if ((connections[i].last_SYNC + (1000000UL / connections[i].SYNC_rate)) <= temp_time) {
                send_SYNC(i);
                connections[i].last_SYNC = temp_time;
            }
    }
}

void doData()
{
    uint32_t i;
    uint64_t j;
    uint64_t temp_time = current_time();
    for (i = 0; i < MAX_CONNECTIONS; ++i)
        if (connections[i].status == 3 && sendqueue(i) != 0)
            if ((connections[i].last_sent + (1000000UL / connections[i].data_rate)) <= temp_time) {
                for (j = connections[i].last_sent; j < temp_time; j += (1000000UL / connections[i].data_rate))
                    send_DATA(i);
                connections[i].last_sent = temp_time;
            }
}

/* TODO: flow control.
   automatically adjusts send rates of packets for optimal transmission. */

#define MAX_SYNC_RATE 10

void adjustRates()
{
    uint32_t i;
    uint64_t temp_time = current_time();
    for (i = 0; i < MAX_CONNECTIONS; ++i) {
        if (connections[i].status == 1 || connections[i].status == 2)
            connections[i].SYNC_rate = MAX_SYNC_RATE;
        if (connections[i].status == 3) {
            if (sendqueue(i) != 0) {
                connections[i].data_rate = (BUFFER_PACKET_NUM - connections[i].num_req_paquets) * MAX_SYNC_RATE;
                connections[i].SYNC_rate = MAX_SYNC_RATE;
            } else if (connections[i].last_recvdata + 1000000UL > temp_time)
                connections[i].SYNC_rate = MAX_SYNC_RATE;
            else
                connections[i].SYNC_rate = SYNC_RATE;
        }
    }
}

/* Call this function a couple times per second.
   It's the main loop. */
void doLossless_UDP()
{
    doNew();
    doSYNC();
    doData();
    adjustRates();
}
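
/* Illustrative main-loop sketch (not part of the original file): one tick of an application
   main loop. The application is expected to run this a couple of times per second, as the
   comment above doLossless_UDP() asks, and to have passed every received UDP packet to
   LosslessUDP_handlepacket() beforehand; the socket handling and the sleeping primitive are
   platform specific and left to the caller. */
static void example_lossless_udp_tick(void)
{
    doLossless_UDP(); /* resend handshakes, send SYNC and data packets, expire dead connections */
}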