ffi-tox 0.1.1 → 0.1.3
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/ProjectTox-Core/AUTHORS +0 -0
- data/ProjectTox-Core/ChangeLog +0 -0
- data/ProjectTox-Core/INSTALL +370 -0
- data/ProjectTox-Core/INSTALL.md +455 -56
- data/ProjectTox-Core/Makefile.am +35 -0
- data/ProjectTox-Core/NEWS +0 -0
- data/ProjectTox-Core/README +43 -0
- data/ProjectTox-Core/README.md +34 -44
- data/ProjectTox-Core/auto_tests/Makefile.inc +110 -0
- data/ProjectTox-Core/auto_tests/TCP_test.c +519 -0
- data/ProjectTox-Core/auto_tests/assoc_test.c +160 -0
- data/ProjectTox-Core/auto_tests/crypto_test.c +302 -0
- data/ProjectTox-Core/auto_tests/dht_test.c +362 -0
- data/ProjectTox-Core/auto_tests/encryptsave_test.c +104 -0
- data/ProjectTox-Core/auto_tests/friends_test.c +238 -0
- data/ProjectTox-Core/auto_tests/helpers.h +15 -0
- data/ProjectTox-Core/auto_tests/messenger_test.c +365 -0
- data/ProjectTox-Core/auto_tests/network_test.c +171 -0
- data/ProjectTox-Core/auto_tests/onion_test.c +363 -0
- data/ProjectTox-Core/auto_tests/skeleton_test.c +49 -0
- data/ProjectTox-Core/auto_tests/tox_test.c +454 -0
- data/ProjectTox-Core/auto_tests/toxav_basic_test.c +597 -0
- data/ProjectTox-Core/auto_tests/toxav_many_test.c +402 -0
- data/ProjectTox-Core/autogen.sh +6 -0
- data/ProjectTox-Core/build/Makefile.am +14 -0
- data/ProjectTox-Core/configure.ac +694 -0
- data/ProjectTox-Core/dist-build/android-arm.sh +3 -0
- data/ProjectTox-Core/dist-build/android-armv7.sh +3 -0
- data/ProjectTox-Core/dist-build/android-build.sh +59 -0
- data/ProjectTox-Core/dist-build/android-mips.sh +3 -0
- data/ProjectTox-Core/dist-build/android-x86.sh +3 -0
- data/ProjectTox-Core/docs/Group-Chats.md +71 -0
- data/ProjectTox-Core/docs/Hardening.txt +60 -0
- data/ProjectTox-Core/docs/Hardening_docs.txt +30 -0
- data/ProjectTox-Core/docs/Prevent_Tracking.txt +160 -0
- data/ProjectTox-Core/docs/TCP_Network.txt +154 -0
- data/ProjectTox-Core/docs/TODO +62 -0
- data/ProjectTox-Core/docs/Tox_middle_level_network_protocol.txt +120 -0
- data/ProjectTox-Core/docs/av_api.md +194 -0
- data/ProjectTox-Core/libtoxav.pc.in +11 -0
- data/ProjectTox-Core/libtoxcore.pc.in +11 -0
- data/ProjectTox-Core/m4/ax_have_epoll.m4 +104 -0
- data/ProjectTox-Core/m4/ax_pthread.m4 +317 -0
- data/ProjectTox-Core/m4/pkg.m4 +199 -0
- data/ProjectTox-Core/other/DHT_bootstrap.c +121 -58
- data/ProjectTox-Core/other/DHTnodes +3 -0
- data/ProjectTox-Core/other/Makefile.inc +20 -0
- data/ProjectTox-Core/other/bootstrap_node_packets.c +65 -0
- data/ProjectTox-Core/other/tox.png +0 -0
- data/ProjectTox-Core/testing/DHT_test.c +170 -98
- data/ProjectTox-Core/testing/Makefile.inc +112 -0
- data/ProjectTox-Core/testing/Messenger_test.c +133 -69
- data/ProjectTox-Core/testing/dns3_test.c +115 -0
- data/ProjectTox-Core/testing/misc_tools.c +59 -13
- data/ProjectTox-Core/testing/nTox.c +1127 -264
- data/ProjectTox-Core/testing/nTox.h +10 -19
- data/ProjectTox-Core/testing/tox_shell.c +159 -0
- data/ProjectTox-Core/testing/tox_sync.c +299 -0
- data/ProjectTox-Core/tools/README +11 -0
- data/ProjectTox-Core/tools/astylerc +11 -0
- data/ProjectTox-Core/tools/pre-commit +17 -0
- data/ProjectTox-Core/toxav/Makefile.inc +36 -0
- data/ProjectTox-Core/toxav/codec.c +357 -0
- data/ProjectTox-Core/toxav/codec.h +116 -0
- data/ProjectTox-Core/toxav/msi.c +1949 -0
- data/ProjectTox-Core/toxav/msi.h +267 -0
- data/ProjectTox-Core/toxav/rtp.c +600 -0
- data/ProjectTox-Core/toxav/rtp.h +196 -0
- data/ProjectTox-Core/toxav/toxav.c +1148 -0
- data/ProjectTox-Core/toxav/toxav.h +389 -0
- data/ProjectTox-Core/toxcore/DHT.c +2521 -0
- data/ProjectTox-Core/toxcore/DHT.h +412 -0
- data/ProjectTox-Core/toxcore/LAN_discovery.c +322 -0
- data/ProjectTox-Core/{core → toxcore}/LAN_discovery.h +17 -12
- data/ProjectTox-Core/toxcore/Makefile.inc +67 -0
- data/ProjectTox-Core/toxcore/Messenger.c +3006 -0
- data/ProjectTox-Core/toxcore/Messenger.h +818 -0
- data/ProjectTox-Core/toxcore/TCP_client.c +858 -0
- data/ProjectTox-Core/toxcore/TCP_client.h +156 -0
- data/ProjectTox-Core/toxcore/TCP_server.c +1332 -0
- data/ProjectTox-Core/toxcore/TCP_server.h +181 -0
- data/ProjectTox-Core/toxcore/assoc.c +1033 -0
- data/ProjectTox-Core/toxcore/assoc.h +104 -0
- data/ProjectTox-Core/toxcore/crypto_core.c +278 -0
- data/ProjectTox-Core/toxcore/crypto_core.h +151 -0
- data/ProjectTox-Core/toxcore/friend_requests.c +175 -0
- data/ProjectTox-Core/toxcore/friend_requests.h +83 -0
- data/ProjectTox-Core/toxcore/group_chats.c +837 -0
- data/ProjectTox-Core/toxcore/group_chats.h +199 -0
- data/ProjectTox-Core/toxcore/list.c +256 -0
- data/ProjectTox-Core/toxcore/list.h +85 -0
- data/ProjectTox-Core/toxcore/logger.c +153 -0
- data/ProjectTox-Core/toxcore/logger.h +84 -0
- data/ProjectTox-Core/toxcore/misc_tools.h +70 -0
- data/ProjectTox-Core/toxcore/net_crypto.c +2753 -0
- data/ProjectTox-Core/toxcore/net_crypto.h +410 -0
- data/ProjectTox-Core/toxcore/network.c +979 -0
- data/ProjectTox-Core/toxcore/network.h +367 -0
- data/ProjectTox-Core/toxcore/onion.c +540 -0
- data/ProjectTox-Core/toxcore/onion.h +150 -0
- data/ProjectTox-Core/toxcore/onion_announce.c +433 -0
- data/ProjectTox-Core/toxcore/onion_announce.h +139 -0
- data/ProjectTox-Core/toxcore/onion_client.c +1347 -0
- data/ProjectTox-Core/toxcore/onion_client.h +253 -0
- data/ProjectTox-Core/toxcore/ping.c +346 -0
- data/ProjectTox-Core/toxcore/ping.h +47 -0
- data/ProjectTox-Core/toxcore/ping_array.c +162 -0
- data/ProjectTox-Core/toxcore/ping_array.h +75 -0
- data/ProjectTox-Core/toxcore/tox.c +940 -0
- data/ProjectTox-Core/toxcore/tox.h +734 -0
- data/ProjectTox-Core/toxcore/util.c +193 -0
- data/ProjectTox-Core/toxcore/util.h +63 -0
- data/ProjectTox-Core/toxdns/Makefile.inc +29 -0
- data/ProjectTox-Core/toxdns/toxdns.c +238 -0
- data/ProjectTox-Core/toxdns/toxdns.h +88 -0
- data/ProjectTox-Core/toxencryptsave/Makefile.inc +45 -0
- data/ProjectTox-Core/toxencryptsave/toxencryptsave.c +179 -0
- data/ProjectTox-Core/toxencryptsave/toxencryptsave.h +74 -0
- data/interfaces/libtox.i +2 -6
- data/lib/ffi-tox/libtox.rb +406 -28
- metadata +124 -46
- data/ProjectTox-Core/CMakeLists.txt +0 -50
- data/ProjectTox-Core/cmake/FindLIBCONFIG.cmake +0 -15
- data/ProjectTox-Core/cmake/FindNaCl.cmake +0 -17
- data/ProjectTox-Core/cmake/FindSODIUM.cmake +0 -15
- data/ProjectTox-Core/core/CMakeLists.txt +0 -19
- data/ProjectTox-Core/core/DHT.c +0 -1104
- data/ProjectTox-Core/core/DHT.h +0 -111
- data/ProjectTox-Core/core/LAN_discovery.c +0 -79
- data/ProjectTox-Core/core/Lossless_UDP.c +0 -755
- data/ProjectTox-Core/core/Lossless_UDP.h +0 -106
- data/ProjectTox-Core/core/Messenger.c +0 -596
- data/ProjectTox-Core/core/Messenger.h +0 -165
- data/ProjectTox-Core/core/friend_requests.c +0 -131
- data/ProjectTox-Core/core/friend_requests.h +0 -51
- data/ProjectTox-Core/core/net_crypto.c +0 -575
- data/ProjectTox-Core/core/net_crypto.h +0 -134
- data/ProjectTox-Core/core/network.c +0 -205
- data/ProjectTox-Core/core/network.h +0 -134
- data/ProjectTox-Core/docs/commands.md +0 -25
- data/ProjectTox-Core/docs/start_guide.de.md +0 -40
- data/ProjectTox-Core/docs/start_guide.md +0 -38
- data/ProjectTox-Core/other/CMakeLists.txt +0 -9
- data/ProjectTox-Core/testing/CMakeLists.txt +0 -18
- data/ProjectTox-Core/testing/DHT_cryptosendfiletest.c +0 -228
- data/ProjectTox-Core/testing/DHT_sendfiletest.c +0 -176
- data/ProjectTox-Core/testing/Lossless_UDP_testclient.c +0 -214
- data/ProjectTox-Core/testing/Lossless_UDP_testserver.c +0 -201
- data/ProjectTox-Core/testing/misc_tools.h +0 -29
- data/ProjectTox-Core/testing/nTox_win32.c +0 -387
- data/ProjectTox-Core/testing/nTox_win32.h +0 -40
- data/ProjectTox-Core/testing/rect.py +0 -45
@@ -0,0 +1,389 @@
|
|
1
|
+
/** toxav.h
|
2
|
+
*
|
3
|
+
* Copyright (C) 2013 Tox project All Rights Reserved.
|
4
|
+
*
|
5
|
+
* This file is part of Tox.
|
6
|
+
*
|
7
|
+
* Tox is free software: you can redistribute it and/or modify
|
8
|
+
* it under the terms of the GNU General Public License as published by
|
9
|
+
* the Free Software Foundation, either version 3 of the License, or
|
10
|
+
* (at your option) any later version.
|
11
|
+
*
|
12
|
+
* Tox is distributed in the hope that it will be useful,
|
13
|
+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
14
|
+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
15
|
+
* GNU General Public License for more details.
|
16
|
+
*
|
17
|
+
* You should have received a copy of the GNU General Public License
|
18
|
+
* along with Tox. If not, see <http://www.gnu.org/licenses/>.
|
19
|
+
*
|
20
|
+
*/
|
21
|
+
|
22
|
+
|
23
|
+
#ifndef __TOXAV
|
24
|
+
#define __TOXAV
|
25
|
+
#include <inttypes.h>
|
26
|
+
|
27
|
+
#ifdef __cplusplus
|
28
|
+
extern "C" {
|
29
|
+
#endif
|
30
|
+
|
31
|
+
/* vpx_image_t */
|
32
|
+
#include <vpx/vpx_image.h>
|
33
|
+
|
34
|
+
typedef void ( *ToxAVCallback ) ( void *agent, int32_t call_idx, void *arg );
|
35
|
+
typedef struct _ToxAv ToxAv;
|
36
|
+
|
37
|
+
#ifndef __TOX_DEFINED__
|
38
|
+
#define __TOX_DEFINED__
|
39
|
+
typedef struct Tox Tox;
|
40
|
+
#endif
|
41
|
+
|
42
|
+
#define RTP_PAYLOAD_SIZE 65535
|
43
|
+
|
44
|
+
|
45
|
+
/**
 * @brief Callback IDs identifying which call-state event a registered
 *        ToxAVCallback is being invoked for.
 */
typedef enum {
    /* Requests (initiated by the peer or by us) */
    av_OnInvite,
    av_OnStart,
    av_OnCancel,
    av_OnReject,
    av_OnEnd,

    /* Responses (acknowledgements of a request) */
    av_OnRinging,
    av_OnStarting,
    av_OnEnding,

    /* Protocol-level events */
    av_OnRequestTimeout,
    av_OnPeerTimeout,
    av_OnMediaChange
} ToxAvCallbackID;
|
66
|
+
|
67
|
+
|
68
|
+
/**
 * @brief Call type identifier. Audio starts at 192; TypeVideo follows as 193.
 */
typedef enum {
    TypeAudio = 192,
    TypeVideo
} ToxAvCallType;
|
75
|
+
|
76
|
+
|
77
|
+
/**
 * @brief Call state identifiers as reported by toxav_get_call_state().
 */
typedef enum {
    av_CallNonExistant = -1, /* no such call */
    av_CallInviting, /* when sending call invite */
    av_CallStarting, /* when getting call invite */
    av_CallActive,
    av_CallHold,
    av_CallHanged_up
} ToxAvCallState;
|
85
|
+
|
86
|
+
/**
 * @brief Error indicators. 0 is success; all error codes are negative.
 */
typedef enum {
    ErrorNone = 0,
    ErrorInternal = -1, /* Internal error */
    ErrorAlreadyInCall = -2, /* Already has an active call */
    ErrorNoCall = -3, /* Trying to perform call action while not in a call */
    ErrorInvalidState = -4, /* Trying to perform call action while in invalid state*/
    ErrorNoRtpSession = -5, /* Trying to perform rtp action on invalid session */
    ErrorAudioPacketLost = -6, /* Indicating packet loss */
    ErrorStartingAudioRtp = -7, /* Error in toxav_prepare_transmission() */
    ErrorStartingVideoRtp = -8 , /* Error in toxav_prepare_transmission() */
    ErrorTerminatingAudioRtp = -9, /* Returned in toxav_kill_transmission() */
    ErrorTerminatingVideoRtp = -10, /* Returned in toxav_kill_transmission() */
    ErrorPacketTooLarge = -11, /* Buffer exceeds size while encoding */
    ErrorInvalidCodecState = -12, /* Codec state not initialized */

} ToxAvError;
|
105
|
+
|
106
|
+
|
107
|
+
/**
 * @brief Locally supported capabilities. Bit flags; combine with bitwise OR
 *        and query via toxav_capability_supported().
 */
typedef enum {
    AudioEncoding = 1 << 0,
    AudioDecoding = 1 << 1,
    VideoEncoding = 1 << 2,
    VideoDecoding = 1 << 3
} ToxAvCapabilities;
|
116
|
+
|
117
|
+
|
118
|
+
/**
 * @brief Encoding settings for one call (audio and video parameters).
 *        Pass to toxav_call()/toxav_answer()/toxav_change_settings();
 *        av_DefaultSettings provides sane defaults.
 */
typedef struct _ToxAvCodecSettings {
    ToxAvCallType call_type;

    uint32_t video_bitrate; /* In kbits/s */
    uint16_t max_video_width; /* In px */
    uint16_t max_video_height; /* In px */

    uint32_t audio_bitrate; /* In bits/s */
    uint16_t audio_frame_duration; /* In ms */
    uint32_t audio_sample_rate; /* In Hz */
    uint32_t audio_channels;
} ToxAvCSettings;
|
133
|
+
|
134
|
+
extern const ToxAvCSettings av_DefaultSettings;
|
135
|
+
extern const uint32_t av_jbufdc; /* Jitter buffer default capacity */
|
136
|
+
extern const uint32_t av_VADd; /* VAD default treshold */
|
137
|
+
|
138
|
+
/**
|
139
|
+
* @brief Start new A/V session. There can only be one session at the time. If you register more
|
140
|
+
* it will result in undefined behaviour.
|
141
|
+
*
|
142
|
+
* @param messenger The messenger handle.
|
143
|
+
* @param userdata The agent handling A/V session (i.e. phone).
|
144
|
+
* @param video_width Width of video frame.
|
145
|
+
* @param video_height Height of video frame.
|
146
|
+
* @return ToxAv*
|
147
|
+
* @retval NULL On error.
|
148
|
+
*/
|
149
|
+
ToxAv *toxav_new(Tox *messenger, int32_t max_calls);
|
150
|
+
|
151
|
+
/**
|
152
|
+
* @brief Remove A/V session.
|
153
|
+
*
|
154
|
+
* @param av Handler.
|
155
|
+
* @return void
|
156
|
+
*/
|
157
|
+
void toxav_kill(ToxAv *av);
|
158
|
+
|
159
|
+
/**
|
160
|
+
* @brief Register callback for call state.
|
161
|
+
*
|
162
|
+
* @param av Handler.
|
163
|
+
* @param callback The callback
|
164
|
+
* @param id One of the ToxAvCallbackID values
|
165
|
+
* @return void
|
166
|
+
*/
|
167
|
+
void toxav_register_callstate_callback (ToxAv *av, ToxAVCallback callback, ToxAvCallbackID id, void *userdata);
|
168
|
+
|
169
|
+
/**
|
170
|
+
* @brief Register callback for receiving audio data
|
171
|
+
*
|
172
|
+
* @param av Handler.
|
173
|
+
* @param callback The callback
|
174
|
+
* @return void
|
175
|
+
*/
|
176
|
+
void toxav_register_audio_recv_callback (ToxAv *av, void (*callback)(ToxAv *, int32_t, int16_t *, int, void *),
|
177
|
+
void *user_data);
|
178
|
+
|
179
|
+
/**
|
180
|
+
* @brief Register callback for receiving video data
|
181
|
+
*
|
182
|
+
* @param av Handler.
|
183
|
+
* @param callback The callback
|
184
|
+
* @return void
|
185
|
+
*/
|
186
|
+
void toxav_register_video_recv_callback (ToxAv *av, void (*callback)(ToxAv *, int32_t, vpx_image_t *, void *),
|
187
|
+
void *user_data);
|
188
|
+
|
189
|
+
/**
|
190
|
+
* @brief Call user. Use its friend_id.
|
191
|
+
*
|
192
|
+
* @param av Handler.
|
193
|
+
* @param user The user.
|
194
|
+
* @param call_type Call type.
|
195
|
+
* @param ringing_seconds Ringing timeout.
|
196
|
+
* @return int
|
197
|
+
* @retval 0 Success.
|
198
|
+
* @retval ToxAvError On error.
|
199
|
+
*/
|
200
|
+
int toxav_call(ToxAv *av, int32_t *call_index, int user, const ToxAvCSettings *csettings, int ringing_seconds);
|
201
|
+
|
202
|
+
/**
|
203
|
+
* @brief Hangup active call.
|
204
|
+
*
|
205
|
+
* @param av Handler.
|
206
|
+
* @return int
|
207
|
+
* @retval 0 Success.
|
208
|
+
* @retval ToxAvError On error.
|
209
|
+
*/
|
210
|
+
int toxav_hangup(ToxAv *av, int32_t call_index);
|
211
|
+
|
212
|
+
/**
|
213
|
+
* @brief Answer incoming call.
|
214
|
+
*
|
215
|
+
* @param av Handler.
|
216
|
+
* @param call_type Answer with...
|
217
|
+
* @return int
|
218
|
+
* @retval 0 Success.
|
219
|
+
* @retval ToxAvError On error.
|
220
|
+
*/
|
221
|
+
int toxav_answer(ToxAv *av, int32_t call_index, const ToxAvCSettings *csettings );
|
222
|
+
|
223
|
+
/**
|
224
|
+
* @brief Reject incoming call.
|
225
|
+
*
|
226
|
+
* @param av Handler.
|
227
|
+
* @param reason Optional reason. Set NULL if none.
|
228
|
+
* @return int
|
229
|
+
* @retval 0 Success.
|
230
|
+
* @retval ToxAvError On error.
|
231
|
+
*/
|
232
|
+
int toxav_reject(ToxAv *av, int32_t call_index, const char *reason);
|
233
|
+
|
234
|
+
/**
|
235
|
+
* @brief Cancel outgoing request.
|
236
|
+
*
|
237
|
+
* @param av Handler.
|
238
|
+
* @param reason Optional reason.
|
239
|
+
* @param peer_id peer friend_id
|
240
|
+
* @return int
|
241
|
+
* @retval 0 Success.
|
242
|
+
* @retval ToxAvError On error.
|
243
|
+
*/
|
244
|
+
int toxav_cancel(ToxAv *av, int32_t call_index, int peer_id, const char *reason);
|
245
|
+
|
246
|
+
/**
|
247
|
+
* @brief Notify peer that we are changing call settings
|
248
|
+
*
|
249
|
+
* @param av Handler.
|
250
|
+
* @return int
|
251
|
+
* @retval 0 Success.
|
252
|
+
* @retval ToxAvError On error.
|
253
|
+
*/
|
254
|
+
int toxav_change_settings(ToxAv *av, int32_t call_index, const ToxAvCSettings *csettings);
|
255
|
+
|
256
|
+
/**
|
257
|
+
* @brief Terminate transmission. Note that transmission will be terminated without informing remote peer.
|
258
|
+
*
|
259
|
+
* @param av Handler.
|
260
|
+
* @return int
|
261
|
+
* @retval 0 Success.
|
262
|
+
* @retval ToxAvError On error.
|
263
|
+
*/
|
264
|
+
int toxav_stop_call(ToxAv *av, int32_t call_index);
|
265
|
+
|
266
|
+
/**
|
267
|
+
* @brief Must be called before any RTP transmission occurs.
|
268
|
+
*
|
269
|
+
* @param av Handler.
|
270
|
+
* @param support_video Is video supported ? 1 : 0
|
271
|
+
* @return int
|
272
|
+
* @retval 0 Success.
|
273
|
+
* @retval ToxAvError On error.
|
274
|
+
*/
|
275
|
+
int toxav_prepare_transmission(ToxAv *av, int32_t call_index, uint32_t jbuf_size, uint32_t VAD_treshold,
|
276
|
+
int support_video);
|
277
|
+
|
278
|
+
/**
|
279
|
+
* @brief Call this at the end of the transmission.
|
280
|
+
*
|
281
|
+
* @param av Handler.
|
282
|
+
* @return int
|
283
|
+
* @retval 0 Success.
|
284
|
+
* @retval ToxAvError On error.
|
285
|
+
*/
|
286
|
+
int toxav_kill_transmission(ToxAv *av, int32_t call_index);
|
287
|
+
|
288
|
+
/**
|
289
|
+
* @brief Encode and send video packet.
|
290
|
+
*
|
291
|
+
* @param av Handler.
|
292
|
+
* @param frame The encoded frame.
|
293
|
+
* @param frame_size The size of the encoded frame.
|
294
|
+
* @return int
|
295
|
+
* @retval 0 Success.
|
296
|
+
* @retval ToxAvError On error.
|
297
|
+
*/
|
298
|
+
int toxav_send_video ( ToxAv *av, int32_t call_index, const uint8_t *frame, unsigned int frame_size);
|
299
|
+
|
300
|
+
/**
|
301
|
+
* @brief Send audio frame.
|
302
|
+
*
|
303
|
+
* @param av Handler.
|
304
|
+
* @param data The audio data encoded with toxav_prepare_audio_frame().
|
305
|
+
* @param size Its size in number of bytes.
|
306
|
+
* @return int
|
307
|
+
* @retval 0 Success.
|
308
|
+
* @retval ToxAvError On error.
|
309
|
+
*/
|
310
|
+
int toxav_send_audio ( ToxAv *av, int32_t call_index, const uint8_t *frame, unsigned int size);
|
311
|
+
|
312
|
+
/**
|
313
|
+
* @brief Encode video frame
|
314
|
+
*
|
315
|
+
* @param av Handler
|
316
|
+
* @param dest Where to
|
317
|
+
* @param dest_max Max size
|
318
|
+
* @param input What to encode
|
319
|
+
* @return int
|
320
|
+
* @retval ToxAvError On error.
|
321
|
+
* @retval >0 On success
|
322
|
+
*/
|
323
|
+
int toxav_prepare_video_frame ( ToxAv *av, int32_t call_index, uint8_t *dest, int dest_max, vpx_image_t *input );
|
324
|
+
|
325
|
+
/**
|
326
|
+
* @brief Encode audio frame
|
327
|
+
*
|
328
|
+
* @param av Handler
|
329
|
+
* @param dest dest
|
330
|
+
* @param dest_max Max dest size
|
331
|
+
* @param frame The frame
|
332
|
+
* @param frame_size The frame size
|
333
|
+
* @return int
|
334
|
+
* @retval ToxAvError On error.
|
335
|
+
* @retval >0 On success
|
336
|
+
*/
|
337
|
+
int toxav_prepare_audio_frame ( ToxAv *av, int32_t call_index, uint8_t *dest, int dest_max, const int16_t *frame,
|
338
|
+
int frame_size);
|
339
|
+
|
340
|
+
/**
|
341
|
+
* @brief Get peer transmission type. It can either be audio or video.
|
342
|
+
*
|
343
|
+
* @param av Handler.
|
344
|
+
* @param peer The peer
|
345
|
+
* @return int
|
346
|
+
* @retval ToxAvCallType On success.
|
347
|
+
* @retval ToxAvError On error.
|
348
|
+
*/
|
349
|
+
int toxav_get_peer_csettings ( ToxAv *av, int32_t call_index, int peer, ToxAvCSettings *dest );
|
350
|
+
|
351
|
+
/**
|
352
|
+
* @brief Get id of peer participating in conversation
|
353
|
+
*
|
354
|
+
* @param av Handler
|
355
|
+
* @param peer peer index
|
356
|
+
* @return int
|
357
|
+
* @retval ToxAvError No peer id
|
358
|
+
*/
|
359
|
+
int toxav_get_peer_id ( ToxAv *av, int32_t call_index, int peer );
|
360
|
+
|
361
|
+
/**
|
362
|
+
* @brief Get current call state
|
363
|
+
*
|
364
|
+
* @param av Handler
|
365
|
+
* @param call_index What call
|
366
|
+
* @return int
|
367
|
+
* @retval ToxAvCallState State id
|
368
|
+
*/
|
369
|
+
ToxAvCallState toxav_get_call_state ( ToxAv *av, int32_t call_index );
|
370
|
+
/**
|
371
|
+
* @brief Is certain capability supported
|
372
|
+
*
|
373
|
+
* @param av Handler
|
374
|
+
* @return int
|
375
|
+
* @retval 1 Yes.
|
376
|
+
* @retval 0 No.
|
377
|
+
*/
|
378
|
+
int toxav_capability_supported ( ToxAv *av, int32_t call_index, ToxAvCapabilities capability );
|
379
|
+
|
380
|
+
|
381
|
+
Tox *toxav_get_tox(ToxAv *av);
|
382
|
+
|
383
|
+
int toxav_has_activity ( ToxAv *av, int32_t call_index, int16_t *PCM, uint16_t frame_size, float ref_energy );
|
384
|
+
|
385
|
+
#ifdef __cplusplus
|
386
|
+
}
|
387
|
+
#endif
|
388
|
+
|
389
|
+
#endif /* __TOXAV */
|
@@ -0,0 +1,2521 @@
|
|
1
|
+
/* DHT.c
|
2
|
+
*
|
3
|
+
* An implementation of the DHT as seen in http://wiki.tox.im/index.php/DHT
|
4
|
+
*
|
5
|
+
* Copyright (C) 2013 Tox project All Rights Reserved.
|
6
|
+
*
|
7
|
+
* This file is part of Tox.
|
8
|
+
*
|
9
|
+
* Tox is free software: you can redistribute it and/or modify
|
10
|
+
* it under the terms of the GNU General Public License as published by
|
11
|
+
* the Free Software Foundation, either version 3 of the License, or
|
12
|
+
* (at your option) any later version.
|
13
|
+
*
|
14
|
+
* Tox is distributed in the hope that it will be useful,
|
15
|
+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
16
|
+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
17
|
+
* GNU General Public License for more details.
|
18
|
+
*
|
19
|
+
* You should have received a copy of the GNU General Public License
|
20
|
+
* along with Tox. If not, see <http://www.gnu.org/licenses/>.
|
21
|
+
*
|
22
|
+
*/
|
23
|
+
|
24
|
+
/*----------------------------------------------------------------------------------*/
|
25
|
+
|
26
|
+
#ifdef HAVE_CONFIG_H
|
27
|
+
#include "config.h"
|
28
|
+
#endif
|
29
|
+
|
30
|
+
#ifdef DEBUG
|
31
|
+
#include <assert.h>
|
32
|
+
#endif
|
33
|
+
|
34
|
+
#include "logger.h"
|
35
|
+
|
36
|
+
#include "DHT.h"
|
37
|
+
|
38
|
+
#ifdef ENABLE_ASSOC_DHT
|
39
|
+
#include "assoc.h"
|
40
|
+
#endif
|
41
|
+
|
42
|
+
#include "ping.h"
|
43
|
+
|
44
|
+
#include "network.h"
|
45
|
+
#include "LAN_discovery.h"
|
46
|
+
#include "misc_tools.h"
|
47
|
+
#include "util.h"
|
48
|
+
|
49
|
+
/* The timeout after which a node is discarded completely. */
|
50
|
+
#define KILL_NODE_TIMEOUT 300
|
51
|
+
|
52
|
+
/* Ping interval in seconds for each random sending of a get nodes request. */
|
53
|
+
#define GET_NODE_INTERVAL 20
|
54
|
+
|
55
|
+
#define MAX_PUNCHING_PORTS 48
|
56
|
+
|
57
|
+
/* Interval in seconds between punching attempts*/
|
58
|
+
#define PUNCH_INTERVAL 3
|
59
|
+
|
60
|
+
#define MAX_NORMAL_PUNCHING_TRIES 5
|
61
|
+
|
62
|
+
#define NAT_PING_REQUEST 0
|
63
|
+
#define NAT_PING_RESPONSE 1
|
64
|
+
|
65
|
+
/* Number of get node requests to send to quickly find close nodes. */
|
66
|
+
#define MAX_BOOTSTRAP_TIMES 10
|
67
|
+
|
68
|
+
/* Compares client_id1 and client_id2 with client_id.
|
69
|
+
*
|
70
|
+
* return 0 if both are same distance.
|
71
|
+
* return 1 if client_id1 is closer.
|
72
|
+
* return 2 if client_id2 is closer.
|
73
|
+
*/
|
74
|
+
int id_closest(const uint8_t *id, const uint8_t *id1, const uint8_t *id2)
|
75
|
+
{
|
76
|
+
size_t i;
|
77
|
+
uint8_t distance1, distance2;
|
78
|
+
|
79
|
+
for (i = 0; i < CLIENT_ID_SIZE; ++i) {
|
80
|
+
|
81
|
+
distance1 = abs(((int8_t *)id)[i] ^ ((int8_t *)id1)[i]);
|
82
|
+
distance2 = abs(((int8_t *)id)[i] ^ ((int8_t *)id2)[i]);
|
83
|
+
|
84
|
+
if (distance1 < distance2)
|
85
|
+
return 1;
|
86
|
+
|
87
|
+
if (distance1 > distance2)
|
88
|
+
return 2;
|
89
|
+
}
|
90
|
+
|
91
|
+
return 0;
|
92
|
+
}
|
93
|
+
|
94
|
+
/* Shared key generations are costly, it is therefor smart to store commonly used
|
95
|
+
* ones so that they can re used later without being computed again.
|
96
|
+
*
|
97
|
+
* If shared key is already in shared_keys, copy it to shared_key.
|
98
|
+
* else generate it into shared_key and copy it to shared_keys
|
99
|
+
*/
|
100
|
+
void get_shared_key(Shared_Keys *shared_keys, uint8_t *shared_key, const uint8_t *secret_key, const uint8_t *client_id)
|
101
|
+
{
|
102
|
+
uint32_t i, num = ~0, curr = 0;
|
103
|
+
|
104
|
+
for (i = 0; i < MAX_KEYS_PER_SLOT; ++i) {
|
105
|
+
int index = client_id[30] * MAX_KEYS_PER_SLOT + i;
|
106
|
+
|
107
|
+
if (shared_keys->keys[index].stored) {
|
108
|
+
if (memcmp(client_id, shared_keys->keys[index].client_id, CLIENT_ID_SIZE) == 0) {
|
109
|
+
memcpy(shared_key, shared_keys->keys[index].shared_key, crypto_box_BEFORENMBYTES);
|
110
|
+
++shared_keys->keys[index].times_requested;
|
111
|
+
shared_keys->keys[index].time_last_requested = unix_time();
|
112
|
+
return;
|
113
|
+
}
|
114
|
+
|
115
|
+
if (num != 0) {
|
116
|
+
if (is_timeout(shared_keys->keys[index].time_last_requested, KEYS_TIMEOUT)) {
|
117
|
+
num = 0;
|
118
|
+
curr = index;
|
119
|
+
} else if (num > shared_keys->keys[index].times_requested) {
|
120
|
+
num = shared_keys->keys[index].times_requested;
|
121
|
+
curr = index;
|
122
|
+
}
|
123
|
+
}
|
124
|
+
} else {
|
125
|
+
if (num != 0) {
|
126
|
+
num = 0;
|
127
|
+
curr = index;
|
128
|
+
}
|
129
|
+
}
|
130
|
+
}
|
131
|
+
|
132
|
+
encrypt_precompute(client_id, secret_key, shared_key);
|
133
|
+
|
134
|
+
if (num != (uint32_t)~0) {
|
135
|
+
shared_keys->keys[curr].stored = 1;
|
136
|
+
shared_keys->keys[curr].times_requested = 1;
|
137
|
+
memcpy(shared_keys->keys[curr].client_id, client_id, CLIENT_ID_SIZE);
|
138
|
+
memcpy(shared_keys->keys[curr].shared_key, shared_key, crypto_box_BEFORENMBYTES);
|
139
|
+
shared_keys->keys[curr].time_last_requested = unix_time();
|
140
|
+
}
|
141
|
+
}
|
142
|
+
|
143
|
+
/* Copy shared_key to encrypt/decrypt DHT packet from client_id into shared_key
|
144
|
+
* for packets that we receive.
|
145
|
+
*/
|
146
|
+
void DHT_get_shared_key_recv(DHT *dht, uint8_t *shared_key, const uint8_t *client_id)
|
147
|
+
{
|
148
|
+
return get_shared_key(&dht->shared_keys_recv, shared_key, dht->self_secret_key, client_id);
|
149
|
+
}
|
150
|
+
|
151
|
+
/* Copy shared_key to encrypt/decrypt DHT packet from client_id into shared_key
|
152
|
+
* for packets that we send.
|
153
|
+
*/
|
154
|
+
void DHT_get_shared_key_sent(DHT *dht, uint8_t *shared_key, const uint8_t *client_id)
|
155
|
+
{
|
156
|
+
return get_shared_key(&dht->shared_keys_sent, shared_key, dht->self_secret_key, client_id);
|
157
|
+
}
|
158
|
+
|
159
|
+
/* Translate a system address family (AF_INET / AF_INET6) in place into the
 * portable on-wire TOX_AF_* value. Any other family is left untouched. */
void to_net_family(IP *ip)
{
    switch (ip->family) {
        case AF_INET:
            ip->family = TOX_AF_INET;
            break;

        case AF_INET6:
            ip->family = TOX_AF_INET6;
            break;
    }
}
|
166
|
+
|
167
|
+
/* Translate a portable on-wire TOX_AF_* family in place back into the system
 * address family (AF_INET / AF_INET6). Any other value is left untouched. */
void to_host_family(IP *ip)
{
    switch (ip->family) {
        case TOX_AF_INET:
            ip->family = AF_INET;
            break;

        case TOX_AF_INET6:
            ip->family = AF_INET6;
            break;
    }
}
|
174
|
+
|
175
|
+
/* Pack number of nodes into data of maxlength length.
|
176
|
+
*
|
177
|
+
* return length of packed nodes on success.
|
178
|
+
* return -1 on failure.
|
179
|
+
*/
|
180
|
+
int pack_nodes(uint8_t *data, uint16_t length, const Node_format *nodes, uint16_t number)
|
181
|
+
{
|
182
|
+
uint32_t i, packed_length = 0;
|
183
|
+
|
184
|
+
for (i = 0; i < number; ++i) {
|
185
|
+
int ipv6 = -1;
|
186
|
+
uint8_t net_family;
|
187
|
+
|
188
|
+
if (nodes[i].ip_port.ip.family == AF_INET) {
|
189
|
+
ipv6 = 0;
|
190
|
+
net_family = TOX_AF_INET;
|
191
|
+
} else if (nodes[i].ip_port.ip.family == TCP_INET) {
|
192
|
+
ipv6 = 0;
|
193
|
+
net_family = TOX_TCP_INET;
|
194
|
+
} else if (nodes[i].ip_port.ip.family == AF_INET6) {
|
195
|
+
ipv6 = 1;
|
196
|
+
net_family = TOX_AF_INET6;
|
197
|
+
} else if (nodes[i].ip_port.ip.family == TCP_INET6) {
|
198
|
+
ipv6 = 1;
|
199
|
+
net_family = TOX_TCP_INET6;
|
200
|
+
} else {
|
201
|
+
return -1;
|
202
|
+
}
|
203
|
+
|
204
|
+
if (ipv6 == 0) {
|
205
|
+
uint32_t size = 1 + sizeof(IP4) + sizeof(uint16_t) + CLIENT_ID_SIZE;
|
206
|
+
|
207
|
+
if (packed_length + size > length)
|
208
|
+
return -1;
|
209
|
+
|
210
|
+
data[packed_length] = net_family;
|
211
|
+
memcpy(data + packed_length + 1, &nodes[i].ip_port.ip.ip4, sizeof(IP4));
|
212
|
+
memcpy(data + packed_length + 1 + sizeof(IP4), &nodes[i].ip_port.port, sizeof(uint16_t));
|
213
|
+
memcpy(data + packed_length + 1 + sizeof(IP4) + sizeof(uint16_t), nodes[i].client_id, CLIENT_ID_SIZE);
|
214
|
+
packed_length += size;
|
215
|
+
} else if (ipv6 == 1) {
|
216
|
+
uint32_t size = 1 + sizeof(IP6) + sizeof(uint16_t) + CLIENT_ID_SIZE;
|
217
|
+
|
218
|
+
if (packed_length + size > length)
|
219
|
+
return -1;
|
220
|
+
|
221
|
+
data[packed_length] = net_family;
|
222
|
+
memcpy(data + packed_length + 1, &nodes[i].ip_port.ip.ip6, sizeof(IP6));
|
223
|
+
memcpy(data + packed_length + 1 + sizeof(IP6), &nodes[i].ip_port.port, sizeof(uint16_t));
|
224
|
+
memcpy(data + packed_length + 1 + sizeof(IP6) + sizeof(uint16_t), nodes[i].client_id, CLIENT_ID_SIZE);
|
225
|
+
packed_length += size;
|
226
|
+
} else {
|
227
|
+
return -1;
|
228
|
+
}
|
229
|
+
}
|
230
|
+
|
231
|
+
return packed_length;
|
232
|
+
}
|
233
|
+
|
234
|
+
/* Unpack data of length into nodes of size max_num_nodes.
|
235
|
+
* Put the length of the data processed in processed_data_len.
|
236
|
+
* tcp_enabled sets if TCP nodes are expected (true) or not (false).
|
237
|
+
*
|
238
|
+
* return number of unpacked nodes on success.
|
239
|
+
* return -1 on failure.
|
240
|
+
*/
|
241
|
+
int unpack_nodes(Node_format *nodes, uint16_t max_num_nodes, uint16_t *processed_data_len, const uint8_t *data,
|
242
|
+
uint16_t length, uint8_t tcp_enabled)
|
243
|
+
{
|
244
|
+
uint32_t num = 0, len_processed = 0;
|
245
|
+
|
246
|
+
while (num < max_num_nodes && len_processed < length) {
|
247
|
+
int ipv6 = -1;
|
248
|
+
uint8_t host_family;
|
249
|
+
|
250
|
+
if (data[len_processed] == TOX_AF_INET) {
|
251
|
+
ipv6 = 0;
|
252
|
+
host_family = AF_INET;
|
253
|
+
} else if (data[len_processed] == TOX_TCP_INET) {
|
254
|
+
if (!tcp_enabled)
|
255
|
+
return -1;
|
256
|
+
|
257
|
+
ipv6 = 0;
|
258
|
+
host_family = TCP_INET;
|
259
|
+
} else if (data[len_processed] == TOX_AF_INET6) {
|
260
|
+
ipv6 = 1;
|
261
|
+
host_family = AF_INET6;
|
262
|
+
} else if (data[len_processed] == TOX_TCP_INET6) {
|
263
|
+
if (!tcp_enabled)
|
264
|
+
return -1;
|
265
|
+
|
266
|
+
ipv6 = 1;
|
267
|
+
host_family = TCP_INET6;
|
268
|
+
} else {
|
269
|
+
return -1;
|
270
|
+
}
|
271
|
+
|
272
|
+
if (ipv6 == 0) {
|
273
|
+
uint32_t size = 1 + sizeof(IP4) + sizeof(uint16_t) + CLIENT_ID_SIZE;
|
274
|
+
|
275
|
+
if (len_processed + size > length)
|
276
|
+
return -1;
|
277
|
+
|
278
|
+
nodes[num].ip_port.ip.family = host_family;
|
279
|
+
memcpy(&nodes[num].ip_port.ip.ip4, data + len_processed + 1, sizeof(IP4));
|
280
|
+
memcpy(&nodes[num].ip_port.port, data + len_processed + 1 + sizeof(IP4), sizeof(uint16_t));
|
281
|
+
memcpy(nodes[num].client_id, data + len_processed + 1 + sizeof(IP4) + sizeof(uint16_t), CLIENT_ID_SIZE);
|
282
|
+
len_processed += size;
|
283
|
+
++num;
|
284
|
+
} else if (ipv6 == 1) {
|
285
|
+
uint32_t size = 1 + sizeof(IP6) + sizeof(uint16_t) + CLIENT_ID_SIZE;
|
286
|
+
|
287
|
+
if (len_processed + size > length)
|
288
|
+
return -1;
|
289
|
+
|
290
|
+
nodes[num].ip_port.ip.family = host_family;
|
291
|
+
memcpy(&nodes[num].ip_port.ip.ip6, data + len_processed + 1, sizeof(IP6));
|
292
|
+
memcpy(&nodes[num].ip_port.port, data + len_processed + 1 + sizeof(IP6), sizeof(uint16_t));
|
293
|
+
memcpy(nodes[num].client_id, data + len_processed + 1 + sizeof(IP6) + sizeof(uint16_t), CLIENT_ID_SIZE);
|
294
|
+
len_processed += size;
|
295
|
+
++num;
|
296
|
+
} else {
|
297
|
+
return -1;
|
298
|
+
}
|
299
|
+
}
|
300
|
+
|
301
|
+
if (processed_data_len)
|
302
|
+
*processed_data_len = len_processed;
|
303
|
+
|
304
|
+
return num;
|
305
|
+
}
|
306
|
+
|
307
|
+
|
308
|
+
|
309
|
+
/* Check if client with client_id is already in list of length length.
|
310
|
+
* If it is then set its corresponding timestamp to current time.
|
311
|
+
* If the id is already in the list with a different ip_port, update it.
|
312
|
+
* TODO: Maybe optimize this.
|
313
|
+
*
|
314
|
+
* return True(1) or False(0)
|
315
|
+
*/
|
316
|
+
static int client_or_ip_port_in_list(Client_data *list, uint32_t length, const uint8_t *client_id, IP_Port ip_port)
|
317
|
+
{
|
318
|
+
uint32_t i;
|
319
|
+
uint64_t temp_time = unix_time();
|
320
|
+
|
321
|
+
/* if client_id is in list, find it and maybe overwrite ip_port */
|
322
|
+
for (i = 0; i < length; ++i)
|
323
|
+
if (id_equal(list[i].client_id, client_id)) {
|
324
|
+
/* Refresh the client timestamp. */
|
325
|
+
if (ip_port.ip.family == AF_INET) {
|
326
|
+
|
327
|
+
LOGGER_SCOPE( if (!ipport_equal(&list[i].assoc4.ip_port, &ip_port)) {
|
328
|
+
LOGGER_INFO("coipil[%u]: switching ipv4 from %s:%u to %s:%u", i,
|
329
|
+
ip_ntoa(&list[i].assoc4.ip_port.ip), ntohs(list[i].assoc4.ip_port.port),
|
330
|
+
ip_ntoa(&ip_port.ip), ntohs(ip_port.port));
|
331
|
+
}
|
332
|
+
);
|
333
|
+
|
334
|
+
if (LAN_ip(list[i].assoc4.ip_port.ip) != 0 && LAN_ip(ip_port.ip) == 0)
|
335
|
+
return 1;
|
336
|
+
|
337
|
+
list[i].assoc4.ip_port = ip_port;
|
338
|
+
list[i].assoc4.timestamp = temp_time;
|
339
|
+
} else if (ip_port.ip.family == AF_INET6) {
|
340
|
+
|
341
|
+
LOGGER_SCOPE( if (!ipport_equal(&list[i].assoc4.ip_port, &ip_port)) {
|
342
|
+
LOGGER_INFO("coipil[%u]: switching ipv6 from %s:%u to %s:%u", i,
|
343
|
+
ip_ntoa(&list[i].assoc6.ip_port.ip), ntohs(list[i].assoc6.ip_port.port),
|
344
|
+
ip_ntoa(&ip_port.ip), ntohs(ip_port.port));
|
345
|
+
}
|
346
|
+
);
|
347
|
+
|
348
|
+
if (LAN_ip(list[i].assoc6.ip_port.ip) != 0 && LAN_ip(ip_port.ip) == 0)
|
349
|
+
return 1;
|
350
|
+
|
351
|
+
list[i].assoc6.ip_port = ip_port;
|
352
|
+
list[i].assoc6.timestamp = temp_time;
|
353
|
+
}
|
354
|
+
|
355
|
+
return 1;
|
356
|
+
}
|
357
|
+
|
358
|
+
/* client_id not in list yet: see if we can find an identical ip_port, in
|
359
|
+
* that case we kill the old client_id by overwriting it with the new one
|
360
|
+
* TODO: maybe we SHOULDN'T do that if that client_id is in a friend_list
|
361
|
+
* and the one who is the actual friend's client_id/address set? */
|
362
|
+
for (i = 0; i < length; ++i) {
|
363
|
+
/* MAYBE: check the other address, if valid, don't nuke? */
|
364
|
+
if ((ip_port.ip.family == AF_INET) && ipport_equal(&list[i].assoc4.ip_port, &ip_port)) {
|
365
|
+
/* Initialize client timestamp. */
|
366
|
+
list[i].assoc4.timestamp = temp_time;
|
367
|
+
memcpy(list[i].client_id, client_id, CLIENT_ID_SIZE);
|
368
|
+
|
369
|
+
LOGGER_DEBUG("coipil[%u]: switching client_id (ipv4)", i);
|
370
|
+
|
371
|
+
/* kill the other address, if it was set */
|
372
|
+
memset(&list[i].assoc6, 0, sizeof(list[i].assoc6));
|
373
|
+
return 1;
|
374
|
+
} else if ((ip_port.ip.family == AF_INET6) && ipport_equal(&list[i].assoc6.ip_port, &ip_port)) {
|
375
|
+
/* Initialize client timestamp. */
|
376
|
+
list[i].assoc6.timestamp = temp_time;
|
377
|
+
memcpy(list[i].client_id, client_id, CLIENT_ID_SIZE);
|
378
|
+
|
379
|
+
LOGGER_DEBUG("coipil[%u]: switching client_id (ipv6)", i);
|
380
|
+
|
381
|
+
/* kill the other address, if it was set */
|
382
|
+
memset(&list[i].assoc4, 0, sizeof(list[i].assoc4));
|
383
|
+
return 1;
|
384
|
+
}
|
385
|
+
}
|
386
|
+
|
387
|
+
return 0;
|
388
|
+
}
|
389
|
+
|
390
|
+
/* Check if client with client_id is already in node format list of length length.
|
391
|
+
*
|
392
|
+
* return 1 if true.
|
393
|
+
* return 0 if false.
|
394
|
+
*/
|
395
|
+
static int client_in_nodelist(const Node_format *list, uint32_t length, const uint8_t *client_id)
|
396
|
+
{
|
397
|
+
uint32_t i;
|
398
|
+
|
399
|
+
for (i = 0; i < length; ++i) {
|
400
|
+
if (id_equal(list[i].client_id, client_id))
|
401
|
+
return 1;
|
402
|
+
}
|
403
|
+
|
404
|
+
return 0;
|
405
|
+
}
|
406
|
+
|
407
|
+
/* return friend number from the client_id.
|
408
|
+
* return -1 if a failure occurs.
|
409
|
+
*/
|
410
|
+
static int friend_number(const DHT *dht, const uint8_t *client_id)
|
411
|
+
{
|
412
|
+
uint32_t i;
|
413
|
+
|
414
|
+
for (i = 0; i < dht->num_friends; ++i) {
|
415
|
+
if (id_equal(dht->friends_list[i].client_id, client_id))
|
416
|
+
return i;
|
417
|
+
}
|
418
|
+
|
419
|
+
return -1;
|
420
|
+
}
|
421
|
+
|
422
|
+
/*TODO: change this to 7 when done*/
#define HARDENING_ALL_OK 2
/* Pack the node's hardening test results into a small bitmask.
 *
 * return 0 if not.
 * return 1 if route request are ok
 * return 2 if it responds to send node packets correctly
 * return 4 if it can test other nodes correctly
 * return HARDENING_ALL_OK if all ok.
 *
 * bit0 = route requests ok, bit1 = send-node replies ok, bit2 = testing ok.
 * NOTE(review): the additive packing assumes each field is strictly 0 or 1 —
 * confirm against the Hardening struct definition.
 */
static uint8_t hardening_correct(const Hardening *h)
{
    return h->routes_requests_ok + (h->send_nodes_ok << 1) + (h->testing_requests << 2);
}
|
434
|
+
/*
 * helper for get_close_nodes(). argument list is a monster :D
 *
 * Scans client_list and accumulates into nodes_list (capacity MAX_SENT_NODES)
 * the candidates closest to client_id, updating *num_nodes_ptr in place.
 * sa_family selects which association (assoc4/assoc6) to consider; 0 means
 * "whichever was seen most recently". is_LAN gates whether LAN addresses may
 * be returned; want_good additionally requires passing hardening checks.
 */
static void get_close_nodes_inner(const uint8_t *client_id, Node_format *nodes_list,
                                  sa_family_t sa_family, const Client_data *client_list, uint32_t client_list_length,
                                  uint32_t *num_nodes_ptr, uint8_t is_LAN, uint8_t want_good)
{
    /* Only IPv4, IPv6 or "don't care" (0) are meaningful selectors. */
    if ((sa_family != AF_INET) && (sa_family != AF_INET6) && (sa_family != 0))
        return;

    uint32_t num_nodes = *num_nodes_ptr;
    int j, closest;
    uint32_t i;

    for (i = 0; i < client_list_length; i++) {
        const Client_data *client = &client_list[i];

        /* node already in list? */
        if (client_in_nodelist(nodes_list, MAX_SENT_NODES, client->client_id))
            continue;

        const IPPTsPng *ipptp = NULL;

        if (sa_family == AF_INET) {
            ipptp = &client->assoc4;
        } else if (sa_family == AF_INET6) {
            ipptp = &client->assoc6;
        } else {
            /* sa_family == 0: prefer the association refreshed most recently. */
            if (client->assoc4.timestamp >= client->assoc6.timestamp) {
                ipptp = &client->assoc4;
            } else {
                ipptp = &client->assoc6;
            }
        }

        /* node not in a good condition? */
        if (is_timeout(ipptp->timestamp, BAD_NODE_TIMEOUT))
            continue;

        /* don't send LAN ips to non LAN peers */
        if (LAN_ip(ipptp->ip_port.ip) == 0 && !is_LAN)
            continue;

        /* When only good nodes are wanted, skip non-LAN nodes that failed
         * hardening — unless the node IS the one being searched for. */
        if (LAN_ip(ipptp->ip_port.ip) != 0 && want_good && hardening_correct(&ipptp->hardening) != HARDENING_ALL_OK
                && !id_equal(client_id, client->client_id))
            continue;

        if (num_nodes < MAX_SENT_NODES) {
            /* Room left: append. */
            memcpy(nodes_list[num_nodes].client_id,
                   client->client_id,
                   CLIENT_ID_SIZE );

            nodes_list[num_nodes].ip_port = ipptp->ip_port;
            num_nodes++;
        } else {
            /* see if node_list contains a client_id that's "further away"
             * compared to the one we're looking at at the moment, if there
             * is, replace it
             */
            for (j = 0; j < MAX_SENT_NODES; ++j) {
                closest = id_closest( client_id,
                                      nodes_list[j].client_id,
                                      client->client_id );

                /* second client_id is closer than current: change to it */
                if (closest == 2) {
                    memcpy( nodes_list[j].client_id,
                            client->client_id,
                            CLIENT_ID_SIZE);

                    nodes_list[j].ip_port = ipptp->ip_port;
                    break;
                }
            }
        }
    }

    *num_nodes_ptr = num_nodes;
}
|
513
|
+
|
514
|
+
/* Find MAX_SENT_NODES nodes closest to the client_id for the send nodes request:
 * put them in the nodes_list and return how many were found.
 *
 * TODO: For the love of based <your favorite deity, in doubt use "love"> make
 * this function cleaner and much more efficient.
 *
 * want_good : do we want only good nodes as checked with the hardening returned or not?
 */
static int get_somewhat_close_nodes(const DHT *dht, const uint8_t *client_id, Node_format *nodes_list,
                                    sa_family_t sa_family, uint8_t is_LAN, uint8_t want_good)
{
    uint32_t num_nodes = 0, i;
    /* First harvest from our own close list (hardening IS tracked here,
     * so want_good is honored). */
    get_close_nodes_inner(client_id, nodes_list, sa_family,
                          dht->close_clientlist, LCLIENT_LIST, &num_nodes, is_LAN, want_good);

    /*TODO uncomment this when hardening is added to close friend clients
        for (i = 0; i < dht->num_friends; ++i)
            get_close_nodes_inner(dht, client_id, nodes_list, sa_family,
                                  dht->friends_list[i].client_list, MAX_FRIEND_CLIENTS,
                                  &num_nodes, is_LAN, want_good);
    */
    /* Friend client lists carry no hardening data yet, hence want_good=0. */
    for (i = 0; i < dht->num_friends; ++i)
        get_close_nodes_inner(client_id, nodes_list, sa_family,
                              dht->friends_list[i].client_list, MAX_FRIEND_CLIENTS,
                              &num_nodes, is_LAN, 0);

    return num_nodes;
}
|
542
|
+
|
543
|
+
/* Fill nodes_list (capacity MAX_SENT_NODES) with nodes close to client_id.
 * With ENABLE_ASSOC_DHT and an initialized assoc store, the assoc module is
 * queried first; otherwise (or if it yields nothing) the brute-force
 * get_somewhat_close_nodes() path is used.
 *
 * Returns the number of nodes stored in nodes_list.
 */
int get_close_nodes(const DHT *dht, const uint8_t *client_id, Node_format *nodes_list, sa_family_t sa_family,
                    uint8_t is_LAN, uint8_t want_good)
{
    memset(nodes_list, 0, MAX_SENT_NODES * sizeof(Node_format));
#ifdef ENABLE_ASSOC_DHT

    if (!dht->assoc)
#endif
        /* Without the assoc store this is an unconditional return. */
        return get_somewhat_close_nodes(dht, client_id, nodes_list, sa_family, is_LAN, want_good);

#ifdef ENABLE_ASSOC_DHT
    //TODO: assoc, sa_family 0 (don't care if ipv4 or ipv6) support.
    Client_data *result[MAX_SENT_NODES];

    Assoc_close_entries request;
    memset(&request, 0, sizeof(request));
    request.count = MAX_SENT_NODES;
    request.count_good = MAX_SENT_NODES - 2; /* allow 2 'indirect' nodes */
    request.result = result;
    request.wanted_id = client_id;
    request.flags = (is_LAN ? LANOk : 0) + (sa_family == AF_INET ? ProtoIPv4 : ProtoIPv6);

    uint8_t num_found = Assoc_get_close_entries(dht->assoc, &request);

    if (!num_found) {
        LOGGER_DEBUG("get_close_nodes(): Assoc_get_close_entries() returned zero nodes");
        /* Fall back to the brute-force search. */
        return get_somewhat_close_nodes(dht, client_id, nodes_list, sa_family, is_LAN, want_good);
    }

    LOGGER_DEBUG("get_close_nodes(): Assoc_get_close_entries() returned %i 'direct' and %i 'indirect' nodes",
                 request.count_good, num_found - request.count_good);

    uint8_t i, num_returned = 0;

    for (i = 0; i < num_found; i++) {
        Client_data *client = result[i];

        if (client) {
            id_copy(nodes_list[num_returned].client_id, client->client_id);

            /* Only count the entry if it has a usable address for the
             * requested family; otherwise the id slot is overwritten by
             * the next candidate. */
            if (sa_family == AF_INET)
                if (ipport_isset(&client->assoc4.ip_port)) {
                    nodes_list[num_returned].ip_port = client->assoc4.ip_port;
                    num_returned++;
                    continue;
                }

            if (sa_family == AF_INET6)
                if (ipport_isset(&client->assoc6.ip_port)) {
                    nodes_list[num_returned].ip_port = client->assoc6.ip_port;
                    num_returned++;
                    continue;
                }
        }
    }

    return num_returned;
#endif
}
|
602
|
+
|
603
|
+
/* Replace a first bad (or empty) node with this one
 * or replace a possibly bad node (tests failed or not done yet)
 * that is further than any other in the list
 * from the comp_client_id
 * or replace a good node that is further
 * than any other in the list from the comp_client_id
 * and further than client_id.
 *
 * Do not replace any node if the list has no bad or possibly bad nodes
 * and all nodes in the list are closer to comp_client_id
 * than client_id.
 *
 * returns True(1) when the item was stored, False(0) otherwise */
static int replace_all( Client_data *list,
                        uint32_t length,
                        const uint8_t *client_id,
                        IP_Port ip_port,
                        const uint8_t *comp_client_id )
{
    /* NOTE(review): non-IP families return 1 ("stored") without storing —
     * presumably so callers don't retry; confirm this is intentional. */
    if ((ip_port.ip.family != AF_INET) && (ip_port.ip.family != AF_INET6))
        return 1;

    /* ~0 acts as a "not found" sentinel for each candidate index. */
    uint32_t i, replace = ~0, bad = ~0, possibly_bad = ~0, good = ~0;

    /* Classify every slot: bad (both associations timed out), possibly bad
     * (hardening incomplete on both), or good.  Among possibly-bad and good
     * slots, track the one FURTHEST from comp_client_id (id_closest()==1
     * means the currently-remembered slot is closer, so slot i is further). */
    for (i = 0; i < length; ++i) {

        Client_data *client = &list[i];

        if (is_timeout(client->assoc4.timestamp, BAD_NODE_TIMEOUT) &&
                is_timeout(client->assoc6.timestamp, BAD_NODE_TIMEOUT)) {
            // "bad" node
            bad = i;
            break;
        } else if (hardening_correct(&client->assoc4.hardening) != HARDENING_ALL_OK &&
                   hardening_correct(&client->assoc6.hardening) != HARDENING_ALL_OK) {
            // "possibly bad" node
            if (possibly_bad == (uint32_t)~0 ||
                    id_closest(comp_client_id, list[possibly_bad].client_id, list[i].client_id) == 1)
                possibly_bad = i;
        } else {
            // "good" node
            if (good == (uint32_t)~0 ||
                    id_closest(comp_client_id, list[good].client_id, list[i].client_id) == 1)
                good = i;
        }
    }

    /* Preference order: any bad slot, then the furthest possibly-bad slot,
     * then the furthest good slot — but a good slot is sacrificed only when
     * the new client_id is closer to comp_client_id than it is. */
    if (bad != (uint32_t)~0)
        replace = bad;
    else if (possibly_bad != (uint32_t)~0)
        replace = possibly_bad;
    else if (good != (uint32_t)~0 && id_closest(comp_client_id, list[good].client_id, client_id) == 2)
        replace = good;

    if (replace != (uint32_t)~0) {
        Client_data *client = &list[replace];
        IPPTsPng *ipptp_write = NULL;
        IPPTsPng *ipptp_clear = NULL;

        if (ip_port.ip.family == AF_INET) {
            ipptp_write = &client->assoc4;
            ipptp_clear = &client->assoc6;
        } else {
            ipptp_write = &client->assoc6;
            ipptp_clear = &client->assoc4;
        }

        id_copy(client->client_id, client_id);
        ipptp_write->ip_port = ip_port;
        ipptp_write->timestamp = unix_time();

        /* Invalidate any stale returned-address info for the new occupant. */
        ip_reset(&ipptp_write->ret_ip_port.ip);
        ipptp_write->ret_ip_port.port = 0;
        ipptp_write->ret_timestamp = 0;

        /* zero out other address */
        memset(ipptp_clear, 0, sizeof(*ipptp_clear));

        return 1;
    }

    return 0;
}
|
686
|
+
|
687
|
+
/* Attempt to add client with ip_port and client_id to the friends client list
 * and close_clientlist.
 *
 * returns 1+ if the item is used in any list, 0 else
 */
int addto_lists(DHT *dht, IP_Port ip_port, const uint8_t *client_id)
{
    uint32_t i, used = 0;

    /* convert IPv4-in-IPv6 to IPv4 */
    if ((ip_port.ip.family == AF_INET6) && IPV6_IPV4_IN_V6(ip_port.ip.ip6)) {
        ip_port.ip.family = AF_INET;
        ip_port.ip.ip4.uint32 = ip_port.ip.ip6.uint32[3];
    }

    /* NOTE: Current behavior if there are two clients with the same id is
     * to replace the first ip by the second.
     */
    /* Try the close list: either the entry already exists (and got its
     * timestamp/address refreshed) or we try to claim a slot. */
    if (!client_or_ip_port_in_list(dht->close_clientlist, LCLIENT_LIST, client_id, ip_port)) {
        if (replace_all(dht->close_clientlist, LCLIENT_LIST, client_id, ip_port, dht->self_public_key))
            used++;
    } else
        used++;

    /* Same dance for every friend's client list. */
    for (i = 0; i < dht->num_friends; ++i) {
        if (!client_or_ip_port_in_list(dht->friends_list[i].client_list,
                                       MAX_FRIEND_CLIENTS, client_id, ip_port)) {
            if (replace_all(dht->friends_list[i].client_list, MAX_FRIEND_CLIENTS,
                            client_id, ip_port, dht->friends_list[i].client_id))
                used++;
        } else
            used++;
    }

#ifdef ENABLE_ASSOC_DHT

    if (dht->assoc) {
        IPPTs ippts;

        ippts.ip_port = ip_port;
        ippts.timestamp = unix_time();

        /* Feed the assoc store; "used" signals how valuable the entry is. */
        Assoc_add_entry(dht->assoc, client_id, &ippts, NULL, used ? 1 : 0);
    }

#endif
    return used;
}
|
735
|
+
|
736
|
+
/* If client_id is a friend or us, update ret_ip_port
 * nodeclient_id is the id of the node that sent us this info.
 *
 * Records "node X told us that client_id can be reached at ip_port" so the
 * hole-punching / routing code can later use the hearsay address.
 * Always returns 0.
 */
static int returnedip_ports(DHT *dht, IP_Port ip_port, const uint8_t *client_id, const uint8_t *nodeclient_id)
{
    uint32_t i, j;
    uint64_t temp_time = unix_time();

    uint32_t used = 0;

    /* convert IPv4-in-IPv6 to IPv4 */
    if ((ip_port.ip.family == AF_INET6) && IPV6_IPV4_IN_V6(ip_port.ip.ip6)) {
        ip_port.ip.family = AF_INET;
        ip_port.ip.ip4.uint32 = ip_port.ip.ip6.uint32[3];
    }

    if (id_equal(client_id, dht->self_public_key)) {
        /* Info about ourselves: remember it on the reporting node's entry
         * in the close list. */
        for (i = 0; i < LCLIENT_LIST; ++i) {
            if (id_equal(nodeclient_id, dht->close_clientlist[i].client_id)) {
                if (ip_port.ip.family == AF_INET) {
                    dht->close_clientlist[i].assoc4.ret_ip_port = ip_port;
                    dht->close_clientlist[i].assoc4.ret_timestamp = temp_time;
                } else if (ip_port.ip.family == AF_INET6) {
                    dht->close_clientlist[i].assoc6.ret_ip_port = ip_port;
                    dht->close_clientlist[i].assoc6.ret_timestamp = temp_time;
                }

                ++used;
                break;
            }
        }
    } else {
        /* Info about a friend: remember it on the reporting node's entry in
         * that friend's client list. */
        for (i = 0; i < dht->num_friends; ++i) {
            if (id_equal(client_id, dht->friends_list[i].client_id)) {
                for (j = 0; j < MAX_FRIEND_CLIENTS; ++j) {
                    if (id_equal(nodeclient_id, dht->friends_list[i].client_list[j].client_id)) {
                        if (ip_port.ip.family == AF_INET) {
                            dht->friends_list[i].client_list[j].assoc4.ret_ip_port = ip_port;
                            dht->friends_list[i].client_list[j].assoc4.ret_timestamp = temp_time;
                        } else if (ip_port.ip.family == AF_INET6) {
                            dht->friends_list[i].client_list[j].assoc6.ret_ip_port = ip_port;
                            dht->friends_list[i].client_list[j].assoc6.ret_timestamp = temp_time;
                        }

                        ++used;
                        /* Double break out of both loops. */
                        goto end;
                    }
                }
            }
        }
    }

end:
#ifdef ENABLE_ASSOC_DHT

    if (dht->assoc) {
        IPPTs ippts;
        ippts.ip_port = ip_port;
        ippts.timestamp = temp_time;
        /* this is only a hear-say entry, so ret-ipp is NULL, but used is required
         * to decide how valuable it is ("used" may throw an "unused" entry out) */
        Assoc_add_entry(dht->assoc, client_id, &ippts, NULL, used ? 1 : 0);
    }

#endif
    return 0;
}
|
803
|
+
|
804
|
+
/* Upper bound for the encrypted sendback blob carried in getnodes packets. */
#define NODES_ENCRYPTED_MESSAGE_LENGTH (crypto_box_NONCEBYTES + sizeof(uint64_t) + sizeof(Node_format) + sizeof(Node_format) + crypto_box_MACBYTES)

/* Send a getnodes request.
   sendback_node is the node that it will send back the response to (set to NULL to disable this) */
static int getnodes(DHT *dht, IP_Port ip_port, const uint8_t *public_key, const uint8_t *client_id,
                    const Node_format *sendback_node)
{
    /* Check if packet is going to be sent to ourself. */
    if (id_equal(public_key, dht->self_public_key))
        return -1;

    /* plain_message holds [receiver][optional sendback_node]; the ping array
     * stores it so the reply can be validated in sent_getnode_to_node(). */
    uint8_t plain_message[sizeof(Node_format) * 2] = {0};

    Node_format receiver;
    memcpy(receiver.client_id, public_key, CLIENT_ID_SIZE);
    receiver.ip_port = ip_port;
    memcpy(plain_message, &receiver, sizeof(receiver));

    uint64_t ping_id = 0;

    if (sendback_node != NULL) {
        /* Hardening request: also stash who asked us, in the harden array. */
        memcpy(plain_message + sizeof(receiver), sendback_node, sizeof(Node_format));
        ping_id = ping_array_add(&dht->dht_harden_ping_array, plain_message, sizeof(plain_message));
    } else {
        ping_id = ping_array_add(&dht->dht_ping_array, plain_message, sizeof(receiver));
    }

    if (ping_id == 0)
        return -1;

    /* Packet layout: [type][our id][nonce][enc(target id + ping_id)]. */
    uint8_t plain[CLIENT_ID_SIZE + sizeof(ping_id)];
    uint8_t encrypt[sizeof(plain) + crypto_box_MACBYTES];
    uint8_t data[1 + CLIENT_ID_SIZE + crypto_box_NONCEBYTES + sizeof(encrypt)];

    memcpy(plain, client_id, CLIENT_ID_SIZE);
    memcpy(plain + CLIENT_ID_SIZE, &ping_id, sizeof(ping_id));

    uint8_t shared_key[crypto_box_BEFORENMBYTES];
    DHT_get_shared_key_sent(dht, shared_key, public_key);

    uint8_t nonce[crypto_box_NONCEBYTES];
    new_nonce(nonce);

    int len = encrypt_data_symmetric( shared_key,
                                      nonce,
                                      plain,
                                      sizeof(plain),
                                      encrypt );

    if (len != sizeof(encrypt))
        return -1;

    data[0] = NET_PACKET_GET_NODES;
    memcpy(data + 1, dht->self_public_key, CLIENT_ID_SIZE);
    memcpy(data + 1 + CLIENT_ID_SIZE, nonce, crypto_box_NONCEBYTES);
    memcpy(data + 1 + CLIENT_ID_SIZE + crypto_box_NONCEBYTES, encrypt, len);

    return sendpacket(dht->net, ip_port, data, sizeof(data));
}
|
863
|
+
|
864
|
+
/* Send a send nodes response: message for IPv6 nodes
 *
 * sendback_data (length bytes) is echoed back inside the encrypted payload so
 * the requester can match the reply to its request (ping_id / hardening).
 * Returns the sendpacket() result, 0 if no close nodes found, -1 on error.
 */
static int sendnodes_ipv6(const DHT *dht, IP_Port ip_port, const uint8_t *public_key, const uint8_t *client_id,
                          const uint8_t *sendback_data, uint16_t length, const uint8_t *shared_encryption_key)
{
    /* Check if packet is going to be sent to ourself. */
    if (id_equal(public_key, dht->self_public_key))
        return -1;

    if (length > NODES_ENCRYPTED_MESSAGE_LENGTH || length == 0)
        return -1;

    size_t Node_format_size = sizeof(Node_format);
    uint8_t data[1 + CLIENT_ID_SIZE + crypto_box_NONCEBYTES
                 + Node_format_size * MAX_SENT_NODES + length + crypto_box_MACBYTES];

    Node_format nodes_list[MAX_SENT_NODES];
    /* sa_family 0 = both families; the requester is offered LAN addresses
     * only if it is itself on our LAN. */
    uint32_t num_nodes = get_close_nodes(dht, client_id, nodes_list, 0, LAN_ip(ip_port.ip) == 0, 1);

    if (num_nodes == 0)
        return 0;

    /* Plaintext: [num_nodes][packed nodes][sendback_data]. */
    uint8_t plain[1 + Node_format_size * MAX_SENT_NODES + length];
    uint8_t encrypt[sizeof(plain) + crypto_box_MACBYTES];
    uint8_t nonce[crypto_box_NONCEBYTES];
    new_nonce(nonce);

    int nodes_length = pack_nodes(plain + 1, Node_format_size * MAX_SENT_NODES, nodes_list, num_nodes);

    if (nodes_length <= 0)
        return -1;

    plain[0] = num_nodes;
    memcpy(plain + 1 + nodes_length, sendback_data, length);
    int len = encrypt_data_symmetric( shared_encryption_key,
                                      nonce,
                                      plain,
                                      1 + nodes_length + length,
                                      encrypt );

    if (len != 1 + nodes_length + length + crypto_box_MACBYTES)
        return -1;

    data[0] = NET_PACKET_SEND_NODES_IPV6;
    memcpy(data + 1, dht->self_public_key, CLIENT_ID_SIZE);
    memcpy(data + 1 + CLIENT_ID_SIZE, nonce, crypto_box_NONCEBYTES);
    memcpy(data + 1 + CLIENT_ID_SIZE + crypto_box_NONCEBYTES, encrypt, len);

    /* data[] is sized for the worst case; send only the bytes actually used. */
    return sendpacket(dht->net, ip_port, data, 1 + CLIENT_ID_SIZE + crypto_box_NONCEBYTES + len);
}
|
913
|
+
|
914
|
+
/* Packet handler for NET_PACKET_GET_NODES: decrypt the request, answer with
 * sendnodes_ipv6(), and queue the sender for pinging.
 * Returns 0 when handled, 1 when the packet is rejected.
 */
static int handle_getnodes(void *object, IP_Port source, const uint8_t *packet, uint32_t length)
{
    /* Minimum size: [type][sender id][nonce][enc(target id + sendback)]. */
    uint32_t cmp_len = 1 + CLIENT_ID_SIZE + crypto_box_NONCEBYTES + CLIENT_ID_SIZE + crypto_box_MACBYTES;

    if (length <= cmp_len)
        return 1;

    if (length > cmp_len + NODES_ENCRYPTED_MESSAGE_LENGTH)
        return 1;

    uint16_t sendback_data_length = length - cmp_len;

    DHT *dht = object;

    /* Check if packet is from ourself. */
    if (id_equal(packet + 1, dht->self_public_key))
        return 1;

    uint8_t plain[CLIENT_ID_SIZE + sendback_data_length];
    uint8_t shared_key[crypto_box_BEFORENMBYTES];

    DHT_get_shared_key_recv(dht, shared_key, packet + 1);
    int len = decrypt_data_symmetric( shared_key,
                                      packet + 1 + CLIENT_ID_SIZE,
                                      packet + 1 + CLIENT_ID_SIZE + crypto_box_NONCEBYTES,
                                      CLIENT_ID_SIZE + sendback_data_length + crypto_box_MACBYTES,
                                      plain );

    if (len != CLIENT_ID_SIZE + sendback_data_length)
        return 1;

    /* plain = [searched id][opaque sendback blob to echo in the reply]. */
    sendnodes_ipv6(dht, source, packet + 1, plain, plain + CLIENT_ID_SIZE, sendback_data_length, shared_key);

    add_to_ping(dht->ping, packet + 1, source);

    return 0;
}
|
951
|
+
/* Did we actually send a getnodes request with this ping_id to this node?
 * On success, *sendback_node is filled in: zeroed for an ordinary request,
 * or the originating node for a hardening request.
 *
 * return 0 if no
 * return 1 if yes */
static uint8_t sent_getnode_to_node(DHT *dht, const uint8_t *client_id, IP_Port node_ip_port, uint64_t ping_id,
                                    Node_format *sendback_node)
{
    uint8_t data[sizeof(Node_format) * 2];

    /* Ordinary requests store one Node_format; hardening requests store two
     * (receiver + sendback node) — the stored size distinguishes them. */
    if (ping_array_check(data, sizeof(data), &dht->dht_ping_array, ping_id) == sizeof(Node_format)) {
        memset(sendback_node, 0, sizeof(Node_format));
    } else if (ping_array_check(data, sizeof(data), &dht->dht_harden_ping_array, ping_id) == sizeof(data)) {
        memcpy(sendback_node, data + sizeof(Node_format), sizeof(Node_format));
    } else {
        return 0;
    }

    /* Verify the reply really comes from the node we queried. */
    Node_format test;
    memcpy(&test, data, sizeof(Node_format));

    if (!ipport_equal(&test.ip_port, &node_ip_port) || memcmp(test.client_id, client_id, CLIENT_ID_SIZE) != 0)
        return 0;

    return 1;
}
|
974
|
+
|
975
|
+
/* Function is needed in following functions. */
static int send_hardening_getnode_res(const DHT *dht, const Node_format *sendto, const uint8_t *queried_client_id,
                                      const uint8_t *nodes_data, uint16_t nodes_data_length);

/* Shared validation/decryption for sendnodes responses: authenticate the
 * packet against an outstanding getnodes request, unpack the node list into
 * plain_nodes (capacity size_plain_nodes) and report the count via
 * *num_nodes_out.  Returns 0 on success, 1 on any rejection.
 */
static int handle_sendnodes_core(void *object, IP_Port source, const uint8_t *packet, uint32_t length,
                                 Node_format *plain_nodes, uint16_t size_plain_nodes, uint32_t *num_nodes_out)
{
    DHT *dht = object;
    /* Fixed overhead: [type][sender id][nonce][enc([count]... [ping_id])]. */
    uint32_t cid_size = 1 + CLIENT_ID_SIZE + crypto_box_NONCEBYTES + 1 + sizeof(uint64_t) + crypto_box_MACBYTES;

    if (length <= cid_size) /* too short */
        return 1;

    uint32_t data_size = length - cid_size;

    if (data_size == 0)
        return 1;

    if (data_size > sizeof(Node_format) * MAX_SENT_NODES) /* invalid length */
        return 1;

    uint8_t plain[1 + data_size + sizeof(uint64_t)];
    uint8_t shared_key[crypto_box_BEFORENMBYTES];
    DHT_get_shared_key_sent(dht, shared_key, packet + 1);
    int len = decrypt_data_symmetric(
                  shared_key,
                  packet + 1 + CLIENT_ID_SIZE,
                  packet + 1 + CLIENT_ID_SIZE + crypto_box_NONCEBYTES,
                  1 + data_size + sizeof(uint64_t) + crypto_box_MACBYTES,
                  plain);

    if ((unsigned int)len != sizeof(plain))
        return 1;

    /* plain[0] = advertised node count. */
    if (plain[0] > size_plain_nodes || plain[0] == 0)
        return 1;

    Node_format sendback_node;

    uint64_t ping_id;
    memcpy(&ping_id, plain + 1 + data_size, sizeof(ping_id));

    /* Reject replies we never asked for (replay / spoof protection). */
    if (!sent_getnode_to_node(dht, packet + 1, source, ping_id, &sendback_node))
        return 1;

    uint16_t length_nodes = 0;
    int num_nodes = unpack_nodes(plain_nodes, plain[0], &length_nodes, plain + 1, data_size, 0);

    /* The packed data must be fully consumed and match the advertised count. */
    if (length_nodes != data_size)
        return 1;

    if (num_nodes != plain[0])
        return 1;

    if (num_nodes <= 0)
        return 1;

    /* store the address the *request* was sent to */
    addto_lists(dht, source, packet + 1);

    *num_nodes_out = num_nodes;

    /* Forward the raw node data to the hardening originator, if any. */
    send_hardening_getnode_res(dht, &sendback_node, packet + 1, plain + 1, data_size);
    return 0;
}
|
1040
|
+
|
1041
|
+
/* Packet handler for NET_PACKET_SEND_NODES_IPV6: validate via
 * handle_sendnodes_core(), then ping every returned node and record the
 * hearsay addresses.  Returns 0 when handled, 1 when rejected.
 */
static int handle_sendnodes_ipv6(void *object, IP_Port source, const uint8_t *packet, uint32_t length)
{
    DHT *dht = object;
    Node_format received[MAX_SENT_NODES];
    uint32_t count;

    if (handle_sendnodes_core(object, source, packet, length, received, MAX_SENT_NODES, &count))
        return 1;

    uint32_t idx;

    for (idx = 0; idx < count; idx++) {
        /* Skip entries without a usable address. */
        if (!ipport_isset(&received[idx].ip_port))
            continue;

        send_ping_request(dht->ping, received[idx].ip_port, received[idx].client_id);
        /* packet + 1 is the id of the node that reported these addresses. */
        returnedip_ports(dht, received[idx].ip_port, received[idx].client_id, packet + 1);
    }

    return 0;
}
|
1064
|
+
|
1065
|
+
/*----------------------------------------------------------------------------------*/
|
1066
|
+
/*------------------------END of packet handling functions--------------------------*/
|
1067
|
+
|
1068
|
+
/*
|
1069
|
+
* Send get nodes requests with client_id to max_num peers in list of length length
|
1070
|
+
*/
|
1071
|
+
/*
|
1072
|
+
static void get_bunchnodes(DHT *dht, Client_data *list, uint16_t length, uint16_t max_num, uint8_t *client_id)
|
1073
|
+
{
|
1074
|
+
uint32_t i, num = 0;
|
1075
|
+
|
1076
|
+
for (i = 0; i < length; ++i) {
|
1077
|
+
IPPTsPng *assoc;
|
1078
|
+
uint32_t a;
|
1079
|
+
|
1080
|
+
for (a = 0, assoc = &list[i].assoc6; a < 2; a++, assoc = &list[i].assoc4)
|
1081
|
+
if (ipport_isset(&(assoc->ip_port)) &&
|
1082
|
+
!is_timeout(assoc->ret_timestamp, BAD_NODE_TIMEOUT)) {
|
1083
|
+
getnodes(dht, assoc->ip_port, list[i].client_id, client_id, NULL);
|
1084
|
+
++num;
|
1085
|
+
|
1086
|
+
if (num >= max_num)
|
1087
|
+
return;
|
1088
|
+
}
|
1089
|
+
}
|
1090
|
+
}
|
1091
|
+
*/
|
1092
|
+
/* Add client_id as a DHT friend (a key whose address we actively search for).
 * Grows friends_list by one slot and seeds it; with ENABLE_ASSOC_DHT the
 * assoc store is mined for initial candidate nodes.
 *
 * Returns 0 on success, 1 if already a friend or on allocation failure.
 */
int DHT_addfriend(DHT *dht, const uint8_t *client_id)
{
    if (friend_number(dht, client_id) != -1) /* Is friend already in DHT? */
        return 1;

    DHT_Friend *temp;
    temp = realloc(dht->friends_list, sizeof(DHT_Friend) * (dht->num_friends + 1));

    if (temp == NULL)
        return 1;

    dht->friends_list = temp;
    memset(&dht->friends_list[dht->num_friends], 0, sizeof(DHT_Friend));
    memcpy(dht->friends_list[dht->num_friends].client_id, client_id, CLIENT_ID_SIZE);

    /* Random NATping_id authenticates our NAT hole-punch pings. */
    dht->friends_list[dht->num_friends].nat.NATping_id = random_64b();
    ++dht->num_friends;
#ifdef ENABLE_ASSOC_DHT

    if (dht->assoc) {
        /* get up to MAX_FRIEND_CLIENTS connectable nodes */
        DHT_Friend *friend = &dht->friends_list[dht->num_friends - 1];

        Assoc_close_entries close_entries;
        memset(&close_entries, 0, sizeof(close_entries));
        close_entries.wanted_id = client_id;
        close_entries.count_good = MAX_FRIEND_CLIENTS / 2;
        close_entries.count = MAX_FRIEND_CLIENTS;
        close_entries.result = calloc(MAX_FRIEND_CLIENTS, sizeof(*close_entries.result));

        uint8_t i, found = Assoc_get_close_entries(dht->assoc, &close_entries);

        for (i = 0; i < found; i++)
            memcpy(&friend->client_list[i], close_entries.result[i], sizeof(*close_entries.result[i]));

        if (found) {
            /* send getnodes to the "best" entry */
            Client_data *client = &friend->client_list[0];

            if (ipport_isset(&client->assoc4.ip_port))
                getnodes(dht, client->assoc4.ip_port, client->client_id, friend->client_id, NULL);

            if (ipport_isset(&client->assoc6.ip_port))
                getnodes(dht, client->assoc6.ip_port, client->client_id, friend->client_id, NULL);
        }
    }

#endif
    /*this isn't really useful anymore.
     get_bunchnodes(dht, dht->close_clientlist, LCLIENT_LIST, MAX_FRIEND_CLIENTS, client_id);*/

    return 0;
}
|
1145
|
+
|
1146
|
+
/* Remove client_id from the DHT friends list.
 *
 * The last entry is swapped into the freed slot and the allocation is shrunk.
 *
 * return 0 on success.
 * return 1 if the friend was not found or the shrinking realloc failed.
 */
int DHT_delfriend(DHT *dht, const uint8_t *client_id)
{
    uint32_t idx;

    for (idx = 0; idx < dht->num_friends; ++idx) {
        if (!id_equal(dht->friends_list[idx].client_id, client_id))
            continue;

        /* Found: swap-remove with the final entry. */
        --dht->num_friends;

        if (idx != dht->num_friends)
            memcpy(&dht->friends_list[idx],
                   &dht->friends_list[dht->num_friends],
                   sizeof(DHT_Friend));

        if (dht->num_friends == 0) {
            free(dht->friends_list);
            dht->friends_list = NULL;
            return 0;
        }

        DHT_Friend *shrunk = realloc(dht->friends_list,
                                     sizeof(DHT_Friend) * dht->num_friends);

        if (shrunk == NULL)
            return 1;

        dht->friends_list = shrunk;
        return 0;
    }

    return 1;
}
|
1180
|
+
|
1181
|
+
/* TODO: Optimize this. */
/* Look up the last known direct address of friend client_id.
 *
 * Finds the friend's own entry inside its stored client list and returns the
 * freshest non-timed-out association, checking IPv6 before IPv4.
 *
 * return 1 and fill *ip_port if a live address was found.
 * return 0 if the friend exists but no live address is stored.
 * return -1 if client_id is not in the friends list.
 */
int DHT_getfriendip(const DHT *dht, const uint8_t *client_id, IP_Port *ip_port)
{
    uint32_t i, j;

    /* Start from an unset address so callers see a clean result on 0/-1. */
    ip_reset(&ip_port->ip);
    ip_port->port = 0;

    for (i = 0; i < dht->num_friends; ++i) {
        /* Equal */
        if (id_equal(dht->friends_list[i].client_id, client_id)) {
            for (j = 0; j < MAX_FRIEND_CLIENTS; ++j) {
                Client_data *client = &dht->friends_list[i].client_list[j];

                /* Only the friend's own entry carries its direct address. */
                if (id_equal(client->client_id, client_id)) {
                    IPPTsPng *assoc = NULL;
                    uint32_t a;

                    /* First pass checks assoc6, second pass assoc4. */
                    for (a = 0, assoc = &client->assoc6; a < 2; a++, assoc = &client->assoc4)
                        if (!is_timeout(assoc->timestamp, BAD_NODE_TIMEOUT)) {
                            *ip_port = assoc->ip_port;
                            return 1;
                        }
                }
            }

            return 0;
        }
    }

    return -1;
}
|
1213
|
+
|
1214
|
+
/* returns number of nodes not in kill-timeout */
/* Maintenance pass over one client list:
 *  - ping every association that is not yet KILL_NODE_TIMEOUT old, at most
 *    once per PING_INTERVAL;
 *  - collect all "good" (fresher than BAD_NODE_TIMEOUT) associations and,
 *    if due, send one getnodes request for client_id to a random good node.
 *
 * *lastgetnode and *bootstrap_times are updated when a getnodes is sent;
 * bootstrap_times below MAX_BOOTSTRAP_TIMES forces extra early requests.
 */
static uint8_t do_ping_and_sendnode_requests(DHT *dht, uint64_t *lastgetnode, const uint8_t *client_id,
        Client_data *list, uint32_t list_count, uint32_t *bootstrap_times)
{
    uint32_t i;
    uint8_t not_kill = 0;
    uint64_t temp_time = unix_time();

    uint32_t num_nodes = 0;
    /* VLAs sized list_count * 2: each client contributes up to two
     * associations (IPv6 and IPv4). */
    Client_data *client_list[list_count * 2];
    IPPTsPng *assoc_list[list_count * 2];

    for (i = 0; i < list_count; i++) {
        /* If node is not dead. */
        Client_data *client = &list[i];
        IPPTsPng *assoc;
        uint32_t a;

        /* First pass checks assoc6, second pass assoc4. */
        for (a = 0, assoc = &client->assoc6; a < 2; a++, assoc = &client->assoc4)
            if (!is_timeout(assoc->timestamp, KILL_NODE_TIMEOUT)) {
                not_kill++;

                if (is_timeout(assoc->last_pinged, PING_INTERVAL)) {
                    send_ping_request(dht->ping, assoc->ip_port, client->client_id );
                    assoc->last_pinged = temp_time;
                }

                /* If node is good. */
                if (!is_timeout(assoc->timestamp, BAD_NODE_TIMEOUT)) {
                    client_list[num_nodes] = client;
                    assoc_list[num_nodes] = assoc;
                    ++num_nodes;
                }
            }
    }

    if ((num_nodes != 0) && (is_timeout(*lastgetnode, GET_NODE_INTERVAL) || *bootstrap_times < MAX_BOOTSTRAP_TIMES)) {
        uint32_t rand_node = rand() % num_nodes;
        getnodes(dht, assoc_list[rand_node]->ip_port, client_list[rand_node]->client_id,
                 client_id, NULL);
        *lastgetnode = temp_time;
        ++*bootstrap_times;
    }

    return not_kill;
}
|
1260
|
+
|
1261
|
+
/* Ping each client in the "friends" list every PING_INTERVAL seconds. Send a get nodes request
|
1262
|
+
* every GET_NODE_INTERVAL seconds to a random good node for each "friend" in our "friends" list.
|
1263
|
+
*/
|
1264
|
+
static void do_DHT_friends(DHT *dht)
|
1265
|
+
{
|
1266
|
+
uint32_t i;
|
1267
|
+
|
1268
|
+
for (i = 0; i < dht->num_friends; ++i)
|
1269
|
+
do_ping_and_sendnode_requests(dht, &dht->friends_list[i].lastgetnode, dht->friends_list[i].client_id,
|
1270
|
+
dht->friends_list[i].client_list, MAX_FRIEND_CLIENTS, &dht->friends_list[i].bootstrap_times);
|
1271
|
+
}
|
1272
|
+
|
1273
|
+
/* Ping each client in the close nodes list every PING_INTERVAL seconds.
 * Send a get nodes request every GET_NODE_INTERVAL seconds to a random good node in the list.
 */
static void do_Close(DHT *dht)
{
    uint8_t not_killed = do_ping_and_sendnode_requests(dht, &dht->close_lastgetnodes, dht->self_public_key,
                         dht->close_clientlist, LCLIENT_LIST, &dht->close_bootstrap_times);

    if (!not_killed) {
        /* all existing nodes are at least KILL_NODE_TIMEOUT,
         * which means we are mute, as we only send packets to
         * nodes NOT in KILL_NODE_TIMEOUT
         *
         * so: reset all nodes to be BAD_NODE_TIMEOUT, but not
         * KILL_NODE_TIMEOUT, so we at least keep trying pings */
        uint64_t badonly = unix_time() - BAD_NODE_TIMEOUT;
        size_t i, a;

        for (i = 0; i < LCLIENT_LIST; i++) {
            Client_data *client = &dht->close_clientlist[i];
            IPPTsPng *assoc;

            /* Rewind both associations' timestamps (assoc4 then assoc6),
             * but only for entries that were ever set. */
            for (a = 0, assoc = &client->assoc4; a < 2; a++, assoc = &client->assoc6)
                if (assoc->timestamp)
                    assoc->timestamp = badonly;
        }
    }
}
|
1301
|
+
|
1302
|
+
/* Public wrapper: send a getnodes request for which_id to the node
 * (from_ipp, from_id). */
void DHT_getnodes(DHT *dht, const IP_Port *from_ipp, const uint8_t *from_id, const uint8_t *which_id)
{
    IP_Port dest = *from_ipp;

    getnodes(dht, dest, from_id, which_id, NULL);
}
|
1306
|
+
|
1307
|
+
/* Bootstrap off a known node: send a getnodes request for our own public key
 * to the node at ip_port / public_key, pulling its neighborhood into our DHT. */
void DHT_bootstrap(DHT *dht, IP_Port ip_port, const uint8_t *public_key)
{
    /*#ifdef ENABLE_ASSOC_DHT
       if (dht->assoc) {
           IPPTs ippts;
           ippts.ip_port = ip_port;
           ippts.timestamp = 0;

           Assoc_add_entry(dht->assoc, public_key, &ippts, NULL, 0);
       }
    #endif*/

    getnodes(dht, ip_port, public_key, dht->self_public_key, NULL);
}
|
1321
|
+
/* Resolve address (hostname or literal IP) and bootstrap from it on port.
 *
 * When ipv6enabled, the resolver is asked for BOTH an IPv6 and an IPv4
 * address (via ip_extra), and a bootstrap packet is sent to each that
 * resolved.
 *
 * return 1 if the address resolved (bootstrap packets sent).
 * return 0 on resolution failure.
 */
int DHT_bootstrap_from_address(DHT *dht, const char *address, uint8_t ipv6enabled,
                               uint16_t port, const uint8_t *public_key)
{
    IP_Port ip_port_v64;
    IP *ip_extra = NULL;
    IP_Port ip_port_v4;
    ip_init(&ip_port_v64.ip, ipv6enabled);

    if (ipv6enabled) {
        /* setup for getting BOTH: an IPv6 AND an IPv4 address */
        ip_port_v64.ip.family = AF_UNSPEC;
        ip_reset(&ip_port_v4.ip);
        ip_extra = &ip_port_v4.ip;
    }

    if (addr_resolve_or_parse_ip(address, &ip_port_v64.ip, ip_extra)) {
        ip_port_v64.port = port;
        DHT_bootstrap(dht, ip_port_v64, public_key);

        /* ip_extra is only non-NULL (and ip_port_v4 only initialized) in
         * the ipv6enabled case. */
        if ((ip_extra != NULL) && ip_isset(ip_extra)) {
            ip_port_v4.port = port;
            DHT_bootstrap(dht, ip_port_v4, public_key);
        }

        return 1;
    } else
        return 0;
}
|
1349
|
+
|
1350
|
+
/* Send the given packet to node with client_id
|
1351
|
+
*
|
1352
|
+
* return -1 if failure.
|
1353
|
+
*/
|
1354
|
+
int route_packet(const DHT *dht, const uint8_t *client_id, const uint8_t *packet, uint32_t length)
|
1355
|
+
{
|
1356
|
+
uint32_t i;
|
1357
|
+
|
1358
|
+
for (i = 0; i < LCLIENT_LIST; ++i) {
|
1359
|
+
if (id_equal(client_id, dht->close_clientlist[i].client_id)) {
|
1360
|
+
const Client_data *client = &dht->close_clientlist[i];
|
1361
|
+
|
1362
|
+
if (ip_isset(&client->assoc6.ip_port.ip))
|
1363
|
+
return sendpacket(dht->net, client->assoc6.ip_port, packet, length);
|
1364
|
+
else if (ip_isset(&client->assoc4.ip_port.ip))
|
1365
|
+
return sendpacket(dht->net, client->assoc4.ip_port, packet, length);
|
1366
|
+
else
|
1367
|
+
break;
|
1368
|
+
}
|
1369
|
+
}
|
1370
|
+
|
1371
|
+
return -1;
|
1372
|
+
}
|
1373
|
+
|
1374
|
+
/* Puts all the different ips returned by the nodes for a friend_num into array ip_portlist.
 * ip_portlist must be at least MAX_FRIEND_CLIENTS big.
 *
 * return the number of ips returned.
 * return 0 if we are connected to friend (direct connectivity) or if no ips were found.
 * return -1 if no such friend.
 */
static int friend_iplist(const DHT *dht, IP_Port *ip_portlist, uint16_t friend_num)
{
    if (friend_num >= dht->num_friends)
        return -1;

    DHT_Friend *friend = &dht->friends_list[friend_num];
    Client_data *client;
    IP_Port ipv4s[MAX_FRIEND_CLIENTS];
    int num_ipv4s = 0;
    IP_Port ipv6s[MAX_FRIEND_CLIENTS];
    int num_ipv6s = 0;
    int i;

    for (i = 0; i < MAX_FRIEND_CLIENTS; ++i) {
        client = &(friend->client_list[i]);

        /* If ip is not zero and node is good. */
        if (ip_isset(&client->assoc4.ret_ip_port.ip) && !is_timeout(client->assoc4.ret_timestamp, BAD_NODE_TIMEOUT)) {
            ipv4s[num_ipv4s] = client->assoc4.ret_ip_port;
            ++num_ipv4s;
        }

        if (ip_isset(&client->assoc6.ret_ip_port.ip) && !is_timeout(client->assoc6.ret_timestamp, BAD_NODE_TIMEOUT)) {
            ipv6s[num_ipv6s] = client->assoc6.ret_ip_port;
            ++num_ipv6s;
        }

        /* If the friend itself appears in the client list with a fresh
         * association, we can reach it directly: report "connected". */
        if (id_equal(client->client_id, friend->client_id))
            if (!is_timeout(client->assoc6.timestamp, BAD_NODE_TIMEOUT) || !is_timeout(client->assoc4.timestamp, BAD_NODE_TIMEOUT))
                return 0; /* direct connectivity */
    }

#ifdef FRIEND_IPLIST_PAD
    /* IPv6 addresses first, then pad with IPv4 up to MAX_FRIEND_CLIENTS. */
    memcpy(ip_portlist, ipv6s, num_ipv6s * sizeof(IP_Port));

    if (num_ipv6s == MAX_FRIEND_CLIENTS)
        return MAX_FRIEND_CLIENTS;

    int num_ipv4s_used = MAX_FRIEND_CLIENTS - num_ipv6s;

    if (num_ipv4s_used > num_ipv4s)
        num_ipv4s_used = num_ipv4s;

    memcpy(&ip_portlist[num_ipv6s], ipv4s, num_ipv4s_used * sizeof(IP_Port));
    return num_ipv6s + num_ipv4s_used;

#else /* !FRIEND_IPLIST_PAD */

    /* there must be some secret reason why we can't pad the longer list
     * with the shorter one...
     */
    if (num_ipv6s >= num_ipv4s) {
        memcpy(ip_portlist, ipv6s, num_ipv6s * sizeof(IP_Port));
        return num_ipv6s;
    }

    memcpy(ip_portlist, ipv4s, num_ipv4s * sizeof(IP_Port));
    return num_ipv4s;

#endif /* !FRIEND_IPLIST_PAD */
}
|
1442
|
+
|
1443
|
+
|
1444
|
+
/* Send the given packet to everyone who tells us they are connected to friend_id.
 *
 * return number of nodes the packet was sent to.
 * (Nothing is sent unless at least MAX_FRIEND_CLIENTS / 4 addresses are known,
 * so partial routing is avoided.)
 */
int route_tofriend(const DHT *dht, const uint8_t *friend_id, const uint8_t *packet, uint32_t length)
{
    int num = friend_number(dht, friend_id);

    if (num == -1)
        return 0;

    uint32_t i, sent = 0;
    uint8_t friend_sent[MAX_FRIEND_CLIENTS] = {0};

    IP_Port ip_list[MAX_FRIEND_CLIENTS];
    int ip_num = friend_iplist(dht, ip_list, num);

    if (ip_num < (MAX_FRIEND_CLIENTS / 4))
        return 0; /* Reason for that? */

    DHT_Friend *friend = &dht->friends_list[num];
    Client_data *client;

    /* extra legwork, because having the outside allocating the space for us
     * is *usually* good(tm) (bites us in the behind in this case though) */
    uint32_t a;

    /* Pass a == 0 tries each client's assoc4, pass a == 1 its assoc6;
     * friend_sent[] guarantees at most one packet per client. */
    for (a = 0; a < 2; a++)
        for (i = 0; i < MAX_FRIEND_CLIENTS; ++i) {
            if (friend_sent[i])/* Send one packet per client.*/
                continue;

            client = &friend->client_list[i];
            IPPTsPng *assoc = NULL;

            if (!a)
                assoc = &client->assoc4;
            else
                assoc = &client->assoc6;

            /* If ip is not zero and node is good. */
            if (ip_isset(&assoc->ret_ip_port.ip) &&
                    !is_timeout(assoc->ret_timestamp, BAD_NODE_TIMEOUT)) {
                int retval = sendpacket(dht->net, assoc->ip_port, packet, length);

                if ((unsigned int)retval == length) {
                    ++sent;
                    friend_sent[i] = 1;
                }
            }
        }

    return sent;
}
|
1499
|
+
|
1500
|
+
/* Send the given packet to one random node that tells us it is connected to friend_id.
 *
 * return number of nodes the packet was sent to (0 or 1).
 */
static int routeone_tofriend(DHT *dht, const uint8_t *friend_id, const uint8_t *packet, uint32_t length)
{
    int num = friend_number(dht, friend_id);

    if (num == -1)
        return 0;

    DHT_Friend *friend = &dht->friends_list[num];
    Client_data *client;

    IP_Port ip_list[MAX_FRIEND_CLIENTS * 2];
    int n = 0;
    uint32_t i;

    /* extra legwork, because having the outside allocating the space for us
     * is *usually* good(tm) (bites us in the behind in this case though) */
    uint32_t a;

    /* Collect every live candidate address: pass a == 0 takes each
     * client's assoc4, pass a == 1 its assoc6. */
    for (a = 0; a < 2; a++)
        for (i = 0; i < MAX_FRIEND_CLIENTS; ++i) {
            client = &friend->client_list[i];
            IPPTsPng *assoc = NULL;

            if (!a)
                assoc = &client->assoc4;
            else
                assoc = &client->assoc6;

            /* If ip is not zero and node is good. */
            if (ip_isset(&assoc->ret_ip_port.ip) && !is_timeout(assoc->ret_timestamp, BAD_NODE_TIMEOUT)) {
                ip_list[n] = assoc->ip_port;
                ++n;
            }
        }

    if (n < 1)
        return 0;

    /* Pick one candidate at random and send through it. */
    int retval = sendpacket(dht->net, ip_list[rand() % n], packet, length);

    if ((unsigned int)retval == length)
        return 1;

    return 0;
}
|
1549
|
+
|
1550
|
+
/* Puts all the different ips returned by the nodes for a friend_id into array ip_portlist.
|
1551
|
+
* ip_portlist must be at least MAX_FRIEND_CLIENTS big.
|
1552
|
+
*
|
1553
|
+
* return number of ips returned.
|
1554
|
+
* return 0 if we are connected to friend or if no ips were found.
|
1555
|
+
* return -1 if no such friend.
|
1556
|
+
*/
|
1557
|
+
int friend_ips(const DHT *dht, IP_Port *ip_portlist, const uint8_t *friend_id)
|
1558
|
+
{
|
1559
|
+
uint32_t i;
|
1560
|
+
|
1561
|
+
for (i = 0; i < dht->num_friends; ++i) {
|
1562
|
+
/* Equal */
|
1563
|
+
if (id_equal(dht->friends_list[i].client_id, friend_id))
|
1564
|
+
return friend_iplist(dht, ip_portlist, i);
|
1565
|
+
}
|
1566
|
+
|
1567
|
+
return -1;
|
1568
|
+
}
|
1569
|
+
|
1570
|
+
/*----------------------------------------------------------------------------------*/
|
1571
|
+
/*---------------------BEGINNING OF NAT PUNCHING FUNCTIONS--------------------------*/
|
1572
|
+
|
1573
|
+
/* Build a NAT ping packet carrying ping_id and route it to friend public_key.
 *
 * type 0 (request): routed through every node connected to the friend.
 * type 1 (response): routed through a single random node.
 *
 * return number of nodes the packet was sent to.
 * return -1 on failure or if nothing was sent.
 */
static int send_NATping(DHT *dht, const uint8_t *public_key, uint64_t ping_id, uint8_t type)
{
    uint8_t data[sizeof(uint64_t) + 1];
    uint8_t packet[MAX_CRYPTO_REQUEST_SIZE];

    int num = 0;

    data[0] = type;
    memcpy(data + 1, &ping_id, sizeof(uint64_t));
    /* 254 is NAT ping request packet id */
    int len = create_request(dht->self_public_key, dht->self_secret_key, packet, public_key, data,
                             sizeof(uint64_t) + 1, CRYPTO_PACKET_NAT_PING);

    if (len == -1)
        return -1;

    if (type == 0) /* If packet is request use many people to route it. */
        num = route_tofriend(dht, public_key, packet, len);
    else if (type == 1) /* If packet is response use only one person to route it */
        num = routeone_tofriend(dht, public_key, packet, len);

    if (num == 0)
        return -1;

    return num;
}
|
1599
|
+
|
1600
|
+
/* Handle a received NAT ping packet (request or response) from a friend.
 *
 * return 0 when handled, 1 on malformed input, unknown sender, or id mismatch.
 */
static int handle_NATping(void *object, IP_Port source, const uint8_t *source_pubkey, const uint8_t *packet,
                          uint32_t length)
{
    if (length != sizeof(uint64_t) + 1)
        return 1;

    DHT *dht = object;
    uint64_t ping_id;
    memcpy(&ping_id, packet + 1, sizeof(uint64_t));

    /* Only accept NAT pings from known friends. */
    int friendnumber = friend_number(dht, source_pubkey);

    if (friendnumber == -1)
        return 1;

    DHT_Friend *friend = &dht->friends_list[friendnumber];

    if (packet[0] == NAT_PING_REQUEST) {
        /* 1 is reply */
        send_NATping(dht, source_pubkey, ping_id, NAT_PING_RESPONSE);
        friend->nat.recvNATping_timestamp = unix_time();
        return 0;
    } else if (packet[0] == NAT_PING_RESPONSE) {
        /* Response must echo the id we sent; then rotate the id and
         * arm hole punching for do_NAT(). */
        if (friend->nat.NATping_id == ping_id) {
            friend->nat.NATping_id = random_64b();
            friend->nat.hole_punching = 1;
            return 0;
        }
    }

    return 1;
}
|
1633
|
+
|
1634
|
+
/* Get the most common ip in the ip_portlist.
|
1635
|
+
* Only return ip if it appears in list min_num or more.
|
1636
|
+
* len must not be bigger than MAX_FRIEND_CLIENTS.
|
1637
|
+
*
|
1638
|
+
* return ip of 0 if failure.
|
1639
|
+
*/
|
1640
|
+
static IP NAT_commonip(IP_Port *ip_portlist, uint16_t len, uint16_t min_num)
|
1641
|
+
{
|
1642
|
+
IP zero;
|
1643
|
+
ip_reset(&zero);
|
1644
|
+
|
1645
|
+
if (len > MAX_FRIEND_CLIENTS)
|
1646
|
+
return zero;
|
1647
|
+
|
1648
|
+
uint32_t i, j;
|
1649
|
+
uint16_t numbers[MAX_FRIEND_CLIENTS] = {0};
|
1650
|
+
|
1651
|
+
for (i = 0; i < len; ++i) {
|
1652
|
+
for (j = 0; j < len; ++j) {
|
1653
|
+
if (ip_equal(&ip_portlist[i].ip, &ip_portlist[j].ip))
|
1654
|
+
++numbers[i];
|
1655
|
+
}
|
1656
|
+
|
1657
|
+
if (numbers[i] >= min_num)
|
1658
|
+
return ip_portlist[i].ip;
|
1659
|
+
}
|
1660
|
+
|
1661
|
+
return zero;
|
1662
|
+
}
|
1663
|
+
|
1664
|
+
/* Return all the ports for one ip in a list.
|
1665
|
+
* portlist must be at least len long,
|
1666
|
+
* where len is the length of ip_portlist.
|
1667
|
+
*
|
1668
|
+
* return number of ports and puts the list of ports in portlist.
|
1669
|
+
*/
|
1670
|
+
static uint16_t NAT_getports(uint16_t *portlist, IP_Port *ip_portlist, uint16_t len, IP ip)
|
1671
|
+
{
|
1672
|
+
uint32_t i;
|
1673
|
+
uint16_t num = 0;
|
1674
|
+
|
1675
|
+
for (i = 0; i < len; ++i) {
|
1676
|
+
if (ip_equal(&ip_portlist[i].ip, &ip)) {
|
1677
|
+
portlist[num] = ntohs(ip_portlist[i].port);
|
1678
|
+
++num;
|
1679
|
+
}
|
1680
|
+
}
|
1681
|
+
|
1682
|
+
return num;
|
1683
|
+
}
|
1684
|
+
|
1685
|
+
/* Try to open a NAT hole toward the friend at ip by pinging guessed ports.
 * port_list holds numports ports (host byte order) that other peers reported
 * for this ip; punching_index / punching_index2 persist guessing progress
 * across calls.
 */
static void punch_holes(DHT *dht, IP ip, uint16_t *port_list, uint16_t numports, uint16_t friend_num)
{
    if (numports > MAX_FRIEND_CLIENTS || numports == 0)
        return;

    uint32_t i;
    uint32_t top = dht->friends_list[friend_num].nat.punching_index + MAX_PUNCHING_PORTS;
    uint16_t firstport = port_list[0];

    for (i = 0; i < numports; ++i) {
        if (firstport != port_list[i])
            break;
    }

    if (i == numports) { /* If all ports are the same, only try that one port. */
        IP_Port pinging;
        ip_copy(&pinging.ip, &ip);
        pinging.port = htons(firstport);
        send_ping_request(dht->ping, pinging, dht->friends_list[friend_num].client_id);
    } else {
        for (i = dht->friends_list[friend_num].nat.punching_index; i != top; ++i) {
            /* TODO: Improve port guessing algorithm. */
            /* Cycle through the reported ports, offsetting alternately
             * above/below and widening as i grows. */
            uint16_t port = port_list[(i / 2) % numports] + (i / (2 * numports)) * ((i % 2) ? -1 : 1);
            IP_Port pinging;
            ip_copy(&pinging.ip, &ip);
            pinging.port = htons(port);
            send_ping_request(dht->ping, pinging, dht->friends_list[friend_num].client_id);
        }

        dht->friends_list[friend_num].nat.punching_index = i;
    }

    if (dht->friends_list[friend_num].nat.tries > MAX_NORMAL_PUNCHING_TRIES) {
        /* Normal guessing failed repeatedly: scan sequentially upward
         * from port 1024 instead. */
        top = dht->friends_list[friend_num].nat.punching_index2 + MAX_PUNCHING_PORTS;
        uint16_t port = 1024;
        IP_Port pinging;
        ip_copy(&pinging.ip, &ip);

        for (i = dht->friends_list[friend_num].nat.punching_index2; i != top; ++i) {
            pinging.port = htons(port + i);
            send_ping_request(dht->ping, pinging, dht->friends_list[friend_num].client_id);
        }

        /* Back off half a window so consecutive scans overlap. */
        dht->friends_list[friend_num].nat.punching_index2 = i - (MAX_PUNCHING_PORTS / 2);
    }

    ++dht->friends_list[friend_num].nat.tries;
}
|
1733
|
+
|
1734
|
+
/* Drive NAT hole punching for every friend: periodically send NAT ping
 * requests, and when a response arrived recently, punch toward the IP that
 * most reporting peers agree on. */
static void do_NAT(DHT *dht)
{
    uint32_t i;
    uint64_t temp_time = unix_time();

    for (i = 0; i < dht->num_friends; ++i) {
        IP_Port ip_list[MAX_FRIEND_CLIENTS];
        int num = friend_iplist(dht, ip_list, i);

        /* If already connected or friend is not online don't try to hole punch. */
        if (num < MAX_FRIEND_CLIENTS / 2)
            continue;

        if (dht->friends_list[i].nat.NATping_timestamp + PUNCH_INTERVAL < temp_time) {
            send_NATping(dht, dht->friends_list[i].client_id, dht->friends_list[i].nat.NATping_id, NAT_PING_REQUEST);
            dht->friends_list[i].nat.NATping_timestamp = temp_time;
        }

        /* Punch only if armed by a ping response, not punched too recently,
         * and the friend's own NAT ping request was seen recently enough. */
        if (dht->friends_list[i].nat.hole_punching == 1 &&
                dht->friends_list[i].nat.punching_timestamp + PUNCH_INTERVAL < temp_time &&
                dht->friends_list[i].nat.recvNATping_timestamp + PUNCH_INTERVAL * 2 >= temp_time) {

            IP ip = NAT_commonip(ip_list, num, MAX_FRIEND_CLIENTS / 2);

            if (!ip_isset(&ip))
                continue;

            uint16_t port_list[MAX_FRIEND_CLIENTS];
            uint16_t numports = NAT_getports(port_list, ip_list, num, ip);
            punch_holes(dht, ip, port_list, numports, i);

            dht->friends_list[i].nat.punching_timestamp = temp_time;
            dht->friends_list[i].nat.hole_punching = 0;
        }
    }
}
|
1770
|
+
|
1771
|
+
/*----------------------------------------------------------------------------------*/
|
1772
|
+
/*-----------------------END OF NAT PUNCHING FUNCTIONS------------------------------*/
|
1773
|
+
|
1774
|
+
#define HARDREQ_DATA_SIZE 384 /* Attempt to prevent amplification/other attacks*/
|
1775
|
+
|
1776
|
+
#define CHECK_TYPE_ROUTE_REQ 0
|
1777
|
+
#define CHECK_TYPE_ROUTE_RES 1
|
1778
|
+
#define CHECK_TYPE_GETNODE_REQ 2
|
1779
|
+
#define CHECK_TYPE_GETNODE_RES 3
|
1780
|
+
#define CHECK_TYPE_TEST_REQ 4
|
1781
|
+
#define CHECK_TYPE_TEST_RES 5
|
1782
|
+
|
1783
|
+
/* Build a hardening request of the given type and send it to sendto.
 * contents (length bytes) is copied into a fixed, zero-padded
 * HARDREQ_DATA_SIZE buffer so all hardening requests are the same size
 * (see the anti-amplification note on HARDREQ_DATA_SIZE).
 *
 * return number of bytes sent, -1 on failure or oversized contents.
 */
static int send_hardening_req(DHT *dht, Node_format *sendto, uint8_t type, uint8_t *contents, uint16_t length)
{
    if (length > HARDREQ_DATA_SIZE - 1)
        return -1;

    uint8_t packet[MAX_CRYPTO_REQUEST_SIZE];
    uint8_t data[HARDREQ_DATA_SIZE] = {0};
    data[0] = type;
    memcpy(data + 1, contents, length);
    int len = create_request(dht->self_public_key, dht->self_secret_key, packet, sendto->client_id, data,
                             sizeof(data), CRYPTO_PACKET_HARDENING);

    if (len == -1)
        return -1;

    return sendpacket(dht->net, sendto->ip_port, packet, len);
}
|
1800
|
+
|
1801
|
+
/* Send a get node hardening request */
|
1802
|
+
static int send_hardening_getnode_req(DHT *dht, Node_format *dest, Node_format *node_totest, uint8_t *search_id)
|
1803
|
+
{
|
1804
|
+
uint8_t data[sizeof(Node_format) + CLIENT_ID_SIZE];
|
1805
|
+
memcpy(data, node_totest, sizeof(Node_format));
|
1806
|
+
memcpy(data + sizeof(Node_format), search_id, CLIENT_ID_SIZE);
|
1807
|
+
return send_hardening_req(dht, dest, CHECK_TYPE_GETNODE_REQ, data, sizeof(Node_format) + CLIENT_ID_SIZE);
|
1808
|
+
}
|
1809
|
+
|
1810
|
+
/* Send a get node hardening response back to sendto, carrying the id we were
 * asked to query plus the packed nodes data our getnodes produced.
 *
 * return number of bytes sent, -1 on failure or unset destination address.
 */
static int send_hardening_getnode_res(const DHT *dht, const Node_format *sendto, const uint8_t *queried_client_id,
                                      const uint8_t *nodes_data, uint16_t nodes_data_length)
{
    if (!ip_isset(&sendto->ip_port.ip))
        return -1;

    uint8_t packet[MAX_CRYPTO_REQUEST_SIZE];
    /* VLA: [type byte][queried id][packed nodes]. */
    uint8_t data[1 + CLIENT_ID_SIZE + nodes_data_length];
    data[0] = CHECK_TYPE_GETNODE_RES;
    memcpy(data + 1, queried_client_id, CLIENT_ID_SIZE);
    memcpy(data + 1 + CLIENT_ID_SIZE, nodes_data, nodes_data_length);
    int len = create_request(dht->self_public_key, dht->self_secret_key, packet, sendto->client_id, data,
                             sizeof(data), CRYPTO_PACKET_HARDENING);

    if (len == -1)
        return -1;

    return sendpacket(dht->net, sendto->ip_port, packet, len);
}
|
1830
|
+
|
1831
|
+
/* TODO: improve */
|
1832
|
+
static IPPTsPng *get_closelist_IPPTsPng(DHT *dht, const uint8_t *client_id, sa_family_t sa_family)
|
1833
|
+
{
|
1834
|
+
uint32_t i;
|
1835
|
+
|
1836
|
+
for (i = 0; i < LCLIENT_LIST; ++i) {
|
1837
|
+
if (memcmp(dht->close_clientlist[i].client_id, client_id, CLIENT_ID_SIZE) != 0)
|
1838
|
+
continue;
|
1839
|
+
|
1840
|
+
if (sa_family == AF_INET)
|
1841
|
+
return &dht->close_clientlist[i].assoc4;
|
1842
|
+
else if (sa_family == AF_INET6)
|
1843
|
+
return &dht->close_clientlist[i].assoc6;
|
1844
|
+
}
|
1845
|
+
|
1846
|
+
return NULL;
|
1847
|
+
}
|
1848
|
+
|
1849
|
+
/*
|
1850
|
+
* check how many nodes in nodes are also present in the closelist.
|
1851
|
+
* TODO: make this function better.
|
1852
|
+
*/
|
1853
|
+
static uint32_t have_nodes_closelist(DHT *dht, Node_format *nodes, uint16_t num)
|
1854
|
+
{
|
1855
|
+
uint32_t counter = 0;
|
1856
|
+
uint32_t i;
|
1857
|
+
|
1858
|
+
for (i = 0; i < num; ++i) {
|
1859
|
+
if (id_equal(nodes[i].client_id, dht->self_public_key)) {
|
1860
|
+
++counter;
|
1861
|
+
continue;
|
1862
|
+
}
|
1863
|
+
|
1864
|
+
IPPTsPng *temp = get_closelist_IPPTsPng(dht, nodes[i].client_id, nodes[i].ip_port.ip.family);
|
1865
|
+
|
1866
|
+
if (temp) {
|
1867
|
+
if (!is_timeout(temp->timestamp, BAD_NODE_TIMEOUT)) {
|
1868
|
+
++counter;
|
1869
|
+
}
|
1870
|
+
}
|
1871
|
+
}
|
1872
|
+
|
1873
|
+
return counter;
|
1874
|
+
}
|
1875
|
+
|
1876
|
+
/* Interval in seconds between hardening checks */
|
1877
|
+
#define HARDENING_INTERVAL 120
|
1878
|
+
#define HARDEN_TIMEOUT 1200
|
1879
|
+
|
1880
|
+
/* Handle a received hardening packet.
 *
 * CHECK_TYPE_GETNODE_REQ: run the requested getnodes on behalf of the sender.
 * CHECK_TYPE_GETNODE_RES: validate a getnodes answer against our close list
 * and, if it checks out, mark the tested node's send_nodes hardening as ok.
 *
 * return 0 when handled, 1 on any validation failure.
 */
static int handle_hardening(void *object, IP_Port source, const uint8_t *source_pubkey, const uint8_t *packet,
                            uint32_t length)
{
    DHT *dht = object;

    if (length < 2) {
        return 1;
    }

    switch (packet[0]) {
        case CHECK_TYPE_GETNODE_REQ: {
            /* Requests are fixed-size to prevent amplification. */
            if (length != HARDREQ_DATA_SIZE)
                return 1;

            Node_format node, tocheck_node;
            node.ip_port = source;
            memcpy(node.client_id, source_pubkey, CLIENT_ID_SIZE);
            memcpy(&tocheck_node, packet + 1, sizeof(Node_format));

            /* Forward the getnodes to the node under test; the answer will be
             * relayed back to `node` (the requester). */
            if (getnodes(dht, tocheck_node.ip_port, tocheck_node.client_id, packet + 1 + sizeof(Node_format), &node) == -1)
                return 1;

            return 0;
        }

        case CHECK_TYPE_GETNODE_RES: {
            if (length <= CLIENT_ID_SIZE + 1)
                return 1;

            if (length > 1 + CLIENT_ID_SIZE + sizeof(Node_format) * MAX_SENT_NODES)
                return 1;

            uint16_t length_nodes = length - 1 - CLIENT_ID_SIZE;
            Node_format nodes[MAX_SENT_NODES];
            int num_nodes = unpack_nodes(nodes, MAX_SENT_NODES, 0, packet + 1 + CLIENT_ID_SIZE, length_nodes, 0);

            /* TODO: MAX_SENT_NODES nodes should be returned at all times
             (right now we have a small network size so it could cause problems for testing and etc..) */
            if (num_nodes <= 0)
                return 1;

            /* NOTE: This should work for now but should be changed to something better.
             * Require a majority of returned nodes to also be in our close list. */
            if (have_nodes_closelist(dht, nodes, num_nodes) < (uint32_t)((num_nodes + 2) / 2))
                return 1;

            IPPTsPng *temp = get_closelist_IPPTsPng(dht, packet + 1, nodes[0].ip_port.ip.family);

            if (temp == NULL)
                return 1;

            /* Response must arrive within HARDENING_INTERVAL of our request... */
            if (is_timeout(temp->hardening.send_nodes_timestamp, HARDENING_INTERVAL))
                return 1;

            /* ...and come from the node we actually asked. */
            if (memcmp(temp->hardening.send_nodes_pingedid, source_pubkey, CLIENT_ID_SIZE) != 0)
                return 1;

            /* If Nodes look good and the request checks out */
            temp->hardening.send_nodes_ok = 1;
            return 0;/* success*/
        }
    }

    return 1;
}
|
1945
|
+
|
1946
|
+
/* Return a random node from all the nodes we are connected to.
|
1947
|
+
* TODO: improve this function.
|
1948
|
+
*/
|
1949
|
+
Node_format random_node(DHT *dht, sa_family_t sa_family)
|
1950
|
+
{
|
1951
|
+
uint8_t id[CLIENT_ID_SIZE];
|
1952
|
+
uint32_t i;
|
1953
|
+
|
1954
|
+
for (i = 0; i < CLIENT_ID_SIZE / 4; ++i) { /* populate the id with pseudorandom bytes.*/
|
1955
|
+
uint32_t t = rand();
|
1956
|
+
memcpy(id + i * sizeof(t), &t, sizeof(t));
|
1957
|
+
}
|
1958
|
+
|
1959
|
+
Node_format nodes_list[MAX_SENT_NODES];
|
1960
|
+
memset(nodes_list, 0, sizeof(nodes_list));
|
1961
|
+
uint32_t num_nodes = get_close_nodes(dht, id, nodes_list, sa_family, 1, 0);
|
1962
|
+
|
1963
|
+
if (num_nodes == 0)
|
1964
|
+
return nodes_list[0];
|
1965
|
+
else
|
1966
|
+
return nodes_list[rand() % num_nodes];
|
1967
|
+
}
|
1968
|
+
|
1969
|
+
/* Put up to max_num nodes in nodes from the closelist.
 *
 * Iterates the close list from the far end and takes one live association
 * per client, choosing randomly between assoc4/assoc6 when both are good.
 *
 * return the number of nodes.
 */
uint16_t closelist_nodes(DHT *dht, Node_format *nodes, uint16_t max_num)
{
    if (max_num == 0)
        return 0;

    uint16_t count = 0;
    Client_data *list = dht->close_clientlist;

    uint32_t i;

    for (i = LCLIENT_LIST; i != 0; --i) {
        IPPTsPng *assoc = NULL;

        if (!is_timeout(list[i - 1].assoc4.timestamp, BAD_NODE_TIMEOUT))
            assoc = &list[i - 1].assoc4;

        /* If both families are good, flip a coin. */
        if (!is_timeout(list[i - 1].assoc6.timestamp, BAD_NODE_TIMEOUT)) {
            if (assoc == NULL)
                assoc = &list[i - 1].assoc6;
            else if (rand() % 2)
                assoc = &list[i - 1].assoc6;
        }

        if (assoc != NULL) {
            memcpy(nodes[count].client_id, list[i - 1].client_id, CLIENT_ID_SIZE);
            nodes[count].ip_port = assoc->ip_port;
            ++count;

            if (count >= max_num)
                return count;
        }
    }

    return count;
}
|
2008
|
+
|
2009
|
+
/* Put a random node from list of list_size in node. LAN_ok is 1 if LAN ips are ok, 0 if we don't want them. */
/* return 0 and fill *node on success, -1 if no eligible node exists. */
static int random_node_fromlist(Client_data *list, uint16_t list_size, Node_format *node, uint8_t LAN_ok)
{
    uint32_t i;
    uint32_t num_nodes = 0;
    /* VLAs sized list_size * 2: each client may contribute both its IPv6
     * and IPv4 associations. */
    Client_data *client_list[list_size * 2];
    IPPTsPng *assoc_list[list_size * 2];

    for (i = 0; i < list_size; i++) {
        /* If node is not dead. */
        Client_data *client = &list[i];
        IPPTsPng *assoc;
        uint32_t a;

        /* First pass checks assoc6, second pass assoc4. */
        for (a = 0, assoc = &client->assoc6; a < 2; a++, assoc = &client->assoc4) {
            /* If node is good. */
            if (!is_timeout(assoc->timestamp, BAD_NODE_TIMEOUT)) {
                if (!LAN_ok) {
                    /* LAN_ip() == 0 means the address IS a LAN ip: skip it. */
                    if (LAN_ip(assoc->ip_port.ip) == 0)
                        continue;
                }

                client_list[num_nodes] = client;
                assoc_list[num_nodes] = assoc;
                ++num_nodes;
            }
        }
    }

    if (num_nodes == 0)
        return -1;

    uint32_t rand_node = rand() % num_nodes;
    node->ip_port = assoc_list[rand_node]->ip_port;
    memcpy(node->client_id, client_list[rand_node]->client_id, CLIENT_ID_SIZE);
    return 0;
}
|
2046
|
+
|
2047
|
+
/* Put up to max_num random nodes in nodes.
|
2048
|
+
*
|
2049
|
+
* return the number of nodes.
|
2050
|
+
*
|
2051
|
+
* NOTE:this is used to pick nodes for paths.
|
2052
|
+
*
|
2053
|
+
* TODO: remove the LAN stuff from this.
|
2054
|
+
*/
|
2055
|
+
uint16_t random_nodes_path(const DHT *dht, Node_format *nodes, uint16_t max_num)
|
2056
|
+
{
|
2057
|
+
if (max_num == 0)
|
2058
|
+
return 0;
|
2059
|
+
|
2060
|
+
if (dht->num_friends == 0)
|
2061
|
+
return 0;
|
2062
|
+
|
2063
|
+
uint16_t count = 0;
|
2064
|
+
uint16_t list_size = 0;
|
2065
|
+
uint32_t i;
|
2066
|
+
|
2067
|
+
for (i = 0; i < max_num; ++i) {
|
2068
|
+
Client_data *list = NULL;
|
2069
|
+
uint16_t rand_num = rand() % (dht->num_friends);
|
2070
|
+
list = dht->friends_list[rand_num].client_list;
|
2071
|
+
list_size = MAX_FRIEND_CLIENTS;
|
2072
|
+
|
2073
|
+
uint8_t LAN_ok = 1;
|
2074
|
+
|
2075
|
+
if (count != 0 && LAN_ip(nodes[0].ip_port.ip) != 0)
|
2076
|
+
LAN_ok = 0;
|
2077
|
+
|
2078
|
+
if (random_node_fromlist(list, list_size, &nodes[count], LAN_ok) == 0)
|
2079
|
+
++count;
|
2080
|
+
}
|
2081
|
+
|
2082
|
+
return count;
|
2083
|
+
}
|
2084
|
+
|
2085
|
+
/* Run the hardening tests on the close list.
 *
 * For every live assoc (even i -> assoc4/AF_INET, odd i -> assoc6/AF_INET6)
 * whose send-nodes test is not currently passing, pick a random third-party
 * node and ask it for nodes close to the entry under test
 * (send_hardening_getnode_req). A passing test expires after HARDEN_TIMEOUT
 * and is then re-run.
 */
void do_hardening(DHT *dht)
{
    uint32_t i;

    /* Two iterations per close-list slot: one per address family. */
    for (i = 0; i < LCLIENT_LIST * 2; ++i) {
        IPPTsPng *cur_iptspng;
        sa_family_t sa_family;
        uint8_t *client_id = dht->close_clientlist[i / 2].client_id;

        if (i % 2 == 0) {
            cur_iptspng = &dht->close_clientlist[i / 2].assoc4;
            sa_family = AF_INET;
        } else {
            cur_iptspng = &dht->close_clientlist[i / 2].assoc6;
            sa_family = AF_INET6;
        }

        /* Dead entry: nothing to harden. */
        if (is_timeout(cur_iptspng->timestamp, BAD_NODE_TIMEOUT))
            continue;

        if (cur_iptspng->hardening.send_nodes_ok == 0) {
            /* Test not passing: (re)send a request at most once per
             * HARDENING_INTERVAL. */
            if (is_timeout(cur_iptspng->hardening.send_nodes_timestamp, HARDENING_INTERVAL)) {
                Node_format rand_node = random_node(dht, sa_family);

                if (!ipport_isset(&rand_node.ip_port))
                    continue;

                /* Don't ask a node to test itself. */
                if (id_equal(client_id, rand_node.client_id))
                    continue;

                Node_format to_test;
                to_test.ip_port = cur_iptspng->ip_port;
                memcpy(to_test.client_id, client_id, CLIENT_ID_SIZE);

                //TODO: The search id should maybe not be ours?
                if (send_hardening_getnode_req(dht, &rand_node, &to_test, dht->self_public_key) > 0) {
                    /* Remember who we asked so the reply can be matched. */
                    memcpy(cur_iptspng->hardening.send_nodes_pingedid, rand_node.client_id, CLIENT_ID_SIZE);
                    cur_iptspng->hardening.send_nodes_timestamp = unix_time();
                }
            }
        } else {
            /* Passing result is only trusted for HARDEN_TIMEOUT. */
            if (is_timeout(cur_iptspng->hardening.send_nodes_timestamp, HARDEN_TIMEOUT)) {
                cur_iptspng->hardening.send_nodes_ok = 0;
            }
        }

        //TODO: add the 2 other testers.
    }
}
|
2134
|
+
|
2135
|
+
/*----------------------------------------------------------------------------------*/
|
2136
|
+
|
2137
|
+
/* Register cb (with its opaque state pointer) as the handler for crypto
 * packet id `byte`; a NULL cb unregisters it. */
void cryptopacket_registerhandler(DHT *dht, uint8_t byte, cryptopacket_handler_callback cb, void *object)
{
    dht->cryptopackethandlers[byte].object = object;
    dht->cryptopackethandlers[byte].function = cb;
}
|
2142
|
+
|
2143
|
+
/* Network handler for NET_PACKET_CRYPTO packets.
 *
 * If the request is addressed to our public key, decrypt it with
 * handle_request() and dispatch to the handler registered for its request
 * id; otherwise forward it towards its destination via route_packet().
 *
 * return 0 when routed successfully, the dispatched handler's return value
 * when handled locally, 1 on any error (bad length, decrypt failure, no
 * handler, routing failure).
 */
static int cryptopacket_handle(void *object, IP_Port source, const uint8_t *packet, uint32_t length)
{
    DHT *dht = object;

    if (packet[0] == NET_PACKET_CRYPTO) {
        /* Minimum: packet id + both public keys + nonce + MAC; maximum:
         * largest decrypted request plus MAC overhead. */
        if (length <= crypto_box_PUBLICKEYBYTES * 2 + crypto_box_NONCEBYTES + 1 + crypto_box_MACBYTES ||
                length > MAX_CRYPTO_REQUEST_SIZE + crypto_box_MACBYTES)
            return 1;

        if (memcmp(packet + 1, dht->self_public_key, crypto_box_PUBLICKEYBYTES) == 0) { // Check if request is for us.
            uint8_t public_key[crypto_box_PUBLICKEYBYTES];
            uint8_t data[MAX_CRYPTO_REQUEST_SIZE];
            uint8_t number;
            /* Decrypt; on success public_key is the sender, data the
             * plaintext, number the request id. */
            int len = handle_request(dht->self_public_key, dht->self_secret_key, public_key, data, &number, packet, length);

            if (len == -1 || len == 0)
                return 1;

            /* No handler registered for this request id. */
            if (!dht->cryptopackethandlers[number].function) return 1;

            return dht->cryptopackethandlers[number].function(dht->cryptopackethandlers[number].object, source, public_key,
                    data, len);

        } else { /* If request is not for us, try routing it. */
            int retval = route_packet(dht, packet + 1, packet, length);

            /* Fully forwarded only when the whole packet was sent. */
            if ((unsigned int)retval == length)
                return 0;
        }
    }

    return 1;
}
|
2176
|
+
|
2177
|
+
/*----------------------------------------------------------------------------------*/
|
2178
|
+
|
2179
|
+
/* Allocate and initialize a DHT instance bound to net.
 *
 * Registers the network and crypto packet handlers, generates the DHT
 * keypair and symmetric key, initializes both ping arrays, and adds
 * DHT_FAKE_FRIEND_NUMBER random friends.
 *
 * return the new DHT, or NULL on failure (net == NULL, allocation failure,
 * or new_ping() failure — in which case the partial instance is freed via
 * kill_DHT()).
 */
DHT *new_DHT(Networking_Core *net)
{
    /* init time */
    unix_time_update();

    if (net == NULL)
        return NULL;

    DHT *dht = calloc(1, sizeof(DHT));

    if (dht == NULL)
        return NULL;

    dht->net = net;
    dht->ping = new_ping(dht);

    if (dht->ping == NULL) {
        kill_DHT(dht);
        return NULL;
    }

    /* Wire up raw network handlers and the crypto-request sub-handlers. */
    networking_registerhandler(dht->net, NET_PACKET_GET_NODES, &handle_getnodes, dht);
    networking_registerhandler(dht->net, NET_PACKET_SEND_NODES_IPV6, &handle_sendnodes_ipv6, dht);
    networking_registerhandler(dht->net, NET_PACKET_CRYPTO, &cryptopacket_handle, dht);
    cryptopacket_registerhandler(dht, CRYPTO_PACKET_NAT_PING, &handle_NATping, dht);
    cryptopacket_registerhandler(dht, CRYPTO_PACKET_HARDENING, &handle_hardening, dht);

    new_symmetric_key(dht->secret_symmetric_key);
    crypto_box_keypair(dht->self_public_key, dht->self_secret_key);

    ping_array_init(&dht->dht_ping_array, DHT_PING_ARRAY_SIZE, PING_TIMEOUT);
    ping_array_init(&dht->dht_harden_ping_array, DHT_PING_ARRAY_SIZE, PING_TIMEOUT);
#ifdef ENABLE_ASSOC_DHT
    dht->assoc = new_Assoc_default(dht->self_public_key);
#endif
    uint32_t i;

    /* Add friends with random ids. NOTE(review): presumably done so the DHT
     * keeps searching and thus stays well-connected — confirm against the
     * DHT_FAKE_FRIEND_NUMBER definition site. */
    for (i = 0; i < DHT_FAKE_FRIEND_NUMBER; ++i) {
        uint8_t random_key_bytes[CLIENT_ID_SIZE];
        randombytes(random_key_bytes, sizeof(random_key_bytes));
        DHT_addfriend(dht, random_key_bytes);
    }

    return dht;
}
|
2224
|
+
|
2225
|
+
/* Run one iteration of DHT maintenance: close list, friend searches, NAT
 * hole punching, queued pings and hardening. Rate-limited to at most one
 * run per unix_time() second.
 */
void do_DHT(DHT *dht)
{
    // Load friends/clients if first call to do_DHT
    if (dht->has_loaded_friends_clients == 0) {
        dht->has_loaded_friends_clients = 1;
        DHT_connect_after_load(dht);
    }

    unix_time_update();

    /* Already ran during this second; skip. */
    if (dht->last_run == unix_time()) {
        return;
    }

    do_Close(dht);
    do_DHT_friends(dht);
    do_NAT(dht);
    do_to_ping(dht->ping);
    do_hardening(dht);
#ifdef ENABLE_ASSOC_DHT

    if (dht->assoc)
        do_Assoc(dht->assoc, dht);

#endif
    dht->last_run = unix_time();
}
|
2252
|
+
/* Tear down a DHT instance created by new_DHT(): unregister all packet
 * handlers, release the ping arrays, ping state and friends list, then free
 * the DHT struct itself.
 *
 * Safe to call with dht == NULL (no-op), matching free() semantics; the
 * original dereferenced dht->net unconditionally.
 */
void kill_DHT(DHT *dht)
{
    if (dht == NULL)
        return;

#ifdef ENABLE_ASSOC_DHT
    kill_Assoc(dht->assoc);
#endif
    networking_registerhandler(dht->net, NET_PACKET_GET_NODES, NULL, NULL);
    networking_registerhandler(dht->net, NET_PACKET_SEND_NODES, NULL, NULL);
    networking_registerhandler(dht->net, NET_PACKET_SEND_NODES_IPV6, NULL, NULL);
    cryptopacket_registerhandler(dht, CRYPTO_PACKET_NAT_PING, NULL, NULL);
    cryptopacket_registerhandler(dht, CRYPTO_PACKET_HARDENING, NULL, NULL);
    ping_array_free_all(&dht->dht_ping_array);
    ping_array_free_all(&dht->dht_harden_ping_array);
    kill_ping(dht->ping);
    free(dht->friends_list);
    free(dht);
}
|
2268
|
+
|
2269
|
+
/* new DHT format for load/save, more robust and forward compatible */
|
2270
|
+
|
2271
|
+
#define DHT_STATE_COOKIE_GLOBAL 0x159000d
|
2272
|
+
|
2273
|
+
#define DHT_STATE_COOKIE_TYPE 0x11ce
|
2274
|
+
#define DHT_STATE_TYPE_FRIENDS_ASSOC46 3
|
2275
|
+
#define DHT_STATE_TYPE_CLIENTS_ASSOC46 4
|
2276
|
+
|
2277
|
+
/* Get the size of the DHT (for saving). */
|
2278
|
+
uint32_t DHT_size(const DHT *dht)
|
2279
|
+
{
|
2280
|
+
uint32_t num = 0, i;
|
2281
|
+
|
2282
|
+
for (i = 0; i < LCLIENT_LIST; ++i)
|
2283
|
+
if ((dht->close_clientlist[i].assoc4.timestamp != 0) ||
|
2284
|
+
(dht->close_clientlist[i].assoc6.timestamp != 0))
|
2285
|
+
num++;
|
2286
|
+
|
2287
|
+
uint32_t size32 = sizeof(uint32_t), sizesubhead = size32 * 2;
|
2288
|
+
return size32
|
2289
|
+
+ sizesubhead + sizeof(DHT_Friend) * dht->num_friends
|
2290
|
+
+ sizesubhead + sizeof(Client_data) * num;
|
2291
|
+
}
|
2292
|
+
|
2293
|
+
static uint8_t *z_state_save_subheader(uint8_t *data, uint32_t len, uint16_t type)
|
2294
|
+
{
|
2295
|
+
uint32_t *data32 = (uint32_t *)data;
|
2296
|
+
data32[0] = len;
|
2297
|
+
data32[1] = (DHT_STATE_COOKIE_TYPE << 16) | type;
|
2298
|
+
data += sizeof(uint32_t) * 2;
|
2299
|
+
return data;
|
2300
|
+
}
|
2301
|
+
|
2302
|
+
/* Save the DHT in data where data is an array of size DHT_size(). */
|
2303
|
+
void DHT_save(DHT *dht, uint8_t *data)
|
2304
|
+
{
|
2305
|
+
uint32_t len;
|
2306
|
+
uint16_t type;
|
2307
|
+
*(uint32_t *)data = DHT_STATE_COOKIE_GLOBAL;
|
2308
|
+
data += sizeof(uint32_t);
|
2309
|
+
|
2310
|
+
len = sizeof(DHT_Friend) * dht->num_friends;
|
2311
|
+
type = DHT_STATE_TYPE_FRIENDS_ASSOC46;
|
2312
|
+
data = z_state_save_subheader(data, len, type);
|
2313
|
+
memcpy(data, dht->friends_list, len);
|
2314
|
+
data += len;
|
2315
|
+
|
2316
|
+
uint32_t num = 0, i;
|
2317
|
+
|
2318
|
+
for (i = 0; i < LCLIENT_LIST; ++i)
|
2319
|
+
if ((dht->close_clientlist[i].assoc4.timestamp != 0) ||
|
2320
|
+
(dht->close_clientlist[i].assoc6.timestamp != 0))
|
2321
|
+
num++;
|
2322
|
+
|
2323
|
+
len = num * sizeof(Client_data);
|
2324
|
+
type = DHT_STATE_TYPE_CLIENTS_ASSOC46;
|
2325
|
+
data = z_state_save_subheader(data, len, type);
|
2326
|
+
|
2327
|
+
if (num) {
|
2328
|
+
Client_data *clients = (Client_data *)data;
|
2329
|
+
|
2330
|
+
for (num = 0, i = 0; i < LCLIENT_LIST; ++i)
|
2331
|
+
if ((dht->close_clientlist[i].assoc4.timestamp != 0) ||
|
2332
|
+
(dht->close_clientlist[i].assoc6.timestamp != 0))
|
2333
|
+
memcpy(&clients[num++], &dht->close_clientlist[i], sizeof(Client_data));
|
2334
|
+
}
|
2335
|
+
}
|
2336
|
+
|
2337
|
+
/* Bootstrap off every client stored in loaded_clients_list (no-op when the
 * list was never loaded). */
static void DHT_bootstrap_loaded_clients(DHT *dht)
{
    if (dht->loaded_clients_list == NULL)
        return;

    uint32_t idx;

    for (idx = 0; idx < dht->loaded_num_clients; ++idx) {
        Client_data *saved = &dht->loaded_clients_list[idx];

        if (saved->assoc4.timestamp != 0)
            DHT_bootstrap(dht, saved->assoc4.ip_port, saved->client_id);

        if (saved->assoc6.timestamp != 0)
            DHT_bootstrap(dht, saved->assoc6.ip_port, saved->client_id);
    }
}
|
2355
|
+
|
2356
|
+
/* Send getnodes requests to every client saved with each loaded friend
 * (no-op when loaded_friends_list was never set). */
static void getnodes_of_loaded_friend_clients(DHT *dht)
{
    if (dht->loaded_friends_list == NULL)
        return;

    uint32_t f_idx, c_idx;

    for (f_idx = 0; f_idx < dht->loaded_num_friends; ++f_idx) {
        DHT_Friend *saved_friend = &dht->loaded_friends_list[f_idx];

        for (c_idx = 0; c_idx < MAX_FRIEND_CLIENTS; ++c_idx) {
            Client_data *client = &saved_friend->client_list[c_idx];

            if (client->assoc4.timestamp != 0)
                getnodes(dht, client->assoc4.ip_port, client->client_id, saved_friend->client_id, NULL);

            if (client->assoc6.timestamp != 0)
                getnodes(dht, client->assoc6.ip_port, client->client_id, saved_friend->client_id, NULL);
        }
    }
}
|
2378
|
+
|
2379
|
+
/* Start sending packets after DHT loaded_friends_list and loaded_clients_list are set */
|
2380
|
+
int DHT_connect_after_load(DHT *dht)
|
2381
|
+
{
|
2382
|
+
if (dht == NULL)
|
2383
|
+
return -1;
|
2384
|
+
|
2385
|
+
getnodes_of_loaded_friend_clients(dht);
|
2386
|
+
DHT_bootstrap_loaded_clients(dht);
|
2387
|
+
|
2388
|
+
// Loaded lists were allocd, free them
|
2389
|
+
free(dht->loaded_friends_list);
|
2390
|
+
dht->loaded_friends_list = NULL;
|
2391
|
+
dht->loaded_num_friends = 0;
|
2392
|
+
|
2393
|
+
free(dht->loaded_clients_list);
|
2394
|
+
dht->loaded_clients_list = NULL;
|
2395
|
+
dht->loaded_num_clients = 0;
|
2396
|
+
|
2397
|
+
return 0;
|
2398
|
+
}
|
2399
|
+
|
2400
|
+
static int dht_load_state_callback(void *outer, const uint8_t *data, uint32_t length, uint16_t type)
|
2401
|
+
{
|
2402
|
+
DHT *dht = outer;
|
2403
|
+
uint32_t num, i, j;
|
2404
|
+
|
2405
|
+
switch (type) {
|
2406
|
+
case DHT_STATE_TYPE_FRIENDS_ASSOC46:
|
2407
|
+
if (length % sizeof(DHT_Friend) != 0)
|
2408
|
+
break;
|
2409
|
+
|
2410
|
+
{ /* localize declarations */
|
2411
|
+
DHT_Friend *friend_list = (DHT_Friend *)data;
|
2412
|
+
num = length / sizeof(DHT_Friend);
|
2413
|
+
|
2414
|
+
free(dht->loaded_friends_list);
|
2415
|
+
// Copy to loaded_friends_list
|
2416
|
+
dht->loaded_friends_list = calloc(num, sizeof(DHT_Friend));
|
2417
|
+
|
2418
|
+
for (i = 0; i < num; i++)
|
2419
|
+
memcpy(&(dht->loaded_friends_list[i]), &(friend_list[i]), sizeof(DHT_Friend));
|
2420
|
+
|
2421
|
+
dht->loaded_num_friends = num;
|
2422
|
+
|
2423
|
+
dht->has_loaded_friends_clients = 1;
|
2424
|
+
} /* localize declarations */
|
2425
|
+
|
2426
|
+
break;
|
2427
|
+
|
2428
|
+
case DHT_STATE_TYPE_CLIENTS_ASSOC46:
|
2429
|
+
if ((length % sizeof(Client_data)) != 0)
|
2430
|
+
break;
|
2431
|
+
|
2432
|
+
{ /* localize declarations */
|
2433
|
+
num = length / sizeof(Client_data);
|
2434
|
+
Client_data *client_list = (Client_data *)data;
|
2435
|
+
|
2436
|
+
free(dht->loaded_clients_list);
|
2437
|
+
// Copy to loaded_clients_list
|
2438
|
+
dht->loaded_clients_list = calloc(num, sizeof(Client_data));
|
2439
|
+
|
2440
|
+
for (i = 0; i < num; i++)
|
2441
|
+
memcpy(&(dht->loaded_clients_list[i]), &(client_list[i]), sizeof(Client_data));
|
2442
|
+
|
2443
|
+
dht->loaded_num_clients = num;
|
2444
|
+
|
2445
|
+
dht->has_loaded_friends_clients = 1;
|
2446
|
+
} /* localize declarations */
|
2447
|
+
|
2448
|
+
break;
|
2449
|
+
|
2450
|
+
#ifdef DEBUG
|
2451
|
+
|
2452
|
+
default:
|
2453
|
+
fprintf(stderr, "Load state (DHT): contains unrecognized part (len %u, type %u)\n",
|
2454
|
+
length, type);
|
2455
|
+
break;
|
2456
|
+
#endif
|
2457
|
+
}
|
2458
|
+
|
2459
|
+
return 0;
|
2460
|
+
}
|
2461
|
+
|
2462
|
+
/* Load the DHT from data of size size.
|
2463
|
+
*
|
2464
|
+
* return -1 if failure.
|
2465
|
+
* return 0 if success.
|
2466
|
+
*/
|
2467
|
+
int DHT_load(DHT *dht, const uint8_t *data, uint32_t length)
|
2468
|
+
{
|
2469
|
+
uint32_t cookie_len = sizeof(uint32_t);
|
2470
|
+
|
2471
|
+
if (length > cookie_len) {
|
2472
|
+
uint32_t *data32 = (uint32_t *)data;
|
2473
|
+
|
2474
|
+
if (data32[0] == DHT_STATE_COOKIE_GLOBAL)
|
2475
|
+
return load_state(dht_load_state_callback, dht, data + cookie_len,
|
2476
|
+
length - cookie_len, DHT_STATE_COOKIE_TYPE);
|
2477
|
+
}
|
2478
|
+
|
2479
|
+
return -1;
|
2480
|
+
}
|
2481
|
+
|
2482
|
+
/* return 0 if we are not connected to the DHT.
|
2483
|
+
* return 1 if we are.
|
2484
|
+
*/
|
2485
|
+
int DHT_isconnected(const DHT *dht)
|
2486
|
+
{
|
2487
|
+
uint32_t i;
|
2488
|
+
unix_time_update();
|
2489
|
+
|
2490
|
+
for (i = 0; i < LCLIENT_LIST; ++i) {
|
2491
|
+
const Client_data *client = &dht->close_clientlist[i];
|
2492
|
+
|
2493
|
+
if (!is_timeout(client->assoc4.timestamp, BAD_NODE_TIMEOUT) ||
|
2494
|
+
!is_timeout(client->assoc6.timestamp, BAD_NODE_TIMEOUT))
|
2495
|
+
return 1;
|
2496
|
+
}
|
2497
|
+
|
2498
|
+
return 0;
|
2499
|
+
}
|
2500
|
+
|
2501
|
+
/* return 0 if we are not connected or only connected to lan peers with the DHT.
|
2502
|
+
* return 1 if we are.
|
2503
|
+
*/
|
2504
|
+
int DHT_non_lan_connected(const DHT *dht)
|
2505
|
+
{
|
2506
|
+
uint32_t i;
|
2507
|
+
unix_time_update();
|
2508
|
+
|
2509
|
+
for (i = 0; i < LCLIENT_LIST; ++i) {
|
2510
|
+
const Client_data *client = &dht->close_clientlist[i];
|
2511
|
+
|
2512
|
+
if (!is_timeout(client->assoc4.timestamp, BAD_NODE_TIMEOUT) && LAN_ip(client->assoc4.ip_port.ip) == -1)
|
2513
|
+
return 1;
|
2514
|
+
|
2515
|
+
if (!is_timeout(client->assoc6.timestamp, BAD_NODE_TIMEOUT) && LAN_ip(client->assoc6.ip_port.ip) == -1)
|
2516
|
+
return 1;
|
2517
|
+
|
2518
|
+
}
|
2519
|
+
|
2520
|
+
return 0;
|
2521
|
+
}
|