ffi-nats-core 0.3.0 → 0.3.1
- checksums.yaml +4 -4
- data/ffi-nats-core.gemspec +8 -0
- data/lib/ffi/nats/core/version.rb +1 -1
- data/vendor/cnats/CMakeLists.txt +137 -0
- data/vendor/cnats/adapters/libevent.h +220 -0
- data/vendor/cnats/adapters/libuv.h +472 -0
- data/vendor/cnats/examples/CMakeLists.txt +56 -0
- data/vendor/cnats/examples/asynctimeout.c +83 -0
- data/vendor/cnats/examples/examples.h +322 -0
- data/vendor/cnats/examples/libevent-pub.c +136 -0
- data/vendor/cnats/examples/libevent-sub.c +104 -0
- data/vendor/cnats/examples/libuv-pub.c +120 -0
- data/vendor/cnats/examples/libuv-sub.c +114 -0
- data/vendor/cnats/examples/publisher.c +62 -0
- data/vendor/cnats/examples/queuegroup.c +132 -0
- data/vendor/cnats/examples/replier.c +149 -0
- data/vendor/cnats/examples/requestor.c +75 -0
- data/vendor/cnats/examples/subscriber.c +133 -0
- data/vendor/cnats/src/CMakeLists.txt +31 -0
- data/vendor/cnats/src/asynccb.c +66 -0
- data/vendor/cnats/src/asynccb.h +42 -0
- data/vendor/cnats/src/buf.c +246 -0
- data/vendor/cnats/src/buf.h +116 -0
- data/vendor/cnats/src/comsock.c +474 -0
- data/vendor/cnats/src/comsock.h +81 -0
- data/vendor/cnats/src/conn.c +2725 -0
- data/vendor/cnats/src/conn.h +75 -0
- data/vendor/cnats/src/err.h +31 -0
- data/vendor/cnats/src/gc.h +27 -0
- data/vendor/cnats/src/hash.c +725 -0
- data/vendor/cnats/src/hash.h +141 -0
- data/vendor/cnats/src/include/n-unix.h +56 -0
- data/vendor/cnats/src/include/n-win.h +59 -0
- data/vendor/cnats/src/mem.h +20 -0
- data/vendor/cnats/src/msg.c +155 -0
- data/vendor/cnats/src/msg.h +43 -0
- data/vendor/cnats/src/nats.c +1734 -0
- data/vendor/cnats/src/nats.h +2024 -0
- data/vendor/cnats/src/natsp.h +518 -0
- data/vendor/cnats/src/natstime.c +79 -0
- data/vendor/cnats/src/natstime.h +27 -0
- data/vendor/cnats/src/nuid.c +265 -0
- data/vendor/cnats/src/nuid.h +21 -0
- data/vendor/cnats/src/opts.c +1030 -0
- data/vendor/cnats/src/opts.h +19 -0
- data/vendor/cnats/src/parser.c +869 -0
- data/vendor/cnats/src/parser.h +87 -0
- data/vendor/cnats/src/pub.c +293 -0
- data/vendor/cnats/src/srvpool.c +380 -0
- data/vendor/cnats/src/srvpool.h +71 -0
- data/vendor/cnats/src/stats.c +54 -0
- data/vendor/cnats/src/stats.h +21 -0
- data/vendor/cnats/src/status.c +60 -0
- data/vendor/cnats/src/status.h +95 -0
- data/vendor/cnats/src/sub.c +956 -0
- data/vendor/cnats/src/sub.h +34 -0
- data/vendor/cnats/src/timer.c +86 -0
- data/vendor/cnats/src/timer.h +57 -0
- data/vendor/cnats/src/unix/cond.c +103 -0
- data/vendor/cnats/src/unix/mutex.c +107 -0
- data/vendor/cnats/src/unix/sock.c +105 -0
- data/vendor/cnats/src/unix/thread.c +162 -0
- data/vendor/cnats/src/url.c +134 -0
- data/vendor/cnats/src/url.h +24 -0
- data/vendor/cnats/src/util.c +823 -0
- data/vendor/cnats/src/util.h +75 -0
- data/vendor/cnats/src/version.h +29 -0
- data/vendor/cnats/src/version.h.in +29 -0
- data/vendor/cnats/src/win/cond.c +86 -0
- data/vendor/cnats/src/win/mutex.c +54 -0
- data/vendor/cnats/src/win/sock.c +158 -0
- data/vendor/cnats/src/win/strings.c +108 -0
- data/vendor/cnats/src/win/thread.c +180 -0
- data/vendor/cnats/test/CMakeLists.txt +35 -0
- data/vendor/cnats/test/certs/ca.pem +38 -0
- data/vendor/cnats/test/certs/client-cert.pem +30 -0
- data/vendor/cnats/test/certs/client-key.pem +51 -0
- data/vendor/cnats/test/certs/server-cert.pem +31 -0
- data/vendor/cnats/test/certs/server-key.pem +51 -0
- data/vendor/cnats/test/dylib/CMakeLists.txt +10 -0
- data/vendor/cnats/test/dylib/nonats.c +13 -0
- data/vendor/cnats/test/list.txt +125 -0
- data/vendor/cnats/test/test.c +11655 -0
- data/vendor/cnats/test/tls.conf +15 -0
- data/vendor/cnats/test/tlsverify.conf +19 -0
- metadata +83 -1
data/vendor/cnats/src/comsock.h
@@ -0,0 +1,81 @@
+// Copyright 2015 Apcera Inc. All rights reserved.
+
+#ifndef SOCK_H_
+#define SOCK_H_
+
+#include "natsp.h"
+
+natsStatus
+natsSock_Init(natsSockCtx *ctx);
+
+void
+natsSock_Clear(natsSockCtx *ctx);
+
+natsStatus
+natsSock_WaitReady(int waitMode, natsSockCtx *ctx);
+
+natsStatus
+natsSock_ConnectTcp(natsSockCtx *ctx, const char *host, int port);
+
+natsStatus
+natsSock_SetBlocking(natsSock fd, bool blocking);
+
+natsStatus
+natsSock_CreateFDSet(fd_set **newFDSet);
+
+void
+natsSock_DestroyFDSet(fd_set *fdSet);
+
+bool
+natsSock_IsConnected(natsSock fd);
+
+// Reads a line from the socket and returns it without the line-ending characters.
+// This call blocks until the line is complete, or the socket is closed or an
+// error occurs.
+// Handles blocking and non-blocking sockets. For the latter, an optional 'deadline'
+// indicates how long it can wait for the full read to complete.
+//
+// NOTE: 'buffer[0]' must be set to '\0' prior to the very first call. If the
+// peer is sending multiple lines, it is possible that this function reads the
+// next line(s) (or partials) in a single call. In this case, the caller needs
+// to repeat the call with the same buffer to "read" the next line.
+natsStatus
+natsSock_ReadLine(natsSockCtx *ctx, char *buffer, size_t maxBufferSize);
+
+// Reads up to 'maxBufferSize' bytes from the socket and puts them in 'buffer'.
+// If the socket is blocking, waits until some data is available or the socket
+// is closed or an error occurs.
+// If the socket is non-blocking, waits up to the optional deadline (set in
+// the context). If NULL, behaves like a blocking socket.
+// If an external event loop is used, it is possible that this function
+// returns NATS_OK with 'n' == 0.
+natsStatus
+natsSock_Read(natsSockCtx *ctx, char *buffer, size_t maxBufferSize, int *n);
+
+// Writes up to 'len' bytes to the socket. If the socket is blocking,
+// waits for some data to be sent. If the socket is non-blocking, waits up
+// to the optional deadline (set in ctx).
+// If an external event loop is used, it is possible that this function
+// returns NATS_OK with 'n' == 0.
+natsStatus
+natsSock_Write(natsSockCtx *ctx, const char *data, int len, int *n);
+
+// Writes 'len' bytes to the socket. Does not return until all bytes
+// have been written, unless the socket is closed or an error occurs.
+natsStatus
+natsSock_WriteFully(natsSockCtx *ctx, const char *data, int len);
+
+natsStatus
+natsSock_Flush(natsSock fd);
+
+void
+natsSock_Close(natsSock fd);
+
+natsStatus
+natsSock_SetCommonTcpOptions(natsSock fd);
+
+void
+natsSock_Shutdown(natsSock fd);
+
+
+#endif /* SOCK_H_ */
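The NOTE on natsSock_ReadLine above describes a subtle contract: the caller reuses one buffer across calls, and a single call may already have pulled in (parts of) the following lines. A minimal caller sketch, not part of the diff, assuming an initialized, connected natsSockCtx (the 'dumpLines' name is hypothetical; the signatures are the ones declared above):

#include <stdio.h>

static void
dumpLines(natsSockCtx *ctx)
{
    char line[256];

    line[0] = '\0'; // required before the very first call
    while (natsSock_ReadLine(ctx, line, sizeof(line)) == NATS_OK)
    {
        // 'line' holds one line without its CRLF; repeating the call with
        // the same buffer yields the next line, which may already sit in
        // the buffer from the previous read.
        printf("%s\n", line);
    }
}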
data/vendor/cnats/src/conn.c
@@ -0,0 +1,2725 @@
+// Copyright 2015-2016 Apcera Inc. All rights reserved.
+
+#include "natsp.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "conn.h"
+#include "mem.h"
+#include "buf.h"
+#include "parser.h"
+#include "srvpool.h"
+#include "url.h"
+#include "opts.h"
+#include "util.h"
+#include "timer.h"
+#include "sub.h"
+#include "msg.h"
+#include "asynccb.h"
+#include "comsock.h"
+
+#define DEFAULT_SCRATCH_SIZE (512)
+#define DEFAULT_BUF_SIZE (32768)
+#define DEFAULT_PENDING_SIZE (1024 * 1024)
+
+#define NATS_EVENT_ACTION_ADD (true)
+#define NATS_EVENT_ACTION_REMOVE (false)
+
+#ifdef DEV_MODE
+// For type safety
+
+static void _retain(natsConnection *nc) { nc->refs++; }
+static void _release(natsConnection *nc) { nc->refs--; }
+
+void natsConn_Lock(natsConnection *nc) { natsMutex_Lock(nc->mu); }
+void natsConn_Unlock(natsConnection *nc) { natsMutex_Unlock(nc->mu); }
+
+#else
+// We know what we are doing :-)
+
+#define _retain(c) ((c)->refs++)
+#define _release(c) ((c)->refs--)
+
+#endif // DEV_MODE
+
+
+// CLIENT_PROTO_ZERO is the original client protocol from 2009.
+// http://nats.io/documentation/internals/nats-protocol/
+#define CLIENT_PROTO_ZERO (0)
+
+// CLIENT_PROTO_INFO signals a client can receive more than the original INFO block.
+// This can be used to update clients on other cluster members, etc.
+#define CLIENT_PROTO_INFO (1)
+
+/*
+ * Forward declarations:
+ */
+static natsStatus
+_spinUpSocketWatchers(natsConnection *nc);
+
+static natsStatus
+_processConnInit(natsConnection *nc);
+
+static void
+_close(natsConnection *nc, natsConnStatus status, bool doCBs);
+
+/*
+ * ----------------------------------------
+ */
+
+struct threadsToJoin
+{
+    natsThread *readLoop;
+    natsThread *flusher;
+    natsThread *reconnect;
+    bool joinReconnect;
+
+} threadsToJoin;
+
+static void
+_initThreadsToJoin(struct threadsToJoin *ttj, natsConnection *nc, bool joinReconnect)
+{
+    memset(ttj, 0, sizeof(threadsToJoin));
+
+    ttj->joinReconnect = joinReconnect;
+
+    if (nc->readLoopThread != NULL)
+    {
+        ttj->readLoop = nc->readLoopThread;
+        nc->readLoopThread = NULL;
+    }
+
+    if (joinReconnect && (nc->reconnectThread != NULL))
+    {
+        ttj->reconnect = nc->reconnectThread;
+        nc->reconnectThread = NULL;
+    }
+
+    if (nc->flusherThread != NULL)
+    {
+        nc->flusherStop = true;
+        natsCondition_Signal(nc->flusherCond);
+
+        ttj->flusher = nc->flusherThread;
+        nc->flusherThread = NULL;
+    }
+}
+
+static void
+_joinThreads(struct threadsToJoin *ttj)
+{
+    if (ttj->readLoop != NULL)
+    {
+        natsThread_Join(ttj->readLoop);
+        natsThread_Destroy(ttj->readLoop);
+    }
+
+    if (ttj->joinReconnect && (ttj->reconnect != NULL))
+    {
+        natsThread_Join(ttj->reconnect);
+        natsThread_Destroy(ttj->reconnect);
+    }
+
+    if (ttj->flusher != NULL)
+    {
+        natsThread_Join(ttj->flusher);
+        natsThread_Destroy(ttj->flusher);
+    }
+}
+
+static void
+_clearServerInfo(natsServerInfo *si)
+{
+    int i;
+
+    NATS_FREE(si->id);
+    NATS_FREE(si->host);
+    NATS_FREE(si->version);
+
+    for (i=0; i<si->connectURLsCount; i++)
+        NATS_FREE(si->connectURLs[i]);
+    NATS_FREE(si->connectURLs);
+
+    memset(si, 0, sizeof(natsServerInfo));
+}
+
+static void
+_freeConn(natsConnection *nc)
+{
+    if (nc == NULL)
+        return;
+
+    natsTimer_Destroy(nc->ptmr);
+    natsBuf_Destroy(nc->pending);
+    natsBuf_Destroy(nc->scratch);
+    natsBuf_Destroy(nc->bw);
+    natsSrvPool_Destroy(nc->srvPool);
+    _clearServerInfo(&(nc->info));
+    natsCondition_Destroy(nc->flusherCond);
+    natsCondition_Destroy(nc->pongs.cond);
+    natsParser_Destroy(nc->ps);
+    natsThread_Destroy(nc->readLoopThread);
+    natsThread_Destroy(nc->flusherThread);
+    natsHash_Destroy(nc->subs);
+    natsOptions_Destroy(nc->opts);
+    natsSock_Clear(&nc->sockCtx);
+    if (nc->sockCtx.ssl != NULL)
+        SSL_free(nc->sockCtx.ssl);
+    NATS_FREE(nc->el.buffer);
+    natsMutex_Destroy(nc->mu);
+
+    NATS_FREE(nc);
+
+    natsLib_Release();
+}
+
+void
+natsConn_retain(natsConnection *nc)
+{
+    if (nc == NULL)
+        return;
+
+    natsConn_Lock(nc);
+
+    nc->refs++;
+
+    natsConn_Unlock(nc);
+}
+
+void
+natsConn_release(natsConnection *nc)
+{
+    int refs = 0;
+
+    if (nc == NULL)
+        return;
+
+    natsConn_Lock(nc);
+
+    refs = --(nc->refs);
+
+    natsConn_Unlock(nc);
+
+    if (refs == 0)
+        _freeConn(nc);
+}
+
+void
+natsConn_lockAndRetain(natsConnection *nc)
+{
+    natsConn_Lock(nc);
+    nc->refs++;
+}
+
+void
+natsConn_unlockAndRelease(natsConnection *nc)
+{
+    int refs = 0;
+
+    refs = --(nc->refs);
+
+    natsConn_Unlock(nc);
+
+    if (refs == 0)
+        _freeConn(nc);
+}
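// Illustration (not part of the diff): the retain/release pair above is the
// reference-counting discipline followed by every thread and timer in this
// file; _freeConn() only runs when the last reference is dropped. A minimal
// sketch of how a new owner would follow it ('_worker' and '_startWorker'
// are hypothetical names):
static void
_worker(void *arg)
{
    natsConnection *nc = (natsConnection*) arg;

    // ... use 'nc' safely here: the caller retained it for us ...

    // Compensate for the retain done by the code that spawned this thread.
    natsConn_release(nc);
}

static natsStatus
_startWorker(natsConnection *nc, natsThread **t)
{
    natsStatus s;

    natsConn_retain(nc);                 // the thread now owns a reference
    s = natsThread_Create(t, _worker, (void*) nc);
    if (s != NATS_OK)
        natsConn_release(nc);            // thread never started: give it back
    return s;
}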
+
+natsStatus
+natsConn_bufferFlush(natsConnection *nc)
+{
+    natsStatus s = NATS_OK;
+    int bufLen = natsBuf_Len(nc->bw);
+
+    if (bufLen == 0)
+        return NATS_OK;
+
+    if (nc->usePending)
+    {
+        s = natsBuf_Append(nc->pending, natsBuf_Data(nc->bw), bufLen);
+    }
+    else if (nc->sockCtx.useEventLoop)
+    {
+        if (!(nc->el.writeAdded))
+        {
+            nc->el.writeAdded = true;
+            s = nc->opts->evCbs.write(nc->el.data, NATS_EVENT_ACTION_ADD);
+            if (s != NATS_OK)
+                nats_setError(s, "Error processing write request: %d - %s",
+                              s, natsStatus_GetText(s));
+        }
+
+        return NATS_UPDATE_ERR_STACK(s);
+    }
+    else
+    {
+        s = natsSock_WriteFully(&(nc->sockCtx), natsBuf_Data(nc->bw), bufLen);
+    }
+
+    if (s == NATS_OK)
+        natsBuf_Reset(nc->bw);
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+natsStatus
+natsConn_bufferWrite(natsConnection *nc, const char *buffer, int len)
+{
+    natsStatus s = NATS_OK;
+    int offset = 0;
+    int avail = 0;
+
+    if (len <= 0)
+        return NATS_OK;
+
+    if (nc->usePending)
+        return natsBuf_Append(nc->pending, buffer, len);
+
+    if (nc->sockCtx.useEventLoop)
+    {
+        s = natsBuf_Append(nc->bw, buffer, len);
+        if ((s == NATS_OK)
+            && (natsBuf_Len(nc->bw) >= DEFAULT_BUF_SIZE)
+            && !(nc->el.writeAdded))
+        {
+            nc->el.writeAdded = true;
+            s = nc->opts->evCbs.write(nc->el.data, NATS_EVENT_ACTION_ADD);
+            if (s != NATS_OK)
+                nats_setError(s, "Error processing write request: %d - %s",
+                              s, natsStatus_GetText(s));
+        }
+
+        return NATS_UPDATE_ERR_STACK(s);
+    }
+
+    // If we have more data than can fit..
+    while ((s == NATS_OK) && (len > natsBuf_Available(nc->bw)))
+    {
+        // If there is nothing in the buffer...
+        if (natsBuf_Len(nc->bw) == 0)
+        {
+            // Do a single socket write to avoid a copy
+            s = natsSock_WriteFully(&(nc->sockCtx), buffer + offset, len);
+
+            // We are done
+            return NATS_UPDATE_ERR_STACK(s);
+        }
+
+        // We already have data in the buffer, check how many more bytes
+        // we can fit
+        avail = natsBuf_Available(nc->bw);
+
+        // Append that many bytes
+        s = natsBuf_Append(nc->bw, buffer + offset, avail);
+
+        // Flush the buffer
+        if (s == NATS_OK)
+            s = natsConn_bufferFlush(nc);
+
+        // On success, decrement what's left to send and update the
+        // offset.
+        if (s == NATS_OK)
+        {
+            len -= avail;
+            offset += avail;
+        }
+    }
+
+    // If there is data left, the buffer can now hold this data.
+    if ((s == NATS_OK) && (len > 0))
+        s = natsBuf_Append(nc->bw, buffer + offset, len);
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+natsStatus
+natsConn_bufferWriteString(natsConnection *nc, const char *string)
+{
+    natsStatus s = natsConn_bufferWrite(nc, string, (int) strlen(string));
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
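// Illustration (not part of the diff): protocol bytes are queued through the
// write buffer under the connection lock, then the flusher thread is kicked
// to push them to the socket -- the same pattern _sendProto() uses further
// down this file. '_queuePub' and the PUB payload are hypothetical:
static natsStatus
_queuePub(natsConnection *nc)
{
    natsStatus s;

    natsConn_Lock(nc);

    s = natsConn_bufferWrite(nc, "PUB foo 5\r\nhello\r\n", 18);
    if (s == NATS_OK)
        natsConn_kickFlusher(nc);   // wake the flusher thread (defined below)

    natsConn_Unlock(nc);

    return s;
}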
+
+// _createConn will connect to the server and do the right thing when an
+// existing connection is in place.
+static natsStatus
+_createConn(natsConnection *nc)
+{
+    natsStatus s = NATS_OK;
+    natsSrv *cur = NULL;
+
+    cur = natsSrvPool_GetCurrentServer(nc->srvPool, nc->url, NULL);
+    if (cur == NULL)
+        return nats_setDefaultError(NATS_NO_SERVER);
+
+    cur->lastAttempt = nats_Now();
+
+    // Sets a deadline for the connect process (not just the low level
+    // tcp connect). The deadline will be removed when we have received
+    // the PONG to our initial PING. See _processConnInit().
+    natsDeadline_Init(&(nc->sockCtx.deadline), nc->opts->timeout);
+
+    // Set the IP resolution order
+    nc->sockCtx.orderIP = nc->opts->orderIP;
+
+    s = natsSock_ConnectTcp(&(nc->sockCtx), nc->url->host, nc->url->port);
+    if (s == NATS_OK)
+    {
+        nc->sockCtx.fdActive = true;
+
+        if ((nc->pending != NULL) && (nc->bw != NULL)
+            && (natsBuf_Len(nc->bw) > 0))
+        {
+            // Move to pending buffer
+            s = natsConn_bufferWrite(nc, natsBuf_Data(nc->bw),
+                                     natsBuf_Len(nc->bw));
+        }
+    }
+
+    if (s == NATS_OK)
+    {
+        if (nc->bw == NULL)
+            s = natsBuf_Create(&(nc->bw), DEFAULT_BUF_SIZE);
+        else
+            natsBuf_Reset(nc->bw);
+    }
+
+    if (s != NATS_OK)
+    {
+        // reset the deadline
+        natsDeadline_Clear(&(nc->sockCtx.deadline));
+    }
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+static void
+_clearControlContent(natsControl *control)
+{
+    NATS_FREE(control->op);
+    NATS_FREE(control->args);
+}
+
+static void
+_initControlContent(natsControl *control)
+{
+    control->op = NULL;
+    control->args = NULL;
+}
+
+static bool
+_isConnecting(natsConnection *nc)
+{
+    return nc->status == CONNECTING;
+}
+
+bool
+natsConn_isClosed(natsConnection *nc)
+{
+    return nc->status == CLOSED;
+}
+
+bool
+natsConn_isReconnecting(natsConnection *nc)
+{
+    return (nc->status == RECONNECTING);
+}
+
+static natsStatus
+_readOp(natsConnection *nc, natsControl *control)
+{
+    natsStatus s = NATS_OK;
+    char buffer[DEFAULT_BUF_SIZE];
+
+    buffer[0] = '\0';
+
+    s = natsSock_ReadLine(&(nc->sockCtx), buffer, sizeof(buffer));
+    if (s == NATS_OK)
+        s = nats_ParseControl(control, buffer);
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+// _processInfo is used to parse the info messages sent
+// from the server.
+// This function may update the server pool.
+static natsStatus
+_processInfo(natsConnection *nc, char *info, int len)
+{
+    natsStatus s = NATS_OK;
+    nats_JSON *json = NULL;
+
+    if (info == NULL)
+        return NATS_OK;
+
+    _clearServerInfo(&(nc->info));
+
+    s = nats_JSONParse(&json, info, len);
+    if (s != NATS_OK)
+        return NATS_UPDATE_ERR_STACK(s);
+
+    if (s == NATS_OK)
+        s = nats_JSONGetValue(json, "server_id", TYPE_STR,
+                              (void**) &(nc->info.id));
+    if (s == NATS_OK)
+        s = nats_JSONGetValue(json, "version", TYPE_STR,
+                              (void**) &(nc->info.version));
+    if (s == NATS_OK)
+        s = nats_JSONGetValue(json, "host", TYPE_STR,
+                              (void**) &(nc->info.host));
+    if (s == NATS_OK)
+        s = nats_JSONGetValue(json, "port", TYPE_INT,
+                              (void**) &(nc->info.port));
+    if (s == NATS_OK)
+        s = nats_JSONGetValue(json, "auth_required", TYPE_BOOL,
+                              (void**) &(nc->info.authRequired));
+    if (s == NATS_OK)
+        s = nats_JSONGetValue(json, "tls_required", TYPE_BOOL,
+                              (void**) &(nc->info.tlsRequired));
+    if (s == NATS_OK)
+        s = nats_JSONGetValue(json, "max_payload", TYPE_LONG,
+                              (void**) &(nc->info.maxPayload));
+    if (s == NATS_OK)
+        s = nats_JSONGetArrayValue(json, "connect_urls", TYPE_STR,
+                                   (void***) &(nc->info.connectURLs),
+                                   &(nc->info.connectURLsCount));
+
+#if 0
+    fprintf(stderr, "Id=%s Version=%s Host=%s Port=%d Auth=%s SSL=%s Payload=%d\n",
+            nc->info.id, nc->info.version, nc->info.host, nc->info.port,
+            nats_GetBoolStr(nc->info.authRequired),
+            nats_GetBoolStr(nc->info.tlsRequired),
+            (int) nc->info.maxPayload);
+#endif
+
+    if (s == NATS_OK)
+        s = natsSrvPool_addNewURLs(nc->srvPool,
+                                   nc->info.connectURLs,
+                                   nc->info.connectURLsCount,
+                                   !nc->opts->noRandomize);
+
+    if (s != NATS_OK)
+        s = nats_setError(NATS_PROTOCOL_ERROR,
+                          "Invalid protocol: %s", nats_GetLastError(NULL));
+
+    nats_JSONDestroy(json);
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+// natsConn_processAsyncINFO does the same as _processInfo, but is called
+// from the parser. Calls _processInfo under the connection's lock
+// protection.
+void
+natsConn_processAsyncINFO(natsConnection *nc, char *buf, int len)
+{
+    natsConn_Lock(nc);
+    // Ignore errors, we will simply not update the server pool...
+    (void) _processInfo(nc, buf, len);
+    natsConn_Unlock(nc);
+}
+
+// _makeTLSConn will wrap an existing Conn using TLS
+static natsStatus
+_makeTLSConn(natsConnection *nc)
+{
+#if defined(NATS_HAS_TLS)
+    natsStatus s = NATS_OK;
+    SSL *ssl = NULL;
+
+    // Reset nc->errStr before initiating the handshake...
+    nc->errStr[0] = '\0';
+
+    natsMutex_Lock(nc->opts->sslCtx->lock);
+
+    s = natsSock_SetBlocking(nc->sockCtx.fd, true);
+    if (s == NATS_OK)
+    {
+        ssl = SSL_new(nc->opts->sslCtx->ctx);
+        if (ssl == NULL)
+        {
+            s = nats_setError(NATS_SSL_ERROR,
+                              "Error creating SSL object: %s",
+                              NATS_SSL_ERR_REASON_STRING);
+        }
+        else
+        {
+            nats_sslRegisterThreadForCleanup();
+
+            SSL_set_ex_data(ssl, 0, (void*) nc);
+        }
+    }
+    if (s == NATS_OK)
+    {
+        SSL_set_connect_state(ssl);
+
+        if (SSL_set_fd(ssl, (int) nc->sockCtx.fd) != 1)
+        {
+            s = nats_setError(NATS_SSL_ERROR,
+                              "Error connecting the SSL object to a file descriptor: %s",
+                              NATS_SSL_ERR_REASON_STRING);
+        }
+    }
+    if (s == NATS_OK)
+    {
+        if (SSL_do_handshake(ssl) != 1)
+        {
+            s = nats_setError(NATS_SSL_ERROR,
+                              "SSL handshake error: %s",
+                              NATS_SSL_ERR_REASON_STRING);
+        }
+    }
+    if ((s == NATS_OK) && !nc->opts->sslCtx->skipVerify)
+    {
+        X509 *cert = SSL_get_peer_certificate(ssl);
+
+        if (cert != NULL)
+        {
+            if ((SSL_get_verify_result(ssl) != X509_V_OK)
+                || (nc->errStr[0] != '\0'))
+            {
+                s = nats_setError(NATS_SSL_ERROR,
+                                  "Server certificate verification failed: %s",
+                                  nc->errStr);
+            }
+            X509_free(cert);
+        }
+        else
+        {
+            s = nats_setError(NATS_SSL_ERROR, "%s",
+                              "Server did not provide a certificate");
+        }
+    }
+
+    if (s == NATS_OK)
+        s = natsSock_SetBlocking(nc->sockCtx.fd, false);
+
+    natsMutex_Unlock(nc->opts->sslCtx->lock);
+
+    if (s != NATS_OK)
+    {
+        if (ssl != NULL)
+            SSL_free(ssl);
+    }
+    else
+    {
+        nc->sockCtx.ssl = ssl;
+    }
+
+    return NATS_UPDATE_ERR_STACK(s);
+#else
+    return nats_setError(NATS_ILLEGAL_STATE, "%s", NO_SSL_ERR);
+#endif
+}
+
+// This will check to see if the connection should be
+// secure. This can be dictated from either end and should
+// only be called after the INIT protocol has been received.
+static natsStatus
+_checkForSecure(natsConnection *nc)
+{
+    natsStatus s = NATS_OK;
+
+    // Check for mismatch in setups
+    if (nc->opts->secure && !nc->info.tlsRequired)
+        s = nats_setDefaultError(NATS_SECURE_CONNECTION_WANTED);
+    else if (nc->info.tlsRequired && !nc->opts->secure)
+        s = nats_setDefaultError(NATS_SECURE_CONNECTION_REQUIRED);
+
+    if ((s == NATS_OK) && nc->opts->secure)
+        s = _makeTLSConn(nc);
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+static natsStatus
+_processExpectedInfo(natsConnection *nc)
+{
+    natsControl control;
+    natsStatus s;
+
+    _initControlContent(&control);
+
+    s = _readOp(nc, &control);
+    if (s != NATS_OK)
+        return NATS_UPDATE_ERR_STACK(s);
+
+    if ((s == NATS_OK)
+        && ((control.op == NULL)
+            || (strcmp(control.op, _INFO_OP_) != 0)))
+    {
+        s = nats_setError(NATS_PROTOCOL_ERROR,
+                          "Unexpected protocol: got '%s' instead of '%s'",
+                          (control.op == NULL ? "<null>" : control.op),
+                          _INFO_OP_);
+    }
+    if (s == NATS_OK)
+        s = _processInfo(nc, control.args, -1);
+    if (s == NATS_OK)
+        s = _checkForSecure(nc);
+
+    _clearControlContent(&control);
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+static natsStatus
+_connectProto(natsConnection *nc, char **proto)
+{
+    natsOptions *opts = nc->opts;
+    const char *token = NULL;
+    const char *user = NULL;
+    const char *pwd = NULL;
+    const char *name = NULL;
+    int res;
+
+    if (nc->url->username != NULL)
+        user = nc->url->username;
+    if (nc->url->password != NULL)
+        pwd = nc->url->password;
+    if ((user != NULL) && (pwd == NULL))
+    {
+        token = user;
+        user = NULL;
+    }
+    if ((user == NULL) && (token == NULL))
+    {
+        // Take from options (possibly all NULL)
+        user = nc->opts->user;
+        pwd = nc->opts->password;
+        token = nc->opts->token;
+    }
+    if (opts->name != NULL)
+        name = opts->name;
+
+    res = nats_asprintf(proto,
+                        "CONNECT {\"verbose\":%s,\"pedantic\":%s,%s%s%s%s%s%s%s%s%s\"tls_required\":%s," \
+                        "\"name\":\"%s\",\"lang\":\"%s\",\"version\":\"%s\",\"protocol\":%d}%s",
+                        nats_GetBoolStr(opts->verbose),
+                        nats_GetBoolStr(opts->pedantic),
+                        (user != NULL ? "\"user\":\"" : ""),
+                        (user != NULL ? user : ""),
+                        (user != NULL ? "\"," : ""),
+                        (pwd != NULL ? "\"pass\":\"" : ""),
+                        (pwd != NULL ? pwd : ""),
+                        (pwd != NULL ? "\"," : ""),
+                        (token != NULL ? "\"auth_token\":\"" : ""),
+                        (token != NULL ? token : ""),
+                        (token != NULL ? "\"," : ""),
+                        nats_GetBoolStr(opts->secure),
+                        (name != NULL ? name : ""),
+                        CString, NATS_VERSION_STRING,
+                        CLIENT_PROTO_INFO,
+                        _CRLF_);
+    if (res < 0)
+        return NATS_NO_MEMORY;
+
+    return NATS_OK;
+}
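// Illustration (not part of the diff): for a URL such as
// nats://derek:s3cr3t@localhost:4222, the format string above renders a
// single CONNECT line terminated by CRLF, along the lines of (values
// illustrative; wrapped here for readability):
//
//   CONNECT {"verbose":false,"pedantic":false,"user":"derek","pass":"s3cr3t",
//       "tls_required":false,"name":"","lang":"C","version":"1.x.x","protocol":1}
//
// With a token instead of user/password, the user/pass pairs collapse into
// the single "auth_token" field.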
+
+static natsStatus
+_sendUnsubProto(natsConnection *nc, int64_t subId, int max)
+{
+    natsStatus s = NATS_OK;
+    char *proto = NULL;
+    int res = 0;
+
+    if (max > 0)
+        res = nats_asprintf(&proto, _UNSUB_PROTO_, subId, max);
+    else
+        res = nats_asprintf(&proto, _UNSUB_NO_MAX_PROTO_, subId);
+
+    if (res < 0)
+        s = nats_setDefaultError(NATS_NO_MEMORY);
+    else
+    {
+        s = natsConn_bufferWriteString(nc, proto);
+        NATS_FREE(proto);
+    }
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+static natsStatus
+_resendSubscriptions(natsConnection *nc)
+{
+    natsStatus s = NATS_OK;
+    natsSubscription *sub = NULL;
+    natsHashIter iter;
+    char *proto;
+    int res;
+    int adjustedMax;
+
+    natsHashIter_Init(&iter, nc->subs);
+    while ((s == NATS_OK) && natsHashIter_Next(&iter, NULL, (void**) &sub))
+    {
+        proto = NULL;
+
+        adjustedMax = 0;
+        natsSub_Lock(sub);
+        if (sub->max > 0)
+        {
+            if (sub->delivered < sub->max)
+                adjustedMax = (int)(sub->max - sub->delivered);
+
+            // The adjusted max could be 0 here if the number of delivered
+            // messages has reached the max; if so, unsubscribe.
+            if (adjustedMax == 0)
+            {
+                natsSub_Unlock(sub);
+                s = _sendUnsubProto(nc, sub->sid, 0);
+                continue;
+            }
+        }
+        natsSub_Unlock(sub);
+
+        // These sub fields are immutable
+        res = nats_asprintf(&proto, _SUB_PROTO_,
+                            sub->subject,
+                            (sub->queue == NULL ? "" : sub->queue),
+                            (int) sub->sid);
+        if (res < 0)
+            s = NATS_NO_MEMORY;
+
+        if (s == NATS_OK)
+        {
+            s = natsConn_bufferWriteString(nc, proto);
+            NATS_FREE(proto);
+            proto = NULL;
+        }
+
+        if ((s == NATS_OK) && (adjustedMax > 0))
+            s = _sendUnsubProto(nc, sub->sid, adjustedMax);
+    }
+
+    return s;
+}
+
+static natsStatus
+_flushReconnectPendingItems(natsConnection *nc)
+{
+    natsStatus s = NATS_OK;
+
+    if (nc->pending == NULL)
+        return NATS_OK;
+
+    if (natsBuf_Len(nc->pending) > 0)
+    {
+        s = natsBuf_Append(nc->bw, natsBuf_Data(nc->pending),
+                           natsBuf_Len(nc->pending));
+    }
+
+    return s;
+}
+
+static void
+_removePongFromList(natsConnection *nc, natsPong *pong)
+{
+    if (pong->prev != NULL)
+        pong->prev->next = pong->next;
+
+    if (pong->next != NULL)
+        pong->next->prev = pong->prev;
+
+    if (nc->pongs.head == pong)
+        nc->pongs.head = pong->next;
+
+    if (nc->pongs.tail == pong)
+        nc->pongs.tail = pong->prev;
+
+    pong->prev = pong->next = NULL;
+}
+
+// When the connection is closed, or is disconnected and we are about
+// to reconnect, we need to unblock all pending natsConnection_Flush[Timeout]()
+// calls: there is no chance that a PING sent to a server is going to be
+// echoed by the new server.
+static void
+_clearPendingFlushRequests(natsConnection *nc)
+{
+    natsPong *pong = NULL;
+
+    while ((pong = nc->pongs.head) != NULL)
+    {
+        // Pop from the queue
+        _removePongFromList(nc, pong);
+
+        // natsConnection_Flush[Timeout]() is waiting on a condition
+        // variable and exits when this value is != 0. "Flush" will
+        // return an error to the caller if the connection status
+        // is not CONNECTED at that time.
+        pong->id = -1;
+
+        // There may be more than one user-thread making
+        // natsConnection_Flush() calls.
+        natsCondition_Broadcast(nc->pongs.cond);
+    }
+
+    nc->pongs.incoming = 0;
+    nc->pongs.outgoingPings = 0;
+}
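// Illustration (not part of the diff): the waiting side of this list lives in
// natsConnection_Flush[Timeout]() further down the file. A hypothetical
// sketch of that side, consistent with the teardown above (a queued pong is
// stamped with a positive id by _sendPing(); it is reset when the matching
// PONG arrives, or set to -1 here when the connection is closed or reset):
static natsStatus
_flushSketch(natsConnection *nc)
{
    natsPong pong;

    memset(&pong, 0, sizeof(natsPong));

    natsConn_Lock(nc);

    _sendPing(nc, &pong);                 // queues 'pong' and sends PING

    while (pong.id > 0)                   // cleared on PONG, -1 on teardown
        natsCondition_Wait(nc->pongs.cond, nc->mu);

    natsConn_Unlock(nc);

    // id == -1 means the connection was closed or reset before the
    // matching PONG arrived.
    return (pong.id == -1 ? NATS_CONNECTION_CLOSED : NATS_OK);
}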
+
+// Try to reconnect using the option parameters.
+// This function assumes we are allowed to reconnect.
+static void
+_doReconnect(void *arg)
+{
+    natsStatus s = NATS_OK;
+    natsConnection *nc = (natsConnection*) arg;
+    natsThread *tReconnect = NULL;
+    natsSrv *cur;
+    int64_t elapsed;
+    natsSrvPool *pool = NULL;
+    int64_t sleepTime;
+    struct threadsToJoin ttj;
+
+    natsConn_Lock(nc);
+
+    _initThreadsToJoin(&ttj, nc, false);
+
+    natsConn_Unlock(nc);
+
+    _joinThreads(&ttj);
+
+    natsConn_Lock(nc);
+
+    // Kick out all calls to natsConnection_Flush[Timeout]().
+    _clearPendingFlushRequests(nc);
+
+    // Clear any error.
+    nc->err = NATS_OK;
+    nc->errStr[0] = '\0';
+
+    pool = nc->srvPool;
+
+    // Perform appropriate callback if needed for a disconnect.
+    if (nc->opts->disconnectedCb != NULL)
+        natsAsyncCb_PostConnHandler(nc, ASYNC_DISCONNECTED);
+
+    // Note that the pool's size may decrement after the call to
+    // natsSrvPool_GetNextServer.
+    while ((s == NATS_OK) && (natsSrvPool_GetSize(pool) > 0))
+    {
+        cur = natsSrvPool_GetNextServer(pool, nc->opts, nc->url);
+        nc->url = (cur == NULL ? NULL : cur->url);
+        if (cur == NULL)
+        {
+            nc->err = NATS_NO_SERVER;
+            break;
+        }
+
+        sleepTime = 0;
+
+        // Sleep an appropriate amount of time before the
+        // connection attempt if connecting to the same server
+        // we just got disconnected from..
+        if (((elapsed = nats_Now() - cur->lastAttempt)) < nc->opts->reconnectWait)
+            sleepTime = (nc->opts->reconnectWait - elapsed);
+
+        natsConn_Unlock(nc);
+
+        if (sleepTime > 0)
+            nats_Sleep(sleepTime);
+        else
+            natsThread_Yield();
+
+        natsConn_Lock(nc);
+
+        // Check if we have been closed first.
+        if (natsConn_isClosed(nc))
+            break;
+
+        // Mark that we tried a reconnect
+        cur->reconnects += 1;
+
+        // Try to create a new connection
+        s = _createConn(nc);
+        if (s != NATS_OK)
+        {
+            // Reset error here. We will return NATS_NO_SERVERS at the end of
+            // this loop if appropriate.
+            nc->err = NATS_OK;
+
+            // Reset status
+            s = NATS_OK;
+
+            // Not yet connected, retry...
+            // Continue to hold the lock
+            continue;
+        }
+
+        // We have a valid FD and the writer buffer was moved to pending.
+        // We are now going to send data directly to the newly connected
+        // server, so we need to disable the use of 'pending' for the
+        // moment
+        nc->usePending = false;
+
+        // We are reconnected
+        nc->stats.reconnects += 1;
+
+        // Process Connect logic
+        s = _processConnInit(nc);
+
+        // Send existing subscription state
+        if (s == NATS_OK)
+            s = _resendSubscriptions(nc);
+
+        // Now send off and clear pending buffer
+        if (s == NATS_OK)
+            s = _flushReconnectPendingItems(nc);
+
+        // This is where we are truly connected.
+        if (s == NATS_OK)
+            nc->status = CONNECTED;
+
+        if (s != NATS_OK)
+        {
+            // In case we were at the last iteration, this is the error
+            // we will report.
+            nc->err = s;
+
+            // Reset status
+            s = NATS_OK;
+
+            // Close the socket since we were connected, but a problem occurred.
+            // (not doing this would cause an FD leak)
+            natsSock_Close(nc->sockCtx.fd);
+            nc->sockCtx.fd = NATS_SOCK_INVALID;
+
+            // We need to re-activate the use of pending since we
+            // may go back to sleep and release the lock
+            nc->usePending = true;
+            natsBuf_Reset(nc->bw);
+
+            nc->status = RECONNECTING;
+            continue;
+        }
+
+        // No more failure allowed past this point.
+
+        // Clear out server stats for the server we connected to..
+        cur->didConnect = true;
+        cur->reconnects = 0;
+
+        tReconnect = nc->reconnectThread;
+        nc->reconnectThread = NULL;
+
+        // At this point we know that we don't need the pending buffer
+        // anymore. Destroy now.
+        natsBuf_Destroy(nc->pending);
+        nc->pending = NULL;
+        nc->usePending = false;
+
+        // Call reconnectedCB if appropriate. Since we are in a separate
+        // thread, we could invoke the callback directly, however, we
+        // still post it so all callbacks from a connection are serialized.
+        if (nc->opts->reconnectedCb != NULL)
+            natsAsyncCb_PostConnHandler(nc, ASYNC_RECONNECTED);
+
+        // Release lock here, we will return below.
+        natsConn_Unlock(nc);
+
+        // Make sure we flush everything
+        (void) natsConnection_Flush(nc);
+
+        natsThread_Join(tReconnect);
+        natsThread_Destroy(tReconnect);
+
+        return;
+    }
+
+    // Call into close.. We have no servers left..
+    if (nc->err == NATS_OK)
+        nc->err = NATS_NO_SERVER;
+
+    natsConn_Unlock(nc);
+
+    _close(nc, CLOSED, true);
+}
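// Illustration (not part of the diff): the loop above is driven entirely by
// options. Assuming the option setters exposed by this vendored cnats (see
// src/opts.c in this changeset), a caller would typically configure, with
// illustrative values:
natsOptions *opts = NULL;

natsOptions_Create(&opts);
natsOptions_SetAllowReconnect(opts, true);    // opt in to _doReconnect()
natsOptions_SetMaxReconnect(opts, 10);        // attempts per server
natsOptions_SetReconnectWait(opts, 2000);     // ms between retries of a server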
+
+// Notifies the flusher thread that there is pending data to send to the
+// server.
+void
+natsConn_kickFlusher(natsConnection *nc)
+{
+    if (!(nc->flusherSignaled) && (nc->bw != NULL))
+    {
+        nc->flusherSignaled = true;
+        natsCondition_Signal(nc->flusherCond);
+    }
+}
+
+static natsStatus
+_sendProto(natsConnection *nc, const char* proto, int protoLen)
+{
+    natsStatus s;
+
+    natsConn_Lock(nc);
+
+    s = natsConn_bufferWrite(nc, proto, protoLen);
+    if (s == NATS_OK)
+        natsConn_kickFlusher(nc);
+
+    natsConn_Unlock(nc);
+
+    return s;
+}
+
+static natsStatus
+_sendConnect(natsConnection *nc)
+{
+    natsStatus s = NATS_OK;
+    char *cProto = NULL;
+    char buffer[DEFAULT_BUF_SIZE];
+
+    buffer[0] = '\0';
+
+    // Create the CONNECT protocol
+    s = _connectProto(nc, &cProto);
+
+    // Add it to the buffer
+    if (s == NATS_OK)
+        s = natsConn_bufferWriteString(nc, cProto);
+
+    // Add the PING protocol to the buffer
+    if (s == NATS_OK)
+        s = natsConn_bufferWrite(nc, _PING_OP_, _PING_OP_LEN_);
+    if (s == NATS_OK)
+        s = natsConn_bufferWrite(nc, _CRLF_, _CRLF_LEN_);
+
+    // Flush the buffer
+    if (s == NATS_OK)
+        s = natsConn_bufferFlush(nc);
+
+    // Now read the response from the server.
+    if (s == NATS_OK)
+        s = natsSock_ReadLine(&(nc->sockCtx), buffer, sizeof(buffer));
+
+    // If Verbose is set, we expect +OK first.
+    if ((s == NATS_OK) && nc->opts->verbose)
+    {
+        // Check protocol is as expected
+        if (strncmp(buffer, _OK_OP_, _OK_OP_LEN_) != 0)
+        {
+            s = nats_setError(NATS_PROTOCOL_ERROR,
+                              "Expected '%s', got '%s'",
+                              _OK_OP_, buffer);
+        }
+
+        // Read the rest now...
+        if (s == NATS_OK)
+            s = natsSock_ReadLine(&(nc->sockCtx), buffer, sizeof(buffer));
+    }
+
+    // We expect the PONG protocol
+    if ((s == NATS_OK) && (strncmp(buffer, _PONG_OP_, _PONG_OP_LEN_) != 0))
+    {
+        // But it could be something else, like -ERR
+
+        if (strncmp(buffer, _ERR_OP_, _ERR_OP_LEN_) == 0)
+        {
+            // Remove -ERR, trim spaces and quotes.
+            nats_NormalizeErr(buffer);
+
+            // Search if the error message says something about
+            // authentication failure.
+
+            if (nats_strcasestr(buffer, "authorization") != NULL)
+                s = nats_setError(NATS_CONNECTION_AUTH_FAILED,
+                                  "%s", buffer);
+            else
+                s = nats_setError(NATS_ERR, "%s", buffer);
+        }
+        else
+        {
+            s = nats_setError(NATS_PROTOCOL_ERROR,
+                              "Expected '%s', got '%s'",
+                              _PONG_OP_, buffer);
+        }
+    }
+
+    if (s == NATS_OK)
+        nc->status = CONNECTED;
+
+    free(cProto);
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
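// Illustration (not part of the diff): the wire exchange performed by
// _sendConnect(), with server lines illustrative:
//
//   client -> CONNECT {...}\r\nPING\r\n               (one buffered flush)
//   server -> +OK\r\n                                 (only when verbose)
//   server -> PONG\r\n                                (handshake succeeded)
//   server -> -ERR 'Authorization Violation'\r\n      (on auth failure)
//
// A PONG promotes the connection to CONNECTED; an -ERR mentioning
// "authorization" maps to NATS_CONNECTION_AUTH_FAILED above.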
+
+static natsStatus
+_processConnInit(natsConnection *nc)
+{
+    natsStatus s = NATS_OK;
+
+    nc->status = CONNECTING;
+
+    // Process the INFO protocol that we should be receiving
+    s = _processExpectedInfo(nc);
+
+    // Send the CONNECT and PING protocol, and wait for the PONG.
+    if (s == NATS_OK)
+        s = _sendConnect(nc);
+
+    // Clear our deadline, regardless of error
+    natsDeadline_Clear(&(nc->sockCtx.deadline));
+
+    // Switch to blocking socket here...
+    if (s == NATS_OK)
+        s = natsSock_SetBlocking(nc->sockCtx.fd, true);
+
+    // Start the readLoop and flusher threads
+    if (s == NATS_OK)
+        s = _spinUpSocketWatchers(nc);
+
+    if ((s == NATS_OK) && (nc->opts->evLoop != NULL))
+    {
+        s = natsSock_SetBlocking(nc->sockCtx.fd, false);
+
+        // If we are reconnecting, buffer will have already been allocated
+        if ((s == NATS_OK) && (nc->el.buffer == NULL))
+        {
+            nc->el.buffer = (char*) malloc(DEFAULT_BUF_SIZE);
+            if (nc->el.buffer == NULL)
+                s = nats_setDefaultError(NATS_NO_MEMORY);
+        }
+        if (s == NATS_OK)
+        {
+            // Set this first in case the event loop triggers the first READ
+            // event just after this call returns.
+            nc->sockCtx.useEventLoop = true;
+
+            s = nc->opts->evCbs.attach(&(nc->el.data),
+                                       nc->opts->evLoop,
+                                       nc,
+                                       (int) nc->sockCtx.fd);
+            if (s == NATS_OK)
+            {
+                nc->el.attached = true;
+            }
+            else
+            {
+                nc->sockCtx.useEventLoop = false;
+
+                nats_setError(s,
+                              "Error attaching to the event loop: %d - %s",
+                              s, natsStatus_GetText(s));
+            }
+        }
+    }
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+// Main connect function. Will connect to the server.
+static natsStatus
+_connect(natsConnection *nc)
+{
+    natsStatus s = NATS_OK;
+    natsStatus retSts = NATS_OK;
+    natsSrvPool *pool = NULL;
+    int i;
+    int poolSize;
+
+    natsConn_Lock(nc);
+
+    pool = nc->srvPool;
+
+    // Create actual socket connection
+    // For first connect we walk all servers in the pool and try
+    // to connect immediately.
+
+    // Get the size of the pool. The pool may change inside the loop
+    // iteration due to INFO protocol.
+    poolSize = natsSrvPool_GetSize(pool);
+    for (i = 0; i < poolSize; i++)
+    {
+        nc->url = natsSrvPool_GetSrvUrl(pool, i);
+
+        s = _createConn(nc);
+        if (s == NATS_OK)
+        {
+            s = _processConnInit(nc);
+
+            if (s == NATS_OK)
+            {
+                natsSrvPool_SetSrvDidConnect(pool, i, true);
+                natsSrvPool_SetSrvReconnects(pool, i, 0);
+                retSts = NATS_OK;
+                break;
+            }
+            else
+            {
+                retSts = s;
+
+                natsConn_Unlock(nc);
+
+                _close(nc, DISCONNECTED, false);
+
+                natsConn_Lock(nc);
+
+                nc->url = NULL;
+            }
+            // Refresh our view of pool length since it may have been
+            // modified when processing the INFO protocol.
+            poolSize = natsSrvPool_GetSize(pool);
+        }
+        else
+        {
+            if (s == NATS_IO_ERROR)
+                retSts = NATS_OK;
+        }
+    }
+
+    if ((retSts == NATS_OK) && (nc->status != CONNECTED))
+    {
+        s = nats_setDefaultError(NATS_NO_SERVER);
+    }
+
+    natsConn_Unlock(nc);
+
+    return NATS_UPDATE_ERR_STACK(s);
+}
+
+// _processOpError handles errors from reading or parsing the protocol.
+// The lock should not be held entering this function.
+static void
+_processOpError(natsConnection *nc, natsStatus s)
+{
+    natsConn_Lock(nc);
+
+    if (_isConnecting(nc) || natsConn_isClosed(nc) || natsConn_isReconnecting(nc))
+    {
+        natsConn_Unlock(nc);
+
+        return;
+    }
+
+    // Do reconnect only if allowed and we were actually connected
+    if (nc->opts->allowReconnect && (nc->status == CONNECTED))
+    {
+        natsStatus ls = NATS_OK;
+
+        // Set our new status
+        nc->status = RECONNECTING;
+
+        if (nc->ptmr != NULL)
+            natsTimer_Stop(nc->ptmr);
+
+        if (nc->sockCtx.fdActive)
+        {
+            natsConn_bufferFlush(nc);
+
+            natsSock_Shutdown(nc->sockCtx.fd);
+            nc->sockCtx.fdActive = false;
+        }
+
+        // If we use an external event loop, we need to stop polling
+        // on the socket since we are going to reconnect.
+        if (nc->el.attached)
+        {
+            // Stop polling for READ/WRITE events on that socket.
+            nc->sockCtx.useEventLoop = false;
+            nc->el.writeAdded = false;
+            ls = nc->opts->evCbs.read(nc->el.data, NATS_EVENT_ACTION_REMOVE);
+            if (ls == NATS_OK)
+                ls = nc->opts->evCbs.write(nc->el.data, NATS_EVENT_ACTION_REMOVE);
+        }
+
+        // Create the pending buffer to hold all write requests while we try
+        // to reconnect.
+        ls = natsBuf_Create(&(nc->pending), nc->opts->reconnectBufSize);
+        if (ls == NATS_OK)
+        {
+            nc->usePending = true;
+
+            // Start the reconnect thread
+            ls = natsThread_Create(&(nc->reconnectThread),
+                                   _doReconnect, (void*) nc);
+        }
+        if (ls == NATS_OK)
+        {
+            natsConn_Unlock(nc);
+
+            return;
+        }
+    }
+
+    // Reconnect not allowed or we failed to set up the reconnect code.
+
+    nc->status = DISCONNECTED;
+    nc->err = s;
+
+    natsConn_Unlock(nc);
+
+    _close(nc, CLOSED, true);
+}
+
+static void
+natsConn_clearSSL(natsConnection *nc)
+{
+    if (nc->sockCtx.ssl == NULL)
+        return;
+
+    SSL_free(nc->sockCtx.ssl);
+    nc->sockCtx.ssl = NULL;
+}
+
+static void
+_readLoop(void *arg)
+{
+    natsStatus s = NATS_OK;
+    char buffer[DEFAULT_BUF_SIZE];
+    natsSock fd;
+    int n;
+
+    natsConnection *nc = (natsConnection*) arg;
+
+    natsConn_Lock(nc);
+
+    if (nc->sockCtx.ssl != NULL)
+        nats_sslRegisterThreadForCleanup();
+
+    fd = nc->sockCtx.fd;
+
+    if (nc->ps == NULL)
+        s = natsParser_Create(&(nc->ps));
+
+    while ((s == NATS_OK)
+           && !natsConn_isClosed(nc)
+           && !natsConn_isReconnecting(nc))
+    {
+        natsConn_Unlock(nc);
+
+        n = 0;
+
+        s = natsSock_Read(&(nc->sockCtx), buffer, sizeof(buffer), &n);
+        if (s == NATS_OK)
+            s = natsParser_Parse(nc, buffer, n);
+
+        if (s != NATS_OK)
+            _processOpError(nc, s);
+
+        natsConn_Lock(nc);
+    }
+
+    natsSock_Close(fd);
+    nc->sockCtx.fd = NATS_SOCK_INVALID;
+    nc->sockCtx.fdActive = false;
+
+    // We need to clean up some things if the connection was SSL.
+    if (nc->sockCtx.ssl != NULL)
+        natsConn_clearSSL(nc);
+
+    natsParser_Destroy(nc->ps);
+    nc->ps = NULL;
+
+    // This unlocks and releases the connection to compensate for the retain
+    // when this thread was created.
+    natsConn_unlockAndRelease(nc);
+}
+
+static void
+_flusher(void *arg)
+{
+    natsConnection *nc = (natsConnection*) arg;
+    natsStatus s;
+
+    while (true)
+    {
+        natsConn_Lock(nc);
+
+        while (!(nc->flusherSignaled) && !(nc->flusherStop))
+            natsCondition_Wait(nc->flusherCond, nc->mu);
+
+        if (nc->flusherStop)
+        {
+            natsConn_Unlock(nc);
+            break;
+        }
+
+        //TODO: If we process the request right away, performance
+        // will suffer when quickly sending very small messages.
+        // The buffer is always going to be flushed, which
+        // defeats the purpose of a write buffer.
+        // We need to revisit this.
+
+        // Give a chance to accumulate more requests...
+        natsCondition_TimedWait(nc->flusherCond, nc->mu, 1);
+
+        nc->flusherSignaled = false;
+
+        if (natsConn_isClosed(nc) || natsConn_isReconnecting(nc))
+        {
+            natsConn_Unlock(nc);
+            break;
+        }
+
+        if (nc->sockCtx.fdActive && (natsBuf_Len(nc->bw) > 0))
+        {
+            s = natsConn_bufferFlush(nc);
+            if ((s != NATS_OK) && (nc->err == NATS_OK))
+                nc->err = s;
+        }
+
+        natsConn_Unlock(nc);
+    }
+
+    // Release the connection to compensate for the retain when this thread
+    // was created.
+    natsConn_release(nc);
+}
+
+static void
+_sendPing(natsConnection *nc, natsPong *pong)
+{
+    natsStatus s = NATS_OK;
+
+    s = natsConn_bufferWrite(nc, _PING_PROTO_, _PING_PROTO_LEN_);
+    if (s == NATS_OK)
+    {
+        // Flush the buffer in place.
+        s = natsConn_bufferFlush(nc);
+    }
+    if (s == NATS_OK)
+    {
+        // Now that we know the PING was sent properly, update
+        // the number of PINGs sent.
+        nc->pongs.outgoingPings++;
+
+        if (pong != NULL)
+        {
+            pong->id = nc->pongs.outgoingPings;
+
+            // Add this pong to the list.
+            pong->next = NULL;
+            pong->prev = nc->pongs.tail;
+
+            if (nc->pongs.tail != NULL)
+                nc->pongs.tail->next = pong;
+
+            nc->pongs.tail = pong;
+
+            if (nc->pongs.head == NULL)
+                nc->pongs.head = pong;
+        }
+    }
+}
+
+static void
+_processPingTimer(natsTimer *timer, void *arg)
+{
+    natsConnection *nc = (natsConnection*) arg;
+
+    natsConn_Lock(nc);
+
+    if (nc->status != CONNECTED)
+    {
+        natsConn_Unlock(nc);
+        return;
+    }
+
+    // If we have more PINGs out than PONGs in, consider
+    // the connection stale.
+    if (++(nc->pout) > nc->opts->maxPingsOut)
+    {
+        natsConn_Unlock(nc);
+        _processOpError(nc, NATS_STALE_CONNECTION);
+        return;
+    }
+
+    _sendPing(nc, NULL);
+
+    natsConn_Unlock(nc);
+}
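// Illustration (not part of the diff): both knobs used by the stale-connection
// check above come from the options. Continuing the hypothetical 'opts' from
// the sketch after _doReconnect(), and assuming the setters exposed by this
// vendored cnats (see src/opts.c in this changeset), values illustrative:
natsOptions_SetPingInterval(opts, 120 * 1000);  // send a PING every 2 minutes
natsOptions_SetMaxPingsOut(opts, 2);            // stale after 2 unanswered PINGs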
|
1536
|
+
|
1537
|
+
static void
|
1538
|
+
_pingStopppedCb(natsTimer *timer, void *closure)
|
1539
|
+
{
|
1540
|
+
natsConnection *nc = (natsConnection*) closure;
|
1541
|
+
|
1542
|
+
natsConn_release(nc);
|
1543
|
+
}
|
1544
|
+
|
1545
|
+
static natsStatus
|
1546
|
+
_spinUpSocketWatchers(natsConnection *nc)
|
1547
|
+
{
|
1548
|
+
natsStatus s = NATS_OK;
|
1549
|
+
|
1550
|
+
nc->pout = 0;
|
1551
|
+
nc->flusherStop = false;
|
1552
|
+
|
1553
|
+
if (nc->opts->evLoop == NULL)
|
1554
|
+
{
|
1555
|
+
// Let's not rely on the created threads acquiring lock that would make it
|
1556
|
+
// safe to retain only on success.
|
1557
|
+
_retain(nc);
|
1558
|
+
|
1559
|
+
s = natsThread_Create(&(nc->readLoopThread), _readLoop, (void*) nc);
|
1560
|
+
if (s != NATS_OK)
|
1561
|
+
_release(nc);
|
1562
|
+
}
|
1563
|
+
|
1564
|
+
if (s == NATS_OK)
|
1565
|
+
{
|
1566
|
+
_retain(nc);
|
1567
|
+
|
1568
|
+
s = natsThread_Create(&(nc->flusherThread), _flusher, (void*) nc);
|
1569
|
+
if (s != NATS_OK)
|
1570
|
+
_release(nc);
|
1571
|
+
}
|
1572
|
+
|
1573
|
+
if ((s == NATS_OK) && (nc->opts->pingInterval > 0))
|
1574
|
+
{
|
1575
|
+
_retain(nc);
|
1576
|
+
|
1577
|
+
if (nc->ptmr == NULL)
|
1578
|
+
{
|
1579
|
+
s = natsTimer_Create(&(nc->ptmr),
|
1580
|
+
_processPingTimer,
|
1581
|
+
_pingStopppedCb,
|
1582
|
+
nc->opts->pingInterval,
|
1583
|
+
(void*) nc);
|
1584
|
+
if (s != NATS_OK)
|
1585
|
+
_release(nc);
|
1586
|
+
}
|
1587
|
+
else
|
1588
|
+
{
|
1589
|
+
natsTimer_Reset(nc->ptmr, nc->opts->pingInterval);
|
1590
|
+
}
|
1591
|
+
}
|
1592
|
+
|
1593
|
+
return s;
|
1594
|
+
}

// Remove all subscriptions. This will kick out the delivery threads,
// and unblock NextMsg() calls.
static void
_removeAllSubscriptions(natsConnection *nc)
{
    natsHashIter     iter;
    natsSubscription *sub;

    natsHashIter_Init(&iter, nc->subs);
    while (natsHashIter_Next(&iter, NULL, (void**) &sub))
    {
        (void) natsHashIter_RemoveCurrent(&iter);

        natsSub_close(sub, true);

        natsSub_release(sub);
    }
}


// Low level close call that will do correct cleanup and set
// desired status. Also controls whether user defined callbacks
// will be triggered. The lock should not be held entering this
// function. This function will handle the locking manually.
static void
_close(natsConnection *nc, natsConnStatus status, bool doCBs)
{
    struct threadsToJoin ttj;
    bool                 sockWasActive = false;
    bool                 detach        = false;

    natsConn_lockAndRetain(nc);

    if (natsConn_isClosed(nc))
    {
        nc->status = status;

        natsConn_unlockAndRelease(nc);
        return;
    }

    nc->status = CLOSED;

    _initThreadsToJoin(&ttj, nc, true);

    // Kick out all calls to natsConnection_Flush[Timeout]().
    _clearPendingFlushRequests(nc);

    if (nc->ptmr != NULL)
        natsTimer_Stop(nc->ptmr);

    // Remove all subscriptions. This will kick out the delivery threads,
    // and unblock NextMsg() calls.
    _removeAllSubscriptions(nc);

    // Go ahead and make sure we have flushed the outbound buffer.
    nc->status = CLOSED;
    if (nc->sockCtx.fdActive)
    {
        natsConn_bufferFlush(nc);

        // If there is no readLoop, then it is our responsibility to close
        // the socket. Otherwise, _readLoop is the one doing it.
        if ((ttj.readLoop == NULL) && (nc->opts->evLoop == NULL))
        {
            natsSock_Close(nc->sockCtx.fd);
            nc->sockCtx.fd = NATS_SOCK_INVALID;

            // We need to cleanup some things if the connection was SSL.
            if (nc->sockCtx.ssl != NULL)
                natsConn_clearSSL(nc);
        }
        else
        {
            // Shutdown the socket to stop any read/write operations.
            // The socket will be closed by the _readLoop thread.
            natsSock_Shutdown(nc->sockCtx.fd);
        }
        nc->sockCtx.fdActive = false;
        sockWasActive = true;
    }

    // Perform appropriate callback if needed for a disconnect.
    // Do not invoke if we were disconnected and failed to reconnect (since
    // it has already been invoked in doReconnect).
    if (doCBs && (nc->opts->disconnectedCb != NULL) && sockWasActive)
        natsAsyncCb_PostConnHandler(nc, ASYNC_DISCONNECTED);

    natsConn_Unlock(nc);

    _joinThreads(&ttj);

    natsConn_Lock(nc);

    // Perform appropriate callback if needed for a connection closed.
    if (doCBs && (nc->opts->closedCb != NULL))
        natsAsyncCb_PostConnHandler(nc, ASYNC_CLOSED);

    nc->status = status;

    if (nc->el.attached)
    {
        nc->el.attached = false;
        detach = true;
        _retain(nc);
    }

    natsConn_unlockAndRelease(nc);

    if (detach)
    {
        nc->opts->evCbs.detach(nc->el.data);
        natsConn_release(nc);
    }
}

static void
_processSlowConsumer(natsConnection *nc, natsSubscription *sub)
{
    nc->err = NATS_SLOW_CONSUMER;

    if (!(sub->slowConsumer) && (nc->opts->asyncErrCb != NULL))
        natsAsyncCb_PostErrHandler(nc, sub, NATS_SLOW_CONSUMER);

    sub->slowConsumer = true;
}

static natsStatus
_createMsg(natsMsg **newMsg, natsConnection *nc, char *buf, int bufLen)
{
    natsStatus s        = NATS_OK;
    int        subjLen  = 0;
    char       *reply   = NULL;
    int        replyLen = 0;

    subjLen = natsBuf_Len(nc->ps->ma.subject);

    if (nc->ps->ma.reply != NULL)
    {
        reply    = natsBuf_Data(nc->ps->ma.reply);
        replyLen = natsBuf_Len(nc->ps->ma.reply);
    }

    s = natsMsg_create(newMsg,
                       (const char*) natsBuf_Data(nc->ps->ma.subject), subjLen,
                       (const char*) reply, replyLen,
                       (const char*) buf, bufLen);
    return s;
}

natsStatus
natsConn_processMsg(natsConnection *nc, char *buf, int bufLen)
{
    natsStatus       s    = NATS_OK;
    natsSubscription *sub = NULL;
    natsMsg          *msg = NULL;
    natsMsgDlvWorker *ldw = NULL;

    natsConn_Lock(nc);

    nc->stats.inMsgs  += 1;
    nc->stats.inBytes += (uint64_t) bufLen;

    sub = natsHash_Get(nc->subs, nc->ps->ma.sid);
    if (sub == NULL)
    {
        natsConn_Unlock(nc);
        return NATS_OK;
    }

    // Do this outside of sub's lock, even if we end-up having to destroy
    // it because we have reached the maxPendingMsgs count. This reduces
    // lock contention.
    s = _createMsg(&msg, nc, buf, bufLen);
    if (s != NATS_OK)
    {
        natsConn_Unlock(nc);
        return s;
    }

    if ((ldw = sub->libDlvWorker) != NULL)
        natsMutex_Lock(ldw->lock);
    else
        natsSub_Lock(sub);

    sub->msgList.msgs++;
    sub->msgList.bytes += bufLen;

    if (((sub->msgsLimit > 0) && (sub->msgList.msgs > sub->msgsLimit))
        || ((sub->bytesLimit > 0) && (sub->msgList.bytes > sub->bytesLimit)))
    {
        natsMsg_Destroy(msg);

        sub->dropped++;

        // Undo stats from above.
        sub->msgList.msgs--;
        sub->msgList.bytes -= bufLen;

        _processSlowConsumer(nc, sub);
    }
    else
    {
        natsMsgList *list = NULL;

        if (sub->msgList.msgs > sub->msgsMax)
            sub->msgsMax = sub->msgList.msgs;

        if (sub->msgList.bytes > sub->bytesMax)
            sub->bytesMax = sub->msgList.bytes;

        sub->slowConsumer = false;

        if (ldw != NULL)
        {
            msg->sub = sub;
            list = &ldw->msgList;
        }
        else
        {
            list = &sub->msgList;
        }

        if (list->head == NULL)
            list->head = msg;

        if (list->tail != NULL)
            list->tail->next = msg;

        list->tail = msg;

        if (ldw != NULL)
        {
            if (ldw->inWait)
                natsCondition_Broadcast(ldw->cond);
        }
        else
        {
            if (sub->inWait > 0)
                natsCondition_Broadcast(sub->cond);
        }
    }

    if (ldw != NULL)
        natsMutex_Unlock(ldw->lock);
    else
        natsSub_Unlock(sub);

    natsConn_Unlock(nc);

    return s;
}
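The pending-limits branch of natsConn_processMsg is the slow-consumer guard: the message is counted, and if either the message or byte cap is exceeded it is destroyed, the counters are rolled back, and the subscription is flagged so the async error callback fires at most once per episode. The happy path is a plain tail-append on a singly-linked FIFO followed by a broadcast to any waiting consumer. A standalone sketch of that bounded-queue logic (hypothetical types; only the shape mirrors the code above):

#include <stdbool.h>
#include <stddef.h>

typedef struct msg_s { struct msg_s *next; /* payload... */ } msg_t;

typedef struct {
    msg_t *head;
    msg_t *tail;
    int    count;
    int    limit;      // 0 means unlimited
    int    dropped;
    bool   slow;
} msg_queue_t;

// Returns false when the message was dropped (slow consumer).
static bool queue_push(msg_queue_t *q, msg_t *m)
{
    if ((q->limit > 0) && (q->count + 1 > q->limit))
    {
        q->dropped++;
        q->slow = true;   // would trigger the error callback once
        return false;     // caller destroys the message
    }
    q->slow = false;
    m->next = NULL;
    if (q->head == NULL)
        q->head = m;
    if (q->tail != NULL)
        q->tail->next = m;
    q->tail = m;
    q->count++;
    return true;
}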

void
natsConn_processOK(natsConnection *nc)
{
    // Do nothing for now.
}

void
natsConn_processErr(natsConnection *nc, char *buf, int bufLen)
{
    char error[256];

    // Copy the error in this local buffer.
    snprintf(error, sizeof(error), "%.*s", bufLen, buf);

    // Trim spaces and remove quotes.
    nats_NormalizeErr(error);

    if (strcasecmp(error, STALE_CONNECTION) == 0)
    {
        _processOpError(nc, NATS_STALE_CONNECTION);
    }
    else
    {
        natsConn_Lock(nc);
        nc->err = NATS_ERR;
        snprintf(nc->errStr, sizeof(nc->errStr), "%s", error);
        natsConn_Unlock(nc);
        _close(nc, CLOSED, true);
    }
}

void
natsConn_processPing(natsConnection *nc)
{
    _sendProto(nc, _PONG_PROTO_, _PONG_PROTO_LEN_);
}

void
natsConn_processPong(natsConnection *nc)
{
    natsPong *pong = NULL;

    natsConn_Lock(nc);

    nc->pongs.incoming++;

    // Check if the first pong's id in the list matches the incoming Id.
    if (((pong = nc->pongs.head) != NULL)
        && (pong->id == nc->pongs.incoming))
    {
        // Remove the pong from the list
        _removePongFromList(nc, pong);

        // Release the Flush[Timeout] call
        pong->id = 0;

        // There may be more than one thread waiting on this
        // condition variable, so we use broadcast instead of
        // signal.
        natsCondition_Broadcast(nc->pongs.cond);
    }

    nc->pout = 0;

    natsConn_Unlock(nc);
}

natsStatus
natsConn_addSubcription(natsConnection *nc, natsSubscription *sub)
{
    natsStatus       s       = NATS_OK;
    natsSubscription *oldSub = NULL;

    s = natsHash_Set(nc->subs, sub->sid, (void*) sub, (void**) &oldSub);
    if (s == NATS_OK)
    {
        assert(oldSub == NULL);
        natsSub_retain(sub);
    }

    return NATS_UPDATE_ERR_STACK(s);
}

void
natsConn_removeSubscription(natsConnection *nc, natsSubscription *removedSub, bool needsLock)
{
    natsSubscription *sub = NULL;

    if (needsLock)
        natsConn_Lock(nc);

    sub = natsHash_Remove(nc->subs, removedSub->sid);

    // Note that the sub may have already been removed, so 'sub == NULL'
    // is not an error.
    if (sub != NULL)
        natsSub_close(sub, false);

    if (needsLock)
        natsConn_Unlock(nc);

    // If we really removed the subscription, then release it.
    if (sub != NULL)
        natsSub_release(sub);
}

// subscribe is the internal subscribe function that indicates interest in a
// subject.
natsStatus
natsConn_subscribe(natsSubscription **newSub,
                   natsConnection *nc, const char *subj, const char *queue,
                   int64_t timeout, natsMsgHandler cb, void *cbClosure)
{
    natsStatus       s    = NATS_OK;
    natsSubscription *sub = NULL;

    if (nc == NULL)
        return nats_setDefaultError(NATS_INVALID_ARG);

    if ((subj == NULL) || (strlen(subj) == 0))
        return nats_setDefaultError(NATS_INVALID_SUBJECT);

    natsConn_Lock(nc);

    if (natsConn_isClosed(nc))
    {
        natsConn_Unlock(nc);

        return nats_setDefaultError(NATS_CONNECTION_CLOSED);
    }

    s = natsSub_create(&sub, nc, subj, queue, timeout, cb, cbClosure);
    if (s == NATS_OK)
    {
        sub->sid = ++(nc->ssid);
        s = natsConn_addSubcription(nc, sub);
    }

    if (s == NATS_OK)
    {
        // We will send these for all subs when we reconnect
        // so that we can suppress here.
        if (!natsConn_isReconnecting(nc))
        {
            char *proto = NULL;
            int  res    = 0;

            res = nats_asprintf(&proto, _SUB_PROTO_,
                                subj,
                                (queue == NULL ? "" : queue),
                                (int) sub->sid);
            if (res < 0)
                s = nats_setDefaultError(NATS_NO_MEMORY);

            if (s == NATS_OK)
            {
                s = natsConn_bufferWriteString(nc, proto);
                if (s == NATS_OK)
                    natsConn_kickFlusher(nc);

                // We should not return a failure if we get an issue
                // with the buffer write (except if it is no memory).
                // For IO errors (if we just got disconnected), the
                // reconnect logic will resend the sub protocol.

                if (s != NATS_NO_MEMORY)
                    s = NATS_OK;
            }

            NATS_FREE(proto);
        }
    }

    if (s == NATS_OK)
    {
        *newSub = sub;
    }
    else if (sub != NULL)
    {
        // A delivery thread may have been started, but the subscription not
        // added to the connection's subscription map. So this is necessary
        // for the delivery thread to unroll.
        natsSub_close(sub, false);

        natsConn_removeSubscription(nc, sub, false);

        natsSub_release(sub);
    }

    natsConn_Unlock(nc);

    return NATS_UPDATE_ERR_STACK(s);
}
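natsConn_subscribe is what the public subscribe calls funnel into: it allocates the subscription, assigns the next sid, registers it in the hash, and (unless reconnecting) writes the SUB protocol line. A minimal end-to-end use of the public wrapper, assuming a server on the default URL (the subject name and 5-second wait are illustrative):

#include <stdio.h>
#include <nats.h>

static void
onMsg(natsConnection *nc, natsSubscription *sub, natsMsg *msg, void *closure)
{
    (void) nc; (void) sub; (void) closure;

    printf("Received: %.*s\n",
           natsMsg_GetDataLength(msg), natsMsg_GetData(msg));

    // The handler owns the message and must destroy it.
    natsMsg_Destroy(msg);
}

int main(void)
{
    natsConnection   *nc  = NULL;
    natsSubscription *sub = NULL;
    natsStatus       s;

    s = natsConnection_ConnectTo(&nc, NATS_DEFAULT_URL);
    if (s == NATS_OK)
        s = natsConnection_Subscribe(&sub, nc, "updates", onMsg, NULL);
    if (s == NATS_OK)
        nats_Sleep(5000);   // let messages arrive for 5 seconds

    natsSubscription_Destroy(sub);
    natsConnection_Destroy(nc);
    return (s == NATS_OK) ? 0 : 1;
}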

// Performs the low level unsubscribe to the server.
natsStatus
natsConn_unsubscribe(natsConnection *nc, natsSubscription *sub, int max)
{
    natsStatus s = NATS_OK;

    natsConn_Lock(nc);

    if (natsConn_isClosed(nc))
    {
        natsConn_Unlock(nc);
        return nats_setDefaultError(NATS_CONNECTION_CLOSED);
    }

    sub = natsHash_Get(nc->subs, sub->sid);
    if (sub == NULL)
    {
        // Already unsubscribed
        natsConn_Unlock(nc);
        return NATS_OK;
    }

    natsSub_Lock(sub);
    sub->max = max;
    natsSub_Unlock(sub);

    if (max == 0)
        natsConn_removeSubscription(nc, sub, false);

    if (!natsConn_isReconnecting(nc))
    {
        // We will send these for all subs when we reconnect
        // so that we can suppress here.
        s = _sendUnsubProto(nc, sub->sid, max);
        if (s == NATS_OK)
            natsConn_kickFlusher(nc);

        // We should not return a failure if we get an issue
        // with the buffer write (except if it is no memory).
        // For IO errors (if we just got disconnected), the
        // reconnect logic will resend the unsub protocol.
        if ((s != NATS_OK) && (s != NATS_NO_MEMORY))
        {
            nats_clearLastError();
            s = NATS_OK;
        }
    }

    natsConn_Unlock(nc);

    return s;
}
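The `max` argument is the pivot of natsConn_unsubscribe: 0 removes the subscription immediately, while a positive value lets it auto-expire after that many deliveries. The two public entry points map onto it directly; a small sketch (assumes `sub` came from a prior natsConnection_Subscribe call, and the count of 10 is illustrative):

#include <stdbool.h>
#include <nats.h>

natsStatus stop_receiving(natsSubscription *sub, bool immediately)
{
    if (immediately)
        return natsSubscription_Unsubscribe(sub);      // max == 0

    // Let 10 more messages through, then unsubscribe automatically.
    return natsSubscription_AutoUnsubscribe(sub, 10);
}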

static natsStatus
_setupServerPool(natsConnection *nc)
{
    natsStatus s;

    s = natsSrvPool_Create(&(nc->srvPool), nc->opts);
    if (s == NATS_OK)
        nc->url = natsSrvPool_GetSrvUrl(nc->srvPool, 0);

    return NATS_UPDATE_ERR_STACK(s);
}

natsStatus
natsConn_create(natsConnection **newConn, natsOptions *options)
{
    natsStatus     s   = NATS_OK;
    natsConnection *nc = NULL;

    s = nats_Open(-1);
    if (s != NATS_OK)
        return s;

    nc = NATS_CALLOC(1, sizeof(natsConnection));
    if (nc == NULL)
    {
        // options have been cloned or created for the connection,
        // which was supposed to take ownership, so destroy it now.
        natsOptions_Destroy(options);
        return nats_setDefaultError(NATS_NO_MEMORY);
    }

    natsLib_Retain();

    nc->refs       = 1;
    nc->sockCtx.fd = NATS_SOCK_INVALID;
    nc->opts       = options;

    if (nc->opts->maxPingsOut == 0)
        nc->opts->maxPingsOut = NATS_OPTS_DEFAULT_MAX_PING_OUT;

    if (nc->opts->maxPendingMsgs == 0)
        nc->opts->maxPendingMsgs = NATS_OPTS_DEFAULT_MAX_PENDING_MSGS;

    if (nc->opts->reconnectBufSize == 0)
        nc->opts->reconnectBufSize = NATS_OPTS_DEFAULT_RECONNECT_BUF_SIZE;

    nc->errStr[0] = '\0';

    s = natsMutex_Create(&(nc->mu));
    if (s == NATS_OK)
        s = _setupServerPool(nc);
    if (s == NATS_OK)
        s = natsHash_Create(&(nc->subs), 8);
    if (s == NATS_OK)
        s = natsSock_Init(&nc->sockCtx);
    if (s == NATS_OK)
    {
        s = natsBuf_Create(&(nc->scratch), DEFAULT_SCRATCH_SIZE);
        if (s == NATS_OK)
            s = natsBuf_Append(nc->scratch, _PUB_P_, _PUB_P_LEN_);
    }
    if (s == NATS_OK)
        s = natsCondition_Create(&(nc->flusherCond));
    if (s == NATS_OK)
        s = natsCondition_Create(&(nc->pongs.cond));

    if (s == NATS_OK)
        *newConn = nc;
    else
        natsConn_release(nc);

    return NATS_UPDATE_ERR_STACK(s);
}

natsStatus
natsConnection_Connect(natsConnection **newConn, natsOptions *options)
{
    natsStatus     s     = NATS_OK;
    natsConnection *nc   = NULL;
    natsOptions    *opts = NULL;

    if (options == NULL)
    {
        s = natsConnection_ConnectTo(newConn, NATS_DEFAULT_URL);
        return NATS_UPDATE_ERR_STACK(s);
    }

    opts = natsOptions_clone(options);
    if (opts == NULL)
        s = NATS_NO_MEMORY;

    if (s == NATS_OK)
        s = natsConn_create(&nc, opts);
    if (s == NATS_OK)
        s = _connect(nc);

    if (s == NATS_OK)
        *newConn = nc;
    else
        natsConn_release(nc);

    return NATS_UPDATE_ERR_STACK(s);
}

static natsStatus
_processUrlString(natsOptions *opts, const char *urls)
{
    int        count       = 0;
    natsStatus s           = NATS_OK;
    char       **serverUrls = NULL;
    char       *urlsCopy   = NULL;
    char       *commaPos   = NULL;
    char       *ptr        = NULL;
    int        len;

    ptr = (char*) urls;
    while ((ptr = strchr(ptr, ',')) != NULL)
    {
        ptr++;
        count++;
    }
    if (count == 0)
        return natsOptions_SetURL(opts, urls);

    serverUrls = (char**) NATS_CALLOC(count + 1, sizeof(char*));
    if (serverUrls == NULL)
        s = NATS_NO_MEMORY;
    if (s == NATS_OK)
    {
        urlsCopy = NATS_STRDUP(urls);
        if (urlsCopy == NULL)
        {
            NATS_FREE(serverUrls);
            return NATS_NO_MEMORY;
        }
    }

    count = 0;
    ptr   = urlsCopy;

    do
    {
        while (*ptr == ' ')
            ptr++;
        serverUrls[count++] = ptr;

        commaPos = strchr(ptr, ',');
        if (commaPos != NULL)
        {
            ptr = (char*)(commaPos + 1);
            *(commaPos) = '\0';
        }

        len = (int) strlen(ptr);
        while ((len > 0) && (ptr[len-1] == ' '))
            ptr[--len] = '\0';

    } while (commaPos != NULL);

    if (s == NATS_OK)
        s = natsOptions_SetServers(opts, (const char**) serverUrls, count);

    NATS_FREE(urlsCopy);
    NATS_FREE(serverUrls);

    return s;
}

natsStatus
natsConnection_ConnectTo(natsConnection **newConn, const char *url)
{
    natsStatus     s     = NATS_OK;
    natsConnection *nc   = NULL;
    natsOptions    *opts = NULL;

    s = natsOptions_Create(&opts);
    if (s == NATS_OK)
        s = _processUrlString(opts, url);
    if (s == NATS_OK)
        s = natsConn_create(&nc, opts);
    if (s == NATS_OK)
        s = _connect(nc);

    if (s == NATS_OK)
        *newConn = nc;
    else
        natsConn_release(nc);

    return NATS_UPDATE_ERR_STACK(s);
}
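_processUrlString is why natsConnection_ConnectTo accepts either a single URL or a whole comma-separated pool: it counts commas, splits the copy in place, trims surrounding spaces, and hands the result to natsOptions_SetServers. A sketch (host names and ports are placeholders):

#include <nats.h>

natsStatus connect_to_cluster(natsConnection **nc)
{
    // Internally becomes natsOptions_SetServers() with three entries;
    // the spaces around the commas are trimmed by _processUrlString.
    return natsConnection_ConnectTo(nc,
        "nats://host1:4222, nats://host2:4222, nats://host3:4222");
}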

// Test if connection has been closed.
bool
natsConnection_IsClosed(natsConnection *nc)
{
    bool closed;

    if (nc == NULL)
        return true;

    natsConn_Lock(nc);

    closed = natsConn_isClosed(nc);

    natsConn_Unlock(nc);

    return closed;
}

// Test if connection is reconnecting.
bool
natsConnection_IsReconnecting(natsConnection *nc)
{
    bool reconnecting;

    if (nc == NULL)
        return false;

    natsConn_Lock(nc);

    reconnecting = natsConn_isReconnecting(nc);

    natsConn_Unlock(nc);

    return reconnecting;
}

// Returns the current state of the connection.
natsConnStatus
natsConnection_Status(natsConnection *nc)
{
    natsConnStatus cs;

    if (nc == NULL)
        return CLOSED;

    natsConn_Lock(nc);

    cs = nc->status;

    natsConn_Unlock(nc);

    return cs;
}

static void
_destroyPong(natsConnection *nc, natsPong *pong)
{
    // If this pong is the cached one, do not free
    if (pong == &(nc->pongs.cached))
        memset(pong, 0, sizeof(natsPong));
    else
        NATS_FREE(pong);
}

natsStatus
natsConnection_FlushTimeout(natsConnection *nc, int64_t timeout)
{
    natsStatus s      = NATS_OK;
    int64_t    target = 0;
    natsPong   *pong  = NULL;

    if (nc == NULL)
        return nats_setDefaultError(NATS_INVALID_ARG);

    if (timeout <= 0)
        return nats_setDefaultError(NATS_INVALID_TIMEOUT);

    natsConn_lockAndRetain(nc);

    if (natsConn_isClosed(nc))
        s = nats_setDefaultError(NATS_CONNECTION_CLOSED);

    if (s == NATS_OK)
    {
        // Use the cached PONG instead of creating one if the list
        // is empty
        if (nc->pongs.head == NULL)
            pong = &(nc->pongs.cached);
        else
            pong = (natsPong*) NATS_CALLOC(1, sizeof(natsPong));

        if (pong == NULL)
            s = nats_setDefaultError(NATS_NO_MEMORY);
    }

    if (s == NATS_OK)
    {
        // Send the ping (and add the pong to the list)
        _sendPing(nc, pong);

        target = nats_Now() + timeout;

        // When the corresponding PONG is received, the PONG processing code
        // will set pong->id to 0 and do a broadcast. This will allow this
        // code to break out of the 'while' loop.
        while ((s != NATS_TIMEOUT)
               && !natsConn_isClosed(nc)
               && (pong->id > 0))
        {
            s = natsCondition_AbsoluteTimedWait(nc->pongs.cond, nc->mu, target);
        }

        if ((s == NATS_OK) && (nc->status == CLOSED))
        {
            // The connection has been closed while we were waiting
            s = nats_setDefaultError(NATS_CONNECTION_CLOSED);
        }
        else if ((s == NATS_OK) && (pong->id == -1))
        {
            // The connection was disconnected and the library is in the
            // process of trying to reconnect
            s = nats_setDefaultError(NATS_CONNECTION_DISCONNECTED);
        }

        if (s != NATS_OK)
        {
            // If we are here, it is possible that we timed-out, or some other
            // error occurred. Make sure the request is no longer in the list.
            _removePongFromList(nc, pong);

            // Set the error. If we don't do that, and flush is called in a loop,
            // the stack would be growing with Flush/FlushTimeout.
            s = nats_setDefaultError(s);
        }

        // We are done with the pong
        _destroyPong(nc, pong);
    }

    natsConn_unlockAndRelease(nc);

    return NATS_UPDATE_ERR_STACK(s);
}

natsStatus
natsConnection_Flush(natsConnection *nc)
{
    natsStatus s = natsConnection_FlushTimeout(nc, 60000);
    return NATS_UPDATE_ERR_STACK(s);
}
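natsConnection_FlushTimeout pairs with natsConn_processPong above: it queues a PONG expectation via _sendPing and waits on the condition variable until the matching PONG zeroes pong->id, the timeout expires, or the connection drops. Typical use is to confirm that a publish actually reached the server; a sketch (the subject and 2-second timeout are illustrative):

#include <nats.h>

natsStatus publish_confirmed(natsConnection *nc)
{
    natsStatus s = natsConnection_PublishString(nc, "events", "hello");

    // Block until the server answers the PING that follows the
    // buffered PUB; NATS_TIMEOUT here means no round trip in 2s.
    if (s == NATS_OK)
        s = natsConnection_FlushTimeout(nc, 2000);

    return s;
}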

int
natsConnection_Buffered(natsConnection *nc)
{
    int buffered = -1;

    if (nc == NULL)
        return nats_setDefaultError(NATS_INVALID_ARG);

    natsConn_Lock(nc);

    if ((nc->status != CLOSED) && (nc->bw != NULL))
        buffered = natsBuf_Len(nc->bw);

    natsConn_Unlock(nc);

    return buffered;
}

int64_t
natsConnection_GetMaxPayload(natsConnection *nc)
{
    int64_t mp = 0;

    if (nc == NULL)
        return 0;

    natsConn_Lock(nc);

    mp = nc->info.maxPayload;

    natsConn_Unlock(nc);

    return mp;
}
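Since info.maxPayload is advertised by the server in its INFO block at connect time, GetMaxPayload lets a caller reject oversized messages before buffering them. A sketch of that guard (the helper name is hypothetical):

#include <nats.h>

natsStatus publish_if_fits(natsConnection *nc, const char *subj,
                           const void *data, int len)
{
    if ((int64_t) len > natsConnection_GetMaxPayload(nc))
        return NATS_MAX_PAYLOAD;   // too large for this server

    return natsConnection_Publish(nc, subj, data, len);
}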

natsStatus
natsConnection_GetStats(natsConnection *nc, natsStatistics *stats)
{
    natsStatus s = NATS_OK;

    if ((nc == NULL) || (stats == NULL))
        return nats_setDefaultError(NATS_INVALID_ARG);

    natsConn_Lock(nc);

    memcpy(stats, &(nc->stats), sizeof(natsStatistics));

    natsConn_Unlock(nc);

    return s;
}

natsStatus
natsConnection_GetConnectedUrl(natsConnection *nc, char *buffer, size_t bufferSize)
{
    natsStatus s = NATS_OK;

    if ((nc == NULL) || (buffer == NULL))
        return nats_setDefaultError(NATS_INVALID_ARG);

    natsConn_Lock(nc);

    buffer[0] = '\0';

    if ((nc->status == CONNECTED) && (nc->url->fullUrl != NULL))
    {
        if (strlen(nc->url->fullUrl) >= bufferSize)
            s = nats_setDefaultError(NATS_INSUFFICIENT_BUFFER);

        if (s == NATS_OK)
            snprintf(buffer, bufferSize, "%s", nc->url->fullUrl);
    }

    natsConn_Unlock(nc);

    return s;
}

natsStatus
natsConnection_GetConnectedServerId(natsConnection *nc, char *buffer, size_t bufferSize)
{
    natsStatus s = NATS_OK;

    if ((nc == NULL) || (buffer == NULL))
        return nats_setDefaultError(NATS_INVALID_ARG);

    natsConn_Lock(nc);

    buffer[0] = '\0';

    if ((nc->status == CONNECTED) && (nc->info.id != NULL))
    {
        if (strlen(nc->info.id) >= bufferSize)
            s = nats_setDefaultError(NATS_INSUFFICIENT_BUFFER);

        if (s == NATS_OK)
            snprintf(buffer, bufferSize, "%s", nc->info.id);
    }

    natsConn_Unlock(nc);

    return s;
}

natsStatus
natsConnection_GetServers(natsConnection *nc, char ***servers, int *count)
{
    natsStatus s = NATS_OK;

    if ((nc == NULL) || (servers == NULL) || (count == NULL))
        return nats_setDefaultError(NATS_INVALID_ARG);

    natsConn_Lock(nc);

    s = natsSrvPool_GetServers(nc->srvPool, false, servers, count);

    natsConn_Unlock(nc);

    return NATS_UPDATE_ERR_STACK(s);
}

natsStatus
natsConnection_GetDiscoveredServers(natsConnection *nc, char ***servers, int *count)
{
    natsStatus s = NATS_OK;

    if ((nc == NULL) || (servers == NULL) || (count == NULL))
        return nats_setDefaultError(NATS_INVALID_ARG);

    natsConn_Lock(nc);

    s = natsSrvPool_GetServers(nc->srvPool, true, servers, count);

    natsConn_Unlock(nc);

    return NATS_UPDATE_ERR_STACK(s);
}

natsStatus
natsConnection_GetLastError(natsConnection *nc, const char **lastError)
{
    natsStatus s;

    if (nc == NULL)
        return nats_setDefaultError(NATS_INVALID_ARG);

    natsConn_Lock(nc);

    s = nc->err;
    if (s == NATS_OK)
        nc->errStr[0] = '\0';
    else if (nc->errStr[0] == '\0')
        snprintf(nc->errStr, sizeof(nc->errStr), "%s", natsStatus_GetText(s));

    natsConn_Unlock(nc);

    return s;
}
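Note the lifetime in natsConnection_GetLastError: *lastError points into the connection's own errStr buffer, so it is only valid while the connection lives and may be overwritten by later errors. A sketch of safe, immediate use:

#include <stdio.h>
#include <nats.h>

void report_error(natsConnection *nc)
{
    const char *lastErr = NULL;
    natsStatus s        = natsConnection_GetLastError(nc, &lastErr);

    // Use the string before the connection can change or free it.
    if (s != NATS_OK)
        fprintf(stderr, "Error %d (%s): %s\n",
                s, natsStatus_GetText(s), lastErr);
}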

void
natsConnection_Close(natsConnection *nc)
{
    if (nc == NULL)
        return;

    nats_doNotUpdateErrStack(true);

    _close(nc, CLOSED, true);

    nats_doNotUpdateErrStack(false);
}

void
natsConnection_Destroy(natsConnection *nc)
{
    if (nc == NULL)
        return;

    nats_doNotUpdateErrStack(true);

    _close(nc, CLOSED, true);

    nats_doNotUpdateErrStack(false);

    natsConn_release(nc);
}
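As the two bodies above show, Destroy is exactly Close plus dropping the caller's reference via natsConn_release, so the closed/disconnected callbacks fire on either path. Typical teardown uses Destroy alone; a sketch:

#include <nats.h>

void teardown_example(void)
{
    natsConnection *nc = NULL;

    if (natsConnection_ConnectTo(&nc, NATS_DEFAULT_URL) == NATS_OK)
    {
        // ... use the connection ...
        natsConnection_Destroy(nc);   // Close (callbacks fire) + release
    }
}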

void
natsConnection_ProcessReadEvent(natsConnection *nc)
{
    natsStatus s = NATS_OK;
    int        n = 0;
    char       *buffer;
    int        size;

    natsConn_Lock(nc);

    if (!(nc->el.attached))
    {
        natsConn_Unlock(nc);
        return;
    }

    if (nc->ps == NULL)
    {
        s = natsParser_Create(&(nc->ps));
        if (s != NATS_OK)
            nats_setDefaultError(NATS_NO_MEMORY);
    }

    if ((s != NATS_OK) || natsConn_isClosed(nc) || natsConn_isReconnecting(nc))
    {
        (void) NATS_UPDATE_ERR_STACK(s);
        natsConn_Unlock(nc);
        return;
    }

    _retain(nc);

    buffer = nc->el.buffer;
    size   = DEFAULT_BUF_SIZE;

    natsConn_Unlock(nc);

    // Do not try to read again here on success. If more than one connection
    // is attached to the same loop, and there is a constant stream of data
    // coming for the first connection, this would starve the second connection.
    // So return and we will be called back later by the event loop.
    s = natsSock_Read(&(nc->sockCtx), buffer, size, &n);
    if (s == NATS_OK)
        s = natsParser_Parse(nc, buffer, n);

    if (s != NATS_OK)
        _processOpError(nc, s);

    natsConn_release(nc);
}

void
natsConnection_ProcessWriteEvent(natsConnection *nc)
{
    natsStatus s = NATS_OK;
    int        n = 0;
    char       *buf;
    int        len;

    natsConn_Lock(nc);

    if (!(nc->el.attached) || (nc->sockCtx.fd == NATS_SOCK_INVALID))
    {
        natsConn_Unlock(nc);
        return;
    }

    buf = natsBuf_Data(nc->bw);
    len = natsBuf_Len(nc->bw);

    s = natsSock_Write(&(nc->sockCtx), buf, len, &n);
    if (s == NATS_OK)
    {
        if (n == len)
        {
            // We sent all the data, reset buffer and remove WRITE event.
            natsBuf_Reset(nc->bw);

            s = nc->opts->evCbs.write(nc->el.data, NATS_EVENT_ACTION_REMOVE);
            if (s == NATS_OK)
                nc->el.writeAdded = false;
            else
                nats_setError(s, "Error processing write request: %d - %s",
                              s, natsStatus_GetText(s));
        }
        else
        {
            // We sent some part of the buffer. Move the remaining at the beginning.
            natsBuf_Consume(nc->bw, n);
        }
    }

    natsConn_Unlock(nc);

    if (s != NATS_OK)
        _processOpError(nc, s);

    (void) NATS_UPDATE_ERR_STACK(s);
}
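These two entry points are the hook for running the connection on an external event loop instead of the library's own read/flush threads: the loop invokes them when the NATS socket becomes readable or writable, and ProcessReadEvent deliberately performs a single read per callback so one busy connection cannot starve others sharing the loop. A sketch of the loop-facing side (the watcher type is a hypothetical stand-in for a libevent/libuv handle; only the two Process*Event calls are real API):

#include <nats.h>

typedef struct { natsConnection *nc; } my_watcher_t;

// Called by the event loop when the NATS socket is readable.
void on_readable(my_watcher_t *w)
{
    natsConnection_ProcessReadEvent(w->nc);   // one read, then yield
}

// Called by the event loop when the NATS socket is writable.
void on_writable(my_watcher_t *w)
{
    natsConnection_ProcessWriteEvent(w->nc);  // drain the pending buffer
}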