Bug Summary

File: out/../deps/uv/src/unix/udp.c
Warning: line 229, column 19
1st function call argument is an uninitialized value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name udp.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/home/maurizio/node-v18.6.0/out -resource-dir /usr/local/lib/clang/16.0.0 -D V8_DEPRECATION_WARNINGS -D V8_IMMINENT_DEPRECATION_WARNINGS -D _GLIBCXX_USE_CXX11_ABI=1 -D NODE_OPENSSL_CONF_NAME=nodejs_conf -D NODE_OPENSSL_HAS_QUIC -D __STDC_FORMAT_MACROS -D OPENSSL_NO_PINSHARED -D OPENSSL_THREADS -D _LARGEFILE_SOURCE -D _FILE_OFFSET_BITS=64 -D _GNU_SOURCE -I ../deps/uv/include -I ../deps/uv/src -internal-isystem /usr/local/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-redhat-linux/8/../../../../x86_64-redhat-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wno-unused-parameter -fdebug-compilation-dir=/home/maurizio/node-v18.6.0/out -ferror-limit 19 -fvisibility hidden -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-08-22-142216-507842-1 -x c ../deps/uv/src/unix/udp.c
1/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22#include "uv.h"
23#include "internal.h"
24
25#include <assert.h>
26#include <string.h>
27#include <errno.h>
28#include <stdlib.h>
29#include <unistd.h>
30#if defined(__MVS__)
31#include <xti.h>
32#endif
33#include <sys/un.h>
34
35#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
36# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
37#endif
38
39#if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
40# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
41#endif
42
43union uv__sockaddr {
44 struct sockaddr_in6 in6;
45 struct sockaddr_in in;
46 struct sockaddr addr;
47};
48
49static void uv__udp_run_completed(uv_udp_t* handle);
50static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
51static void uv__udp_recvmsg(uv_udp_t* handle);
52static void uv__udp_sendmsg(uv_udp_t* handle);
53static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
54 int domain,
55 unsigned int flags);
56
57#if HAVE_MMSG1
58
59#define UV__MMSG_MAXWIDTH20 20
60
61static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf);
62static void uv__udp_sendmmsg(uv_udp_t* handle);
63
64static int uv__recvmmsg_avail;
65static int uv__sendmmsg_avail;
66static uv_once_t once = UV_ONCE_INIT;
67
68static void uv__udp_mmsg_init(void) {
69 int ret;
70 int s;
71 s = uv__socket(AF_INET2, SOCK_DGRAMSOCK_DGRAM, 0);
72 if (s < 0)
73 return;
74 ret = uv__sendmmsg(s, NULL((void*)0), 0);
75 if (ret == 0 || errno(*__errno_location ()) != ENOSYS38) {
76 uv__sendmmsg_avail = 1;
77 uv__recvmmsg_avail = 1;
78 } else {
79 ret = uv__recvmmsg(s, NULL((void*)0), 0);
80 if (ret == 0 || errno(*__errno_location ()) != ENOSYS38)
81 uv__recvmmsg_avail = 1;
82 }
83 uv__close(s);
84}
85
86#endif
87
88void uv__udp_close(uv_udp_t* handle) {
89 uv__io_close(handle->loop, &handle->io_watcher);
90 uv__handle_stop(handle)do { if (((handle)->flags & UV_HANDLE_ACTIVE) == 0) break
; (handle)->flags &= ~UV_HANDLE_ACTIVE; if (((handle)->
flags & UV_HANDLE_REF) != 0) do { (handle)->loop->active_handles
--; } while (0); } while (0)
;
91
92 if (handle->io_watcher.fd != -1) {
93 uv__close(handle->io_watcher.fd);
94 handle->io_watcher.fd = -1;
95 }
96}
97
98
99void uv__udp_finish_close(uv_udp_t* handle) {
100 uv_udp_send_t* req;
101 QUEUE* q;
102
103 assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT))((void) sizeof ((!uv__io_active(&handle->io_watcher, 0x001
| 0x004)) ? 1 : 0), __extension__ ({ if (!uv__io_active(&
handle->io_watcher, 0x001 | 0x004)) ; else __assert_fail (
"!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT)"
, "../deps/uv/src/unix/udp.c", 103, __extension__ __PRETTY_FUNCTION__
); }))
;
104 assert(handle->io_watcher.fd == -1)((void) sizeof ((handle->io_watcher.fd == -1) ? 1 : 0), __extension__
({ if (handle->io_watcher.fd == -1) ; else __assert_fail (
"handle->io_watcher.fd == -1", "../deps/uv/src/unix/udp.c"
, 104, __extension__ __PRETTY_FUNCTION__); }))
;
105
106 while (!QUEUE_EMPTY(&handle->write_queue)((const QUEUE *) (&handle->write_queue) == (const QUEUE
*) (*(QUEUE **) &((*(&handle->write_queue))[0])))
) {
107 q = QUEUE_HEAD(&handle->write_queue)((*(QUEUE **) &((*(&handle->write_queue))[0])));
108 QUEUE_REMOVE(q)do { ((*(QUEUE **) &((*((*(QUEUE **) &((*(q))[1]))))[
0]))) = (*(QUEUE **) &((*(q))[0])); ((*(QUEUE **) &((
*((*(QUEUE **) &((*(q))[0]))))[1]))) = (*(QUEUE **) &
((*(q))[1])); } while (0)
;
109
110 req = QUEUE_DATA(q, uv_udp_send_t, queue)((uv_udp_send_t *) ((char *) (q) - __builtin_offsetof(uv_udp_send_t
, queue)))
;
111 req->status = UV_ECANCELED;
112 QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue)do { (*(QUEUE **) &((*(&req->queue))[0])) = (&
handle->write_completed_queue); (*(QUEUE **) &((*(&
req->queue))[1])) = (*(QUEUE **) &((*(&handle->
write_completed_queue))[1])); ((*(QUEUE **) &((*((*(QUEUE
**) &((*(&req->queue))[1]))))[0]))) = (&req->
queue); (*(QUEUE **) &((*(&handle->write_completed_queue
))[1])) = (&req->queue); } while (0)
;
113 }
114
115 uv__udp_run_completed(handle);
116
117 assert(handle->send_queue_size == 0)((void) sizeof ((handle->send_queue_size == 0) ? 1 : 0), __extension__
({ if (handle->send_queue_size == 0) ; else __assert_fail
("handle->send_queue_size == 0", "../deps/uv/src/unix/udp.c"
, 117, __extension__ __PRETTY_FUNCTION__); }))
;
118 assert(handle->send_queue_count == 0)((void) sizeof ((handle->send_queue_count == 0) ? 1 : 0), __extension__
({ if (handle->send_queue_count == 0) ; else __assert_fail
("handle->send_queue_count == 0", "../deps/uv/src/unix/udp.c"
, 118, __extension__ __PRETTY_FUNCTION__); }))
;
119
120 /* Now tear down the handle. */
121 handle->recv_cb = NULL((void*)0);
122 handle->alloc_cb = NULL((void*)0);
123 /* but _do not_ touch close_cb */
124}
125
126
127static void uv__udp_run_completed(uv_udp_t* handle) {
128 uv_udp_send_t* req;
129 QUEUE* q;
130
131 assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING))((void) sizeof ((!(handle->flags & UV_HANDLE_UDP_PROCESSING
)) ? 1 : 0), __extension__ ({ if (!(handle->flags & UV_HANDLE_UDP_PROCESSING
)) ; else __assert_fail ("!(handle->flags & UV_HANDLE_UDP_PROCESSING)"
, "../deps/uv/src/unix/udp.c", 131, __extension__ __PRETTY_FUNCTION__
); }))
;
132 handle->flags |= UV_HANDLE_UDP_PROCESSING;
133
134 while (!QUEUE_EMPTY(&handle->write_completed_queue)((const QUEUE *) (&handle->write_completed_queue) == (
const QUEUE *) (*(QUEUE **) &((*(&handle->write_completed_queue
))[0])))
) {
135 q = QUEUE_HEAD(&handle->write_completed_queue)((*(QUEUE **) &((*(&handle->write_completed_queue)
)[0])))
;
136 QUEUE_REMOVE(q)do { ((*(QUEUE **) &((*((*(QUEUE **) &((*(q))[1]))))[
0]))) = (*(QUEUE **) &((*(q))[0])); ((*(QUEUE **) &((
*((*(QUEUE **) &((*(q))[0]))))[1]))) = (*(QUEUE **) &
((*(q))[1])); } while (0)
;
137
138 req = QUEUE_DATA(q, uv_udp_send_t, queue)((uv_udp_send_t *) ((char *) (q) - __builtin_offsetof(uv_udp_send_t
, queue)))
;
139 uv__req_unregister(handle->loop, req)do { ((void) sizeof ((((handle->loop)->active_reqs.count
> 0)) ? 1 : 0), __extension__ ({ if (((handle->loop)->
active_reqs.count > 0)) ; else __assert_fail ("uv__has_active_reqs(handle->loop)"
, "../deps/uv/src/unix/udp.c", 139, __extension__ __PRETTY_FUNCTION__
); })); (handle->loop)->active_reqs.count--; } while (0
)
;
140
141 handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
142 handle->send_queue_count--;
143
144 if (req->bufs != req->bufsml)
145 uv__free(req->bufs);
146 req->bufs = NULL((void*)0);
147
148 if (req->send_cb == NULL((void*)0))
149 continue;
150
151 /* req->status >= 0 == bytes written
152 * req->status < 0 == errno
153 */
154 if (req->status >= 0)
155 req->send_cb(req, 0);
156 else
157 req->send_cb(req, req->status);
158 }
159
160 if (QUEUE_EMPTY(&handle->write_queue)((const QUEUE *) (&handle->write_queue) == (const QUEUE
*) (*(QUEUE **) &((*(&handle->write_queue))[0])))
) {
161 /* Pending queue and completion queue empty, stop watcher. */
162 uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT0x004);
163 if (!uv__io_active(&handle->io_watcher, POLLIN0x001))
164 uv__handle_stop(handle)do { if (((handle)->flags & UV_HANDLE_ACTIVE) == 0) break
; (handle)->flags &= ~UV_HANDLE_ACTIVE; if (((handle)->
flags & UV_HANDLE_REF) != 0) do { (handle)->loop->active_handles
--; } while (0); } while (0)
;
165 }
166
167 handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
168}
169
170
171static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
172 uv_udp_t* handle;
173
174 handle = container_of(w, uv_udp_t, io_watcher)((uv_udp_t *) ((char *) (w) - __builtin_offsetof(uv_udp_t, io_watcher
)))
;
175 assert(handle->type == UV_UDP)((void) sizeof ((handle->type == UV_UDP) ? 1 : 0), __extension__
({ if (handle->type == UV_UDP) ; else __assert_fail ("handle->type == UV_UDP"
, "../deps/uv/src/unix/udp.c", 175, __extension__ __PRETTY_FUNCTION__
); }))
;
1
Assuming field 'type' is equal to UV_UDP
2
Taking true branch
176
177 if (revents & POLLIN0x001)
3
Assuming the condition is true
4
Taking true branch
178 uv__udp_recvmsg(handle);
5
Calling 'uv__udp_recvmsg'
179
180 if (revents & POLLOUT0x004) {
181 uv__udp_sendmsg(handle);
182 uv__udp_run_completed(handle);
183 }
184}
185
186#if HAVE_MMSG1
187static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
188 struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH20];
189 struct iovec iov[UV__MMSG_MAXWIDTH20];
190 struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH20];
191 ssize_t nread;
192 uv_buf_t chunk_buf;
193 size_t chunks;
194 int flags;
195 size_t k;
196
197 /* prepare structures for recvmmsg */
198 chunks = buf->len / UV__UDP_DGRAM_MAXSIZE(64 * 1024);
199 if (chunks > ARRAY_SIZE(iov)(sizeof(iov) / sizeof((iov)[0])))
17
Assuming the condition is false
18
Taking false branch
200 chunks = ARRAY_SIZE(iov)(sizeof(iov) / sizeof((iov)[0]));
201 for (k = 0; k < chunks; ++k) {
19
Assuming 'k' is >= 'chunks'
20
Loop condition is false. Execution continues on line 214
202 iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE(64 * 1024);
203 iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE(64 * 1024);
204 msgs[k].msg_hdr.msg_iov = iov + k;
205 msgs[k].msg_hdr.msg_iovlen = 1;
206 msgs[k].msg_hdr.msg_name = peers + k;
207 msgs[k].msg_hdr.msg_namelen = sizeof(peers[0]);
208 msgs[k].msg_hdr.msg_control = NULL((void*)0);
209 msgs[k].msg_hdr.msg_controllen = 0;
210 msgs[k].msg_hdr.msg_flags = 0;
211 }
212
213 do
22
Loop condition is false. Exiting loop
214 nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks);
215 while (nread == -1 && errno(*__errno_location ()) == EINTR4);
21
Assuming the condition is false
216
217 if (nread < 1) {
23
Assuming 'nread' is >= 1
24
Taking false branch
218 if (nread == 0 || errno(*__errno_location ()) == EAGAIN11 || errno(*__errno_location ()) == EWOULDBLOCK11)
219 handle->recv_cb(handle, 0, buf, NULL((void*)0), 0);
220 else
221 handle->recv_cb(handle, UV__ERR(errno)(-((*__errno_location ()))), buf, NULL((void*)0), 0);
222 } else {
223 /* pass each chunk to the application */
224 for (k = 0; k
24.1
'k' is < 'nread'
< (size_t) nread && handle->recv_cb != NULL((void*)0); k++) {
25
Assuming field 'recv_cb' is not equal to NULL
26
Loop condition is true. Entering loop body
225 flags = UV_UDP_MMSG_CHUNK;
226 if (msgs[k].msg_hdr.msg_flags & MSG_TRUNCMSG_TRUNC)
27
Assuming the condition is false
28
Taking false branch
227 flags |= UV_UDP_PARTIAL;
228
229 chunk_buf = uv_buf_init(iov[k].iov_base, iov[k].iov_len);
29
1st function call argument is an uninitialized value
230 handle->recv_cb(handle,
231 msgs[k].msg_len,
232 &chunk_buf,
233 msgs[k].msg_hdr.msg_name,
234 flags);
235 }
236
237 /* one last callback so the original buffer is freed */
238 if (handle->recv_cb != NULL((void*)0))
239 handle->recv_cb(handle, 0, buf, NULL((void*)0), UV_UDP_MMSG_FREE);
240 }
241 return nread;
242}
243#endif
244
245static void uv__udp_recvmsg(uv_udp_t* handle) {
246 struct sockaddr_storage peer;
247 struct msghdr h;
248 ssize_t nread;
249 uv_buf_t buf;
250 int flags;
251 int count;
252
253 assert(handle->recv_cb != NULL)((void) sizeof ((handle->recv_cb != ((void*)0)) ? 1 : 0), __extension__
({ if (handle->recv_cb != ((void*)0)) ; else __assert_fail
("handle->recv_cb != NULL", "../deps/uv/src/unix/udp.c", 253
, __extension__ __PRETTY_FUNCTION__); }))
;
6
Assuming field 'recv_cb' is not equal to null
7
Taking true branch
254 assert(handle->alloc_cb != NULL)((void) sizeof ((handle->alloc_cb != ((void*)0)) ? 1 : 0),
__extension__ ({ if (handle->alloc_cb != ((void*)0)) ; else
__assert_fail ("handle->alloc_cb != NULL", "../deps/uv/src/unix/udp.c"
, 254, __extension__ __PRETTY_FUNCTION__); }))
;
8
Assuming field 'alloc_cb' is not equal to null
9
Taking true branch
255
256 /* Prevent loop starvation when the data comes in as fast as (or faster than)
257 * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
258 */
259 count = 32;
260
261 do {
262 buf = uv_buf_init(NULL((void*)0), 0);
263 handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE(64 * 1024), &buf);
264 if (buf.base == NULL((void*)0) || buf.len == 0) {
10
Assuming field 'base' is not equal to NULL
11
Assuming field 'len' is not equal to 0
12
Taking false branch
265 handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL((void*)0), 0);
266 return;
267 }
268 assert(buf.base != NULL)((void) sizeof ((buf.base != ((void*)0)) ? 1 : 0), __extension__
({ if (buf.base != ((void*)0)) ; else __assert_fail ("buf.base != NULL"
, "../deps/uv/src/unix/udp.c", 268, __extension__ __PRETTY_FUNCTION__
); }))
;
13
Taking true branch
269
270#if HAVE_MMSG1
271 if (uv_udp_using_recvmmsg(handle)) {
14
Assuming the condition is true
15
Taking true branch
272 nread = uv__udp_recvmmsg(handle, &buf);
16
Calling 'uv__udp_recvmmsg'
273 if (nread > 0)
274 count -= nread;
275 continue;
276 }
277#endif
278
279 memset(&h, 0, sizeof(h));
280 memset(&peer, 0, sizeof(peer));
281 h.msg_name = &peer;
282 h.msg_namelen = sizeof(peer);
283 h.msg_iov = (void*) &buf;
284 h.msg_iovlen = 1;
285
286 do {
287 nread = recvmsg(handle->io_watcher.fd, &h, 0);
288 }
289 while (nread == -1 && errno(*__errno_location ()) == EINTR4);
290
291 if (nread == -1) {
292 if (errno(*__errno_location ()) == EAGAIN11 || errno(*__errno_location ()) == EWOULDBLOCK11)
293 handle->recv_cb(handle, 0, &buf, NULL((void*)0), 0);
294 else
295 handle->recv_cb(handle, UV__ERR(errno)(-((*__errno_location ()))), &buf, NULL((void*)0), 0);
296 }
297 else {
298 flags = 0;
299 if (h.msg_flags & MSG_TRUNCMSG_TRUNC)
300 flags |= UV_UDP_PARTIAL;
301
302 handle->recv_cb(handle, nread, &buf, (const struct sockaddr*) &peer, flags);
303 }
304 count--;
305 }
306 /* recv_cb callback may decide to pause or close the handle */
307 while (nread != -1
308 && count > 0
309 && handle->io_watcher.fd != -1
310 && handle->recv_cb != NULL((void*)0));
311}
312
313#if HAVE_MMSG1
314static void uv__udp_sendmmsg(uv_udp_t* handle) {
315 uv_udp_send_t* req;
316 struct uv__mmsghdr h[UV__MMSG_MAXWIDTH20];
317 struct uv__mmsghdr *p;
318 QUEUE* q;
319 ssize_t npkts;
320 size_t pkts;
321 size_t i;
322
323 if (QUEUE_EMPTY(&handle->write_queue)((const QUEUE *) (&handle->write_queue) == (const QUEUE
*) (*(QUEUE **) &((*(&handle->write_queue))[0])))
)
324 return;
325
326write_queue_drain:
327 for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue)((*(QUEUE **) &((*(&handle->write_queue))[0])));
328 pkts < UV__MMSG_MAXWIDTH20 && q != &handle->write_queue;
329 ++pkts, q = QUEUE_HEAD(q)((*(QUEUE **) &((*(q))[0])))) {
330 assert(q != NULL)((void) sizeof ((q != ((void*)0)) ? 1 : 0), __extension__ ({ if
(q != ((void*)0)) ; else __assert_fail ("q != NULL", "../deps/uv/src/unix/udp.c"
, 330, __extension__ __PRETTY_FUNCTION__); }))
;
331 req = QUEUE_DATA(q, uv_udp_send_t, queue)((uv_udp_send_t *) ((char *) (q) - __builtin_offsetof(uv_udp_send_t
, queue)))
;
332 assert(req != NULL)((void) sizeof ((req != ((void*)0)) ? 1 : 0), __extension__ (
{ if (req != ((void*)0)) ; else __assert_fail ("req != NULL",
"../deps/uv/src/unix/udp.c", 332, __extension__ __PRETTY_FUNCTION__
); }))
;
333
334 p = &h[pkts];
335 memset(p, 0, sizeof(*p));
336 if (req->addr.ss_family == AF_UNSPEC0) {
337 p->msg_hdr.msg_name = NULL((void*)0);
338 p->msg_hdr.msg_namelen = 0;
339 } else {
340 p->msg_hdr.msg_name = &req->addr;
341 if (req->addr.ss_family == AF_INET610)
342 p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in6);
343 else if (req->addr.ss_family == AF_INET2)
344 p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
345 else if (req->addr.ss_family == AF_UNIX1)
346 p->msg_hdr.msg_namelen = sizeof(struct sockaddr_un);
347 else {
348 assert(0 && "unsupported address family")((void) sizeof ((0 && "unsupported address family") ?
1 : 0), __extension__ ({ if (0 && "unsupported address family"
) ; else __assert_fail ("0 && \"unsupported address family\""
, "../deps/uv/src/unix/udp.c", 348, __extension__ __PRETTY_FUNCTION__
); }))
;
349 abort();
350 }
351 }
352 h[pkts].msg_hdr.msg_iov = (struct iovec*) req->bufs;
353 h[pkts].msg_hdr.msg_iovlen = req->nbufs;
354 }
355
356 do
357 npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts);
358 while (npkts == -1 && errno(*__errno_location ()) == EINTR4);
359
360 if (npkts < 1) {
361 if (errno(*__errno_location ()) == EAGAIN11 || errno(*__errno_location ()) == EWOULDBLOCK11 || errno(*__errno_location ()) == ENOBUFS105)
362 return;
363 for (i = 0, q = QUEUE_HEAD(&handle->write_queue)((*(QUEUE **) &((*(&handle->write_queue))[0])));
364 i < pkts && q != &handle->write_queue;
365 ++i, q = QUEUE_HEAD(&handle->write_queue)((*(QUEUE **) &((*(&handle->write_queue))[0])))) {
366 assert(q != NULL)((void) sizeof ((q != ((void*)0)) ? 1 : 0), __extension__ ({ if
(q != ((void*)0)) ; else __assert_fail ("q != NULL", "../deps/uv/src/unix/udp.c"
, 366, __extension__ __PRETTY_FUNCTION__); }))
;
367 req = QUEUE_DATA(q, uv_udp_send_t, queue)((uv_udp_send_t *) ((char *) (q) - __builtin_offsetof(uv_udp_send_t
, queue)))
;
368 assert(req != NULL)((void) sizeof ((req != ((void*)0)) ? 1 : 0), __extension__ (
{ if (req != ((void*)0)) ; else __assert_fail ("req != NULL",
"../deps/uv/src/unix/udp.c", 368, __extension__ __PRETTY_FUNCTION__
); }))
;
369
370 req->status = UV__ERR(errno)(-((*__errno_location ())));
371 QUEUE_REMOVE(&req->queue)do { ((*(QUEUE **) &((*((*(QUEUE **) &((*(&req->
queue))[1]))))[0]))) = (*(QUEUE **) &((*(&req->queue
))[0])); ((*(QUEUE **) &((*((*(QUEUE **) &((*(&req
->queue))[0]))))[1]))) = (*(QUEUE **) &((*(&req->
queue))[1])); } while (0)
;
372 QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue)do { (*(QUEUE **) &((*(&req->queue))[0])) = (&
handle->write_completed_queue); (*(QUEUE **) &((*(&
req->queue))[1])) = (*(QUEUE **) &((*(&handle->
write_completed_queue))[1])); ((*(QUEUE **) &((*((*(QUEUE
**) &((*(&req->queue))[1]))))[0]))) = (&req->
queue); (*(QUEUE **) &((*(&handle->write_completed_queue
))[1])) = (&req->queue); } while (0)
;
373 }
374 uv__io_feed(handle->loop, &handle->io_watcher);
375 return;
376 }
377
378 /* Safety: npkts known to be >0 below. Hence cast from ssize_t
379 * to size_t safe.
380 */
381 for (i = 0, q = QUEUE_HEAD(&handle->write_queue)((*(QUEUE **) &((*(&handle->write_queue))[0])));
382 i < (size_t)npkts && q != &handle->write_queue;
383 ++i, q = QUEUE_HEAD(&handle->write_queue)((*(QUEUE **) &((*(&handle->write_queue))[0])))) {
384 assert(q != NULL)((void) sizeof ((q != ((void*)0)) ? 1 : 0), __extension__ ({ if
(q != ((void*)0)) ; else __assert_fail ("q != NULL", "../deps/uv/src/unix/udp.c"
, 384, __extension__ __PRETTY_FUNCTION__); }))
;
385 req = QUEUE_DATA(q, uv_udp_send_t, queue)((uv_udp_send_t *) ((char *) (q) - __builtin_offsetof(uv_udp_send_t
, queue)))
;
386 assert(req != NULL)((void) sizeof ((req != ((void*)0)) ? 1 : 0), __extension__ (
{ if (req != ((void*)0)) ; else __assert_fail ("req != NULL",
"../deps/uv/src/unix/udp.c", 386, __extension__ __PRETTY_FUNCTION__
); }))
;
387
388 req->status = req->bufs[0].len;
389
390 /* Sending a datagram is an atomic operation: either all data
391 * is written or nothing is (and EMSGSIZE is raised). That is
392 * why we don't handle partial writes. Just pop the request
393 * off the write queue and onto the completed queue, done.
394 */
395 QUEUE_REMOVE(&req->queue)do { ((*(QUEUE **) &((*((*(QUEUE **) &((*(&req->
queue))[1]))))[0]))) = (*(QUEUE **) &((*(&req->queue
))[0])); ((*(QUEUE **) &((*((*(QUEUE **) &((*(&req
->queue))[0]))))[1]))) = (*(QUEUE **) &((*(&req->
queue))[1])); } while (0)
;
396 QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue)do { (*(QUEUE **) &((*(&req->queue))[0])) = (&
handle->write_completed_queue); (*(QUEUE **) &((*(&
req->queue))[1])) = (*(QUEUE **) &((*(&handle->
write_completed_queue))[1])); ((*(QUEUE **) &((*((*(QUEUE
**) &((*(&req->queue))[1]))))[0]))) = (&req->
queue); (*(QUEUE **) &((*(&handle->write_completed_queue
))[1])) = (&req->queue); } while (0)
;
397 }
398
399 /* couldn't batch everything, continue sending (jump to avoid stack growth) */
400 if (!QUEUE_EMPTY(&handle->write_queue)((const QUEUE *) (&handle->write_queue) == (const QUEUE
*) (*(QUEUE **) &((*(&handle->write_queue))[0])))
)
401 goto write_queue_drain;
402 uv__io_feed(handle->loop, &handle->io_watcher);
403 return;
404}
405#endif
406
407static void uv__udp_sendmsg(uv_udp_t* handle) {
408 uv_udp_send_t* req;
409 struct msghdr h;
410 QUEUE* q;
411 ssize_t size;
412
413#if HAVE_MMSG1
414 uv_once(&once, uv__udp_mmsg_init);
415 if (uv__sendmmsg_avail) {
416 uv__udp_sendmmsg(handle);
417 return;
418 }
419#endif
420
421 while (!QUEUE_EMPTY(&handle->write_queue)((const QUEUE *) (&handle->write_queue) == (const QUEUE
*) (*(QUEUE **) &((*(&handle->write_queue))[0])))
) {
422 q = QUEUE_HEAD(&handle->write_queue)((*(QUEUE **) &((*(&handle->write_queue))[0])));
423 assert(q != NULL)((void) sizeof ((q != ((void*)0)) ? 1 : 0), __extension__ ({ if
(q != ((void*)0)) ; else __assert_fail ("q != NULL", "../deps/uv/src/unix/udp.c"
, 423, __extension__ __PRETTY_FUNCTION__); }))
;
424
425 req = QUEUE_DATA(q, uv_udp_send_t, queue)((uv_udp_send_t *) ((char *) (q) - __builtin_offsetof(uv_udp_send_t
, queue)))
;
426 assert(req != NULL)((void) sizeof ((req != ((void*)0)) ? 1 : 0), __extension__ (
{ if (req != ((void*)0)) ; else __assert_fail ("req != NULL",
"../deps/uv/src/unix/udp.c", 426, __extension__ __PRETTY_FUNCTION__
); }))
;
427
428 memset(&h, 0, sizeof h);
429 if (req->addr.ss_family == AF_UNSPEC0) {
430 h.msg_name = NULL((void*)0);
431 h.msg_namelen = 0;
432 } else {
433 h.msg_name = &req->addr;
434 if (req->addr.ss_family == AF_INET610)
435 h.msg_namelen = sizeof(struct sockaddr_in6);
436 else if (req->addr.ss_family == AF_INET2)
437 h.msg_namelen = sizeof(struct sockaddr_in);
438 else if (req->addr.ss_family == AF_UNIX1)
439 h.msg_namelen = sizeof(struct sockaddr_un);
440 else {
441 assert(0 && "unsupported address family")((void) sizeof ((0 && "unsupported address family") ?
1 : 0), __extension__ ({ if (0 && "unsupported address family"
) ; else __assert_fail ("0 && \"unsupported address family\""
, "../deps/uv/src/unix/udp.c", 441, __extension__ __PRETTY_FUNCTION__
); }))
;
442 abort();
443 }
444 }
445 h.msg_iov = (struct iovec*) req->bufs;
446 h.msg_iovlen = req->nbufs;
447
448 do {
449 size = sendmsg(handle->io_watcher.fd, &h, 0);
450 } while (size == -1 && errno(*__errno_location ()) == EINTR4);
451
452 if (size == -1) {
453 if (errno(*__errno_location ()) == EAGAIN11 || errno(*__errno_location ()) == EWOULDBLOCK11 || errno(*__errno_location ()) == ENOBUFS105)
454 break;
455 }
456
457 req->status = (size == -1 ? UV__ERR(errno)(-((*__errno_location ()))) : size);
458
459 /* Sending a datagram is an atomic operation: either all data
460 * is written or nothing is (and EMSGSIZE is raised). That is
461 * why we don't handle partial writes. Just pop the request
462 * off the write queue and onto the completed queue, done.
463 */
464 QUEUE_REMOVE(&req->queue)do { ((*(QUEUE **) &((*((*(QUEUE **) &((*(&req->
queue))[1]))))[0]))) = (*(QUEUE **) &((*(&req->queue
))[0])); ((*(QUEUE **) &((*((*(QUEUE **) &((*(&req
->queue))[0]))))[1]))) = (*(QUEUE **) &((*(&req->
queue))[1])); } while (0)
;
465 QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue)do { (*(QUEUE **) &((*(&req->queue))[0])) = (&
handle->write_completed_queue); (*(QUEUE **) &((*(&
req->queue))[1])) = (*(QUEUE **) &((*(&handle->
write_completed_queue))[1])); ((*(QUEUE **) &((*((*(QUEUE
**) &((*(&req->queue))[1]))))[0]))) = (&req->
queue); (*(QUEUE **) &((*(&handle->write_completed_queue
))[1])) = (&req->queue); } while (0)
;
466 uv__io_feed(handle->loop, &handle->io_watcher);
467 }
468}
469
470/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
471 * refinements for programs that use multicast.
472 *
473 * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that
474 * are different from the BSDs: it _shares_ the port rather than steal it
475 * from the current listener. While useful, it's not something we can emulate
476 * on other platforms so we don't enable it.
477 *
478 * zOS does not support getsockname with SO_REUSEPORT option when using
479 * AF_UNIX.
480 */
481static int uv__set_reuse(int fd) {
482 int yes;
483 yes = 1;
484
485#if defined(SO_REUSEPORT15) && defined(__MVS__)
486 struct sockaddr_in sockfd;
487 unsigned int sockfd_len = sizeof(sockfd);
488 if (getsockname(fd, (struct sockaddr*) &sockfd, &sockfd_len) == -1)
489 return UV__ERR(errno)(-((*__errno_location ())));
490 if (sockfd.sin_family == AF_UNIX1) {
491 if (setsockopt(fd, SOL_SOCKET1, SO_REUSEADDR2, &yes, sizeof(yes)))
492 return UV__ERR(errno)(-((*__errno_location ())));
493 } else {
494 if (setsockopt(fd, SOL_SOCKET1, SO_REUSEPORT15, &yes, sizeof(yes)))
495 return UV__ERR(errno)(-((*__errno_location ())));
496 }
497#elif defined(SO_REUSEPORT15) && !defined(__linux__1)
498 if (setsockopt(fd, SOL_SOCKET1, SO_REUSEPORT15, &yes, sizeof(yes)))
499 return UV__ERR(errno)(-((*__errno_location ())));
500#else
501 if (setsockopt(fd, SOL_SOCKET1, SO_REUSEADDR2, &yes, sizeof(yes)))
502 return UV__ERR(errno)(-((*__errno_location ())));
503#endif
504
505 return 0;
506}
507
508/*
509 * The Linux kernel suppresses some ICMP error messages by default for UDP
510 * sockets. Setting IP_RECVERR/IPV6_RECVERR on the socket enables full ICMP
511 * error reporting, hopefully resulting in faster failover to working name
512 * servers.
513 */
514static int uv__set_recverr(int fd, sa_family_t ss_family) {
515#if defined(__linux__1)
516 int yes;
517
518 yes = 1;
519 if (ss_family == AF_INET2) {
520 if (setsockopt(fd, IPPROTO_IPIPPROTO_IP, IP_RECVERR11, &yes, sizeof(yes)))
521 return UV__ERR(errno)(-((*__errno_location ())));
522 } else if (ss_family == AF_INET610) {
523 if (setsockopt(fd, IPPROTO_IPV6IPPROTO_IPV6, IPV6_RECVERR25, &yes, sizeof(yes)))
524 return UV__ERR(errno)(-((*__errno_location ())));
525 }
526#endif
527 return 0;
528}
529
530
531int uv__udp_bind(uv_udp_t* handle,
532 const struct sockaddr* addr,
533 unsigned int addrlen,
534 unsigned int flags) {
535 int err;
536 int yes;
537 int fd;
538
539 /* Check for bad flags. */
540 if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR | UV_UDP_LINUX_RECVERR))
541 return UV_EINVAL;
542
543 /* Cannot set IPv6-only mode on non-IPv6 socket. */
544 if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET610)
545 return UV_EINVAL;
546
547 fd = handle->io_watcher.fd;
548 if (fd == -1) {
549 err = uv__socket(addr->sa_family, SOCK_DGRAMSOCK_DGRAM, 0);
550 if (err < 0)
551 return err;
552 fd = err;
553 handle->io_watcher.fd = fd;
554 }
555
556 if (flags & UV_UDP_LINUX_RECVERR) {
557 err = uv__set_recverr(fd, addr->sa_family);
558 if (err)
559 return err;
560 }
561
562 if (flags & UV_UDP_REUSEADDR) {
563 err = uv__set_reuse(fd);
564 if (err)
565 return err;
566 }
567
568 if (flags & UV_UDP_IPV6ONLY) {
569#ifdef IPV6_V6ONLY26
570 yes = 1;
571 if (setsockopt(fd, IPPROTO_IPV6IPPROTO_IPV6, IPV6_V6ONLY26, &yes, sizeof yes) == -1) {
572 err = UV__ERR(errno)(-((*__errno_location ())));
573 return err;
574 }
575#else
576 err = UV_ENOTSUP;
577 return err;
578#endif
579 }
580
581 if (bind(fd, addr, addrlen)) {
582 err = UV__ERR(errno)(-((*__errno_location ())));
583 if (errno(*__errno_location ()) == EAFNOSUPPORT97)
584 /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a
585 * socket created with AF_INET to an AF_INET6 address or vice versa. */
586 err = UV_EINVAL;
587 return err;
588 }
589
590 if (addr->sa_family == AF_INET610)
591 handle->flags |= UV_HANDLE_IPV6;
592
593 handle->flags |= UV_HANDLE_BOUND;
594 return 0;
595}
596
597
598static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
599 int domain,
600 unsigned int flags) {
601 union uv__sockaddr taddr;
602 socklen_t addrlen;
603
604 if (handle->io_watcher.fd != -1)
605 return 0;
606
607 switch (domain) {
608 case AF_INET2:
609 {
610 struct sockaddr_in* addr = &taddr.in;
611 memset(addr, 0, sizeof *addr);
612 addr->sin_family = AF_INET2;
613 addr->sin_addr.s_addr = INADDR_ANY((in_addr_t) 0x00000000);
614 addrlen = sizeof *addr;
615 break;
616 }
617 case AF_INET610:
618 {
619 struct sockaddr_in6* addr = &taddr.in6;
620 memset(addr, 0, sizeof *addr);
621 addr->sin6_family = AF_INET610;
622 addr->sin6_addr = in6addr_any;
623 addrlen = sizeof *addr;
624 break;
625 }
626 default:
627 assert(0 && "unsupported address family")((void) sizeof ((0 && "unsupported address family") ?
1 : 0), __extension__ ({ if (0 && "unsupported address family"
) ; else __assert_fail ("0 && \"unsupported address family\""
, "../deps/uv/src/unix/udp.c", 627, __extension__ __PRETTY_FUNCTION__
); }))
;
628 abort();
629 }
630
631 return uv__udp_bind(handle, &taddr.addr, addrlen, flags);
632}
633
634
635int uv__udp_connect(uv_udp_t* handle,
636 const struct sockaddr* addr,
637 unsigned int addrlen) {
638 int err;
639
640 err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
641 if (err)
642 return err;
643
644 do {
645 errno(*__errno_location ()) = 0;
646 err = connect(handle->io_watcher.fd, addr, addrlen);
647 } while (err == -1 && errno(*__errno_location ()) == EINTR4);
648
649 if (err)
650 return UV__ERR(errno)(-((*__errno_location ())));
651
652 handle->flags |= UV_HANDLE_UDP_CONNECTED;
653
654 return 0;
655}
656
/* From https://pubs.opengroup.org/onlinepubs/9699919799/functions/connect.html
 * Disconnecting a UDP socket should behave the same on every UNIX that libuv
 * supports, but the kernel implementations differ. The pseudocode below
 * describes the observed disconnect behaviors.
 *
 * Predefined stubs for the pseudocode:
 * 1. sodisconnect: the function that performs the real udp disconnect
 * 2. pru_connect: the function that performs the real udp connect
 * 3. so: the kernel object matching the socket fd
 * 4. addr: the sockaddr parameter from user space
 *
 * BSDs:
 * if (sodisconnect(so) == 0) { // udp disconnect succeeded
 *   if (addr->sa_len != so->addr->sa_len) return EINVAL;
 *   if (addr->sa_family != so->addr->sa_family) return EAFNOSUPPORT;
 *   pru_connect(so);
 * }
 * else return EISCONN;
 *
 * macOS, other BSDs and SunOS fail with EAFNOSUPPORT in some of these cases.
 *
 * z/OS (same as Windows):
 * if (addr->sa_len < so->addr->sa_len) return EINVAL;
 * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
 *
 * AIX:
 * if (addr->sa_len != sizeof(struct sockaddr)) return EINVAL; // ignores the ip proto version
 * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
 *
 * Linux, others:
 * if (addr->sa_len < sizeof(struct sockaddr)) return EINVAL;
 * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
 */
688int uv__udp_disconnect(uv_udp_t* handle) {
689 int r;
690#if defined(__MVS__)
691 struct sockaddr_storage addr;
692#else
693 struct sockaddr addr;
694#endif
695
696 memset(&addr, 0, sizeof(addr));
697
698#if defined(__MVS__)
699 addr.ss_family = AF_UNSPEC0;
700#else
701 addr.sa_family = AF_UNSPEC0;
702#endif
703
704 do {
705 errno(*__errno_location ()) = 0;
706 r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
707 } while (r == -1 && errno(*__errno_location ()) == EINTR4);
708
709 if (r == -1) {
710#if defined(BSD) /* The macro BSD is from sys/param.h */
711 if (errno(*__errno_location ()) != EAFNOSUPPORT97 && errno(*__errno_location ()) != EINVAL22)
712 return UV__ERR(errno)(-((*__errno_location ())));
713#else
714 return UV__ERR(errno)(-((*__errno_location ())));
715#endif
716 }
717
718 handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
719 return 0;
720}
721
722int uv__udp_send(uv_udp_send_t* req,
723 uv_udp_t* handle,
724 const uv_buf_t bufs[],
725 unsigned int nbufs,
726 const struct sockaddr* addr,
727 unsigned int addrlen,
728 uv_udp_send_cb send_cb) {
729 int err;
730 int empty_queue;
731
732 assert(nbufs > 0)((void) sizeof ((nbufs > 0) ? 1 : 0), __extension__ ({ if (
nbufs > 0) ; else __assert_fail ("nbufs > 0", "../deps/uv/src/unix/udp.c"
, 732, __extension__ __PRETTY_FUNCTION__); }))
;
733
734 if (addr) {
735 err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
736 if (err)
737 return err;
738 }
739
740 /* It's legal for send_queue_count > 0 even when the write_queue is empty;
741 * it means there are error-state requests in the write_completed_queue that
742 * will touch up send_queue_size/count later.
743 */
744 empty_queue = (handle->send_queue_count == 0);
745
746 uv__req_init(handle->loop, req, UV_UDP_SEND)do { do { (req)->type = (UV_UDP_SEND); } while (0); do { (
handle->loop)->active_reqs.count++; } while (0); } while
(0)
;
747 assert(addrlen <= sizeof(req->addr))((void) sizeof ((addrlen <= sizeof(req->addr)) ? 1 : 0)
, __extension__ ({ if (addrlen <= sizeof(req->addr)) ; else
__assert_fail ("addrlen <= sizeof(req->addr)", "../deps/uv/src/unix/udp.c"
, 747, __extension__ __PRETTY_FUNCTION__); }))
;
748 if (addr == NULL((void*)0))
749 req->addr.ss_family = AF_UNSPEC0;
750 else
751 memcpy(&req->addr, addr, addrlen);
752 req->send_cb = send_cb;
753 req->handle = handle;
754 req->nbufs = nbufs;
755
756 req->bufs = req->bufsml;
757 if (nbufs > ARRAY_SIZE(req->bufsml)(sizeof(req->bufsml) / sizeof((req->bufsml)[0])))
758 req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));
759
760 if (req->bufs == NULL((void*)0)) {
761 uv__req_unregister(handle->loop, req)do { ((void) sizeof ((((handle->loop)->active_reqs.count
> 0)) ? 1 : 0), __extension__ ({ if (((handle->loop)->
active_reqs.count > 0)) ; else __assert_fail ("uv__has_active_reqs(handle->loop)"
, "../deps/uv/src/unix/udp.c", 761, __extension__ __PRETTY_FUNCTION__
); })); (handle->loop)->active_reqs.count--; } while (0
)
;
762 return UV_ENOMEM;
763 }
764
765 memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
766 handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
767 handle->send_queue_count++;
768 QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue)do { (*(QUEUE **) &((*(&req->queue))[0])) = (&
handle->write_queue); (*(QUEUE **) &((*(&req->queue
))[1])) = (*(QUEUE **) &((*(&handle->write_queue))
[1])); ((*(QUEUE **) &((*((*(QUEUE **) &((*(&req->
queue))[1]))))[0]))) = (&req->queue); (*(QUEUE **) &
((*(&handle->write_queue))[1])) = (&req->queue)
; } while (0)
;
769 uv__handle_start(handle)do { if (((handle)->flags & UV_HANDLE_ACTIVE) != 0) break
; (handle)->flags |= UV_HANDLE_ACTIVE; if (((handle)->flags
& UV_HANDLE_REF) != 0) do { (handle)->loop->active_handles
++; } while (0); } while (0)
;
770
771 if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
772 uv__udp_sendmsg(handle);
773
774 /* `uv__udp_sendmsg` may not be able to do non-blocking write straight
775 * away. In such cases the `io_watcher` has to be queued for asynchronous
776 * write.
777 */
778 if (!QUEUE_EMPTY(&handle->write_queue)((const QUEUE *) (&handle->write_queue) == (const QUEUE
*) (*(QUEUE **) &((*(&handle->write_queue))[0])))
)
779 uv__io_start(handle->loop, &handle->io_watcher, POLLOUT0x004);
780 } else {
781 uv__io_start(handle->loop, &handle->io_watcher, POLLOUT0x004);
782 }
783
784 return 0;
785}
786
787
788int uv__udp_try_send(uv_udp_t* handle,
789 const uv_buf_t bufs[],
790 unsigned int nbufs,
791 const struct sockaddr* addr,
792 unsigned int addrlen) {
793 int err;
794 struct msghdr h;
795 ssize_t size;
796
797 assert(nbufs > 0)((void) sizeof ((nbufs > 0) ? 1 : 0), __extension__ ({ if (
nbufs > 0) ; else __assert_fail ("nbufs > 0", "../deps/uv/src/unix/udp.c"
, 797, __extension__ __PRETTY_FUNCTION__); }))
;
798
799 /* already sending a message */
800 if (handle->send_queue_count != 0)
801 return UV_EAGAIN;
802
803 if (addr) {
804 err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
805 if (err)
806 return err;
807 } else {
808 assert(handle->flags & UV_HANDLE_UDP_CONNECTED)((void) sizeof ((handle->flags & UV_HANDLE_UDP_CONNECTED
) ? 1 : 0), __extension__ ({ if (handle->flags & UV_HANDLE_UDP_CONNECTED
) ; else __assert_fail ("handle->flags & UV_HANDLE_UDP_CONNECTED"
, "../deps/uv/src/unix/udp.c", 808, __extension__ __PRETTY_FUNCTION__
); }))
;
809 }
810
811 memset(&h, 0, sizeof h);
812 h.msg_name = (struct sockaddr*) addr;
813 h.msg_namelen = addrlen;
814 h.msg_iov = (struct iovec*) bufs;
815 h.msg_iovlen = nbufs;
816
817 do {
818 size = sendmsg(handle->io_watcher.fd, &h, 0);
819 } while (size == -1 && errno(*__errno_location ()) == EINTR4);
820
821 if (size == -1) {
822 if (errno(*__errno_location ()) == EAGAIN11 || errno(*__errno_location ()) == EWOULDBLOCK11 || errno(*__errno_location ()) == ENOBUFS105)
823 return UV_EAGAIN;
824 else
825 return UV__ERR(errno)(-((*__errno_location ())));
826 }
827
828 return size;
829}
830
831
832static int uv__udp_set_membership4(uv_udp_t* handle,
833 const struct sockaddr_in* multicast_addr,
834 const char* interface_addr,
835 uv_membership membership) {
836 struct ip_mreq mreq;
837 int optname;
838 int err;
839
840 memset(&mreq, 0, sizeof mreq);
841
842 if (interface_addr) {
843 err = uv_inet_pton(AF_INET2, interface_addr, &mreq.imr_interface.s_addr);
844 if (err)
845 return err;
846 } else {
847 mreq.imr_interface.s_addr = htonl(INADDR_ANY)__bswap_32 (((in_addr_t) 0x00000000));
848 }
849
850 mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
851
852 switch (membership) {
853 case UV_JOIN_GROUP:
854 optname = IP_ADD_MEMBERSHIP35;
855 break;
856 case UV_LEAVE_GROUP:
857 optname = IP_DROP_MEMBERSHIP36;
858 break;
859 default:
860 return UV_EINVAL;
861 }
862
863 if (setsockopt(handle->io_watcher.fd,
864 IPPROTO_IPIPPROTO_IP,
865 optname,
866 &mreq,
867 sizeof(mreq))) {
868#if defined(__MVS__)
869 if (errno(*__errno_location ()) == ENXIO6)
870 return UV_ENODEV;
871#endif
872 return UV__ERR(errno)(-((*__errno_location ())));
873 }
874
875 return 0;
876}
877
878
879static int uv__udp_set_membership6(uv_udp_t* handle,
880 const struct sockaddr_in6* multicast_addr,
881 const char* interface_addr,
882 uv_membership membership) {
883 int optname;
884 struct ipv6_mreq mreq;
885 struct sockaddr_in6 addr6;
886
887 memset(&mreq, 0, sizeof mreq);
888
889 if (interface_addr) {
890 if (uv_ip6_addr(interface_addr, 0, &addr6))
891 return UV_EINVAL;
892 mreq.ipv6mr_interface = addr6.sin6_scope_id;
893 } else {
894 mreq.ipv6mr_interface = 0;
895 }
896
897 mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;
898
899 switch (membership) {
900 case UV_JOIN_GROUP:
901 optname = IPV6_ADD_MEMBERSHIP20;
902 break;
903 case UV_LEAVE_GROUP:
904 optname = IPV6_DROP_MEMBERSHIP21;
905 break;
906 default:
907 return UV_EINVAL;
908 }
909
910 if (setsockopt(handle->io_watcher.fd,
911 IPPROTO_IPV6IPPROTO_IPV6,
912 optname,
913 &mreq,
914 sizeof(mreq))) {
915#if defined(__MVS__)
916 if (errno(*__errno_location ()) == ENXIO6)
917 return UV_ENODEV;
918#endif
919 return UV__ERR(errno)(-((*__errno_location ())));
920 }
921
922 return 0;
923}
924
925
926#if !defined(__OpenBSD__) && \
927 !defined(__NetBSD__) && \
928 !defined(__ANDROID__) && \
929 !defined(__DragonFly__) && \
930 !defined(__QNX__)
931static int uv__udp_set_source_membership4(uv_udp_t* handle,
932 const struct sockaddr_in* multicast_addr,
933 const char* interface_addr,
934 const struct sockaddr_in* source_addr,
935 uv_membership membership) {
936 struct ip_mreq_source mreq;
937 int optname;
938 int err;
939
940 err = uv__udp_maybe_deferred_bind(handle, AF_INET2, UV_UDP_REUSEADDR);
941 if (err)
942 return err;
943
944 memset(&mreq, 0, sizeof(mreq));
945
946 if (interface_addr != NULL((void*)0)) {
947 err = uv_inet_pton(AF_INET2, interface_addr, &mreq.imr_interface.s_addr);
948 if (err)
949 return err;
950 } else {
951 mreq.imr_interface.s_addr = htonl(INADDR_ANY)__bswap_32 (((in_addr_t) 0x00000000));
952 }
953
954 mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
955 mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;
956
957 if (membership == UV_JOIN_GROUP)
958 optname = IP_ADD_SOURCE_MEMBERSHIP39;
959 else if (membership == UV_LEAVE_GROUP)
960 optname = IP_DROP_SOURCE_MEMBERSHIP40;
961 else
962 return UV_EINVAL;
963
964 if (setsockopt(handle->io_watcher.fd,
965 IPPROTO_IPIPPROTO_IP,
966 optname,
967 &mreq,
968 sizeof(mreq))) {
969 return UV__ERR(errno)(-((*__errno_location ())));
970 }
971
972 return 0;
973}
974
975
976static int uv__udp_set_source_membership6(uv_udp_t* handle,
977 const struct sockaddr_in6* multicast_addr,
978 const char* interface_addr,
979 const struct sockaddr_in6* source_addr,
980 uv_membership membership) {
981 struct group_source_req mreq;
982 struct sockaddr_in6 addr6;
983 int optname;
984 int err;
985
986 err = uv__udp_maybe_deferred_bind(handle, AF_INET610, UV_UDP_REUSEADDR);
987 if (err)
988 return err;
989
990 memset(&mreq, 0, sizeof(mreq));
991
992 if (interface_addr != NULL((void*)0)) {
993 err = uv_ip6_addr(interface_addr, 0, &addr6);
994 if (err)
995 return err;
996 mreq.gsr_interface = addr6.sin6_scope_id;
997 } else {
998 mreq.gsr_interface = 0;
999 }
1000
1001 STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr))void uv__static_assert(int static_assert_failed[1 - 2 * !(sizeof
(mreq.gsr_group) >= sizeof(*multicast_addr))])
;
1002 STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr))void uv__static_assert(int static_assert_failed[1 - 2 * !(sizeof
(mreq.gsr_source) >= sizeof(*source_addr))])
;
1003 memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
1004 memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));
1005
1006 if (membership == UV_JOIN_GROUP)
1007 optname = MCAST_JOIN_SOURCE_GROUP46;
1008 else if (membership == UV_LEAVE_GROUP)
1009 optname = MCAST_LEAVE_SOURCE_GROUP47;
1010 else
1011 return UV_EINVAL;
1012
1013 if (setsockopt(handle->io_watcher.fd,
1014 IPPROTO_IPV6IPPROTO_IPV6,
1015 optname,
1016 &mreq,
1017 sizeof(mreq))) {
1018 return UV__ERR(errno)(-((*__errno_location ())));
1019 }
1020
1021 return 0;
1022}
1023#endif
1024
1025
1026int uv__udp_init_ex(uv_loop_t* loop,
1027 uv_udp_t* handle,
1028 unsigned flags,
1029 int domain) {
1030 int fd;
1031
1032 fd = -1;
1033 if (domain != AF_UNSPEC0) {
1034 fd = uv__socket(domain, SOCK_DGRAMSOCK_DGRAM, 0);
1035 if (fd < 0)
1036 return fd;
1037 }
1038
1039 uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP)do { ((uv_handle_t*)handle)->loop = (loop); ((uv_handle_t*
)handle)->type = (UV_UDP); ((uv_handle_t*)handle)->flags
= UV_HANDLE_REF; do { (*(QUEUE **) &((*(&((uv_handle_t
*)handle)->handle_queue))[0])) = (&(loop)->handle_queue
); (*(QUEUE **) &((*(&((uv_handle_t*)handle)->handle_queue
))[1])) = (*(QUEUE **) &((*(&(loop)->handle_queue)
)[1])); ((*(QUEUE **) &((*((*(QUEUE **) &((*(&((uv_handle_t
*)handle)->handle_queue))[1]))))[0]))) = (&((uv_handle_t
*)handle)->handle_queue); (*(QUEUE **) &((*(&(loop
)->handle_queue))[1])) = (&((uv_handle_t*)handle)->
handle_queue); } while (0); (((uv_handle_t*)handle)->next_closing
= ((void*)0)); } while (0)
;
1040 handle->alloc_cb = NULL((void*)0);
1041 handle->recv_cb = NULL((void*)0);
1042 handle->send_queue_size = 0;
1043 handle->send_queue_count = 0;
1044 uv__io_init(&handle->io_watcher, uv__udp_io, fd);
1045 QUEUE_INIT(&handle->write_queue)do { (*(QUEUE **) &((*(&handle->write_queue))[0]))
= (&handle->write_queue); (*(QUEUE **) &((*(&
handle->write_queue))[1])) = (&handle->write_queue)
; } while (0)
;
1046 QUEUE_INIT(&handle->write_completed_queue)do { (*(QUEUE **) &((*(&handle->write_completed_queue
))[0])) = (&handle->write_completed_queue); (*(QUEUE *
*) &((*(&handle->write_completed_queue))[1])) = (&
handle->write_completed_queue); } while (0)
;
1047
1048 return 0;
1049}
1050
1051
1052int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
1053#if HAVE_MMSG1
1054 if (handle->flags & UV_HANDLE_UDP_RECVMMSG) {
1055 uv_once(&once, uv__udp_mmsg_init);
1056 return uv__recvmmsg_avail;
1057 }
1058#endif
1059 return 0;
1060}
1061
1062
1063int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
1064 int err;
1065
1066 /* Check for already active socket. */
1067 if (handle->io_watcher.fd != -1)
1068 return UV_EBUSY;
1069
1070 if (uv__fd_exists(handle->loop, sock))
1071 return UV_EEXIST;
1072
1073 err = uv__nonblockuv__nonblock_ioctl(sock, 1);
1074 if (err)
1075 return err;
1076
1077 err = uv__set_reuse(sock);
1078 if (err)
1079 return err;
1080
1081 handle->io_watcher.fd = sock;
1082 if (uv__udp_is_connected(handle))
1083 handle->flags |= UV_HANDLE_UDP_CONNECTED;
1084
1085 return 0;
1086}
1087
1088
1089int uv_udp_set_membership(uv_udp_t* handle,
1090 const char* multicast_addr,
1091 const char* interface_addr,
1092 uv_membership membership) {
1093 int err;
1094 struct sockaddr_in addr4;
1095 struct sockaddr_in6 addr6;
1096
1097 if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) {
1098 err = uv__udp_maybe_deferred_bind(handle, AF_INET2, UV_UDP_REUSEADDR);
1099 if (err)
1100 return err;
1101 return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
1102 } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) {
1103 err = uv__udp_maybe_deferred_bind(handle, AF_INET610, UV_UDP_REUSEADDR);
1104 if (err)
1105 return err;
1106 return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
1107 } else {
1108 return UV_EINVAL;
1109 }
1110}
1111
1112
1113int uv_udp_set_source_membership(uv_udp_t* handle,
1114 const char* multicast_addr,
1115 const char* interface_addr,
1116 const char* source_addr,
1117 uv_membership membership) {
1118#if !defined(__OpenBSD__) && \
1119 !defined(__NetBSD__) && \
1120 !defined(__ANDROID__) && \
1121 !defined(__DragonFly__) && \
1122 !defined(__QNX__)
1123 int err;
1124 union uv__sockaddr mcast_addr;
1125 union uv__sockaddr src_addr;
1126
1127 err = uv_ip4_addr(multicast_addr, 0, &mcast_addr.in);
1128 if (err) {
1129 err = uv_ip6_addr(multicast_addr, 0, &mcast_addr.in6);
1130 if (err)
1131 return err;
1132 err = uv_ip6_addr(source_addr, 0, &src_addr.in6);
1133 if (err)
1134 return err;
1135 return uv__udp_set_source_membership6(handle,
1136 &mcast_addr.in6,
1137 interface_addr,
1138 &src_addr.in6,
1139 membership);
1140 }
1141
1142 err = uv_ip4_addr(source_addr, 0, &src_addr.in);
1143 if (err)
1144 return err;
1145 return uv__udp_set_source_membership4(handle,
1146 &mcast_addr.in,
1147 interface_addr,
1148 &src_addr.in,
1149 membership);
1150#else
1151 return UV_ENOSYS;
1152#endif
1153}
1154
1155
1156static int uv__setsockopt(uv_udp_t* handle,
1157 int option4,
1158 int option6,
1159 const void* val,
1160 socklen_t size) {
1161 int r;
1162
1163 if (handle->flags & UV_HANDLE_IPV6)
1164 r = setsockopt(handle->io_watcher.fd,
1165 IPPROTO_IPV6IPPROTO_IPV6,
1166 option6,
1167 val,
1168 size);
1169 else
1170 r = setsockopt(handle->io_watcher.fd,
1171 IPPROTO_IPIPPROTO_IP,
1172 option4,
1173 val,
1174 size);
1175 if (r)
1176 return UV__ERR(errno)(-((*__errno_location ())));
1177
1178 return 0;
1179}
1180
1181static int uv__setsockopt_maybe_char(uv_udp_t* handle,
1182 int option4,
1183 int option6,
1184 int val) {
1185#if defined(__sun) || defined(_AIX) || defined(__MVS__)
1186 char arg = val;
1187#elif defined(__OpenBSD__)
1188 unsigned char arg = val;
1189#else
1190 int arg = val;
1191#endif
1192
1193 if (val < 0 || val > 255)
1194 return UV_EINVAL;
1195
1196 return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
1197}
1198
1199
1200int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
1201 if (setsockopt(handle->io_watcher.fd,
1202 SOL_SOCKET1,
1203 SO_BROADCAST6,
1204 &on,
1205 sizeof(on))) {
1206 return UV__ERR(errno)(-((*__errno_location ())));
1207 }
1208
1209 return 0;
1210}
1211
1212
1213int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
1214 if (ttl < 1 || ttl > 255)
1215 return UV_EINVAL;
1216
1217#if defined(__MVS__)
1218 if (!(handle->flags & UV_HANDLE_IPV6))
1219 return UV_ENOTSUP; /* zOS does not support setting ttl for IPv4 */
1220#endif
1221
1222/*
1223 * On Solaris and derivatives such as SmartOS, the length of socket options
1224 * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS,
1225 * so hardcode the size of these options on this platform,
1226 * and use the general uv__setsockopt_maybe_char call on other platforms.
1227 */
1228#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1229 defined(__MVS__) || defined(__QNX__)
1230
1231 return uv__setsockopt(handle,
1232 IP_TTL2,
1233 IPV6_UNICAST_HOPS16,
1234 &ttl,
1235 sizeof(ttl));
1236
1237#else /* !(defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
1238 defined(__MVS__) || defined(__QNX__)) */
1239
1240 return uv__setsockopt_maybe_char(handle,
1241 IP_TTL2,
1242 IPV6_UNICAST_HOPS16,
1243 ttl);
1244
1245#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
1246 defined(__MVS__) || defined(__QNX__) */
1247}
1248
1249
1250int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
1251/*
1252 * On Solaris and derivatives such as SmartOS, the length of socket options
1253 * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
1254 * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
1255 * and use the general uv__setsockopt_maybe_char call otherwise.
1256 */
1257#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1258 defined(__MVS__) || defined(__QNX__)
1259 if (handle->flags & UV_HANDLE_IPV6)
1260 return uv__setsockopt(handle,
1261 IP_MULTICAST_TTL33,
1262 IPV6_MULTICAST_HOPS18,
1263 &ttl,
1264 sizeof(ttl));
1265#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1266 defined(__MVS__) || defined(__QNX__) */
1267
1268 return uv__setsockopt_maybe_char(handle,
1269 IP_MULTICAST_TTL33,
1270 IPV6_MULTICAST_HOPS18,
1271 ttl);
1272}
1273
1274
1275int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
1276/*
1277 * On Solaris and derivatives such as SmartOS, the length of socket options
1278 * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
1279 * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
1280 * and use the general uv__setsockopt_maybe_char call otherwise.
1281 */
1282#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1283 defined(__MVS__) || defined(__QNX__)
1284 if (handle->flags & UV_HANDLE_IPV6)
1285 return uv__setsockopt(handle,
1286 IP_MULTICAST_LOOP34,
1287 IPV6_MULTICAST_LOOP19,
1288 &on,
1289 sizeof(on));
1290#endif /* defined(__sun) || defined(_AIX) ||defined(__OpenBSD__) ||
1291 defined(__MVS__) || defined(__QNX__) */
1292
1293 return uv__setsockopt_maybe_char(handle,
1294 IP_MULTICAST_LOOP34,
1295 IPV6_MULTICAST_LOOP19,
1296 on);
1297}
1298
1299int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
1300 struct sockaddr_storage addr_st;
1301 struct sockaddr_in* addr4;
1302 struct sockaddr_in6* addr6;
1303
1304 addr4 = (struct sockaddr_in*) &addr_st;
1305 addr6 = (struct sockaddr_in6*) &addr_st;
1306
1307 if (!interface_addr) {
1308 memset(&addr_st, 0, sizeof addr_st);
1309 if (handle->flags & UV_HANDLE_IPV6) {
1310 addr_st.ss_family = AF_INET610;
1311 addr6->sin6_scope_id = 0;
1312 } else {
1313 addr_st.ss_family = AF_INET2;
1314 addr4->sin_addr.s_addr = htonl(INADDR_ANY)__bswap_32 (((in_addr_t) 0x00000000));
1315 }
1316 } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
1317 /* nothing, address was parsed */
1318 } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
1319 /* nothing, address was parsed */
1320 } else {
1321 return UV_EINVAL;
1322 }
1323
1324 if (addr_st.ss_family == AF_INET2) {
1325 if (setsockopt(handle->io_watcher.fd,
1326 IPPROTO_IPIPPROTO_IP,
1327 IP_MULTICAST_IF32,
1328 (void*) &addr4->sin_addr,
1329 sizeof(addr4->sin_addr)) == -1) {
1330 return UV__ERR(errno)(-((*__errno_location ())));
1331 }
1332 } else if (addr_st.ss_family == AF_INET610) {
1333 if (setsockopt(handle->io_watcher.fd,
1334 IPPROTO_IPV6IPPROTO_IPV6,
1335 IPV6_MULTICAST_IF17,
1336 &addr6->sin6_scope_id,
1337 sizeof(addr6->sin6_scope_id)) == -1) {
1338 return UV__ERR(errno)(-((*__errno_location ())));
1339 }
1340 } else {
1341 assert(0 && "unexpected address family")((void) sizeof ((0 && "unexpected address family") ? 1
: 0), __extension__ ({ if (0 && "unexpected address family"
) ; else __assert_fail ("0 && \"unexpected address family\""
, "../deps/uv/src/unix/udp.c", 1341, __extension__ __PRETTY_FUNCTION__
); }))
;
1342 abort();
1343 }
1344
1345 return 0;
1346}
1347
1348int uv_udp_getpeername(const uv_udp_t* handle,
1349 struct sockaddr* name,
1350 int* namelen) {
1351
1352 return uv__getsockpeername((const uv_handle_t*) handle,
1353 getpeername,
1354 name,
1355 namelen);
1356}
1357
1358int uv_udp_getsockname(const uv_udp_t* handle,
1359 struct sockaddr* name,
1360 int* namelen) {
1361
1362 return uv__getsockpeername((const uv_handle_t*) handle,
1363 getsockname,
1364 name,
1365 namelen);
1366}
1367
1368
1369int uv__udp_recv_start(uv_udp_t* handle,
1370 uv_alloc_cb alloc_cb,
1371 uv_udp_recv_cb recv_cb) {
1372 int err;
1373
1374 if (alloc_cb == NULL((void*)0) || recv_cb == NULL((void*)0))
1375 return UV_EINVAL;
1376
1377 if (uv__io_active(&handle->io_watcher, POLLIN0x001))
1378 return UV_EALREADY; /* FIXME(bnoordhuis) Should be UV_EBUSY. */
1379
1380 err = uv__udp_maybe_deferred_bind(handle, AF_INET2, 0);
1381 if (err)
1382 return err;
1383
1384 handle->alloc_cb = alloc_cb;
1385 handle->recv_cb = recv_cb;
1386
1387 uv__io_start(handle->loop, &handle->io_watcher, POLLIN0x001);
1388 uv__handle_start(handle)do { if (((handle)->flags & UV_HANDLE_ACTIVE) != 0) break
; (handle)->flags |= UV_HANDLE_ACTIVE; if (((handle)->flags
& UV_HANDLE_REF) != 0) do { (handle)->loop->active_handles
++; } while (0); } while (0)
;
1389
1390 return 0;
1391}
1392
1393
1394int uv__udp_recv_stop(uv_udp_t* handle) {
1395 uv__io_stop(handle->loop, &handle->io_watcher, POLLIN0x001);
1396
1397 if (!uv__io_active(&handle->io_watcher, POLLOUT0x004))
1398 uv__handle_stop(handle)do { if (((handle)->flags & UV_HANDLE_ACTIVE) == 0) break
; (handle)->flags &= ~UV_HANDLE_ACTIVE; if (((handle)->
flags & UV_HANDLE_REF) != 0) do { (handle)->loop->active_handles
--; } while (0); } while (0)
;
1399
1400 handle->alloc_cb = NULL((void*)0);
1401 handle->recv_cb = NULL((void*)0);
1402
1403 return 0;
1404}