BadVPN – Blame information for rev 1

Subversion Repositories:
Rev:
Rev Author Line No. Line
1 office 1 /**
2 * @file
3 * Sockets BSD-Like API module
4 *
5 * @defgroup socket Socket API
6 * @ingroup sequential_api
7 * BSD-style socket API.\n
8 * Thread-safe, to be called from non-TCPIP threads only.\n
9 * Can be activated by defining @ref LWIP_SOCKET to 1.\n
 * Header is in posix/sys/socket.h\n
11 */
12  
13 /*
14 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without modification,
18 * are permitted provided that the following conditions are met:
19 *
20 * 1. Redistributions of source code must retain the above copyright notice,
21 * this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 * this list of conditions and the following disclaimer in the documentation
24 * and/or other materials provided with the distribution.
25 * 3. The name of the author may not be used to endorse or promote products
26 * derived from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
29 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
30 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
31 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
33 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
37 * OF SUCH DAMAGE.
38 *
39 * This file is part of the lwIP TCP/IP stack.
40 *
41 * Author: Adam Dunkels <adam@sics.se>
42 *
43 * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
44 *
45 */
46  
47 #include "lwip/opt.h"
48  
49 #if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
50  
51 #include "lwip/sockets.h"
52 #include "lwip/priv/sockets_priv.h"
53 #include "lwip/api.h"
54 #include "lwip/igmp.h"
55 #include "lwip/inet.h"
56 #include "lwip/tcp.h"
57 #include "lwip/raw.h"
58 #include "lwip/udp.h"
59 #include "lwip/memp.h"
60 #include "lwip/pbuf.h"
61 #include "lwip/netif.h"
62 #include "lwip/priv/tcpip_priv.h"
63 #include "lwip/mld6.h"
64 #if LWIP_CHECKSUM_ON_COPY
65 #include "lwip/inet_chksum.h"
66 #endif
67  
68 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
69 #include <stdarg.h>
70 #endif
71  
72 #include <string.h>
73  
74 /* If the netconn API is not required publicly, then we include the necessary
75 files here to get the implementation */
76 #if !LWIP_NETCONN
77 #undef LWIP_NETCONN
78 #define LWIP_NETCONN 1
79 #include "api_msg.c"
80 #include "api_lib.c"
81 #include "netbuf.c"
82 #undef LWIP_NETCONN
83 #define LWIP_NETCONN 0
84 #endif
85  
86 #define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name)
87 #define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name)
88 #define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
89 #define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name)
90  
91 #if LWIP_IPV4
92 #define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
93 (sin)->sin_len = sizeof(struct sockaddr_in); \
94 (sin)->sin_family = AF_INET; \
95 (sin)->sin_port = lwip_htons((port)); \
96 inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
97 memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
98 #define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
99 inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
100 (port) = lwip_ntohs((sin)->sin_port); }while(0)
101 #endif /* LWIP_IPV4 */
102  
103 #if LWIP_IPV6
104 #define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
105 (sin6)->sin6_len = sizeof(struct sockaddr_in6); \
106 (sin6)->sin6_family = AF_INET6; \
107 (sin6)->sin6_port = lwip_htons((port)); \
108 (sin6)->sin6_flowinfo = 0; \
109 inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
110 (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
111 #define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
112 inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
113 if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
114 ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
115 } \
116 (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
117 #endif /* LWIP_IPV6 */
118  
119 #if LWIP_IPV4 && LWIP_IPV6
120 static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
121  
122 #define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
123 ((namelen) == sizeof(struct sockaddr_in6)))
124 #define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
125 ((name)->sa_family == AF_INET6))
126 #define SOCK_ADDR_TYPE_MATCH(name, sock) \
127 ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
128 (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
129 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
130 if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
131 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
132 } else { \
133 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
134 } } while(0)
135 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
136 #define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
137 (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
138 #elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
139 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
140 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
141 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
142 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
143 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
144 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
145 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
146 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
147 #else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
148 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
149 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
150 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
151 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
152 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
153 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
154 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
155 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
156 #endif /* LWIP_IPV6 */
157  
158 #define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
159 IS_SOCK_ADDR_TYPE_VALID(name))
160 #define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
161 SOCK_ADDR_TYPE_MATCH(name, sock))
162 #define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0)
163  
164  
165 #define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0)
166 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
167 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
168 if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0)
169 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
170 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
171 if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0)
172 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
173 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
174 if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0)
175  
176  
177 #define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
178 #define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
179 #define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
180 #if LWIP_MPU_COMPATIBLE
181 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
182 name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
183 if (name == NULL) { \
184 sock_set_errno(sock, ENOMEM); \
185 done_socket(sock); \
186 return -1; \
187 } }while(0)
188 #else /* LWIP_MPU_COMPATIBLE */
189 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
190 #endif /* LWIP_MPU_COMPATIBLE */
191  
192 #if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
193 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
194 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
195 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((long)*(const int*)(optval))
196 #else
197 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
198 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
199 u32_t loc = (val); \
200 ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \
201 ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0)
202 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000))
203 #endif
204  
205  
206 /** A struct sockaddr replacement that has the same alignment as sockaddr_in/
207 * sockaddr_in6 if instantiated.
208 */
209 union sockaddr_aligned {
210 struct sockaddr sa;
211 #if LWIP_IPV6
212 struct sockaddr_in6 sin6;
213 #endif /* LWIP_IPV6 */
214 #if LWIP_IPV4
215 struct sockaddr_in sin;
216 #endif /* LWIP_IPV4 */
217 };
218  
219 /* Define the number of IPv4 multicast memberships, default is one per socket */
220 #ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
221 #define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
222 #endif
223  
224 #if LWIP_IGMP
225 /* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
226 a socket is closed */
227 struct lwip_socket_multicast_pair {
228 /** the socket */
229 struct lwip_sock *sock;
230 /** the interface address */
231 ip4_addr_t if_addr;
232 /** the group address */
233 ip4_addr_t multi_addr;
234 };
235  
236 struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
237  
238 static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
239 static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
240 static void lwip_socket_drop_registered_memberships(int s);
241 #endif /* LWIP_IGMP */
242  
243 #if LWIP_IPV6_MLD
244 /* This is to keep track of IP_JOIN_GROUP calls to drop the membership when
245 a socket is closed */
246 struct lwip_socket_multicast_mld6_pair {
247 /** the socket */
248 struct lwip_sock *sock;
249 /** the interface index */
250 u8_t if_idx;
251 /** the group address */
252 ip6_addr_t multi_addr;
253 };
254  
255 struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
256  
257 static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
258 static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
259 static void lwip_socket_drop_registered_mld6_memberships(int s);
#endif /* LWIP_IPV6_MLD */
261  
262 /** The global array of available sockets */
263 static struct lwip_sock sockets[NUM_SOCKETS];
264  
265 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
266 #if LWIP_TCPIP_CORE_LOCKING
267 /* protect the select_cb_list using core lock */
268 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev)
269 #define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE()
270 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE()
271 #else /* LWIP_TCPIP_CORE_LOCKING */
272 /* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */
273 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev)
274 #define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev)
275 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev)
276 /** This counter is increased from lwip_select when the list is changed
277 and checked in select_check_waiters to see if it has changed. */
278 static volatile int select_cb_ctr;
279 #endif /* LWIP_TCPIP_CORE_LOCKING */
280 /** The global list of tasks waiting for select */
281 static struct lwip_select_cb *select_cb_list;
282 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
283  
284 #define sock_set_errno(sk, e) do { \
285 const int sockerr = (e); \
286 set_errno(sockerr); \
287 } while (0)
288  
289 /* Forward declaration of some functions */
290 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
291 static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
292 #define DEFAULT_SOCKET_EVENTCB event_callback
293 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent, struct lwip_sock *sock);
294 #else
295 #define DEFAULT_SOCKET_EVENTCB NULL
296 #endif
297 #if !LWIP_TCPIP_CORE_LOCKING
298 static void lwip_getsockopt_callback(void *arg);
299 static void lwip_setsockopt_callback(void *arg);
300 #endif
301 static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
302 static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
303 static void free_socket(struct lwip_sock *sock, int is_tcp);
304  
305 #if LWIP_IPV4 && LWIP_IPV6
306 static void
307 sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port)
308 {
309 if ((sockaddr->sa_family) == AF_INET6) {
310 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port);
311 ipaddr->type = IPADDR_TYPE_V6;
312 } else {
313 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port);
314 ipaddr->type = IPADDR_TYPE_V4;
315 }
316 }
317 #endif /* LWIP_IPV4 && LWIP_IPV6 */
318  
/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore.
 * Simply delegates to the netconn layer's per-thread initialization. */
void
lwip_socket_thread_init(void)
{
  netconn_thread_init();
}
325  
/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore.
 * Simply delegates to the netconn layer's per-thread cleanup. */
void
lwip_socket_thread_cleanup(void)
{
  netconn_thread_cleanup();
}
332  
333 #if LWIP_NETCONN_FULLDUPLEX
/* Thread-safe increment of sock->fd_used, with overflow check.
 * Takes a reference on the socket so that, in full-duplex mode, a concurrent
 * close cannot release the descriptor while another thread still uses it. */
static void
sock_inc_used(struct lwip_sock *sock)
{
  SYS_ARCH_DECL_PROTECT(lev);

  LWIP_ASSERT("sock != NULL", sock != NULL);

  SYS_ARCH_PROTECT(lev);
  ++sock->fd_used;
  /* the incremented count must not have wrapped around to 0 */
  LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
  SYS_ARCH_UNPROTECT(lev);
}
347  
/* In full-duplex mode, sock->fd_used != 0 prevents a socket descriptor from being
 * released (and possibly reused) when used from more than one thread
 * (e.g. read-while-write or close-while-write, etc)
 * This function is called at the end of functions using (try)get_socket*().
 */
static void
done_socket(struct lwip_sock *sock)
{
  SYS_ARCH_DECL_PROTECT(lev);

  LWIP_ASSERT("sock != NULL", sock != NULL);

  SYS_ARCH_PROTECT(lev);
  LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
  if (--sock->fd_used == 0) {
    if (sock->fd_free_pending) {
      /* free the socket: fd_used is set back to 1 because free_socket()
         decrements it again before actually releasing the slot */
      sock->fd_used = 1;
      free_socket(sock, sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP);
    }
  }
  SYS_ARCH_UNPROTECT(lev);
}
371 #else /* LWIP_NETCONN_FULLDUPLEX */
372 #define sock_inc_used(sock)
373 #define done_socket(sock)
374 #endif /* LWIP_NETCONN_FULLDUPLEX */
375  
376 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
377 static struct lwip_sock *
378 tryget_socket_unconn_nouse(int fd)
379 {
380 int s = fd - LWIP_SOCKET_OFFSET;
381 if ((s < 0) || (s >= NUM_SOCKETS)) {
382 LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd));
383 return NULL;
384 }
385 return &sockets[s];
386 }
387  
/** Debug helper: map an external socket index to its internal struct lwip_sock
 * without taking a reference (does not bump fd_used); NULL if out of range. */
struct lwip_sock *
lwip_socket_dbg_get_socket(int fd)
{
  return tryget_socket_unconn_nouse(fd);
}
393  
394 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
395 static struct lwip_sock *
396 tryget_socket_unconn(int fd)
397 {
398 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
399 if (ret != NULL) {
400 sock_inc_used(ret);
401 }
402 return ret;
403 }
404  
405 /**
406 * Same as get_socket but doesn't set errno
407 *
408 * @param fd externally used socket index
409 * @return struct lwip_sock for the socket or NULL if not found
410 */
411 static struct lwip_sock *
412 tryget_socket(int fd)
413 {
414 struct lwip_sock *sock = tryget_socket_unconn(fd);
415 if (sock != NULL) {
416 if (sock->conn) {
417 return sock;
418 }
419 done_socket(sock);
420 }
421 return NULL;
422 }
423  
424 /**
425 * Map a externally used socket index to the internal socket representation.
426 *
427 * @param fd externally used socket index
428 * @return struct lwip_sock for the socket or NULL if not found
429 */
430 static struct lwip_sock *
431 get_socket(int fd)
432 {
433 struct lwip_sock *sock = tryget_socket(fd);
434 if (!sock) {
435 if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) {
436 LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd));
437 }
438 set_errno(EBADF);
439 return NULL;
440 }
441 return sock;
442 }
443  
/**
 * Allocate a new socket for a given netconn.
 *
 * Scans the global 'sockets' array for a free slot; each slot is inspected
 * under SYS_ARCH protection so two threads cannot claim the same slot.
 *
 * @param newconn the netconn for which to allocate a socket
 * @param accepted 1 if socket has been created by accept(),
 * 0 if socket has been created by socket()
 * @return the index of the new socket; -1 on error (all slots in use)
 */
static int
alloc_socket(struct netconn *newconn, int accepted)
{
  int i;
  SYS_ARCH_DECL_PROTECT(lev);
  LWIP_UNUSED_ARG(accepted);

  /* allocate a new socket identifier */
  for (i = 0; i < NUM_SOCKETS; ++i) {
    /* Protect socket array */
    SYS_ARCH_PROTECT(lev);
    if (!sockets[i].conn) {
#if LWIP_NETCONN_FULLDUPLEX
      /* conn may be NULL while another thread still holds a reference
         (fd_used != 0) pending a deferred free: skip such slots */
      if (sockets[i].fd_used) {
        SYS_ARCH_UNPROTECT(lev);
        continue;
      }
      sockets[i].fd_used = 1;
      sockets[i].fd_free_pending = 0;
#endif
      sockets[i].conn = newconn;
      /* The socket is not yet known to anyone, so no need to protect
         after having marked it as used. */
      SYS_ARCH_UNPROTECT(lev);
      sockets[i].lastdata.pbuf = NULL;
#if LWIP_SOCKET_SELECT
      LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0);
      sockets[i].rcvevent = 0;
      /* TCP sendbuf is empty, but the socket is not yet writable until connected
       * (unless it has been created by accept()). */
      sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
      sockets[i].errevent = 0;
#endif /* LWIP_SOCKET_SELECT */
      return i + LWIP_SOCKET_OFFSET;
    }
    SYS_ARCH_UNPROTECT(lev);
  }
  return -1;
}
491  
492 /** Free a socket. The socket's netconn must have been
493 * delete before!
494 *
495 * @param sock the socket to free
496 * @param is_tcp != 0 for TCP sockets, used to free lastdata
497 */
498 static void
499 free_socket(struct lwip_sock *sock, int is_tcp)
500 {
501 union lwip_sock_lastdata lastdata;
502 SYS_ARCH_DECL_PROTECT(lev);
503  
504 /* Protect socket array */
505 SYS_ARCH_PROTECT(lev);
506  
507 #if LWIP_NETCONN_FULLDUPLEX
508 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
509 if (--sock->fd_used > 0) {
510 sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0;
511 SYS_ARCH_UNPROTECT(lev);
512 return;
513 }
514 #endif
515  
516 lastdata = sock->lastdata;
517 sock->lastdata.pbuf = NULL;
518 sock->conn = NULL;
519 SYS_ARCH_UNPROTECT(lev);
520 /* don't use 'sock' after this line, as another task might have allocated it */
521  
522 if (lastdata.pbuf != NULL) {
523 if (is_tcp) {
524 pbuf_free(lastdata.pbuf);
525 } else {
526 netbuf_delete(lastdata.netbuf);
527 }
528 }
529 }
530  
531 /* Below this, the well-known socket functions are implemented.
532 * Use google.com or opengroup.org to get a good description :-)
533 *
534 * Exceptions are documented!
535 */
536  
/** Accept a pending connection on a listening socket.
 * Blocks in netconn_accept() until a connection arrives (or the netconn is
 * non-blocking/closed), allocates a new socket for it, replays any receive
 * events that arrived before the socket existed, and optionally reports the
 * peer address via addr/addrlen.
 *
 * @param s listening socket descriptor
 * @param addr (optional) receives the remote peer address
 * @param addrlen in: size of addr; out: actual address size (must be
 *        non-NULL if addr is non-NULL)
 * @return the new socket descriptor, or -1 on error (errno set)
 */
int
lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
{
  struct lwip_sock *sock, *nsock;
  struct netconn *newconn;
  ip_addr_t naddr;
  u16_t port = 0;
  int newsock;
  err_t err;
  int recvevent;
  SYS_ARCH_DECL_PROTECT(lev);

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  /* wait for a new connection */
  err = netconn_accept(sock->conn, &newconn);
  if (err != ERR_OK) {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_acept failed, err=%d\n", s, err));
    if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
      /* accept() is only defined for connection-oriented (TCP) sockets */
      sock_set_errno(sock, EOPNOTSUPP);
    } else if (err == ERR_CLSD) {
      sock_set_errno(sock, EINVAL);
    } else {
      sock_set_errno(sock, err_to_errno(err));
    }
    done_socket(sock);
    return -1;
  }
  LWIP_ASSERT("newconn != NULL", newconn != NULL);

  newsock = alloc_socket(newconn, 1);
  if (newsock == -1) {
    /* no free socket slot: drop the accepted connection */
    netconn_delete(newconn);
    sock_set_errno(sock, ENFILE);
    done_socket(sock);
    return -1;
  }
  LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
  nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];

  /* See event_callback: If data comes in right away after an accept, even
   * though the server task might not have created a new socket yet.
   * In that case, newconn->socket is counted down (newconn->socket--),
   * so nsock->rcvevent is >= 1 here!
   */
  SYS_ARCH_PROTECT(lev);
  recvevent = (s16_t)(-1 - newconn->socket);
  newconn->socket = newsock;
  SYS_ARCH_UNPROTECT(lev);

  if (newconn->callback) {
    /* replay the receive events that were counted before the socket existed */
    LOCK_TCPIP_CORE();
    while (recvevent > 0) {
      recvevent--;
      newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
    }
    UNLOCK_TCPIP_CORE();
  }

  /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
   * not be NULL if addr is valid.
   */
  if ((addr != NULL) && (addrlen != NULL)) {
    union sockaddr_aligned tempaddr;
    /* get the IP address and port of the remote host */
    err = netconn_peer(newconn, &naddr, &port);
    if (err != ERR_OK) {
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
      netconn_delete(newconn);
      free_socket(nsock, 1);
      sock_set_errno(sock, err_to_errno(err));
      done_socket(sock);
      return -1;
    }

    IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
    /* truncate to the caller-provided buffer size */
    if (*addrlen > tempaddr.sa.sa_len) {
      *addrlen = tempaddr.sa.sa_len;
    }
    MEMCPY(addr, &tempaddr, *addrlen);

    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
    ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
    LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
  } else {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock));
  }

  sock_set_errno(sock, 0);
  done_socket(sock);
  done_socket(nsock);
  return newsock;
}
634  
/** Bind a socket to a local address and port.
 *
 * @param s socket descriptor
 * @param name local address to bind to (sockaddr_in or sockaddr_in6,
 *        must match the socket's IP version)
 * @param namelen size of 'name'
 * @return 0 on success, -1 on error (errno set)
 */
int
lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
{
  struct lwip_sock *sock;
  ip_addr_t local_addr;
  u16_t local_port;
  err_t err;

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
    /* sockaddr does not match socket type (IPv4/IPv6) */
    sock_set_errno(sock, err_to_errno(ERR_VAL));
    done_socket(sock);
    return -1;
  }

  /* check size, family and alignment of 'name' */
  LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
             IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
             sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
  LWIP_UNUSED_ARG(namelen);

  SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
  ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
  LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));

#if LWIP_IPV4 && LWIP_IPV6
  /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
  if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
    unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
    IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
  }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

  err = netconn_bind(sock->conn, &local_addr, local_port);

  if (err != ERR_OK) {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    return -1;
  }

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
  sock_set_errno(sock, 0);
  done_socket(sock);
  return 0;
}
688  
/** Close a socket: drop multicast memberships registered for it, delete the
 * underlying netconn and release the socket slot.
 *
 * @param s socket descriptor to close
 * @return 0 on success, -1 on error (errno set)
 */
int
lwip_close(int s)
{
  struct lwip_sock *sock;
  int is_tcp = 0;
  err_t err;

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (sock->conn != NULL) {
    /* remember whether lastdata holds a pbuf (TCP) or a netbuf (UDP/RAW)
       so free_socket() releases it correctly */
    is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
  } else {
    LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL);
  }

#if LWIP_IGMP
  /* drop all possibly joined IGMP memberships */
  lwip_socket_drop_registered_memberships(s);
#endif /* LWIP_IGMP */
#if LWIP_IPV6_MLD
  /* drop all possibly joined MLD6 memberships */
  lwip_socket_drop_registered_mld6_memberships(s);
#endif /* LWIP_IPV6_MLD */

  err = netconn_delete(sock->conn);
  if (err != ERR_OK) {
    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    return -1;
  }

  free_socket(sock, is_tcp);
  set_errno(0);
  return 0;
}
729  
/** Connect a socket to a remote address, or disconnect it.
 * Passing an address with sa_family == AF_UNSPEC disconnects the netconn
 * (UDP-style disassociation) instead of connecting.
 *
 * @param s socket descriptor
 * @param name remote address (or AF_UNSPEC to disconnect)
 * @param namelen size of 'name'
 * @return 0 on success, -1 on error (errno set)
 */
int
lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
{
  struct lwip_sock *sock;
  err_t err;

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
    /* sockaddr does not match socket type (IPv4/IPv6) */
    sock_set_errno(sock, err_to_errno(ERR_VAL));
    done_socket(sock);
    return -1;
  }

  LWIP_UNUSED_ARG(namelen);
  if (name->sa_family == AF_UNSPEC) {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
    err = netconn_disconnect(sock->conn);
  } else {
    ip_addr_t remote_addr;
    u16_t remote_port;

    /* check size, family and alignment of 'name' */
    LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
               IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
               sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);

    SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
    ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
    LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));

#if LWIP_IPV4 && LWIP_IPV6
    /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
    if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
      unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
      IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
    }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

    err = netconn_connect(sock->conn, &remote_addr, remote_port);
  }

  if (err != ERR_OK) {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    return -1;
  }

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
  sock_set_errno(sock, 0);
  done_socket(sock);
  return 0;
}
789  
790 /**
791 * Set a socket into listen mode.
792 * The socket may not have been used for another connection previously.
793 *
794 * @param s the socket to set to listening mode
795 * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
796 * @return 0 on success, non-zero on failure
797 */
798 int
799 lwip_listen(int s, int backlog)
800 {
801 struct lwip_sock *sock;
802 err_t err;
803  
804 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
805  
806 sock = get_socket(s);
807 if (!sock) {
808 return -1;
809 }
810  
811 /* limit the "backlog" parameter to fit in an u8_t */
812 backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
813  
814 err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
815  
816 if (err != ERR_OK) {
817 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
818 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
819 sock_set_errno(sock, EOPNOTSUPP);
820 } else {
821 sock_set_errno(sock, err_to_errno(err));
822 }
823 done_socket(sock);
824 return -1;
825 }
826  
827 sock_set_errno(sock, 0);
828 done_socket(sock);
829 return 0;
830 }
831  
832 #if LWIP_TCP
833 /* Helper function to loop over receiving pbufs from netconn
834 * until "len" bytes are received or we're otherwise done.
835 * Keeps sock->lastdata for peeking or partly copying.
836 */
837 static ssize_t
838 lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags)
839 {
840 u8_t apiflags = NETCONN_NOAUTORCVD;
841 ssize_t recvd = 0;
842 ssize_t recv_left = (len <= SSIZE_MAX) ? (ssize_t)len : SSIZE_MAX;
843  
844 LWIP_ASSERT("no socket given", sock != NULL);
845 LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP);
846  
847 if (flags & MSG_DONTWAIT) {
848 apiflags |= NETCONN_DONTBLOCK;
849 }
850  
851 do {
852 struct pbuf *p;
853 err_t err;
854 u16_t copylen;
855  
856 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf));
857 /* Check if there is data left from the last recv operation. */
858 if (sock->lastdata.pbuf) {
859 p = sock->lastdata.pbuf;
860 } else {
861 /* No data was left from the previous operation, so we try to get
862 some from the network. */
863 err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags);
864 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n",
865 err, (void *)p));
866  
867 if (err != ERR_OK) {
868 if (recvd > 0) {
869 /* already received data, return that (this trusts in getting the same error from
870 netconn layer again next time netconn_recv is called) */
871 if (err == ERR_CLSD) {
872 /* closed but already received data, ensure select gets the FIN, too */
873 if (sock->conn->callback != NULL) {
874 LOCK_TCPIP_CORE();
875 sock->conn->callback(sock->conn, NETCONN_EVT_RCVPLUS, 0);
876 UNLOCK_TCPIP_CORE();
877 }
878 }
879 goto lwip_recv_tcp_done;
880 }
881 /* We should really do some error checking here. */
882 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n",
883 lwip_strerr(err)));
884 sock_set_errno(sock, err_to_errno(err));
885 if (err == ERR_CLSD) {
886 return 0;
887 } else {
888 return -1;
889 }
890 }
891 LWIP_ASSERT("p != NULL", p != NULL);
892 sock->lastdata.pbuf = p;
893 }
894  
895 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n",
896 p->tot_len, (int)recv_left, (int)recvd));
897  
898 if (recv_left > p->tot_len) {
899 copylen = p->tot_len;
900 } else {
901 copylen = (u16_t)recv_left;
902 }
903 if (recvd + copylen < recvd) {
904 /* overflow */
905 copylen = (u16_t)(SSIZE_MAX - recvd);
906 }
907  
908 /* copy the contents of the received buffer into
909 the supplied memory pointer mem */
910 pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0);
911  
912 recvd += copylen;
913  
914 /* TCP combines multiple pbufs for one recv */
915 LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen);
916 recv_left -= copylen;
917  
918 /* Unless we peek the incoming message... */
919 if ((flags & MSG_PEEK) == 0) {
920 /* ... check if there is data left in the pbuf */
921 LWIP_ASSERT("invalid copylen", p->tot_len >= copylen);
922 if (p->tot_len - copylen > 0) {
923 /* If so, it should be saved in the sock structure for the next recv call.
924 We store the pbuf but hide/free the consumed data: */
925 sock->lastdata.pbuf = pbuf_free_header(p, copylen);
926 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf));
927 } else {
928 sock->lastdata.pbuf = NULL;
929 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p));
930 pbuf_free(p);
931 }
932 }
933 /* once we have some data to return, only add more if we don't need to wait */
934 apiflags |= NETCONN_DONTBLOCK;
935 /* @todo: do we need to support peeking more than one pbuf? */
936 } while ((recv_left > 0) || (flags & MSG_PEEK));
937 lwip_recv_tcp_done:
938 if (recvd > 0) {
939 /* ensure window update after copying all data */
940 netconn_tcp_recvd(sock->conn, (size_t)recvd);
941 }
942 sock_set_errno(sock, 0);
943 return recvd;
944 }
945 #endif
946  
/* Convert a netbuf's address data to struct sockaddr.
 *
 * @param conn netconn the address belongs to (only used for its type in
 *        dual-stack builds, otherwise unused)
 * @param fromaddr source IP address; may be modified in place (IPv4 is
 *        rewritten to an IPv4-mapped IPv6 address on dual-stack v6 netconns)
 * @param port source port
 * @param from [out] application-supplied sockaddr storage
 * @param fromlen [in/out] size of 'from'; shrunk to the actual address size
 *        when the caller's buffer is larger
 * @return 0 on success, 1 if the address was truncated to fit '*fromlen'
 */
static int
lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port,
                    struct sockaddr *from, socklen_t *fromlen)
{
  int truncated = 0;
  union sockaddr_aligned saddr;

  LWIP_UNUSED_ARG(conn);

  LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL);
  LWIP_ASSERT("from != NULL", from != NULL);
  LWIP_ASSERT("fromlen != NULL", fromlen != NULL);

#if LWIP_IPV4 && LWIP_IPV6
  /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
  if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) {
    ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
    IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
  }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

  IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
  /* copy at most *fromlen bytes: report truncation if the caller's buffer
     is smaller than the full sockaddr, shrink *fromlen if it is larger */
  if (*fromlen < saddr.sa.sa_len) {
    truncated = 1;
  } else if (*fromlen > saddr.sa.sa_len) {
    *fromlen = saddr.sa.sa_len;
  }
  MEMCPY(from, &saddr, *fromlen);
  return truncated;
}
978  
979 #if LWIP_TCP
/* Helper function to get a tcp socket's remote address info.
 *
 * Looks up the connection's remote end via netconn_getaddr() and, when the
 * caller supplied 'from'/'fromlen', converts it into a sockaddr.
 *
 * @param sock socket to query (NULL is a no-op returning 0)
 * @param from [out, optional] remote address storage
 * @param fromlen [in/out, optional] size of 'from'
 * @param dbg_fn caller's function name, debug output only
 * @param dbg_s socket index, debug output only
 * @param dbg_ret caller's return value, debug output only
 * @return 0 on success or no-op, 1 if the address was truncated
 */
static int
lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret)
{
  if (sock == NULL) {
    return 0;
  }
  LWIP_UNUSED_ARG(dbg_fn);
  LWIP_UNUSED_ARG(dbg_s);
  LWIP_UNUSED_ARG(dbg_ret);

#if !SOCKETS_DEBUG
  /* without debug output, the lookup is only needed if the caller actually
     wants the address back */
  if (from && fromlen)
#endif /* !SOCKETS_DEBUG */
  {
    /* get remote addr/port from tcp_pcb */
    u16_t port;
    ip_addr_t tmpaddr;
    netconn_getaddr(sock->conn, &tmpaddr, &port, 0);
    LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s));
    ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr);
    LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret));
    if (from && fromlen) {
      return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen);
    }
  }
  return 0;
}
1008 #endif
1009  
1010 /* Helper function to receive a netbuf from a udp or raw netconn.
1011 * Keeps sock->lastdata for peeking.
1012 */
1013 static err_t
1014 lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s)
1015 {
1016 struct netbuf *buf;
1017 u8_t apiflags;
1018 err_t err;
1019 u16_t buflen, copylen, copied;
1020 int i;
1021  
1022 LWIP_UNUSED_ARG(dbg_s);
1023 LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;);
1024  
1025 if (flags & MSG_DONTWAIT) {
1026 apiflags = NETCONN_DONTBLOCK;
1027 } else {
1028 apiflags = 0;
1029 }
1030  
1031 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf));
1032 /* Check if there is data left from the last recv operation. */
1033 buf = sock->lastdata.netbuf;
1034 if (buf == NULL) {
1035 /* No data was left from the previous operation, so we try to get
1036 some from the network. */
1037 err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags);
1038 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n",
1039 err, (void *)buf));
1040  
1041 if (err != ERR_OK) {
1042 return err;
1043 }
1044 LWIP_ASSERT("buf != NULL", buf != NULL);
1045 sock->lastdata.netbuf = buf;
1046 }
1047 buflen = buf->p->tot_len;
1048 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen));
1049  
1050 copied = 0;
1051 /* copy the pbuf payload into the iovs */
1052 for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) {
1053 u16_t len_left = (u16_t)(buflen - copied);
1054 if (msg->msg_iov[i].iov_len > len_left) {
1055 copylen = len_left;
1056 } else {
1057 copylen = (u16_t)msg->msg_iov[i].iov_len;
1058 }
1059  
1060 /* copy the contents of the received buffer into
1061 the supplied memory buffer */
1062 pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied);
1063 copied = (u16_t)(copied + copylen);
1064 }
1065  
1066 /* Check to see from where the data was.*/
1067 #if !SOCKETS_DEBUG
1068 if (msg->msg_name && msg->msg_namelen)
1069 #endif /* !SOCKETS_DEBUG */
1070 {
1071 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s));
1072 ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf));
1073 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied));
1074 if (msg->msg_name && msg->msg_namelen) {
1075 lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf),
1076 (struct sockaddr *)msg->msg_name, &msg->msg_namelen);
1077 }
1078 }
1079  
1080 /* Initialize flag output */
1081 msg->msg_flags = 0;
1082  
1083 if (msg->msg_control) {
1084 u8_t wrote_msg = 0;
1085 #if LWIP_NETBUF_RECVINFO
1086 /* Check if packet info was recorded */
1087 if (buf->flags & NETBUF_FLAG_DESTADDR) {
1088 if (IP_IS_V4(&buf->toaddr)) {
1089 #if LWIP_IPV4
1090 if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) {
1091 struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! */
1092 struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr);
1093 chdr->cmsg_level = IPPROTO_IP;
1094 chdr->cmsg_type = IP_PKTINFO;
1095 chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
1096 pkti->ipi_ifindex = buf->p->if_idx;
1097 inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf)));
1098 msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
1099 wrote_msg = 1;
1100 } else {
1101 msg->msg_flags |= MSG_CTRUNC;
1102 }
1103 #endif /* LWIP_IPV4 */
1104 }
1105 }
1106 #endif /* LWIP_NETBUF_RECVINFO */
1107  
1108 if (!wrote_msg) {
1109 msg->msg_controllen = 0;
1110 }
1111 }
1112  
1113 /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */
1114 if ((flags & MSG_PEEK) == 0) {
1115 sock->lastdata.netbuf = NULL;
1116 netbuf_delete(buf);
1117 }
1118 if (datagram_len) {
1119 *datagram_len = buflen;
1120 }
1121 return ERR_OK;
1122 }
1123  
/**
 * Receive data from a socket, optionally reporting the sender's address
 * (BSD recvfrom()).
 *
 * TCP sockets are served by lwip_recv_tcp() and the remote address is then
 * looked up from the pcb; UDP/RAW sockets go through
 * lwip_recvfrom_udp_raw() with a single-element iovec.
 *
 * @return number of bytes received, or -1 on error (errno set)
 */
ssize_t
lwip_recvfrom(int s, void *mem, size_t len, int flags,
              struct sockaddr *from, socklen_t *fromlen)
{
  struct lwip_sock *sock;
  ssize_t ret;

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
  sock = get_socket(s);
  if (!sock) {
    return -1;
  }
#if LWIP_TCP
  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
    ret = lwip_recv_tcp(sock, mem, len, flags);
    lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret);
    done_socket(sock);
    /* errno was already set by lwip_recv_tcp() */
    return ret;
  } else
#endif
  {
    u16_t datagram_len = 0;
    struct iovec vec;
    struct msghdr msg;
    err_t err;
    /* wrap the single caller buffer in a msghdr for the udp/raw helper */
    vec.iov_base = mem;
    vec.iov_len = len;
    msg.msg_control = NULL;
    msg.msg_controllen = 0;
    msg.msg_flags = 0;
    msg.msg_iov = &vec;
    msg.msg_iovlen = 1;
    msg.msg_name = from;
    msg.msg_namelen = (fromlen ? *fromlen : 0);
    err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s);
    if (err != ERR_OK) {
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
                                  s, lwip_strerr(err)));
      sock_set_errno(sock, err_to_errno(err));
      done_socket(sock);
      return -1;
    }
    /* a datagram larger than 'len' is silently truncated here (recvfrom has
       no MSG_TRUNC reporting, unlike recvmsg) */
    ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX);
    if (fromlen) {
      *fromlen = msg.msg_namelen;
    }
  }

  sock_set_errno(sock, 0);
  done_socket(sock);
  return ret;
}
1176  
1177 ssize_t
1178 lwip_read(int s, void *mem, size_t len)
1179 {
1180 return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
1181 }
1182  
1183 ssize_t
1184 lwip_readv(int s, const struct iovec *iov, int iovcnt)
1185 {
1186 struct msghdr msg;
1187  
1188 msg.msg_name = NULL;
1189 msg.msg_namelen = 0;
1190 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1191 Blame the opengroup standard for this inconsistency. */
1192 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1193 msg.msg_iovlen = iovcnt;
1194 msg.msg_control = NULL;
1195 msg.msg_controllen = 0;
1196 msg.msg_flags = 0;
1197 return lwip_recvmsg(s, &msg, 0);
1198 }
1199  
1200 ssize_t
1201 lwip_recv(int s, void *mem, size_t len, int flags)
1202 {
1203 return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
1204 }
1205  
/**
 * Receive a message from a socket (BSD recvmsg()).
 *
 * Supported flags: 0 or MSG_PEEK only (MSG_DONTWAIT is rejected at the API
 * level but used internally between iovs on TCP).
 * TCP: fills the iovs one after another via lwip_recv_tcp().
 * UDP/RAW: receives one datagram via lwip_recvfrom_udp_raw() and sets
 * MSG_TRUNC in msg_flags when it did not fit into the iovs.
 *
 * @return number of bytes received (for UDP/RAW: the full datagram length),
 *         or -1 on error (errno set)
 */
ssize_t
lwip_recvmsg(int s, struct msghdr *message, int flags)
{
  struct lwip_sock *sock;
  int i;
  ssize_t buflen;

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags));
  /* NOTE(review): a NULL msghdr returns ERR_ARG directly instead of
     -1 with errno set -- confirm callers expect this */
  LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;);
  LWIP_ERROR("lwip_recvmsg: unsupported flags", ((flags == 0) || (flags == MSG_PEEK)),
             set_errno(EOPNOTSUPP); return -1;);

  if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) {
    set_errno(EMSGSIZE);
    return -1;
  }

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  /* check for valid vectors: each iov must be non-NULL with a positive
     length that fits in ssize_t, and the running total must not overflow */
  buflen = 0;
  for (i = 0; i < message->msg_iovlen; i++) {
    if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) ||
        ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) ||
        ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) {
      sock_set_errno(sock, ERR_VAL);
      done_socket(sock);
      return -1;
    }
    buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len);
  }

  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
#if LWIP_TCP
    int recv_flags = flags;
    message->msg_flags = 0;
    /* recv the data */
    buflen = 0;
    for (i = 0; i < message->msg_iovlen; i++) {
      /* try to receive into this vector's buffer */
      ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags);
      if (recvd_local > 0) {
        /* sum up received bytes */
        buflen += recvd_local;
      }
      if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) ||
          (flags & MSG_PEEK)) {
        /* returned prematurely (or peeking, which might actually be limitated to the first iov) */
        if (buflen <= 0) {
          /* nothing received at all, propagate the error */
          buflen = recvd_local;
        }
        break;
      }
      /* while MSG_DONTWAIT is not supported for this function, we pass it to
         lwip_recv_tcp() to prevent waiting for more data */
      recv_flags |= MSG_DONTWAIT;
    }
    if (buflen > 0) {
      /* reset socket error since we have received something */
      sock_set_errno(sock, 0);
    }
    /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." */
    done_socket(sock);
    return buflen;
#else /* LWIP_TCP */
    sock_set_errno(sock, err_to_errno(ERR_ARG));
    done_socket(sock);
    return -1;
#endif /* LWIP_TCP */
  }
  /* else, UDP and RAW NETCONNs */
#if LWIP_UDP || LWIP_RAW
  {
    u16_t datagram_len = 0;
    err_t err;
    err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s);
    if (err != ERR_OK) {
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
                                  s, lwip_strerr(err)));
      sock_set_errno(sock, err_to_errno(err));
      done_socket(sock);
      return -1;
    }
    if (datagram_len > buflen) {
      /* the datagram did not fit into the supplied buffers */
      message->msg_flags |= MSG_TRUNC;
    }

    sock_set_errno(sock, 0);
    done_socket(sock);
    return (int)datagram_len;
  }
#else /* LWIP_UDP || LWIP_RAW */
  sock_set_errno(sock, err_to_errno(ERR_ARG));
  done_socket(sock);
  return -1;
#endif /* LWIP_UDP || LWIP_RAW */
}
1307  
/**
 * Send data on a connected socket (BSD send()).
 *
 * Non-TCP sockets are forwarded to lwip_sendto() with no address.
 * For TCP, MSG_MORE maps to NETCONN_MORE, MSG_DONTWAIT to
 * NETCONN_DONTBLOCK, and the data is always copied (NETCONN_COPY).
 *
 * @return number of bytes actually written, or -1 on error (errno set)
 */
ssize_t
lwip_send(int s, const void *data, size_t size, int flags)
{
  struct lwip_sock *sock;
  err_t err;
  u8_t write_flags;
  size_t written;

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
                              s, data, size, flags));

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
#if (LWIP_UDP || LWIP_RAW)
    /* release this function's socket reference before re-entering via sendto */
    done_socket(sock);
    return lwip_sendto(s, data, size, flags, NULL, 0);
#else /* (LWIP_UDP || LWIP_RAW) */
    sock_set_errno(sock, err_to_errno(ERR_ARG));
    done_socket(sock);
    return -1;
#endif /* (LWIP_UDP || LWIP_RAW) */
  }

  write_flags = (u8_t)(NETCONN_COPY |
                       ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
                       ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
  written = 0;
  err = netconn_write_partly(sock->conn, data, size, write_flags, &written);

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
  sock_set_errno(sock, err_to_errno(err));
  done_socket(sock);
  /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
  return (err == ERR_OK ? (ssize_t)written : -1);
}
1347  
/**
 * Send a message on a socket (BSD sendmsg()).
 *
 * Supported flags: MSG_DONTWAIT and MSG_MORE; ancillary data (msg_control)
 * is ignored.
 * TCP: the iovs are handed to netconn_write_vectors_partly().
 * UDP/RAW: the iovs are assembled into one netbuf (flattened into a single
 * pbuf when LWIP_NETIF_TX_SINGLE_PBUF, otherwise chained as PBUF_REF pbufs
 * referencing the caller's memory) and sent via netconn_send().
 *
 * @return number of bytes sent, or -1 on error (errno set)
 */
ssize_t
lwip_sendmsg(int s, const struct msghdr *msg, int flags)
{
  struct lwip_sock *sock;
#if LWIP_TCP
  u8_t write_flags;
  size_t written;
#endif
  err_t err = ERR_OK;

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
             sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
  LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL,
             sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
  LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX),
             sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;);
  LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0,
             sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;);

  /* ancillary data is not supported for sending */
  LWIP_UNUSED_ARG(msg->msg_control);
  LWIP_UNUSED_ARG(msg->msg_controllen);
  LWIP_UNUSED_ARG(msg->msg_flags);

  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
#if LWIP_TCP
    write_flags = (u8_t)(NETCONN_COPY |
                         ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
                         ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));

    written = 0;
    /* NOTE(review): this cast assumes struct iovec and struct netvector are
       layout-compatible (pointer + length) -- confirm for this port */
    err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written);
    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
    return (err == ERR_OK ? (ssize_t)written : -1);
#else /* LWIP_TCP */
    sock_set_errno(sock, err_to_errno(ERR_ARG));
    done_socket(sock);
    return -1;
#endif /* LWIP_TCP */
  }
  /* else, UDP and RAW NETCONNs */
#if LWIP_UDP || LWIP_RAW
  {
    struct netbuf chain_buf;
    int i;
    ssize_t size = 0;

    LWIP_UNUSED_ARG(flags);
    LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
                                                    IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)),
               sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);

    /* initialize chain buffer with destination */
    memset(&chain_buf, 0, sizeof(struct netbuf));
    if (msg->msg_name) {
      u16_t remote_port;
      SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port);
      netbuf_fromport(&chain_buf) = remote_port;
    }
#if LWIP_NETIF_TX_SINGLE_PBUF
    /* single-pbuf TX: sum up the iov lengths (guarding against overflow),
       then copy everything into one freshly allocated netbuf */
    for (i = 0; i < msg->msg_iovlen; i++) {
      size += msg->msg_iov[i].iov_len;
      if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) {
        /* overflow */
        goto sendmsg_emsgsize;
      }
    }
    if (size > 0xFFFF) {
      /* overflow: a netbuf is limited to 64K-1 bytes */
      goto sendmsg_emsgsize;
    }
    /* Allocate a new netbuf and copy the data into it. */
    if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) {
      err = ERR_MEM;
    } else {
      /* flatten the IO vectors */
      size_t offset = 0;
      for (i = 0; i < msg->msg_iovlen; i++) {
        MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
        offset += msg->msg_iov[i].iov_len;
      }
#if LWIP_CHECKSUM_ON_COPY
      {
        /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
        u16_t chksum = ~inet_chksum_pbuf(chain_buf.p);
        netbuf_set_chksum(&chain_buf, chksum);
      }
#endif /* LWIP_CHECKSUM_ON_COPY */
      err = ERR_OK;
    }
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
    /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
       manually to avoid having to allocate, chain, and delete a netbuf for each iov */
    for (i = 0; i < msg->msg_iovlen; i++) {
      struct pbuf *p;
      if (msg->msg_iov[i].iov_len > 0xFFFF) {
        /* overflow */
        goto sendmsg_emsgsize;
      }
      p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
      if (p == NULL) {
        err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
        break;
      }
      /* PBUF_REF: the pbuf references the caller's iov memory, no copy */
      p->payload = msg->msg_iov[i].iov_base;
      p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
      /* netbuf empty, add new pbuf */
      if (chain_buf.p == NULL) {
        chain_buf.p = chain_buf.ptr = p;
        /* add pbuf to existing pbuf chain */
      } else {
        if (chain_buf.p->tot_len + p->len > 0xffff) {
          /* overflow */
          pbuf_free(p);
          goto sendmsg_emsgsize;
        }
        pbuf_cat(chain_buf.p, p);
      }
    }
    /* save size of total chain */
    if (err == ERR_OK) {
      size = netbuf_len(&chain_buf);
    }
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */

    if (err == ERR_OK) {
#if LWIP_IPV4 && LWIP_IPV6
      /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
      if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) {
        unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr));
        IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4);
      }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

      /* send the data */
      err = netconn_send(sock->conn, &chain_buf);
    }

    /* deallocated the buffer */
    netbuf_free(&chain_buf);

    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    return (err == ERR_OK ? size : -1);
sendmsg_emsgsize:
    sock_set_errno(sock, EMSGSIZE);
    netbuf_free(&chain_buf);
    done_socket(sock);
    return -1;
  }
#else /* LWIP_UDP || LWIP_RAW */
  sock_set_errno(sock, err_to_errno(ERR_ARG));
  done_socket(sock);
  return -1;
#endif /* LWIP_UDP || LWIP_RAW */
}
1510  
/**
 * Send a datagram to a given address (BSD sendto()).
 *
 * TCP sockets forward to lwip_send() (the address is ignored).
 * UDP/RAW: the data is wrapped in a netbuf (copied when
 * LWIP_NETIF_TX_SINGLE_PBUF, merely referenced otherwise) and passed to
 * netconn_send().
 *
 * @return number of bytes sent (== size on success), or -1 on error
 *         (errno set)
 */
ssize_t
lwip_sendto(int s, const void *data, size_t size, int flags,
            const struct sockaddr *to, socklen_t tolen)
{
  struct lwip_sock *sock;
  err_t err;
  u16_t short_size;
  u16_t remote_port;
  struct netbuf buf;

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
#if LWIP_TCP
    /* release this function's socket reference before re-entering via send */
    done_socket(sock);
    return lwip_send(s, data, size, flags);
#else /* LWIP_TCP */
    LWIP_UNUSED_ARG(flags);
    sock_set_errno(sock, err_to_errno(ERR_ARG));
    done_socket(sock);
    return -1;
#endif /* LWIP_TCP */
  }

  /* a single netbuf is limited to 64K-1 bytes; also stay within ssize_t */
  if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) {
    /* cannot fit into one datagram (at least for us) */
    sock_set_errno(sock, EMSGSIZE);
    done_socket(sock);
    return -1;
  }
  short_size = (u16_t)size;
  LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
                                             (IS_SOCK_ADDR_LEN_VALID(tolen) &&
                                              IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))),
             sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
  LWIP_UNUSED_ARG(tolen);

  /* initialize a buffer */
  buf.p = buf.ptr = NULL;
#if LWIP_CHECKSUM_ON_COPY
  buf.flags = 0;
#endif /* LWIP_CHECKSUM_ON_COPY */
  if (to) {
    SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
  } else {
    /* no destination given: use the any-address; the netconn sends to its
       connected peer in that case */
    remote_port = 0;
    ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
  }
  netbuf_fromport(&buf) = remote_port;


  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
                              s, data, short_size, flags));
  ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr);
  LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));

  /* make the buffer point to the data that should be sent */
#if LWIP_NETIF_TX_SINGLE_PBUF
  /* Allocate a new netbuf and copy the data into it. */
  if (netbuf_alloc(&buf, short_size) == NULL) {
    err = ERR_MEM;
  } else {
#if LWIP_CHECKSUM_ON_COPY
    if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
      /* compute the checksum while copying, avoiding a second data pass */
      u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
      netbuf_set_chksum(&buf, chksum);
    } else
#endif /* LWIP_CHECKSUM_ON_COPY */
    {
      MEMCPY(buf.p->payload, data, short_size);
    }
    err = ERR_OK;
  }
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
  /* reference the caller's data instead of copying it */
  err = netbuf_ref(&buf, data, short_size);
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
  if (err == ERR_OK) {
#if LWIP_IPV4 && LWIP_IPV6
    /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
    if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
      unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
      IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
    }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

    /* send the data */
    err = netconn_send(sock->conn, &buf);
  }

  /* deallocated the buffer */
  netbuf_free(&buf);

  sock_set_errno(sock, err_to_errno(err));
  done_socket(sock);
  return (err == ERR_OK ? short_size : -1);
}
1610  
/**
 * Create a new socket (BSD socket()).
 *
 * Maps (domain, type, protocol) to a netconn type, creates the netconn and
 * allocates a socket slot for it.
 *
 * @param domain address family (only used to select the netconn type via
 *        DOMAIN_TO_NETCONN_TYPE; otherwise unchecked, see @todo below)
 * @param type SOCK_RAW, SOCK_DGRAM or SOCK_STREAM
 * @param protocol protocol number (e.g. IPPROTO_UDPLITE selects UDP-Lite
 *        for SOCK_DGRAM)
 * @return new socket descriptor, or -1 on error (errno set)
 */
int
lwip_socket(int domain, int type, int protocol)
{
  struct netconn *conn;
  int i;

  LWIP_UNUSED_ARG(domain); /* @todo: check this */

  /* create a netconn */
  switch (type) {
    case SOCK_RAW:
      conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
                                                 (u8_t)protocol, DEFAULT_SOCKET_EVENTCB);
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
                                  domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
      break;
    case SOCK_DGRAM:
      conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
                                       ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)),
                                       DEFAULT_SOCKET_EVENTCB);
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
                                  domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
#if LWIP_NETBUF_RECVINFO
      if (conn) {
        /* netconn layer enables pktinfo by default, sockets default to off */
        conn->flags &= ~NETCONN_FLAG_PKTINFO;
      }
#endif /* LWIP_NETBUF_RECVINFO */
      break;
    case SOCK_STREAM:
      conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB);
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
                                  domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
      break;
    default:
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
                                  domain, type, protocol));
      set_errno(EINVAL);
      return -1;
  }

  if (!conn) {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
    set_errno(ENOBUFS);
    return -1;
  }

  i = alloc_socket(conn, 0);

  if (i == -1) {
    /* no free socket slot: drop the netconn again */
    netconn_delete(conn);
    set_errno(ENFILE);
    return -1;
  }
  conn->socket = i;
  /* NOTE(review): done_socket() apparently releases the reference taken by
     alloc_socket() -- confirm against alloc_socket()'s contract */
  done_socket(&sockets[i - LWIP_SOCKET_OFFSET]);
  LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
  set_errno(0);
  return i;
}
1671  
1672 ssize_t
1673 lwip_write(int s, const void *data, size_t size)
1674 {
1675 return lwip_send(s, data, size, 0);
1676 }
1677  
1678 ssize_t
1679 lwip_writev(int s, const struct iovec *iov, int iovcnt)
1680 {
1681 struct msghdr msg;
1682  
1683 msg.msg_name = NULL;
1684 msg.msg_namelen = 0;
1685 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1686 Blame the opengroup standard for this inconsistency. */
1687 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1688 msg.msg_iovlen = iovcnt;
1689 msg.msg_control = NULL;
1690 msg.msg_controllen = 0;
1691 msg.msg_flags = 0;
1692 return lwip_sendmsg(s, &msg, 0);
1693 }
1694  
1695 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
/* Add select_cb to select_cb_list.
 * The list is mutated only while LWIP_SOCKET_SELECT_PROTECT() is held;
 * new entries are prepended (LIFO). */
static void
lwip_link_select_cb(struct lwip_select_cb *select_cb)
{
  LWIP_SOCKET_SELECT_DECL_PROTECT(lev);

  /* Protect the select_cb_list */
  LWIP_SOCKET_SELECT_PROTECT(lev);

  /* Put this select_cb on top of list */
  select_cb->next = select_cb_list;
  if (select_cb_list != NULL) {
    select_cb_list->prev = select_cb;
  }
  select_cb_list = select_cb;
#if !LWIP_TCPIP_CORE_LOCKING
  /* Increasing this counter tells select_check_waiters that the list has changed. */
  select_cb_ctr++;
#endif

  /* Now we can safely unprotect */
  LWIP_SOCKET_SELECT_UNPROTECT(lev);
}
1719  
/* Remove select_cb from select_cb_list.
 * The doubly-linked list is repaired while LWIP_SOCKET_SELECT_PROTECT()
 * is held. */
static void
lwip_unlink_select_cb(struct lwip_select_cb *select_cb)
{
  LWIP_SOCKET_SELECT_DECL_PROTECT(lev);

  /* Take us off the list */
  LWIP_SOCKET_SELECT_PROTECT(lev);
  if (select_cb->next != NULL) {
    select_cb->next->prev = select_cb->prev;
  }
  if (select_cb_list == select_cb) {
    /* removing the head: it must not have a predecessor */
    LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL);
    select_cb_list = select_cb->next;
  } else {
    LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL);
    select_cb->prev->next = select_cb->next;
  }
#if !LWIP_TCPIP_CORE_LOCKING
  /* Increasing this counter tells select_check_waiters that the list has changed. */
  select_cb_ctr++;
#endif
  LWIP_SOCKET_SELECT_UNPROTECT(lev);
}
1744 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
1745  
1746 #if LWIP_SOCKET_SELECT
/**
 * Go through the readset and writeset lists and see which socket of the sockets
 * set in the sets has events. On return, readset, writeset and exceptset have
 * the sockets enabled that had events.
 *
 * @param maxfdp1 the highest socket index in the sets
 * @param readset_in set of sockets to check for read events
 * @param writeset_in set of sockets to check for write events
 * @param exceptset_in set of sockets to check for error events
 * @param readset_out set of sockets that had read events
 * @param writeset_out set of sockets that had write events
 * @param exceptset_out set of sockets that had error events
 * @return number of sockets that had events (read/write/exception) (>= 0),
 *         or -1 if any examined descriptor is not a valid open socket
 */
static int
lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
             fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
{
  int i, nready = 0;
  fd_set lreadset, lwriteset, lexceptset;
  struct lwip_sock *sock;
  SYS_ARCH_DECL_PROTECT(lev);

  FD_ZERO(&lreadset);
  FD_ZERO(&lwriteset);
  FD_ZERO(&lexceptset);

  /* Go through each socket in each list to count number of sockets which
     currently match */
  for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
    /* if this FD is not in the set, continue */
    if (!(readset_in && FD_ISSET(i, readset_in)) &&
        !(writeset_in && FD_ISSET(i, writeset_in)) &&
        !(exceptset_in && FD_ISSET(i, exceptset_in))) {
      continue;
    }
    /* First get the socket's status (protected)... */
    SYS_ARCH_PROTECT(lev);
    sock = tryget_socket_unconn(i);
    if (sock != NULL) {
      /* snapshot the event state under protection, evaluate it outside
         the critical section */
      void *lastdata = sock->lastdata.pbuf;
      s16_t rcvevent = sock->rcvevent;
      u16_t sendevent = sock->sendevent;
      u16_t errevent = sock->errevent;
      SYS_ARCH_UNPROTECT(lev);

      /* ... then examine it: */
      /* See if netconn of this socket is ready for read */
      if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
        FD_SET(i, &lreadset);
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
        nready++;
      }
      /* See if netconn of this socket is ready for write */
      if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
        FD_SET(i, &lwriteset);
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
        nready++;
      }
      /* See if netconn of this socket had an error */
      if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
        FD_SET(i, &lexceptset);
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
        nready++;
      }
      done_socket(sock);
    } else {
      SYS_ARCH_UNPROTECT(lev);
      /* not a valid open socket */
      return -1;
    }
  }
  /* copy local sets to the ones provided as arguments */
  *readset_out = lreadset;
  *writeset_out = lwriteset;
  *exceptset_out = lexceptset;

  LWIP_ASSERT("nready >= 0", nready >= 0);
  return nready;
}
1827  
1828 #if LWIP_NETCONN_FULLDUPLEX
1829 /* Mark all of the set sockets in one of the three fdsets passed to select as used.
1830 * All sockets are marked (and later unmarked), whether they are open or not.
1831 * This is OK as lwip_selscan aborts select when non-open sockets are found.
1832 */
1833 static void
1834 lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
1835 {
1836 SYS_ARCH_DECL_PROTECT(lev);
1837 if (fdset) {
1838 int i;
1839 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1840 /* if this FD is in the set, lock it (unless already done) */
1841 if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
1842 struct lwip_sock *sock;
1843 SYS_ARCH_PROTECT(lev);
1844 sock = tryget_socket_unconn(i);
1845 if (sock != NULL) {
1846 /* leave the socket used until released by lwip_select_dec_sockets_used */
1847 FD_SET(i, used_sockets);
1848 }
1849 SYS_ARCH_UNPROTECT(lev);
1850 }
1851 }
1852 }
1853 }
1854  
1855 /* Mark all sockets passed to select as used to prevent them from being freed
1856 * from other threads while select is running.
1857 * Marked sockets are added to 'used_sockets' to mark them only once an be able
1858 * to unmark them correctly.
1859 */
1860 static void
1861 lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets)
1862 {
1863 FD_ZERO(used_sockets);
1864 lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets);
1865 lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets);
1866 lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets);
1867 }
1868  
1869 /* Let go all sockets that were marked as used when starting select */
1870 static void
1871 lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets)
1872 {
1873 int i;
1874 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1875 /* if this FD is not in the set, continue */
1876 if (FD_ISSET(i, used_sockets)) {
1877 struct lwip_sock *sock = tryget_socket_unconn_nouse(i);
1878 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
1879 if (sock != NULL) {
1880 done_socket(sock);
1881 }
1882 }
1883 }
1884 }
1885 #else /* LWIP_NETCONN_FULLDUPLEX */
1886 #define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets)
1887 #define lwip_select_dec_sockets_used(maxfdp1, used_sockets)
1888 #endif /* LWIP_NETCONN_FULLDUPLEX */
1889  
/**
 * BSD-style select() for lwIP sockets.
 *
 * Scans the given fd_sets for sockets that are ready; if none are ready and
 * a non-zero timeout is given, registers a wait entry (struct lwip_select_cb)
 * on the global select list, bumps select_waiting on each watched socket,
 * and blocks on a semaphore until event_callback() signals it or the timeout
 * expires. Afterwards it rescans and writes the result sets back.
 *
 * @param maxfdp1   highest fd in any set, plus 1
 * @param readset   in/out: fds to check for readability (may be NULL)
 * @param writeset  in/out: fds to check for writability (may be NULL)
 * @param exceptset in/out: fds to check for errors (may be NULL)
 * @param timeout   NULL = wait forever; tv_sec==tv_usec==0 = poll once
 * @return number of ready fds, or -1 on error (errno set:
 *         EINVAL/EBADF/ENOMEM/EBUSY)
 */
int
lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
            struct timeval *timeout)
{
  u32_t waitres = 0;
  int nready;
  /* local result sets: filled by lwip_selscan(), copied to the caller's
     sets only on the way out */
  fd_set lreadset, lwriteset, lexceptset;
  u32_t msectimeout;
  int i;
  int maxfdp2;
#if LWIP_NETCONN_SEM_PER_THREAD
  int waited = 0;
#endif
#if LWIP_NETCONN_FULLDUPLEX
  fd_set used_sockets;
#endif
  SYS_ARCH_DECL_PROTECT(lev);

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
                              maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
                              timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1,
                              timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1));

  /* sanity-check the fd range before touching any socket */
  if ((maxfdp1 < 0) || (maxfdp1 > (FD_SETSIZE + LWIP_SOCKET_OFFSET))) {
    set_errno(EINVAL);
    return -1;
  }

  /* with LWIP_NETCONN_FULLDUPLEX this pins all involved sockets; otherwise
     it expands to nothing */
  lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets);

  /* Go through each socket in each list to count number of sockets which
     currently match */
  nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);

  if (nready < 0) {
    /* one of the sockets in one of the fd_sets was invalid */
    set_errno(EBADF);
    lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
    return -1;
  } else if (nready > 0) {
    /* one or more sockets are set, no need to wait */
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
  } else {
    /* If we don't have any current events, then suspend if we are supposed to */
    if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
      /* This is OK as the local fdsets are empty and nready is zero,
         or we would have returned earlier. */
    } else {
      /* None ready: add our semaphore to list:
         We don't actually need any dynamic memory. Our entry on the
         list is only valid while we are in this function, so it's ok
         to use local variables (unless we're running in MPU compatible
         mode). */
      API_SELECT_CB_VAR_DECLARE(select_cb);
      API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1);
      memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));

      API_SELECT_CB_VAR_REF(select_cb).readset = readset;
      API_SELECT_CB_VAR_REF(select_cb).writeset = writeset;
      API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset;
#if LWIP_NETCONN_SEM_PER_THREAD
      API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
#else /* LWIP_NETCONN_SEM_PER_THREAD */
      if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
        /* failed to create semaphore */
        set_errno(ENOMEM);
        lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
        API_SELECT_CB_VAR_FREE(select_cb);
        return -1;
      }
#endif /* LWIP_NETCONN_SEM_PER_THREAD */

      /* put our entry on the global list so event_callback() can find it */
      lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));

      /* Increase select_waiting for each socket we are interested in */
      maxfdp2 = maxfdp1;
      for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
        if ((readset && FD_ISSET(i, readset)) ||
            (writeset && FD_ISSET(i, writeset)) ||
            (exceptset && FD_ISSET(i, exceptset))) {
          struct lwip_sock *sock;
          SYS_ARCH_PROTECT(lev);
          sock = tryget_socket_unconn(i);
          if (sock != NULL) {
            sock->select_waiting++;
            if (sock->select_waiting == 0) {
              /* overflow - too many threads waiting */
              sock->select_waiting--;
              done_socket(sock);
              nready = -1;
              /* maxfdp2 limits the cleanup loop below to the fds we
                 actually incremented */
              maxfdp2 = i;
              SYS_ARCH_UNPROTECT(lev);
              set_errno(EBUSY);
              break;
            }
            done_socket(sock);
          } else {
            /* Not a valid socket */
            nready = -1;
            maxfdp2 = i;
            SYS_ARCH_UNPROTECT(lev);
            set_errno(EBADF);
            break;
          }
          SYS_ARCH_UNPROTECT(lev);
        }
      }

      if (nready >= 0) {
        /* Call lwip_selscan again: there could have been events between
           the last scan (without us on the list) and putting us on the list! */
        nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
        if (!nready) {
          /* Still none ready, just wait to be woken */
          if (timeout == 0) {
            /* Wait forever */
            msectimeout = 0;
          } else {
            /* round tv_usec to the nearest millisecond */
            long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000));
            if (msecs_long <= 0) {
              /* Wait 1ms at least (0 means wait forever) */
              msectimeout = 1;
            } else {
              msectimeout = (u32_t)msecs_long;
            }
          }

          waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
#if LWIP_NETCONN_SEM_PER_THREAD
          waited = 1;
#endif
        }
      }

      /* Decrease select_waiting for each socket we are interested in */
      for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
        if ((readset && FD_ISSET(i, readset)) ||
            (writeset && FD_ISSET(i, writeset)) ||
            (exceptset && FD_ISSET(i, exceptset))) {
          struct lwip_sock *sock;
          SYS_ARCH_PROTECT(lev);
          sock = tryget_socket_unconn(i);
          if (sock != NULL) {
            /* for now, handle select_waiting==0... */
            LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
            if (sock->select_waiting > 0) {
              sock->select_waiting--;
            }
            done_socket(sock);
          } else {
            /* Not a valid socket */
            nready = -1;
            set_errno(EBADF);
          }
          SYS_ARCH_UNPROTECT(lev);
        }
      }

      lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));

#if LWIP_NETCONN_SEM_PER_THREAD
      if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
        /* don't leave the thread-local semaphore signalled */
        sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
      }
#else /* LWIP_NETCONN_SEM_PER_THREAD */
      sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
#endif /* LWIP_NETCONN_SEM_PER_THREAD */
      API_SELECT_CB_VAR_FREE(select_cb);

      if (nready < 0) {
        /* This happens when a socket got closed while waiting */
        lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
        return -1;
      }

      if (waitres == SYS_ARCH_TIMEOUT) {
        /* Timeout */
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
        /* This is OK as the local fdsets are empty and nready is zero,
           or we would have returned earlier. */
      } else {
        /* See what's set now after waiting */
        nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
      }
    }
  }

  lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
  set_errno(0);
  /* copy local sets to the ones provided as arguments */
  if (readset) {
    *readset = lreadset;
  }
  if (writeset) {
    *writeset = lwriteset;
  }
  if (exceptset) {
    *exceptset = lexceptset;
  }
  return nready;
}
2093 #endif /* LWIP_SOCKET_SELECT */
2094  
2095 #if LWIP_SOCKET_POLL
/** Options for the lwip_pollscan function.
 * These are bit flags and may be OR-combined by the caller. */
enum lwip_pollscan_opts
{
  /** Clear revents in each struct pollfd. */
  LWIP_POLLSCAN_CLEAR = 1,

  /** Increment select_waiting in each struct lwip_sock. */
  LWIP_POLLSCAN_INC_WAIT = 2,

  /** Decrement select_waiting in each struct lwip_sock. */
  LWIP_POLLSCAN_DEC_WAIT = 4
};
2108  
/**
 * Update revents in each struct pollfd.
 * Optionally update select_waiting in struct lwip_sock.
 *
 * The socket state (lastdata/rcvevent/sendevent/errevent) is snapshotted
 * under SYS_ARCH_PROTECT and examined afterwards, so revents is computed
 * from a consistent view of the socket.
 *
 * @param fds array of structures to update
 * @param nfds number of structures in fds
 * @param opts what to update and how (bit flags, see enum lwip_pollscan_opts)
 * @return number of structures that have revents != 0, or -1 on
 *         select_waiting overflow or an invalid fd (POLLNVAL set)
 */
static int
lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts)
{
  int nready = 0;
  nfds_t fdi;
  struct lwip_sock *sock;
  SYS_ARCH_DECL_PROTECT(lev);

  /* Go through each struct pollfd in the array. */
  for (fdi = 0; fdi < nfds; fdi++) {
    if ((opts & LWIP_POLLSCAN_CLEAR) != 0) {
      fds[fdi].revents = 0;
    }

    /* Negative fd means the caller wants us to ignore this struct.
       POLLNVAL means we already detected that the fd is invalid;
       if another thread has since opened a new socket with that fd,
       we must not use that socket. */
    if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) {
      /* First get the socket's status (protected)... */
      SYS_ARCH_PROTECT(lev);
      /* tryget_socket_unconn() takes a reference that must be released
         via done_socket() on every path below */
      sock = tryget_socket_unconn(fds[fdi].fd);
      if (sock != NULL) {
        void* lastdata = sock->lastdata.pbuf;
        s16_t rcvevent = sock->rcvevent;
        u16_t sendevent = sock->sendevent;
        u16_t errevent = sock->errevent;

        if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) {
          sock->select_waiting++;
          if (sock->select_waiting == 0) {
            /* overflow - too many threads waiting */
            sock->select_waiting--;
            done_socket(sock);
            nready = -1;
            SYS_ARCH_UNPROTECT(lev);
            break;
          }
          done_socket(sock);
        } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) {
          /* for now, handle select_waiting==0... */
          LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
          if (sock->select_waiting > 0) {
            sock->select_waiting--;
          }
          done_socket(sock);
        }

        SYS_ARCH_UNPROTECT(lev);

        /* ... then examine it: */
        /* See if netconn of this socket is ready for read */
        if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) {
          fds[fdi].revents |= POLLIN;
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd));
        }
        /* See if netconn of this socket is ready for write */
        if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) {
          fds[fdi].revents |= POLLOUT;
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd));
        }
        /* See if netconn of this socket had an error */
        if (errevent != 0) {
          /* POLLERR is output only. */
          fds[fdi].revents |= POLLERR;
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd));
        }
      } else {
        /* Not a valid socket */
        SYS_ARCH_UNPROTECT(lev);
        /* POLLNVAL is output only. */
        fds[fdi].revents |= POLLNVAL;
        return -1;
      }
    }

    /* Will return the number of structures that have events,
       not the number of events. */
    if (fds[fdi].revents != 0) {
      nready++;
    }
  }

  LWIP_ASSERT("nready >= 0", nready >= 0);
  return nready;
}
2204  
2205 #if LWIP_NETCONN_FULLDUPLEX
2206 /* Mark all sockets as used.
2207 *
2208 * All sockets are marked (and later unmarked), whether they are open or not.
2209 * This is OK as lwip_pollscan aborts select when non-open sockets are found.
2210 */
2211 static void
2212 lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds)
2213 {
2214 nfds_t fdi;
2215 SYS_ARCH_DECL_PROTECT(lev);
2216  
2217 if(fds) {
2218 /* Go through each struct pollfd in the array. */
2219 for (fdi = 0; fdi < nfds; fdi++) {
2220 SYS_ARCH_PROTECT(lev);
2221 /* Increase the reference counter */
2222 tryget_socket_unconn(fds[fdi].fd);
2223 SYS_ARCH_UNPROTECT(lev);
2224 }
2225 }
2226 }
2227  
2228 /* Let go all sockets that were marked as used when starting poll */
2229 static void
2230 lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds)
2231 {
2232 nfds_t fdi;
2233 struct lwip_sock *sock;
2234 SYS_ARCH_DECL_PROTECT(lev);
2235  
2236 if(fds) {
2237 /* Go through each struct pollfd in the array. */
2238 for (fdi = 0; fdi < nfds; fdi++) {
2239 sock = tryget_socket_unconn_nouse(fds[fdi].fd);
2240 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2241 if (sock != NULL) {
2242 done_socket(sock);
2243 }
2244 }
2245 }
2246 }
2247 #else /* LWIP_NETCONN_FULLDUPLEX */
2248 #define lwip_poll_inc_sockets_used(fds, nfds)
2249 #define lwip_poll_dec_sockets_used(fds, nfds)
2250 #endif /* LWIP_NETCONN_FULLDUPLEX */
2251  
2252 int
2253 lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout)
2254 {
2255 u32_t waitres = 0;
2256 int nready;
2257 u32_t msectimeout;
2258 #if LWIP_NETCONN_SEM_PER_THREAD
2259 int waited = 0;
2260 #endif
2261  
2262 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n",
2263 (void*)fds, (int)nfds, timeout));
2264  
2265 lwip_poll_inc_sockets_used(fds, nfds);
2266  
2267 /* Go through each struct pollfd to count number of structures
2268 which currently match */
2269 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR);
2270  
2271 if (nready < 0) {
2272 lwip_poll_dec_sockets_used(fds, nfds);
2273 return -1;
2274 }
2275  
2276 /* If we don't have any current events, then suspend if we are supposed to */
2277 if (!nready) {
2278 API_SELECT_CB_VAR_DECLARE(select_cb);
2279  
2280 if (timeout == 0) {
2281 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n"));
2282 goto return_success;
2283 }
2284 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1);
2285 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2286  
2287 /* None ready: add our semaphore to list:
2288 We don't actually need any dynamic memory. Our entry on the
2289 list is only valid while we are in this function, so it's ok
2290 to use local variables. */
2291  
2292 API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds;
2293 API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds;
2294 #if LWIP_NETCONN_SEM_PER_THREAD
2295 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2296 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2297 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2298 /* failed to create semaphore */
2299 set_errno(EAGAIN);
2300 lwip_poll_dec_sockets_used(fds, nfds);
2301 API_SELECT_CB_VAR_FREE(select_cb);
2302 return -1;
2303 }
2304 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2305  
2306 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2307  
2308 /* Increase select_waiting for each socket we are interested in.
2309 Also, check for events again: there could have been events between
2310 the last scan (without us on the list) and putting us on the list! */
2311 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT);
2312  
2313 if (!nready) {
2314 /* Still none ready, just wait to be woken */
2315 if (timeout < 0) {
2316 /* Wait forever */
2317 msectimeout = 0;
2318 } else {
2319 /* timeout == 0 would have been handled earlier. */
2320 LWIP_ASSERT("timeout > 0", timeout > 0);
2321 msectimeout = timeout;
2322 }
2323 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2324 #if LWIP_NETCONN_SEM_PER_THREAD
2325 waited = 1;
2326 #endif
2327 }
2328  
2329 /* Decrease select_waiting for each socket we are interested in,
2330 and check which events occurred while we waited. */
2331 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT);
2332  
2333 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2334  
2335 #if LWIP_NETCONN_SEM_PER_THREAD
2336 if (select_cb.sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2337 /* don't leave the thread-local semaphore signalled */
2338 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2339 }
2340 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2341 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2342 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2343 API_SELECT_CB_VAR_FREE(select_cb);
2344  
2345 if (nready < 0) {
2346 /* This happens when a socket got closed while waiting */
2347 lwip_poll_dec_sockets_used(fds, nfds);
2348 return -1;
2349 }
2350  
2351 if (waitres == SYS_ARCH_TIMEOUT) {
2352 /* Timeout */
2353 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n"));
2354 goto return_success;
2355 }
2356 }
2357  
2358 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready));
2359 return_success:
2360 lwip_poll_dec_sockets_used(fds, nfds);
2361 set_errno(0);
2362 return nready;
2363 }
2364  
2365 /**
2366 * Check whether event_callback should wake up a thread waiting in
2367 * lwip_poll.
2368 */
2369 static int
2370 lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, struct lwip_sock *sock)
2371 {
2372 nfds_t fdi;
2373 for (fdi = 0; fdi < scb->poll_nfds; fdi++) {
2374 const struct pollfd *pollfd = &scb->poll_fds[fdi];
2375 if (pollfd->fd == fd) {
2376 /* Do not update pollfd->revents right here;
2377 that would be a data race because lwip_pollscan
2378 accesses revents without protecting. */
2379 if (sock->rcvevent > 0 && (pollfd->events & POLLIN) != 0) {
2380 return 1;
2381 }
2382 if (sock->sendevent != 0 && (pollfd->events & POLLOUT) != 0) {
2383 return 1;
2384 }
2385 if (sock->errevent != 0) {
2386 /* POLLERR is output only. */
2387 return 1;
2388 }
2389 }
2390 }
2391 return 0;
2392 }
2393 #endif /* LWIP_SOCKET_POLL */
2394  
2395 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
2396 /**
2397 * Callback registered in the netconn layer for each socket-netconn.
2398 * Processes recvevent (data available) and wakes up tasks waiting for select.
2399 *
2400 * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function
2401 * must have the core lock held when signaling the following events
2402 * as they might cause select_list_cb to be checked:
2403 * NETCONN_EVT_RCVPLUS
2404 * NETCONN_EVT_SENDPLUS
2405 * NETCONN_EVT_ERROR
2406 */
2407 static void
2408 event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
2409 {
2410 int s, check_waiters;
2411 struct lwip_sock *sock;
2412 SYS_ARCH_DECL_PROTECT(lev);
2413  
2414 LWIP_UNUSED_ARG(len);
2415  
2416 /* Get socket */
2417 if (conn) {
2418 s = conn->socket;
2419 if (s < 0) {
2420 /* Data comes in right away after an accept, even though
2421 * the server task might not have created a new socket yet.
2422 * Just count down (or up) if that's the case and we
2423 * will use the data later. Note that only receive events
2424 * can happen before the new socket is set up. */
2425 SYS_ARCH_PROTECT(lev);
2426 if (conn->socket < 0) {
2427 if (evt == NETCONN_EVT_RCVPLUS) {
2428 /* conn->socket is -1 on initialization
2429 lwip_accept adjusts sock->recvevent if conn->socket < -1 */
2430 conn->socket--;
2431 }
2432 SYS_ARCH_UNPROTECT(lev);
2433 return;
2434 }
2435 s = conn->socket;
2436 SYS_ARCH_UNPROTECT(lev);
2437 }
2438  
2439 sock = get_socket(s);
2440 if (!sock) {
2441 return;
2442 }
2443 } else {
2444 return;
2445 }
2446  
2447 check_waiters = 1;
2448 SYS_ARCH_PROTECT(lev);
2449 /* Set event as required */
2450 switch (evt) {
2451 case NETCONN_EVT_RCVPLUS:
2452 sock->rcvevent++;
2453 if (sock->rcvevent > 1) {
2454 check_waiters = 0;
2455 }
2456 break;
2457 case NETCONN_EVT_RCVMINUS:
2458 sock->rcvevent--;
2459 check_waiters = 0;
2460 case NETCONN_EVT_SENDPLUS:
2461 if (sock->sendevent) {
2462 check_waiters = 0;
2463 }
2464 sock->sendevent = 1;
2465 break;
2466 case NETCONN_EVT_SENDMINUS:
2467 sock->sendevent = 0;
2468 check_waiters = 0;
2469 case NETCONN_EVT_ERROR:
2470 sock->errevent = 1;
2471 break;
2472 default:
2473 LWIP_ASSERT("unknown event", 0);
2474 break;
2475 }
2476  
2477 if (sock->select_waiting && check_waiters) {
2478 /* Save which events are active */
2479 int has_recvevent, has_sendevent, has_errevent;
2480 has_recvevent = sock->rcvevent > 0;
2481 has_sendevent = sock->sendevent != 0;
2482 has_errevent = sock->errevent != 0;
2483 SYS_ARCH_UNPROTECT(lev);
2484 /* Check any select calls waiting on this socket */
2485 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent, sock);
2486 } else {
2487 SYS_ARCH_UNPROTECT(lev);
2488 }
2489 done_socket(sock);
2490 }
2491  
/**
 * Check if any select/poll waiters are waiting on this socket and its events.
 *
 * @note on synchronization of select_cb_list:
 * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while
 * holding the core lock. We do a single pass through the list and signal any
 * waiters. The core lock must already be held when calling here!
 *
 * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each
 * iteration of the loop, thus creating a window where another thread could
 * modify the select_cb_list during our UNPROTECT/PROTECT. We use a
 * generational counter (select_cb_ctr) to detect such a change and restart
 * the list walk. The list is expected to be small.
 *
 * @param s             fd the event occurred on
 * @param has_recvevent 1 if the socket currently has a receive event pending
 * @param has_sendevent 1 if the socket is currently writable
 * @param has_errevent  1 if the socket currently has an error pending
 * @param sock          the socket itself (used by the poll path only)
 */
static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent, struct lwip_sock *sock)
{
  struct lwip_select_cb *scb;
#if !LWIP_TCPIP_CORE_LOCKING
  int last_select_cb_ctr;
  SYS_ARCH_DECL_PROTECT(lev);
#endif

#if !LWIP_TCPIP_CORE_LOCKING
  SYS_ARCH_PROTECT(lev);
again:
  /* remember the state of select_cb_list to detect changes */
  last_select_cb_ctr = select_cb_ctr;
#endif
  for (scb = select_cb_list; scb != NULL; scb = scb->next) {
    if (scb->sem_signalled == 0) {
      /* semaphore not signalled yet */
      int do_signal = 0;
#if LWIP_SOCKET_POLL
      /* a non-NULL poll_fds marks this entry as a poll() waiter */
      if (scb->poll_fds != NULL) {
        LWIP_UNUSED_ARG(has_recvevent);
        LWIP_UNUSED_ARG(has_sendevent);
        LWIP_UNUSED_ARG(has_errevent);
        do_signal = lwip_poll_should_wake(scb, s, sock);
      }
#endif /* LWIP_SOCKET_POLL */
#if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL
      else
#endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */
#if LWIP_SOCKET_SELECT
      {
        LWIP_UNUSED_ARG(sock);
        /* Test this select call for our socket */
        if (has_recvevent) {
          if (scb->readset && FD_ISSET(s, scb->readset)) {
            do_signal = 1;
          }
        }
        if (has_sendevent) {
          if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
            do_signal = 1;
          }
        }
        if (has_errevent) {
          if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
            do_signal = 1;
          }
        }
      }
#endif /* LWIP_SOCKET_SELECT */
      if (do_signal) {
        scb->sem_signalled = 1;
        /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling
           the semaphore, as this might lead to the select thread taking itself off the list,
           invalidating the semaphore. */
        sys_sem_signal(SELECT_SEM_PTR(scb->sem));
      }
    }
#if LWIP_TCPIP_CORE_LOCKING
  }
#else
    /* unlock interrupts with each step */
    SYS_ARCH_UNPROTECT(lev);
    /* this makes sure interrupt protection time is short */
    SYS_ARCH_PROTECT(lev);
    if (last_select_cb_ctr != select_cb_ctr) {
      /* someone has changed select_cb_list, restart at the beginning */
      goto again;
    }
    /* remember the state of select_cb_list to detect changes */
    last_select_cb_ctr = select_cb_ctr;
  }
  SYS_ARCH_UNPROTECT(lev);
#endif
}
2580 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
2581  
2582 /**
2583 * Close one end of a full-duplex connection.
2584 */
2585 int
2586 lwip_shutdown(int s, int how)
2587 {
2588 struct lwip_sock *sock;
2589 err_t err;
2590 u8_t shut_rx = 0, shut_tx = 0;
2591  
2592 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
2593  
2594 sock = get_socket(s);
2595 if (!sock) {
2596 return -1;
2597 }
2598  
2599 if (sock->conn != NULL) {
2600 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
2601 sock_set_errno(sock, EOPNOTSUPP);
2602 done_socket(sock);
2603 return -1;
2604 }
2605 } else {
2606 sock_set_errno(sock, ENOTCONN);
2607 done_socket(sock);
2608 return -1;
2609 }
2610  
2611 if (how == SHUT_RD) {
2612 shut_rx = 1;
2613 } else if (how == SHUT_WR) {
2614 shut_tx = 1;
2615 } else if (how == SHUT_RDWR) {
2616 shut_rx = 1;
2617 shut_tx = 1;
2618 } else {
2619 sock_set_errno(sock, EINVAL);
2620 done_socket(sock);
2621 return -1;
2622 }
2623 err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
2624  
2625 sock_set_errno(sock, err_to_errno(err));
2626 done_socket(sock);
2627 return (err == ERR_OK ? 0 : -1);
2628 }
2629  
2630 static int
2631 lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
2632 {
2633 struct lwip_sock *sock;
2634 union sockaddr_aligned saddr;
2635 ip_addr_t naddr;
2636 u16_t port;
2637 err_t err;
2638  
2639 sock = get_socket(s);
2640 if (!sock) {
2641 return -1;
2642 }
2643  
2644 /* get the IP address and port */
2645 err = netconn_getaddr(sock->conn, &naddr, &port, local);
2646 if (err != ERR_OK) {
2647 sock_set_errno(sock, err_to_errno(err));
2648 done_socket(sock);
2649 return -1;
2650 }
2651  
2652 #if LWIP_IPV4 && LWIP_IPV6
2653 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
2654 if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
2655 IP_IS_V4_VAL(naddr)) {
2656 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
2657 IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
2658 }
2659 #endif /* LWIP_IPV4 && LWIP_IPV6 */
2660  
2661 IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
2662  
2663 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
2664 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
2665 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
2666  
2667 if (*namelen > saddr.sa.sa_len) {
2668 *namelen = saddr.sa.sa_len;
2669 }
2670 MEMCPY(name, &saddr, *namelen);
2671  
2672 sock_set_errno(sock, 0);
2673 done_socket(sock);
2674 return 0;
2675 }
2676  
/** BSD getpeername(): return the remote address of a connected socket.
 * Thin wrapper around lwip_getaddrname() with local == 0. */
int
lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
{
  return lwip_getaddrname(s, name, namelen, 0);
}
2682  
/** BSD getsockname(): return the local address bound to a socket.
 * Thin wrapper around lwip_getaddrname() with local == 1. */
int
lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
{
  return lwip_getaddrname(s, name, namelen, 1);
}
2688  
/**
 * BSD getsockopt() for lwIP sockets.
 *
 * With LWIP_TCPIP_CORE_LOCKING, the implementation function is called
 * directly under the core lock. Without core locking, the request is
 * marshalled into a struct lwip_setgetsockopt_data, executed in the
 * tcpip_thread via tcpip_callback() and waited for on a semaphore.
 *
 * @param s       the socket to query
 * @param level   protocol level (e.g. SOL_SOCKET)
 * @param optname option to query
 * @param optval  output buffer for the option value (must not be NULL)
 * @param optlen  in: size of optval; out: size written (must not be NULL)
 * @return 0 on success, -1 on error (errno set)
 */
int
lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
{
  int err;
  struct lwip_sock *sock = get_socket(s);
#if !LWIP_TCPIP_CORE_LOCKING
  err_t cberr;
  LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
#endif /* !LWIP_TCPIP_CORE_LOCKING */

  if (!sock) {
    return -1;
  }

  if ((NULL == optval) || (NULL == optlen)) {
    sock_set_errno(sock, EFAULT);
    done_socket(sock);
    return -1;
  }

#if LWIP_TCPIP_CORE_LOCKING
  /* core-locking can just call the -impl function */
  LOCK_TCPIP_CORE();
  err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
  UNLOCK_TCPIP_CORE();

#else /* LWIP_TCPIP_CORE_LOCKING */

#if LWIP_MPU_COMPATIBLE
  /* MPU_COMPATIBLE copies the optval data, so check for max size here */
  if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
    sock_set_errno(sock, ENOBUFS);
    done_socket(sock);
    return -1;
  }
#endif /* LWIP_MPU_COMPATIBLE */

  /* marshal the request for execution in tcpip_thread */
  LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
#if !LWIP_MPU_COMPATIBLE
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
#endif /* !LWIP_MPU_COMPATIBLE */
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
#if LWIP_NETCONN_SEM_PER_THREAD
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
#else
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
#endif
  cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
  if (cberr != ERR_OK) {
    LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
    sock_set_errno(sock, err_to_errno(cberr));
    done_socket(sock);
    return -1;
  }
  /* block until lwip_getsockopt_callback() signals completion */
  sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);

  /* write back optlen and optval */
  *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
#if LWIP_MPU_COMPATIBLE
  MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
         LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
#endif /* LWIP_MPU_COMPATIBLE */

  /* maybe lwip_getsockopt_internal has changed err */
  err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
  LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
#endif /* LWIP_TCPIP_CORE_LOCKING */

  sock_set_errno(sock, err);
  done_socket(sock);
  return err ? -1 : 0;
}
2765  
2766 #if !LWIP_TCPIP_CORE_LOCKING
/** lwip_getsockopt_callback: only used without CORE_LOCKING
 * to get into the tcpip_thread.
 *
 * Runs lwip_getsockopt_impl() in tcpip_thread context on the marshalled
 * request, stores the result in data->err and wakes the calling thread
 * by signalling data->completed_sem.
 *
 * @param arg a struct lwip_setgetsockopt_data* prepared by lwip_getsockopt()
 */
static void
lwip_getsockopt_callback(void *arg)
{
  struct lwip_setgetsockopt_data *data;
  LWIP_ASSERT("arg != NULL", arg != NULL);
  data = (struct lwip_setgetsockopt_data *)arg;

  data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
#if LWIP_MPU_COMPATIBLE
                                   data->optval,
#else /* LWIP_MPU_COMPATIBLE */
                                   data->optval.p,
#endif /* LWIP_MPU_COMPATIBLE */
                                   &data->optlen);

  /* unblock the thread waiting in lwip_getsockopt() */
  sys_sem_signal((sys_sem_t *)(data->completed_sem));
}
2788  
2789 /** lwip_getsockopt_impl: the actual implementation of getsockopt:
2790 * same argument as lwip_getsockopt, either called directly or through callback
2791 */
2792 static int
2793 lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
2794 {
2795 int err = 0;
2796 struct lwip_sock *sock = tryget_socket(s);
2797 if (!sock) {
2798 return EBADF;
2799 }
2800  
2801 switch (level) {
2802  
2803 /* Level: SOL_SOCKET */
2804 case SOL_SOCKET:
2805 switch (optname) {
2806  
2807 #if LWIP_TCP
2808 case SO_ACCEPTCONN:
2809 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2810 if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
2811 done_socket(sock);
2812 return ENOPROTOOPT;
2813 }
2814 if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
2815 *(int *)optval = 1;
2816 } else {
2817 *(int *)optval = 0;
2818 }
2819 break;
2820 #endif /* LWIP_TCP */
2821  
2822 /* The option flags */
2823 case SO_BROADCAST:
2824 case SO_KEEPALIVE:
2825 #if SO_REUSE
2826 case SO_REUSEADDR:
2827 #endif /* SO_REUSE */
2828 if ((optname == SO_BROADCAST) &&
2829 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
2830 done_socket(sock);
2831 return ENOPROTOOPT;
2832 }
2833 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2834 *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname);
2835 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
2836 s, optname, (*(int *)optval ? "on" : "off")));
2837 break;
2838  
2839 case SO_TYPE:
2840 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
2841 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
2842 case NETCONN_RAW:
2843 *(int *)optval = SOCK_RAW;
2844 break;
2845 case NETCONN_TCP:
2846 *(int *)optval = SOCK_STREAM;
2847 break;
2848 case NETCONN_UDP:
2849 *(int *)optval = SOCK_DGRAM;
2850 break;
2851 default: /* unrecognized socket type */
2852 *(int *)optval = netconn_type(sock->conn);
2853 LWIP_DEBUGF(SOCKETS_DEBUG,
2854 ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
2855 s, *(int *)optval));
2856 } /* switch (netconn_type(sock->conn)) */
2857 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
2858 s, *(int *)optval));
2859 break;
2860  
2861 case SO_ERROR:
2862 LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int);
2863 *(int *)optval = err_to_errno(netconn_err(sock->conn));
2864 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
2865 s, *(int *)optval));
2866 break;
2867  
2868 #if LWIP_SO_SNDTIMEO
2869 case SO_SNDTIMEO:
2870 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
2871 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
2872 break;
2873 #endif /* LWIP_SO_SNDTIMEO */
2874 #if LWIP_SO_RCVTIMEO
2875 case SO_RCVTIMEO:
2876 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
2877 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
2878 break;
2879 #endif /* LWIP_SO_RCVTIMEO */
2880 #if LWIP_SO_RCVBUF
2881 case SO_RCVBUF:
2882 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
2883 *(int *)optval = netconn_get_recvbufsize(sock->conn);
2884 break;
2885 #endif /* LWIP_SO_RCVBUF */
2886 #if LWIP_SO_LINGER
2887 case SO_LINGER: {
2888 s16_t conn_linger;
2889 struct linger *linger = (struct linger *)optval;
2890 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
2891 conn_linger = sock->conn->linger;
2892 if (conn_linger >= 0) {
2893 linger->l_onoff = 1;
2894 linger->l_linger = (int)conn_linger;
2895 } else {
2896 linger->l_onoff = 0;
2897 linger->l_linger = 0;
2898 }
2899 }
2900 break;
2901 #endif /* LWIP_SO_LINGER */
2902 #if LWIP_UDP
2903 case SO_NO_CHECK:
2904 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
2905 #if LWIP_UDPLITE
2906 if ((udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_UDPLITE) != 0) {
2907 /* this flag is only available for UDP, not for UDP lite */
2908 done_socket(sock);
2909 return EAFNOSUPPORT;
2910 }
2911 #endif /* LWIP_UDPLITE */
2912 *(int *)optval = (udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_NOCHKSUM) ? 1 : 0;
2913 break;
2914 #endif /* LWIP_UDP*/
2915 default:
2916 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
2917 s, optname));
2918 err = ENOPROTOOPT;
2919 break;
2920 } /* switch (optname) */
2921 break;
2922  
2923 /* Level: IPPROTO_IP */
2924 case IPPROTO_IP:
2925 switch (optname) {
2926 case IP_TTL:
2927 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2928 *(int *)optval = sock->conn->pcb.ip->ttl;
2929 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
2930 s, *(int *)optval));
2931 break;
2932 case IP_TOS:
2933 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2934 *(int *)optval = sock->conn->pcb.ip->tos;
2935 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
2936 s, *(int *)optval));
2937 break;
2938 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
2939 case IP_MULTICAST_TTL:
2940 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
2941 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
2942 done_socket(sock);
2943 return ENOPROTOOPT;
2944 }
2945 *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
2946 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
2947 s, *(int *)optval));
2948 break;
2949 case IP_MULTICAST_IF:
2950 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
2951 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
2952 done_socket(sock);
2953 return ENOPROTOOPT;
2954 }
2955 inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
2956 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
2957 s, *(u32_t *)optval));
2958 break;
2959 case IP_MULTICAST_LOOP:
2960 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
2961 if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
2962 *(u8_t *)optval = 1;
2963 } else {
2964 *(u8_t *)optval = 0;
2965 }
2966 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
2967 s, *(int *)optval));
2968 break;
2969 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
2970 default:
2971 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
2972 s, optname));
2973 err = ENOPROTOOPT;
2974 break;
2975 } /* switch (optname) */
2976 break;
2977  
2978 #if LWIP_TCP
2979 /* Level: IPPROTO_TCP */
2980 case IPPROTO_TCP:
2981 /* Special case: all IPPROTO_TCP option take an int */
2982 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
2983 if (sock->conn->pcb.tcp->state == LISTEN) {
2984 done_socket(sock);
2985 return EINVAL;
2986 }
2987 switch (optname) {
2988 case TCP_NODELAY:
2989 *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
2990 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
2991 s, (*(int *)optval) ? "on" : "off") );
2992 break;
2993 case TCP_KEEPALIVE:
2994 *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle;
2995 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
2996 s, *(int *)optval));
2997 break;
2998  
2999 #if LWIP_TCP_KEEPALIVE
3000 case TCP_KEEPIDLE:
3001 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000);
3002 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
3003 s, *(int *)optval));
3004 break;
3005 case TCP_KEEPINTVL:
3006 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000);
3007 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
3008 s, *(int *)optval));
3009 break;
3010 case TCP_KEEPCNT:
3011 *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt;
3012 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
3013 s, *(int *)optval));
3014 break;
3015 #endif /* LWIP_TCP_KEEPALIVE */
3016 default:
3017 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3018 s, optname));
3019 err = ENOPROTOOPT;
3020 break;
3021 } /* switch (optname) */
3022 break;
3023 #endif /* LWIP_TCP */
3024  
3025 #if LWIP_IPV6
3026 /* Level: IPPROTO_IPV6 */
3027 case IPPROTO_IPV6:
3028 switch (optname) {
3029 case IPV6_V6ONLY:
3030 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3031 *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
3032 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
3033 s, *(int *)optval));
3034 break;
3035 default:
3036 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3037 s, optname));
3038 err = ENOPROTOOPT;
3039 break;
3040 } /* switch (optname) */
3041 break;
3042 #endif /* LWIP_IPV6 */
3043  
3044 #if LWIP_UDP && LWIP_UDPLITE
3045 /* Level: IPPROTO_UDPLITE */
3046 case IPPROTO_UDPLITE:
3047 /* Special case: all IPPROTO_UDPLITE option take an int */
3048 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3049 /* If this is no UDP lite socket, ignore any options. */
3050 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3051 done_socket(sock);
3052 return ENOPROTOOPT;
3053 }
3054 switch (optname) {
3055 case UDPLITE_SEND_CSCOV:
3056 *(int *)optval = sock->conn->pcb.udp->chksum_len_tx;
3057 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
3058 s, (*(int *)optval)) );
3059 break;
3060 case UDPLITE_RECV_CSCOV:
3061 *(int *)optval = sock->conn->pcb.udp->chksum_len_rx;
3062 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
3063 s, (*(int *)optval)) );
3064 break;
3065 default:
3066 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3067 s, optname));
3068 err = ENOPROTOOPT;
3069 break;
3070 } /* switch (optname) */
3071 break;
3072 #endif /* LWIP_UDP */
3073 /* Level: IPPROTO_RAW */
3074 case IPPROTO_RAW:
3075 switch (optname) {
3076 #if LWIP_IPV6 && LWIP_RAW
3077 case IPV6_CHECKSUM:
3078 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3079 if (sock->conn->pcb.raw->chksum_reqd == 0) {
3080 *(int *)optval = -1;
3081 } else {
3082 *(int *)optval = sock->conn->pcb.raw->chksum_offset;
3083 }
3084 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
3085 s, (*(int *)optval)) );
3086 break;
3087 #endif /* LWIP_IPV6 && LWIP_RAW */
3088 default:
3089 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3090 s, optname));
3091 err = ENOPROTOOPT;
3092 break;
3093 } /* switch (optname) */
3094 break;
3095 default:
3096 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3097 s, level, optname));
3098 err = ENOPROTOOPT;
3099 break;
3100 } /* switch (level) */
3101  
3102 done_socket(sock);
3103 return err;
3104 }
3105  
/**
 * Set a socket option (BSD setsockopt()).
 *
 * With LWIP_TCPIP_CORE_LOCKING the implementation is called directly under
 * the core lock; otherwise the request is marshalled into a
 * lwip_setgetsockopt_data record and executed in tcpip_thread via
 * tcpip_callback(), with this thread blocking on a semaphore until done.
 *
 * @return 0 on success, -1 on error (errno set on the socket)
 */
int
lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
{
  int err = 0;
  struct lwip_sock *sock = get_socket(s);
#if !LWIP_TCPIP_CORE_LOCKING
  err_t cberr;
  LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
#endif /* !LWIP_TCPIP_CORE_LOCKING */

  if (!sock) {
    return -1;
  }

  if (NULL == optval) {
    sock_set_errno(sock, EFAULT);
    done_socket(sock);
    return -1;
  }

#if LWIP_TCPIP_CORE_LOCKING
  /* core-locking can just call the -impl function */
  LOCK_TCPIP_CORE();
  err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
  UNLOCK_TCPIP_CORE();

#else /* LWIP_TCPIP_CORE_LOCKING */

#if LWIP_MPU_COMPATIBLE
  /* MPU_COMPATIBLE copies the optval data, so check for max size here */
  if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
    sock_set_errno(sock, ENOBUFS);
    done_socket(sock);
    return -1;
  }
#endif /* LWIP_MPU_COMPATIBLE */

  /* marshal the request so it can be executed in tcpip_thread context */
  LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
#if LWIP_MPU_COMPATIBLE
  MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
#else /* LWIP_MPU_COMPATIBLE */
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval;
#endif /* LWIP_MPU_COMPATIBLE */
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
#if LWIP_NETCONN_SEM_PER_THREAD
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
#else
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
#endif
  cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
  if (cberr != ERR_OK) {
    LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
    sock_set_errno(sock, err_to_errno(cberr));
    done_socket(sock);
    return -1;
  }
  /* block until lwip_setsockopt_callback has run in tcpip_thread */
  sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);

  /* the callback (lwip_setsockopt_impl) may have set data.err */
  err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
  LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
#endif /* LWIP_TCPIP_CORE_LOCKING */

  sock_set_errno(sock, err);
  done_socket(sock);
  return err ? -1 : 0;
}
3177  
3178 #if !LWIP_TCPIP_CORE_LOCKING
3179 /** lwip_setsockopt_callback: only used without CORE_LOCKING
3180 * to get into the tcpip_thread
3181 */
3182 static void
3183 lwip_setsockopt_callback(void *arg)
3184 {
3185 struct lwip_setgetsockopt_data *data;
3186 LWIP_ASSERT("arg != NULL", arg != NULL);
3187 data = (struct lwip_setgetsockopt_data *)arg;
3188  
3189 data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
3190 #if LWIP_MPU_COMPATIBLE
3191 data->optval,
3192 #else /* LWIP_MPU_COMPATIBLE */
3193 data->optval.pc,
3194 #endif /* LWIP_MPU_COMPATIBLE */
3195 data->optlen);
3196  
3197 sys_sem_signal((sys_sem_t *)(data->completed_sem));
3198 }
3199 #endif /* LWIP_TCPIP_CORE_LOCKING */
3200  
/** lwip_setsockopt_impl: the actual implementation of setsockopt:
 * same argument as lwip_setsockopt, either called directly or through callback.
 *
 * Runs in tcpip_thread context (or with the core locked), so it may touch
 * pcb state directly. Returns 0 on success or a positive errno value.
 * NOTE: the LWIP_SOCKOPT_CHECK_* macros can return early with an errno when
 * optlen is too small or conn/pcb is missing (they release the socket first).
 */
static int
lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
{
  int err = 0;
  struct lwip_sock *sock = tryget_socket(s);
  if (!sock) {
    return EBADF;
  }

  switch (level) {

    /* Level: SOL_SOCKET */
    case SOL_SOCKET:
      switch (optname) {

        /* SO_ACCEPTCONN is get-only */

        /* The option flags */
        case SO_BROADCAST:
        case SO_KEEPALIVE:
#if SO_REUSE
        case SO_REUSEADDR:
#endif /* SO_REUSE */
          /* SO_BROADCAST is only meaningful for UDP sockets */
          if ((optname == SO_BROADCAST) &&
              (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
            done_socket(sock);
            return ENOPROTOOPT;
          }
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
          if (*(const int *)optval) {
            ip_set_option(sock->conn->pcb.ip, optname);
          } else {
            ip_reset_option(sock->conn->pcb.ip, optname);
          }
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
                                      s, optname, (*(const int *)optval ? "on" : "off")));
          break;

        /* SO_TYPE is get-only */
        /* SO_ERROR is get-only */

#if LWIP_SO_SNDTIMEO
        case SO_SNDTIMEO: {
          long ms_long;
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
          ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
          if (ms_long < 0) {
            done_socket(sock);
            return EINVAL;
          }
          netconn_set_sendtimeout(sock->conn, ms_long);
          break;
        }
#endif /* LWIP_SO_SNDTIMEO */
#if LWIP_SO_RCVTIMEO
        case SO_RCVTIMEO: {
          long ms_long;
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
          ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
          if (ms_long < 0) {
            done_socket(sock);
            return EINVAL;
          }
          netconn_set_recvtimeout(sock->conn, (u32_t)ms_long);
          break;
        }
#endif /* LWIP_SO_RCVTIMEO */
#if LWIP_SO_RCVBUF
        case SO_RCVBUF:
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
          netconn_set_recvbufsize(sock->conn, *(const int *)optval);
          break;
#endif /* LWIP_SO_RCVBUF */
#if LWIP_SO_LINGER
        case SO_LINGER: {
          const struct linger *linger = (const struct linger *)optval;
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
          if (linger->l_onoff) {
            int lingersec = linger->l_linger;
            if (lingersec < 0) {
              done_socket(sock);
              return EINVAL;
            }
            /* conn->linger is an s16_t, so clamp the seconds value */
            if (lingersec > 0xFFFF) {
              lingersec = 0xFFFF;
            }
            sock->conn->linger = (s16_t)lingersec;
          } else {
            /* -1 disables lingering */
            sock->conn->linger = -1;
          }
        }
        break;
#endif /* LWIP_SO_LINGER */
#if LWIP_UDP
        case SO_NO_CHECK:
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
#if LWIP_UDPLITE
          if ((udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_UDPLITE) != 0) {
            /* this flag is only available for UDP, not for UDP lite */
            done_socket(sock);
            return EAFNOSUPPORT;
          }
#endif /* LWIP_UDPLITE */
          if (*(const int *)optval) {
            udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
          } else {
            udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
          }
          break;
#endif /* LWIP_UDP */
        case SO_BINDTODEVICE: {
          const struct ifreq *iface;
          struct netif *n = NULL;

          LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq);

          /* empty ifr_name unbinds the socket (n stays NULL) */
          iface = (const struct ifreq *)optval;
          if (iface->ifr_name[0] != 0) {
            n = netif_find(iface->ifr_name);
            if (n == NULL) {
              done_socket(sock);
              return ENODEV;
            }
          }

          switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
#if LWIP_TCP
            case NETCONN_TCP:
              tcp_bind_netif(sock->conn->pcb.tcp, n);
              break;
#endif
#if LWIP_UDP
            case NETCONN_UDP:
              udp_bind_netif(sock->conn->pcb.udp, n);
              break;
#endif
#if LWIP_RAW
            case NETCONN_RAW:
              raw_bind_netif(sock->conn->pcb.raw, n);
              break;
#endif
            default:
              LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0);
              break;
          }
        }
        break;
        default:
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
                                      s, optname));
          err = ENOPROTOOPT;
          break;
      } /* switch (optname) */
      break;

    /* Level: IPPROTO_IP */
    case IPPROTO_IP:
      switch (optname) {
        case IP_TTL:
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
          sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval);
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
                                      s, sock->conn->pcb.ip->ttl));
          break;
        case IP_TOS:
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
          sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval);
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n",
                                      s, sock->conn->pcb.ip->tos));
          break;
#if LWIP_NETBUF_RECVINFO
        case IP_PKTINFO:
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
          if (*(const int *)optval) {
            sock->conn->flags |= NETCONN_FLAG_PKTINFO;
          } else {
            sock->conn->flags &= ~NETCONN_FLAG_PKTINFO;
          }
          break;
#endif /* LWIP_NETBUF_RECVINFO */
#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
        case IP_MULTICAST_TTL:
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
          udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval));
          break;
        case IP_MULTICAST_IF: {
          ip4_addr_t if_addr;
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
          inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval);
          udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
        }
        break;
        case IP_MULTICAST_LOOP:
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
          if (*(const u8_t *)optval) {
            udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
          } else {
            udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
          }
          break;
#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
#if LWIP_IGMP
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP: {
          /* If this is a TCP or a RAW socket, ignore these options. */
          err_t igmp_err;
          const struct ip_mreq *imr = (const struct ip_mreq *)optval;
          ip4_addr_t if_addr;
          ip4_addr_t multi_addr;
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
          inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
          inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
          if (optname == IP_ADD_MEMBERSHIP) {
            /* membership must be tracked per socket so it can be dropped on close */
            if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
              /* cannot track membership (out of memory) */
              err = ENOMEM;
              igmp_err = ERR_OK;
            } else {
              igmp_err = igmp_joingroup(&if_addr, &multi_addr);
            }
          } else {
            igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
            lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
          }
          if (igmp_err != ERR_OK) {
            err = EADDRNOTAVAIL;
          }
        }
        break;
#endif /* LWIP_IGMP */
        default:
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
                                      s, optname));
          err = ENOPROTOOPT;
          break;
      } /* switch (optname) */
      break;

#if LWIP_TCP
    /* Level: IPPROTO_TCP */
    case IPPROTO_TCP:
      /* Special case: all IPPROTO_TCP option take an int */
      LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
      if (sock->conn->pcb.tcp->state == LISTEN) {
        done_socket(sock);
        return EINVAL;
      }
      switch (optname) {
        case TCP_NODELAY:
          if (*(const int *)optval) {
            tcp_nagle_disable(sock->conn->pcb.tcp);
          } else {
            tcp_nagle_enable(sock->conn->pcb.tcp);
          }
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
                                      s, (*(const int *)optval) ? "on" : "off") );
          break;
        case TCP_KEEPALIVE:
          /* non-standard option: value is in milliseconds */
          sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval);
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
                                      s, sock->conn->pcb.tcp->keep_idle));
          break;

#if LWIP_TCP_KEEPALIVE
        case TCP_KEEPIDLE:
          /* POSIX-style options are in seconds; pcb stores milliseconds */
          sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval);
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
                                      s, sock->conn->pcb.tcp->keep_idle));
          break;
        case TCP_KEEPINTVL:
          sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval);
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
                                      s, sock->conn->pcb.tcp->keep_intvl));
          break;
        case TCP_KEEPCNT:
          sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval);
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
                                      s, sock->conn->pcb.tcp->keep_cnt));
          break;
#endif /* LWIP_TCP_KEEPALIVE */
        default:
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
                                      s, optname));
          err = ENOPROTOOPT;
          break;
      } /* switch (optname) */
      break;
#endif /* LWIP_TCP*/

#if LWIP_IPV6
    /* Level: IPPROTO_IPV6 */
    case IPPROTO_IPV6:
      switch (optname) {
        case IPV6_V6ONLY:
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
          if (*(const int *)optval) {
            netconn_set_ipv6only(sock->conn, 1);
          } else {
            netconn_set_ipv6only(sock->conn, 0);
          }
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
                                      s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
          break;
#if LWIP_IPV6_MLD
        case IPV6_JOIN_GROUP:
        case IPV6_LEAVE_GROUP: {
          /* If this is a TCP or a RAW socket, ignore these options. */
          err_t mld6_err;
          struct netif *netif;
          ip6_addr_t multi_addr;
          const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval;
          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP);
          inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr);
          LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu);
          netif = netif_get_by_index((u8_t)imr->ipv6mr_interface);
          if (netif == NULL) {
            err = EADDRNOTAVAIL;
            break;
          }

          if (optname == IPV6_JOIN_GROUP) {
            /* membership must be tracked per socket so it can be dropped on close */
            if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) {
              /* cannot track membership (out of memory) */
              err = ENOMEM;
              mld6_err = ERR_OK;
            } else {
              mld6_err = mld6_joingroup_netif(netif, &multi_addr);
            }
          } else {
            mld6_err = mld6_leavegroup_netif(netif, &multi_addr);
            lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr);
          }
          if (mld6_err != ERR_OK) {
            err = EADDRNOTAVAIL;
          }
        }
        break;
#endif /* LWIP_IPV6_MLD */
        default:
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
                                      s, optname));
          err = ENOPROTOOPT;
          break;
      } /* switch (optname) */
      break;
#endif /* LWIP_IPV6 */

#if LWIP_UDP && LWIP_UDPLITE
    /* Level: IPPROTO_UDPLITE */
    case IPPROTO_UDPLITE:
      /* Special case: all IPPROTO_UDPLITE option take an int */
      LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
      /* If this is no UDP lite socket, ignore any options. */
      if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
        done_socket(sock);
        return ENOPROTOOPT;
      }
      switch (optname) {
        case UDPLITE_SEND_CSCOV:
          /* valid coverage values are 0 (= full coverage) or 8..0xffff */
          if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
            /* don't allow illegal values! */
            sock->conn->pcb.udp->chksum_len_tx = 8;
          } else {
            sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval;
          }
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
                                      s, (*(const int *)optval)) );
          break;
        case UDPLITE_RECV_CSCOV:
          if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
            /* don't allow illegal values! */
            sock->conn->pcb.udp->chksum_len_rx = 8;
          } else {
            sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval;
          }
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
                                      s, (*(const int *)optval)) );
          break;
        default:
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
                                      s, optname));
          err = ENOPROTOOPT;
          break;
      } /* switch (optname) */
      break;
#endif /* LWIP_UDP */
    /* Level: IPPROTO_RAW */
    case IPPROTO_RAW:
      switch (optname) {
#if LWIP_IPV6 && LWIP_RAW
        case IPV6_CHECKSUM:
          /* It should not be possible to disable the checksum generation with ICMPv6
           * as per RFC 3542 chapter 3.1 */
          if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
            done_socket(sock);
            return EINVAL;
          }

          LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
          if (*(const int *)optval < 0) {
            /* negative offset disables checksum generation/checking */
            sock->conn->pcb.raw->chksum_reqd = 0;
          } else if (*(const int *)optval & 1) {
            /* Per RFC3542, odd offsets are not allowed */
            done_socket(sock);
            return EINVAL;
          } else {
            sock->conn->pcb.raw->chksum_reqd = 1;
            sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval;
          }
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
                                      s, sock->conn->pcb.raw->chksum_reqd));
          break;
#endif /* LWIP_IPV6 && LWIP_RAW */
        default:
          LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
                                      s, optname));
          err = ENOPROTOOPT;
          break;
      } /* switch (optname) */
      break;
    default:
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
                                  s, level, optname));
      err = ENOPROTOOPT;
      break;
  } /* switch (level) */

  done_socket(sock);
  return err;
}
3634  
3635 int
3636 lwip_ioctl(int s, long cmd, void *argp)
3637 {
3638 struct lwip_sock *sock = get_socket(s);
3639 u8_t val;
3640 #if LWIP_SO_RCVBUF
3641 int recv_avail;
3642 #endif /* LWIP_SO_RCVBUF */
3643  
3644 if (!sock) {
3645 return -1;
3646 }
3647  
3648 switch (cmd) {
3649 #if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
3650 case FIONREAD:
3651 if (!argp) {
3652 sock_set_errno(sock, EINVAL);
3653 done_socket(sock);
3654 return -1;
3655 }
3656 #if LWIP_FIONREAD_LINUXMODE
3657 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
3658 struct netbuf *nb;
3659 if (sock->lastdata.netbuf) {
3660 nb = sock->lastdata.netbuf;
3661 *((int *)argp) = nb->p->tot_len;
3662 } else {
3663 struct netbuf *rxbuf;
3664 err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK);
3665 if (err != ERR_OK) {
3666 *((int *)argp) = 0;
3667 } else {
3668 sock->lastdata.netbuf = rxbuf;
3669 *((int *)argp) = rxbuf->p->tot_len;
3670 }
3671 }
3672 done_socket(sock);
3673 return 0;
3674 }
3675 #endif /* LWIP_FIONREAD_LINUXMODE */
3676  
3677 #if LWIP_SO_RCVBUF
3678 /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
3679 SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
3680 if (recv_avail < 0) {
3681 recv_avail = 0;
3682 }
3683  
3684 /* Check if there is data left from the last recv operation. /maq 041215 */
3685 if (sock->lastdata.netbuf) {
3686 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
3687 recv_avail += sock->lastdata.pbuf->tot_len;
3688 } else {
3689 recv_avail += sock->lastdata.netbuf->p->tot_len;
3690 }
3691 }
3692 *((int *)argp) = recv_avail;
3693  
3694 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp)));
3695 sock_set_errno(sock, 0);
3696 done_socket(sock);
3697 return 0;
3698 #else /* LWIP_SO_RCVBUF */
3699 break;
3700 #endif /* LWIP_SO_RCVBUF */
3701 #endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */
3702  
3703 case (long)FIONBIO:
3704 val = 0;
3705 if (argp && *(int *)argp) {
3706 val = 1;
3707 }
3708 netconn_set_nonblocking(sock->conn, val);
3709 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
3710 sock_set_errno(sock, 0);
3711 done_socket(sock);
3712 return 0;
3713  
3714 default:
3715 break;
3716 } /* switch (cmd) */
3717 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
3718 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3719 done_socket(sock);
3720 return -1;
3721 }
3722  
/** A minimal implementation of fcntl.
 * Currently only the commands F_GETFL and F_SETFL are implemented.
 * The flag O_NONBLOCK and access modes are supported for F_GETFL, only
 * the flag O_NONBLOCK is implemented for F_SETFL.
 *
 * @param s socket descriptor
 * @param cmd F_GETFL or F_SETFL
 * @param val new flags for F_SETFL (only O_NONBLOCK honored); ignored for F_GETFL
 * @return flags for F_GETFL, 0 for successful F_SETFL, -1 on error (errno set)
 */
int
lwip_fcntl(int s, int cmd, int val)
{
  struct lwip_sock *sock = get_socket(s);
  int ret = -1;
  int op_mode = 0;

  if (!sock) {
    return -1;
  }

  switch (cmd) {
    case F_GETFL:
      ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
      sock_set_errno(sock, 0);

      if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
#if LWIP_TCPIP_CORE_LOCKING
        LOCK_TCPIP_CORE();
#else
        SYS_ARCH_DECL_PROTECT(lev);
        /* the proper thing to do here would be to get into the tcpip_thread,
           but locking should be OK as well since we only *read* some flags */
        SYS_ARCH_PROTECT(lev);
#endif
#if LWIP_TCP
        /* derive the access mode from the TCP shutdown state:
           RX still open -> readable, FIN not sent -> writable */
        if (sock->conn->pcb.tcp) {
          if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) {
            op_mode |= O_RDONLY;
          }
          if (!(sock->conn->pcb.tcp->flags & TF_FIN)) {
            op_mode |= O_WRONLY;
          }
        }
#endif
#if LWIP_TCPIP_CORE_LOCKING
        UNLOCK_TCPIP_CORE();
#else
        SYS_ARCH_UNPROTECT(lev);
#endif
      } else {
        /* non-TCP sockets are always considered read/write */
        op_mode |= O_RDWR;
      }

      /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */
      ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode;

      break;
    case F_SETFL:
      /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */
      val &= ~(O_RDONLY | O_WRONLY | O_RDWR);
      if ((val & ~O_NONBLOCK) == 0) {
        /* only O_NONBLOCK, all other bits are zero */
        netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
        ret = 0;
        sock_set_errno(sock, 0);
      } else {
        sock_set_errno(sock, ENOSYS); /* not yet implemented */
      }
      break;
    default:
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
      sock_set_errno(sock, ENOSYS); /* not yet implemented */
      break;
  }
  done_socket(sock);
  return ret;
}
3796  
#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
/** POSIX-named variadic front-end for lwip_fcntl().
 * Reads a single int from the variable argument list and forwards it
 * together with the command (F_GETFL ignores the value in lwip_fcntl()).
 */
int
fcntl(int s, int cmd, ...)
{
  int arg_value;
  va_list args;

  va_start(args, cmd);
  arg_value = va_arg(args, int);
  va_end(args);

  return lwip_fcntl(s, cmd, arg_value);
}
#endif
3810  
3811 const char *
3812 lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size)
3813 {
3814 const char *ret = NULL;
3815 int size_int = (int)size;
3816 if (size_int < 0) {
3817 set_errno(ENOSPC);
3818 return NULL;
3819 }
3820 switch (af) {
3821 #if LWIP_IPV4
3822 case AF_INET:
3823 ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int);
3824 if (ret == NULL) {
3825 set_errno(ENOSPC);
3826 }
3827 break;
3828 #endif
3829 #if LWIP_IPV6
3830 case AF_INET6:
3831 ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int);
3832 if (ret == NULL) {
3833 set_errno(ENOSPC);
3834 }
3835 break;
3836 #endif
3837 default:
3838 set_errno(EAFNOSUPPORT);
3839 break;
3840 }
3841 return ret;
3842 }
3843  
/** Convert a text IPv4/IPv6 address to binary form.
 * @return 1 on success, 0 if src is not a valid address,
 *         -1 with errno set to EAFNOSUPPORT for an unknown family
 */
int
lwip_inet_pton(int af, const char *src, void *dst)
{
  switch (af) {
#if LWIP_IPV4
    case AF_INET:
      return ip4addr_aton(src, (ip4_addr_t *)dst);
#endif
#if LWIP_IPV6
    case AF_INET6: {
      /* parse into a temporary: ip6_addr_t might be larger than in6_addr
         when scopes are enabled, so only the raw words are copied out */
      ip6_addr_t parsed;
      int ok = ip6addr_aton(src, &parsed);
      if (ok) {
        memcpy(dst, &parsed.addr, sizeof(parsed.addr));
      }
      return ok;
    }
#endif
    default:
      set_errno(EAFNOSUPPORT);
      return -1;
  }
}
3873  
3874 #if LWIP_IGMP
3875 /** Register a new IGMP membership. On socket close, the membership is dropped automatically.
3876 *
3877 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
3878 *
3879 * @return 1 on success, 0 on failure
3880 */
3881 static int
3882 lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
3883 {
3884 struct lwip_sock *sock = get_socket(s);
3885 int i;
3886  
3887 if (!sock) {
3888 return 0;
3889 }
3890  
3891 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
3892 if (socket_ipv4_multicast_memberships[i].sock == NULL) {
3893 socket_ipv4_multicast_memberships[i].sock = sock;
3894 ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
3895 ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
3896 done_socket(sock);
3897 return 1;
3898 }
3899 }
3900 done_socket(sock);
3901 return 0;
3902 }
3903  
3904 /** Unregister a previously registered membership. This prevents dropping the membership
3905 * on socket close.
3906 *
3907 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
3908 */
3909 static void
3910 lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
3911 {
3912 struct lwip_sock *sock = get_socket(s);
3913 int i;
3914  
3915 if (!sock) {
3916 return;
3917 }
3918  
3919 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
3920 if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
3921 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
3922 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
3923 socket_ipv4_multicast_memberships[i].sock = NULL;
3924 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
3925 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
3926 break;
3927 }
3928 }
3929 done_socket(sock);
3930 }
3931  
3932 /** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
3933 *
3934 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
3935 */
3936 static void
3937 lwip_socket_drop_registered_memberships(int s)
3938 {
3939 struct lwip_sock *sock = get_socket(s);
3940 int i;
3941  
3942 if (!sock) {
3943 return;
3944 }
3945  
3946 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
3947 if (socket_ipv4_multicast_memberships[i].sock == sock) {
3948 ip_addr_t multi_addr, if_addr;
3949 ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
3950 ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
3951 socket_ipv4_multicast_memberships[i].sock = NULL;
3952 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
3953 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
3954  
3955 netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
3956 }
3957 }
3958 done_socket(sock);
3959 }
3960 #endif /* LWIP_IGMP */
3961  
3962 #if LWIP_IPV6_MLD
3963 /** Register a new MLD6 membership. On socket close, the membership is dropped automatically.
3964 *
3965 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
3966 *
3967 * @return 1 on success, 0 on failure
3968 */
3969 static int
3970 lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
3971 {
3972 struct lwip_sock *sock = get_socket(s);
3973 int i;
3974  
3975 if (!sock) {
3976 return 0;
3977 }
3978  
3979 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
3980 if (socket_ipv6_multicast_memberships[i].sock == NULL) {
3981 socket_ipv6_multicast_memberships[i].sock = sock;
3982 socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx;
3983 ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr);
3984 done_socket(sock);
3985 return 1;
3986 }
3987 }
3988 done_socket(sock);
3989 return 0;
3990 }
3991  
3992 /** Unregister a previously registered MLD6 membership. This prevents dropping the membership
3993 * on socket close.
3994 *
3995 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
3996 */
3997 static void
3998 lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
3999 {
4000 struct lwip_sock *sock = get_socket(s);
4001 int i;
4002  
4003 if (!sock) {
4004 return;
4005 }
4006  
4007 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4008 if ((socket_ipv6_multicast_memberships[i].sock == sock) &&
4009 (socket_ipv6_multicast_memberships[i].if_idx == if_idx) &&
4010 ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) {
4011 socket_ipv6_multicast_memberships[i].sock = NULL;
4012 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4013 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4014 break;
4015 }
4016 }
4017 done_socket(sock);
4018 }
4019  
4020 /** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt.
4021 *
4022 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4023 */
4024 static void
4025 lwip_socket_drop_registered_mld6_memberships(int s)
4026 {
4027 struct lwip_sock *sock = get_socket(s);
4028 int i;
4029  
4030 if (!sock) {
4031 return;
4032 }
4033  
4034 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4035 if (socket_ipv6_multicast_memberships[i].sock == sock) {
4036 ip_addr_t multi_addr;
4037 u8_t if_idx;
4038  
4039 ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr);
4040 if_idx = socket_ipv6_multicast_memberships[i].if_idx;
4041  
4042 socket_ipv6_multicast_memberships[i].sock = NULL;
4043 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4044 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4045  
4046 netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE);
4047 }
4048 }
4049 done_socket(sock);
4050 }
4051 #endif /* LWIP_IPV6_MLD */
4052  
4053 #endif /* LWIP_SOCKET */