Initial public version

Ivan Grokhotkov
2016-08-17 23:08:22 +08:00
commit bd6ea4393c
628 changed files with 224814 additions and 0 deletions

components/lwip/core/def.c (Executable file, 108 lines)

@@ -0,0 +1,108 @@
/**
* @file
* Common functions used throughout the stack.
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt
*
*/
#include "lwip/opt.h"
#include "lwip/def.h"
/**
* These are reference implementations of the byte swapping functions.
* Again with the aim of being simple, correct and fully portable.
* Byte swapping is the second thing you would want to optimize. You will
* need to port it to your architecture and in your cc.h:
*
* #define LWIP_PLATFORM_BYTESWAP 1
* #define LWIP_PLATFORM_HTONS(x) <your_htons>
* #define LWIP_PLATFORM_HTONL(x) <your_htonl>
*
* Note ntohs() and ntohl() are merely references to the htonx counterparts.
*/
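/* As an illustration, a minimal cc.h override for a GCC- or Clang-style
* toolchain (a sketch; it assumes the __builtin_bswap* intrinsics are
* available) could look like this, which compiles out the portable C
* implementations below:
*
*   #define LWIP_PLATFORM_BYTESWAP 1
*   #define LWIP_PLATFORM_HTONS(x) __builtin_bswap16(x)
*   #define LWIP_PLATFORM_HTONL(x) __builtin_bswap32(x)
*/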
#if (LWIP_PLATFORM_BYTESWAP == 0) && (BYTE_ORDER == LITTLE_ENDIAN)
/**
* Convert an u16_t from host- to network byte order.
*
* @param n u16_t in host byte order
* @return n in network byte order
*/
u16_t
lwip_htons(u16_t n)
{
return ((n & 0xff) << 8) | ((n & 0xff00) >> 8);
}
/**
* Convert an u16_t from network- to host byte order.
*
* @param n u16_t in network byte order
* @return n in host byte order
*/
u16_t
lwip_ntohs(u16_t n)
{
return lwip_htons(n);
}
/**
* Convert an u32_t from host- to network byte order.
*
* @param n u32_t in host byte order
* @return n in network byte order
*/
u32_t
lwip_htonl(u32_t n)
{
return ((n & 0xff) << 24) |
((n & 0xff00) << 8) |
((n & 0xff0000UL) >> 8) |
((n & 0xff000000UL) >> 24);
}
/**
* Convert an u32_t from network- to host byte order.
*
* @param n u32_t in network byte order
* @return n in host byte order
*/
u32_t
lwip_ntohl(u32_t n)
{
return lwip_htonl(n);
}
#endif /* (LWIP_PLATFORM_BYTESWAP == 0) && (BYTE_ORDER == LITTLE_ENDIAN) */

components/lwip/core/dns.c (Executable file, 1500 lines): file diff suppressed because it is too large

components/lwip/core/inet_chksum.c (Executable file, 612 lines)

@@ -0,0 +1,612 @@
/**
* @file
* Internet checksum functions.
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#include "lwip/inet_chksum.h"
#include "lwip/def.h"
#include "lwip/ip_addr.h"
#include <stddef.h>
#include <string.h>
/* These are some reference implementations of the checksum algorithm, with the
* aim of being simple, correct and fully portable. Checksumming is the
* first thing you would want to optimize for your platform. If you create
* your own version, link it in and in your cc.h put:
*
* #define LWIP_CHKSUM <your_checksum_routine>
*
* Or you can select from the implementations below by defining
* LWIP_CHKSUM_ALGORITHM to 1, 2 or 3.
*/
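/* As a sketch, a port with an optimized checksum routine (my_port_chksum is
* only a placeholder name) would declare and select it from cc.h roughly as:
*
*   u16_t my_port_chksum(const void *dataptr, int len);
*   #define LWIP_CHKSUM my_port_chksum
*
* Alternatively, keep the reference code and just pick one of the three
* versions below, e.g. #define LWIP_CHKSUM_ALGORITHM 3.
*/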
#ifndef LWIP_CHKSUM
# define LWIP_CHKSUM lwip_standard_chksum
# ifndef LWIP_CHKSUM_ALGORITHM
# define LWIP_CHKSUM_ALGORITHM 2
# endif
u16_t lwip_standard_chksum(const void *dataptr, int len);
#endif
/* If none set: */
#ifndef LWIP_CHKSUM_ALGORITHM
# define LWIP_CHKSUM_ALGORITHM 0
#endif
#if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */
/**
* lwip checksum
*
* @param dataptr points to start of data to be summed at any boundary
* @param len length of data to be summed
* @return host order (!) lwip checksum (non-inverted Internet sum)
*
* @note accumulator size limits summable length to 64k
* @note host endianness is irrelevant (RFC 1071, p. 3)
*/
u16_t
lwip_standard_chksum(const void *dataptr, int len)
{
u32_t acc;
u16_t src;
const u8_t *octetptr;
acc = 0;
/* dataptr may be at odd or even addresses */
octetptr = (const u8_t*)dataptr;
while (len > 1) {
/* declare first octet as most significant
thus assume network order, ignoring host order */
src = (*octetptr) << 8;
octetptr++;
/* declare second octet as least significant */
src |= (*octetptr);
octetptr++;
acc += src;
len -= 2;
}
if (len > 0) {
/* accumulate remaining octet */
src = (*octetptr) << 8;
acc += src;
}
/* add deferred carry bits */
acc = (acc >> 16) + (acc & 0x0000ffffUL);
if ((acc & 0xffff0000UL) != 0) {
acc = (acc >> 16) + (acc & 0x0000ffffUL);
}
/* This may be a little confusing: reorder the sum using htons()
instead of ntohs(), since it has a little less call overhead.
The caller must invert the bits for the Internet sum! */
return htons((u16_t)acc);
}
#endif
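/* Worked example of the carry folding used above: adding the 16-bit words
* 0xFFFF and 0x0001 gives acc = 0x10000; folding adds the upper half back into
* the lower half, 0x0001 + 0x0000 = 0x0001, which is the correct
* one's-complement sum with end-around carry. The caller still inverts the
* result before storing it in a protocol header (see inet_chksum() below). */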
#if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */
/*
* Curt McDowell
* Broadcom Corp.
* csm@broadcom.com
*
* IP checksum two bytes at a time with support for
* unaligned buffer.
* Works for len up to and including 0x20000.
* by Curt McDowell, Broadcom Corp. 12/08/2005
*
* @param dataptr points to start of data to be summed at any boundary
* @param len length of data to be summed
* @return host order (!) lwip checksum (non-inverted Internet sum)
*/
u16_t
lwip_standard_chksum(const void *dataptr, int len)
{
const u8_t *pb = (const u8_t *)dataptr;
const u16_t *ps;
u16_t t = 0;
u32_t sum = 0;
int odd = ((mem_ptr_t)pb & 1);
/* Get aligned to u16_t */
if (odd && len > 0) {
((u8_t *)&t)[1] = *pb++;
len--;
}
/* Add the bulk of the data */
ps = (const u16_t *)(const void *)pb;
while (len > 1) {
sum += *ps++;
len -= 2;
}
/* Consume left-over byte, if any */
if (len > 0) {
((u8_t *)&t)[0] = *(const u8_t *)ps;
}
/* Add end bytes */
sum += t;
/* Fold 32-bit sum to 16 bits
calling this twice is probably faster than if statements... */
sum = FOLD_U32T(sum);
sum = FOLD_U32T(sum);
/* Swap if alignment was odd */
if (odd) {
sum = SWAP_BYTES_IN_WORD(sum);
}
return (u16_t)sum;
}
#endif
#if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */
/**
* An optimized checksum routine. Basically, it uses loop-unrolling on
* the checksum loop, treating the head and tail bytes specially, whereas
* the inner loop acts on 8 bytes at a time.
*
* @param dataptr start of the buffer to be checksummed. May be an odd byte address.
* @param len number of bytes in the buffer to be checksummed.
* @return host order (!) lwip checksum (non-inverted Internet sum)
*
* by Curt McDowell, Broadcom Corp. December 8th, 2005
*/
u16_t
lwip_standard_chksum(const void *dataptr, int len)
{
const u8_t *pb = (const u8_t *)dataptr;
const u16_t *ps;
u16_t t = 0;
const u32_t *pl;
u32_t sum = 0, tmp;
/* starts at odd byte address? */
int odd = ((mem_ptr_t)pb & 1);
if (odd && len > 0) {
((u8_t *)&t)[1] = *pb++;
len--;
}
ps = (const u16_t *)(const void*)pb;
if (((mem_ptr_t)ps & 3) && len > 1) {
sum += *ps++;
len -= 2;
}
pl = (const u32_t *)(const void*)ps;
while (len > 7) {
tmp = sum + *pl++; /* ping */
if (tmp < sum) {
tmp++; /* add back carry */
}
sum = tmp + *pl++; /* pong */
if (sum < tmp) {
sum++; /* add back carry */
}
len -= 8;
}
/* make room in upper bits */
sum = FOLD_U32T(sum);
ps = (const u16_t *)pl;
/* 16-bit aligned word remaining? */
while (len > 1) {
sum += *ps++;
len -= 2;
}
/* dangling tail byte remaining? */
if (len > 0) { /* include odd byte */
((u8_t *)&t)[0] = *(const u8_t *)ps;
}
sum += t; /* add end bytes */
/* Fold 32-bit sum to 16 bits
calling this twice is probably faster than if statements... */
sum = FOLD_U32T(sum);
sum = FOLD_U32T(sum);
if (odd) {
sum = SWAP_BYTES_IN_WORD(sum);
}
return (u16_t)sum;
}
#endif
/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */
static u16_t
inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc)
{
struct pbuf *q;
u8_t swapped = 0;
/* iterate through all pbuf in chain */
for (q = p; q != NULL; q = q->next) {
LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n",
(void *)q, (void *)q->next));
acc += LWIP_CHKSUM(q->payload, q->len);
/*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/
/* just executing this next line is probably faster than the if statement needed
to check whether we really need to execute it, and does no harm */
acc = FOLD_U32T(acc);
if (q->len % 2 != 0) {
swapped = 1 - swapped;
acc = SWAP_BYTES_IN_WORD(acc);
}
/*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/
}
if (swapped) {
acc = SWAP_BYTES_IN_WORD(acc);
}
acc += (u32_t)htons((u16_t)proto);
acc += (u32_t)htons(proto_len);
/* Fold 32-bit sum to 16 bits
calling this twice is probably faster than if statements... */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc));
return (u16_t)~(acc & 0xffffUL);
}
#if LWIP_IPV4
/* inet_chksum_pseudo:
*
* Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
* IP addresses are expected to be in network byte order.
*
* @param p chain of pbufs over which a checksum should be calculated (ip data part)
* @param src source ip address (used for checksum of pseudo header)
* @param dest destination ip address (used for checksum of pseudo header)
* @param proto ip protocol (used for checksum of pseudo header)
* @param proto_len length of the ip data part (used for checksum of pseudo header)
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len,
const ip4_addr_t *src, const ip4_addr_t *dest)
{
u32_t acc;
u32_t addr;
addr = ip4_addr_get_u32(src);
acc = (addr & 0xffffUL);
acc += ((addr >> 16) & 0xffffUL);
addr = ip4_addr_get_u32(dest);
acc += (addr & 0xffffUL);
acc += ((addr >> 16) & 0xffffUL);
/* fold down to 16 bits */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
return inet_cksum_pseudo_base(p, proto, proto_len, acc);
}
#endif /* LWIP_IPV4 */
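/* Usage sketch (illustrative only; IP_PROTO_UDP, udphdr and the address
* variables are assumed to be provided by the caller): a UDP sender computing
* the checksum over a pbuf chain p holding the UDP header plus payload would
* do roughly
*
*   udphdr->chksum = inet_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len,
*                                       &src_ip4, &dst_ip4);
*   if (udphdr->chksum == 0x0000) {
*     udphdr->chksum = 0xffff;
*   }
*
* because a transmitted checksum of 0 means "no checksum" for UDP. */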
#if LWIP_IPV6
/**
* Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain.
* IPv6 addresses are expected to be in network byte order.
*
* @param p chain of pbufs over which a checksum should be calculated (ip data part)
* @param src source ipv6 address (used for checksum of pseudo header)
* @param dest destination ipv6 address (used for checksum of pseudo header)
* @param proto ipv6 protocol/next header (used for checksum of pseudo header)
* @param proto_len length of the ipv6 payload (used for checksum of pseudo header)
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len,
const ip6_addr_t *src, const ip6_addr_t *dest)
{
u32_t acc = 0;
u32_t addr;
u8_t addr_part;
for (addr_part = 0; addr_part < 4; addr_part++) {
addr = src->addr[addr_part];
acc += (addr & 0xffffUL);
acc += ((addr >> 16) & 0xffffUL);
addr = dest->addr[addr_part];
acc += (addr & 0xffffUL);
acc += ((addr >> 16) & 0xffffUL);
}
/* fold down to 16 bits */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
return inet_cksum_pseudo_base(p, proto, proto_len, acc);
}
#endif /* LWIP_IPV6 */
/* ip_chksum_pseudo:
*
* Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
* IP addresses are expected to be in network byte order.
*
* @param p chain of pbufs over which a checksum should be calculated (ip data part)
* @param src source ip address (used for checksum of pseudo header)
* @param dest destination ip address (used for checksum of pseudo header)
* @param proto ip protocol (used for checksum of pseudo header)
* @param proto_len length of the ip data part (used for checksum of pseudo header)
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len,
const ip_addr_t *src, const ip_addr_t *dest)
{
#if LWIP_IPV6
if (IP_IS_V6(dest)) {
return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest));
}
#endif /* LWIP_IPV6 */
#if LWIP_IPV4 && LWIP_IPV6
else
#endif /* LWIP_IPV4 && LWIP_IPV6 */
#if LWIP_IPV4
{
return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest));
}
#endif /* LWIP_IPV4 */
}
/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */
static u16_t
inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len,
u16_t chksum_len, u32_t acc)
{
struct pbuf *q;
u8_t swapped = 0;
u16_t chklen;
/* iterate through all pbuf in chain */
for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) {
LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n",
(void *)q, (void *)q->next));
chklen = q->len;
if (chklen > chksum_len) {
chklen = chksum_len;
}
acc += LWIP_CHKSUM(q->payload, chklen);
chksum_len -= chklen;
LWIP_ASSERT("delete me", chksum_len < 0x7fff);
/*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/
/* fold the upper bit down */
acc = FOLD_U32T(acc);
if (q->len % 2 != 0) {
swapped = 1 - swapped;
acc = SWAP_BYTES_IN_WORD(acc);
}
/*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/
}
if (swapped) {
acc = SWAP_BYTES_IN_WORD(acc);
}
acc += (u32_t)htons((u16_t)proto);
acc += (u32_t)htons(proto_len);
/* Fold 32-bit sum to 16 bits
calling this twice is probably faster than if statements... */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc));
return (u16_t)~(acc & 0xffffUL);
}
#if LWIP_IPV4
/* inet_chksum_pseudo_partial:
*
* Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
* IP addresses are expected to be in network byte order.
*
* @param p chain of pbufs over which a checksum should be calculated (ip data part)
* @param src source ip address (used for checksum of pseudo header)
* @param dest destination ip address (used for checksum of pseudo header)
* @param proto ip protocol (used for checksum of pseudo header)
* @param proto_len length of the ip data part (used for checksum of pseudo header)
* @param chksum_len number of payload bytes used to compute the checksum
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len,
u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest)
{
u32_t acc;
u32_t addr;
addr = ip4_addr_get_u32(src);
acc = (addr & 0xffffUL);
acc += ((addr >> 16) & 0xffffUL);
addr = ip4_addr_get_u32(dest);
acc += (addr & 0xffffUL);
acc += ((addr >> 16) & 0xffffUL);
/* fold down to 16 bits */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc);
}
#endif /* LWIP_IPV4 */
#if LWIP_IPV6
/**
* Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain.
* IPv6 addresses are expected to be in network byte order. Will only compute for a
* portion of the payload.
*
* @param p chain of pbufs over which a checksum should be calculated (ip data part)
* @param src source ipv6 address (used for checksum of pseudo header)
* @param dest destination ipv6 address (used for checksum of pseudo header)
* @param proto ipv6 protocol/next header (used for checksum of pseudo header)
* @param proto_len length of the ipv6 payload (used for checksum of pseudo header)
* @param chksum_len number of payload bytes used to compute chksum
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len,
u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest)
{
u32_t acc = 0;
u32_t addr;
u8_t addr_part;
for (addr_part = 0; addr_part < 4; addr_part++) {
addr = src->addr[addr_part];
acc += (addr & 0xffffUL);
acc += ((addr >> 16) & 0xffffUL);
addr = dest->addr[addr_part];
acc += (addr & 0xffffUL);
acc += ((addr >> 16) & 0xffffUL);
}
/* fold down to 16 bits */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc);
}
#endif /* LWIP_IPV6 */
/* ip_chksum_pseudo_partial:
*
* Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
*
* @param p chain of pbufs over which a checksum should be calculated (ip data part)
* @param src source ip address (used for checksum of pseudo header)
* @param dest destination ip address (used for checksum of pseudo header)
* @param proto ip protocol (used for checksum of pseudo header)
* @param proto_len length of the ip data part (used for checksum of pseudo header)
* @param chksum_len number of payload bytes used to compute the checksum
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len,
u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest)
{
#if LWIP_IPV6
if (IP_IS_V6(dest)) {
return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest));
}
#endif /* LWIP_IPV6 */
#if LWIP_IPV4 && LWIP_IPV6
else
#endif /* LWIP_IPV4 && LWIP_IPV6 */
#if LWIP_IPV4
{
return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest));
}
#endif /* LWIP_IPV4 */
}
/* inet_chksum:
*
* Calculates the Internet checksum over a portion of memory. Used primarily for IP
* and ICMP.
*
* @param dataptr start of the buffer to calculate the checksum (no alignment needed)
* @param len length of the buffer to calculate the checksum
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
inet_chksum(const void *dataptr, u16_t len)
{
return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len);
}
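/* Usage sketch (assuming the usual lwIP IPv4 header accessors IPH_CHKSUM_SET
* and IPH_HL): an IPv4 header checksum is filled in by zeroing the field and
* storing the result of inet_chksum() directly, since it is already the
* inverted one's-complement sum:
*
*   IPH_CHKSUM_SET(iphdr, 0);
*   IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IPH_HL(iphdr) * 4));
*/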
/**
* Calculate a checksum over a chain of pbufs (without pseudo-header, much like
* inet_chksum only pbufs are used).
*
* @param p pbuf chain over which the checksum should be calculated
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
inet_chksum_pbuf(struct pbuf *p)
{
u32_t acc;
struct pbuf *q;
u8_t swapped;
acc = 0;
swapped = 0;
for (q = p; q != NULL; q = q->next) {
acc += LWIP_CHKSUM(q->payload, q->len);
acc = FOLD_U32T(acc);
if (q->len % 2 != 0) {
swapped = 1 - swapped;
acc = SWAP_BYTES_IN_WORD(acc);
}
}
if (swapped) {
acc = SWAP_BYTES_IN_WORD(acc);
}
return (u16_t)~(acc & 0xffffUL);
}
/* These are some implementations for LWIP_CHKSUM_COPY, which copies data
* like MEMCPY but generates a checksum at the same time. Since this is a
* performance-sensitive function, you might want to create your own version
* in assembly targeted at your hardware by defining it in lwipopts.h:
* #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len)
*/
#if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */
/** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM.
* For architectures with big caches, data might still be in cache when
* generating the checksum after copying.
*/
u16_t
lwip_chksum_copy(void *dst, const void *src, u16_t len)
{
MEMCPY(dst, src, len);
return LWIP_CHKSUM(dst, len);
}
#endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */
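/* A portable sketch of a combined copy-and-checksum routine, to be hooked up
* via LWIP_CHKSUM_COPY in lwipopts.h; your_chksum_copy is a placeholder name
* and the summing mirrors reference algorithm #1 above (big-endian word sum,
* double carry fold, result returned in network byte order):
*
*   u16_t your_chksum_copy(void *dst, const void *src, u16_t len)
*   {
*     const u8_t *s = (const u8_t *)src;
*     u8_t *d = (u8_t *)dst;
*     u32_t acc = 0;
*     while (len > 1) {
*       acc += ((u32_t)s[0] << 8) | s[1];
*       d[0] = s[0]; d[1] = s[1];
*       s += 2; d += 2; len -= 2;
*     }
*     if (len > 0) {
*       d[0] = s[0];
*       acc += (u32_t)s[0] << 8;
*     }
*     acc = (acc >> 16) + (acc & 0xffffUL);
*     acc = (acc >> 16) + (acc & 0xffffUL);
*     return htons((u16_t)acc);
*   }
*/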

components/lwip/core/init.c (Executable file, 378 lines)

@@ -0,0 +1,378 @@
/**
* @file
* Module initialization
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#include "lwip/init.h"
#include "lwip/stats.h"
#include "lwip/sys.h"
#include "lwip/mem.h"
#include "lwip/memp.h"
#include "lwip/pbuf.h"
#include "lwip/netif.h"
#include "lwip/sockets.h"
#include "lwip/ip.h"
#include "lwip/raw.h"
#include "lwip/udp.h"
#include "lwip/priv/tcp_priv.h"
#include "lwip/autoip.h"
#include "lwip/igmp.h"
#include "lwip/dns.h"
#include "lwip/timers.h"
#include "netif/etharp.h"
#include "lwip/ip6.h"
#include "lwip/nd6.h"
#include "lwip/mld6.h"
#include "lwip/api.h"
#include "netif/ppp/ppp_impl.h"
#ifndef PERF
/* Compile-time sanity checks for configuration errors.
* These can be done independently of LWIP_DEBUG, without penalty.
*/
#ifndef BYTE_ORDER
#error "BYTE_ORDER is not defined, you have to define it in your cc.h"
#endif
#if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV)
#error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h"
#endif
#if (!LWIP_UDP && LWIP_UDPLITE)
#error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h"
#endif
#if (!LWIP_UDP && LWIP_DHCP)
#error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h"
#endif
#if (!LWIP_UDP && LWIP_MULTICAST_TX_OPTIONS)
#error "If you want to use IGMP/LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 in your lwipopts.h"
#endif
#if (!LWIP_UDP && LWIP_DNS)
#error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h"
#endif
#if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */
#if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0))
#error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h"
#endif
#if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0))
#error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h"
#endif
#if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0))
#error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h"
#endif
#if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0))
#error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h"
#endif
#if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1))
#error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h"
#endif
#if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS)
#error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h"
#endif
#if (LWIP_IGMP && !LWIP_IPV4)
#error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h"
#endif
#if (LWIP_MULTICAST_TX_OPTIONS && !LWIP_IPV4)
#error "LWIP_MULTICAST_TX_OPTIONS needs LWIP_IPV4 enabled in your lwipopts.h"
#endif
#if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0))
#error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h"
#endif
/* There must be sufficient timeouts, taking into account requirements of the subsystems. */
#if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < (LWIP_TCP + IP_REASSEMBLY + LWIP_ARP + (2*LWIP_DHCP) + LWIP_AUTOIP + LWIP_IGMP + LWIP_DNS + PPP_SUPPORT + (LWIP_IPV6 ? (1 + LWIP_IPV6_REASS + LWIP_IPV6_MLD) : 0)))
#error "MEMP_NUM_SYS_TIMEOUT is too low to accomodate all required timeouts"
#endif
#if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS))
#error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!"
#endif
#endif /* !MEMP_MEM_MALLOC */
#if LWIP_WND_SCALE
//#if (LWIP_TCP && (TCP_WND > 0xffffffff))
//#error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h"
//#endif
#if (LWIP_TCP && LWIP_WND_SCALE && (TCP_RCV_SCALE > 14))
#error "The maximum valid window scale value is 14!"
#endif
//#if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE)))
//#error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!"
//#endif
//#if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0))
//#error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!"
//#endif
#else /* LWIP_WND_SCALE */
#ifndef LWIP_ESP8266
#if (LWIP_TCP && (TCP_WND > 0xffff))
#error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)"
#endif
#endif
#endif /* LWIP_WND_SCALE */
#if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff))
#error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h"
#endif
#if (LWIP_TCP && (TCP_SND_QUEUELEN < 2))
#error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work"
#endif
#ifndef LWIP_ESP8266
#if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12)))
#error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h"
#endif
#endif
#if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff)))
#error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t"
#endif
#if (LWIP_NETIF_API && (NO_SYS==1))
#error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h"
#endif
#if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1))
#error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h"
#endif
#if (LWIP_PPP_API && (NO_SYS==1))
#error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h"
#endif
#if (LWIP_PPP_API && (PPP_SUPPORT==0))
#error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h"
#endif
#if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP)
#error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h"
#endif
#if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK)
#error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h"
#endif
#if (!LWIP_ARP && LWIP_AUTOIP)
#error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h"
#endif
#if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API)))
#error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h"
#endif
#if (MEM_LIBC_MALLOC && MEM_USE_POOLS)
#error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h"
#endif
#if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS)
#error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h"
#endif
#if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT)
#error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf"
#endif
#if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT)))
#error "you have to define define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST"
#endif
#if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT
#error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on"
#endif
#if PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT
#error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on"
#endif
#if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4
#error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on"
#endif
#if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6
#error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on"
#endif
#if !LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT)
#error "LWIP_ETHERNET needs to be turned on for LWIP_ARP or PPPOE_SUPPORT"
#endif
#if (LWIP_IGMP || LWIP_IPV6) && !defined(LWIP_RAND)
#error "When using IGMP or IPv6, LWIP_RAND() needs to be defined to a random-function returning an u32_t random value"
#endif
#if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING
#error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too"
#endif
#if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE
#error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets"
#endif
#if IP_FRAG && IP_FRAG_USES_STATIC_BUF && LWIP_NETIF_TX_SINGLE_PBUF
#error "LWIP_NETIF_TX_SINGLE_PBUF does not work with IP_FRAG_USES_STATIC_BUF==1 as that creates pbuf queues"
#endif
#if LWIP_NETCONN && LWIP_TCP
#if NETCONN_COPY != TCP_WRITE_FLAG_COPY
#error "NETCONN_COPY != TCP_WRITE_FLAG_COPY"
#endif
#if NETCONN_MORE != TCP_WRITE_FLAG_MORE
#error "NETCONN_MORE != TCP_WRITE_FLAG_MORE"
#endif
#endif /* LWIP_NETCONN && LWIP_TCP */
#if LWIP_SOCKET
/* Check that the SO_* socket options and SOF_* lwIP-internal flags match */
#if SO_REUSEADDR != SOF_REUSEADDR
#error "WARNING: SO_REUSEADDR != SOF_REUSEADDR"
#endif
#if SO_KEEPALIVE != SOF_KEEPALIVE
#error "WARNING: SO_KEEPALIVE != SOF_KEEPALIVE"
#endif
#if SO_BROADCAST != SOF_BROADCAST
#error "WARNING: SO_BROADCAST != SOF_BROADCAST"
#endif
#endif /* LWIP_SOCKET */
/* Compile-time checks for deprecated options.
*/
#ifdef MEMP_NUM_TCPIP_MSG
#error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h."
#endif
#ifdef TCP_REXMIT_DEBUG
#error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h."
#endif
#ifdef RAW_STATS
#error "RAW_STATS option is deprecated. Remove it from your lwipopts.h."
#endif
#ifdef ETHARP_QUEUE_FIRST
#error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h."
#endif
#ifdef ETHARP_ALWAYS_INSERT
#error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h."
#endif
#ifndef LWIP_DISABLE_TCP_SANITY_CHECKS
#define LWIP_DISABLE_TCP_SANITY_CHECKS 0
#endif
#ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS
#define LWIP_DISABLE_MEMP_SANITY_CHECKS 0
#endif
/* MEMP sanity checks */
#if !LWIP_DISABLE_MEMP_SANITY_CHECKS
#if LWIP_NETCONN
#if MEMP_MEM_MALLOC
#if !MEMP_NUM_NETCONN && LWIP_SOCKET
#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!"
#endif
#else /* MEMP_MEM_MALLOC */
#if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB)
#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error."
#endif
#endif /* MEMP_MEM_MALLOC */
#endif /* LWIP_NETCONN */
#endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */
/* TCP sanity checks */
#if !LWIP_DISABLE_TCP_SANITY_CHECKS
#if LWIP_TCP
#if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN)
#error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_SND_BUF < (2 * TCP_MSS)
#error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS))
#error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_SNDLOWAT >= TCP_SND_BUF
#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS))
#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!"
#endif
#if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN
#error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if !MEMP_MEM_MALLOC && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN))
#error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#ifndef LWIP_ESP8266
#if !MEMP_MEM_MALLOC && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN))))
#error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_WND < TCP_MSS
#error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#endif
#endif /* LWIP_TCP */
#endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */
#endif
/**
* Initialize all modules.
*/
void
lwip_init(void)
{
#ifdef LWIP_ESP8266
// MEMP_NUM_TCP_PCB = 5;
// TCP_WND = (4 * TCP_MSS);
// TCP_MAXRTX = 12;
// TCP_SYNMAXRTX = 6;
#endif
/* Modules initialization */
stats_init();
#if !NO_SYS
sys_init();
#endif /* !NO_SYS */
mem_init();
memp_init();
pbuf_init();
netif_init();
#if LWIP_IPV4
ip_init();
#if LWIP_ARP
etharp_init();
#endif /* LWIP_ARP */
#endif /* LWIP_IPV4 */
#if LWIP_RAW
raw_init();
#endif /* LWIP_RAW */
#if LWIP_UDP
udp_init();
#endif /* LWIP_UDP */
#if LWIP_TCP
tcp_init();
#endif /* LWIP_TCP */
#if LWIP_AUTOIP
autoip_init();
#endif /* LWIP_AUTOIP */
#if LWIP_IGMP
igmp_init();
#endif /* LWIP_IGMP */
#if LWIP_DNS
dns_init();
#endif /* LWIP_DNS */
#if PPP_SUPPORT
ppp_init();
#endif
#if LWIP_TIMERS
sys_timeouts_init();
#endif /* LWIP_TIMERS */
}
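/* Usage sketch for a NO_SYS=1, raw-API style build (with an RTOS one would
* normally call tcpip_init() instead, which calls lwip_init() itself).
* my_netif, my_netif_init and my_netif_input are placeholders for the port's
* own network interface driver:
*
*   ip4_addr_t ipaddr, netmask, gw;   (set to the desired addresses first)
*   struct netif my_netif;
*   lwip_init();
*   netif_add(&my_netif, &ipaddr, &netmask, &gw, NULL, my_netif_init, my_netif_input);
*   netif_set_default(&my_netif);
*   netif_set_up(&my_netif);
*   for (;;) {
*     (poll the driver for received frames here)
*     sys_check_timeouts();
*   }
*/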

components/lwip/core/ip.c (Executable file, 101 lines)

@@ -0,0 +1,101 @@
/**
* @file ip.c
* Common IPv4 and IPv6 code
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_IPV4 || LWIP_IPV6
#include "lwip/ip_addr.h"
#include "lwip/ip.h"
/** Global data for both IPv4 and IPv6 */
struct ip_globals ip_data;
#if LWIP_IPV4 && LWIP_IPV6
const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT;
/** Convert IP address string (both versions) to numeric.
* The version is auto-detected from the string.
*
* @param cp IP address string to convert
* @param addr conversion result is stored here
* @return 1 on success, 0 on error
*/
int
ipaddr_aton(const char *cp, ip_addr_t *addr)
{
if (cp != NULL) {
const char* c;
for (c = cp; *c != 0; c++) {
if (*c == ':') {
/* contains a colon: IPv6 address */
if (addr) {
IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6);
}
return ip6addr_aton(cp, ip_2_ip6(addr));
} else if (*c == '.') {
/* contains a dot: IPv4 address */
break;
}
}
/* call ip4addr_aton as fallback or if IPv4 was found */
if (addr) {
IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4);
}
return ip4addr_aton(cp, ip_2_ip4(addr));
}
return 0;
}
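/* Example sketch: the same call accepts both address families and tags the
* result, so a caller can branch on the detected type afterwards:
*
*   ip_addr_t a;
*   if (ipaddr_aton("2001:db8::1", &a)) {
*     if (IP_IS_V6(&a)) {
*       (handle as IPv6)
*     } else {
*       (handle as IPv4)
*     }
*   }
*/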
/* If both IP versions are enabled, this function can dispatch packets to the correct one. */
err_t
ip_input(struct pbuf *p, struct netif *inp)
{
if (p != NULL) {
if (IP_HDR_GET_VERSION(p->payload) == 6) {
return ip6_input(p, inp);
}
return ip4_input(p, inp);
}
return ERR_VAL;
}
#endif /* LWIP_IPV4 && LWIP_IPV6 */
#endif /* LWIP_IPV4 || LWIP_IPV6 */

components/lwip/core/ipv4/autoip.c (Executable file, 549 lines)

@@ -0,0 +1,549 @@
/**
* @file
* AutoIP Automatic LinkLocal IP Configuration
*
*/
/*
*
* Copyright (c) 2007 Dominik Spies <kontakt@dspies.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* Author: Dominik Spies <kontakt@dspies.de>
*
* This is an AutoIP implementation for the lwIP TCP/IP stack. It aims to conform
* with RFC 3927.
*
*
* Please coordinate changes and requests with Dominik Spies
* <kontakt@dspies.de>
*/
/*******************************************************************************
* USAGE:
*
* define LWIP_AUTOIP 1 in your lwipopts.h
*
* If you don't use tcpip.c (i.e. you don't call tcpip_init()):
* - First, call autoip_init().
* - Then call autoip_tmr() every AUTOIP_TMR_INTERVAL milliseconds,
* which should be defined in autoip.h.
* A value of 100 is recommended. The value must divide 1000 with a remainder of almost 0.
* Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100, ...
*
* Without DHCP:
* - Call autoip_start() after netif_add().
*
* With DHCP:
* - define LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h.
* - Configure your DHCP Client.
*
*/
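/* A bare-metal sketch of the sequence described above (my_netif and its
* init/input callbacks are placeholders for the port's driver; lwip_init()
* already calls autoip_init()):
*
*   netif_add(&my_netif, IP4_ADDR_ANY, IP4_ADDR_ANY, IP4_ADDR_ANY,
*             NULL, my_netif_init, my_netif_input);
*   netif_set_up(&my_netif);
*   autoip_start(&my_netif);
*
* then call autoip_tmr() every AUTOIP_TMR_INTERVAL (typically 100) ms, or let
* the sys_timeouts machinery in timers.c schedule it.
*/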
#include "lwip/opt.h"
#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */
#include "lwip/mem.h"
/* #include "lwip/udp.h" */
#include "lwip/ip_addr.h"
#include "lwip/netif.h"
#include "lwip/autoip.h"
#include "netif/etharp.h"
#include <stdlib.h>
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
/* 169.254.0.0 */
#define AUTOIP_NET 0xA9FE0000
/* 169.254.1.0 */
#define AUTOIP_RANGE_START (AUTOIP_NET | 0x0100)
/* 169.254.254.255 */
#define AUTOIP_RANGE_END (AUTOIP_NET | 0xFEFF)
/** Pseudo-random macro based on netif information.
* You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */
#ifndef LWIP_AUTOIP_RAND
#define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \
((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \
((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \
((u32_t)((netif->hwaddr[4]) & 0xff))) + \
(netif->autoip?netif->autoip->tried_llipaddr:0))
#endif /* LWIP_AUTOIP_RAND */
/**
* Macro that generates the initial IP address to be tried by AUTOIP.
* If you want to override this, define it to something else in lwipopts.h.
*/
#ifndef LWIP_AUTOIP_CREATE_SEED_ADDR
#define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \
htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \
((u32_t)((u8_t)(netif->hwaddr[5]))) << 8)))
#endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */
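/* Override sketch: if the C library's rand() (or a hardware RNG wrapper) is
* preferred over the MAC-address based default above, lwipopts.h could contain
*
*   #define LWIP_AUTOIP_RAND(netif)  ((u32_t)rand())
*
* LWIP_AUTOIP_CREATE_SEED_ADDR can be overridden in the same way to change how
* the first candidate 169.254.x.y address is derived from the MAC address.
*/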
/* static functions */
static void autoip_handle_arp_conflict(struct netif *netif);
/* creates a pseudo random LL IP-Address for a network interface */
static void autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr);
/* sends an ARP probe */
static err_t autoip_arp_probe(struct netif *netif);
/* sends an ARP announce */
static err_t autoip_arp_announce(struct netif *netif);
/* configure interface for use with current LL IP-Address */
static err_t autoip_bind(struct netif *netif);
/* start sending probes for llipaddr */
static void autoip_start_probing(struct netif *netif);
/** Set a statically allocated struct autoip to work with.
* Using this prevents autoip_start() from allocating it using mem_malloc().
*
* @param netif the netif for which to set the struct autoip
* @param autoip (uninitialised) autoip struct allocated by the application
*/
void
autoip_set_struct(struct netif *netif, struct autoip *autoip)
{
LWIP_ASSERT("netif != NULL", netif != NULL);
LWIP_ASSERT("autoip != NULL", autoip != NULL);
LWIP_ASSERT("netif already has a struct autoip set", netif->autoip == NULL);
/* clear data structure */
memset(autoip, 0, sizeof(struct autoip));
/* autoip->state = AUTOIP_STATE_OFF; */
netif->autoip = autoip;
}
/** Restart AutoIP client and check the next address (conflict detected)
*
* @param netif The netif under AutoIP control
*/
static void
autoip_restart(struct netif *netif)
{
netif->autoip->tried_llipaddr++;
autoip_start(netif);
}
/**
* Handle an IP address conflict after an ARP conflict detection
*/
static void
autoip_handle_arp_conflict(struct netif *netif)
{
/* Somehow detect if we are defending or retreating */
unsigned char defend = 1; /* tbd */
if (defend) {
if (netif->autoip->lastconflict > 0) {
/* retreat, there was a conflicting ARP in the last
* DEFEND_INTERVAL seconds
*/
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n"));
/* TODO: close all TCP sessions */
autoip_restart(netif);
} else {
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_handle_arp_conflict(): we are defend, send ARP Announce\n"));
autoip_arp_announce(netif);
netif->autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND;
}
} else {
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_handle_arp_conflict(): we do not defend, retreating\n"));
/* TODO: close all TCP sessions */
autoip_restart(netif);
}
}
/**
* Create an IP address within the range 169.254.1.0 to 169.254.254.255
*
* @param netif network interface on which to create the IP address
* @param ipaddr ip address to initialize
*/
static void
autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr)
{
/* Here we create an IP address within the range 169.254.1.0 to 169.254.254.255,
* as required by RFC 3927 Section 2.1.
* That gives 254 * 256 possible addresses. */
u32_t addr = ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif));
addr += netif->autoip->tried_llipaddr;
addr = AUTOIP_NET | (addr & 0xffff);
/* Now, 169.254.0.0 <= addr <= 169.254.255.255 */
if (addr < AUTOIP_RANGE_START) {
addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1;
}
if (addr > AUTOIP_RANGE_END) {
addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1;
}
LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) &&
(addr <= AUTOIP_RANGE_END));
ip4_addr_set_u32(ipaddr, htonl(addr));
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
(u16_t)(netif->autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr),
ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr)));
}
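/* Worked example of the wrap-around above (with a made-up seed): if the seed
* plus tried_llipaddr yields 169.254.0.5, the low 16 bits are 0x0005, which is
* below AUTOIP_RANGE_START (0x0100); adding the range size
* AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1 = 0xFE00 gives 0xFE05, i.e.
* 169.254.254.5, which lies inside 169.254.1.0 ... 169.254.254.255 as required. */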
/**
* Sends an ARP probe from a network interface
*
* @param netif network interface used to send the probe
*/
static err_t
autoip_arp_probe(struct netif *netif)
{
return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, &ethbroadcast,
(struct eth_addr *)netif->hwaddr, IP4_ADDR_ANY, &ethzero,
&netif->autoip->llipaddr, ARP_REQUEST);
}
/**
* Sends an ARP announce from a network interface
*
* @param netif network interface used to send the announce
*/
static err_t
autoip_arp_announce(struct netif *netif)
{
return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, &ethbroadcast,
(struct eth_addr *)netif->hwaddr, &netif->autoip->llipaddr, &ethzero,
&netif->autoip->llipaddr, ARP_REQUEST);
}
/**
* Configure interface for use with current LL IP-Address
*
* @param netif network interface to configure with current LL IP-Address
*/
static err_t
autoip_bind(struct netif *netif)
{
struct autoip *autoip = netif->autoip;
ip4_addr_t sn_mask, gw_addr;
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
(void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num,
ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
IP4_ADDR(&sn_mask, 255, 255, 0, 0);
IP4_ADDR(&gw_addr, 0, 0, 0, 0);
netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr);
/* interface is used by routing now that an address is set */
return ERR_OK;
}
/**
* Start AutoIP client
*
* @param netif network interface on which to start the AutoIP client
*/
err_t
autoip_start(struct netif *netif)
{
struct autoip *autoip = netif->autoip;
err_t result = ERR_OK;
LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;);
/* Set IP-Address, Netmask and Gateway to 0 to make sure that
* ARP Packets are formed correctly
*/
netif_set_addr(netif, IP4_ADDR_ANY, IP4_ADDR_ANY, IP4_ADDR_ANY);
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_start(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0],
netif->name[1], (u16_t)netif->num));
if (autoip == NULL) {
/* no AutoIP client attached yet? */
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_start(): starting new AUTOIP client\n"));
autoip = (struct autoip *)mem_malloc(sizeof(struct autoip));
if (autoip == NULL) {
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_start(): could not allocate autoip\n"));
return ERR_MEM;
}
memset(autoip, 0, sizeof(struct autoip));
/* store this AutoIP client in the netif */
netif->autoip = autoip;
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip"));
} else {
autoip->state = AUTOIP_STATE_OFF;
autoip->ttw = 0;
autoip->sent_num = 0;
ip4_addr_set_zero(&autoip->llipaddr);
autoip->lastconflict = 0;
}
autoip_create_addr(netif, &(autoip->llipaddr));
autoip_start_probing(netif);
return result;
}
static void
autoip_start_probing(struct netif *netif)
{
struct autoip *autoip = netif->autoip;
autoip->state = AUTOIP_STATE_PROBING;
autoip->sent_num = 0;
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(&netif->autoip->llipaddr), ip4_addr2_16(&netif->autoip->llipaddr),
ip4_addr3_16(&netif->autoip->llipaddr), ip4_addr4_16(&netif->autoip->llipaddr)));
/* time to wait before the first probe; this is randomly
* chosen from 0 to PROBE_WAIT seconds,
* as required by RFC 3927 Section 2.2.1
*/
autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND));
/*
* if we have tried more than MAX_CONFLICTS addresses we must limit our rate for
* acquiring and probing addresses,
* as required by RFC 3927 Section 2.2.1
*/
if (autoip->tried_llipaddr > MAX_CONFLICTS) {
autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND;
}
}
/**
* Handle a possible change in the network configuration.
*
* If there is an AutoIP address configured, take the interface down
* and begin probing with the same address.
*/
void
autoip_network_changed(struct netif *netif)
{
if (netif->autoip && netif->autoip->state != AUTOIP_STATE_OFF) {
autoip_start_probing(netif);
}
}
/**
* Stop AutoIP client
*
* @param netif network interface on which to stop the AutoIP client
*/
err_t
autoip_stop(struct netif *netif)
{
if (netif->autoip) {
netif->autoip->state = AUTOIP_STATE_OFF;
if (ip4_addr_islinklocal(netif_ip4_addr(netif))) {
netif_set_addr(netif, IP4_ADDR_ANY, IP4_ADDR_ANY, IP4_ADDR_ANY);
}
}
return ERR_OK;
}
/**
* Has to be called in a loop every AUTOIP_TMR_INTERVAL milliseconds
*/
void
autoip_tmr(void)
{
struct netif *netif = netif_list;
/* loop through netif's */
while (netif != NULL) {
/* only act on AutoIP configured interfaces */
if (netif->autoip != NULL) {
if (netif->autoip->lastconflict > 0) {
netif->autoip->lastconflict--;
}
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n",
(u16_t)(netif->autoip->state), netif->autoip->ttw));
switch(netif->autoip->state) {
case AUTOIP_STATE_PROBING:
if (netif->autoip->ttw > 0) {
netif->autoip->ttw--;
} else {
if (netif->autoip->sent_num >= PROBE_NUM) {
netif->autoip->state = AUTOIP_STATE_ANNOUNCING;
netif->autoip->sent_num = 0;
netif->autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND;
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(&netif->autoip->llipaddr), ip4_addr2_16(&netif->autoip->llipaddr),
ip4_addr3_16(&netif->autoip->llipaddr), ip4_addr4_16(&netif->autoip->llipaddr)));
} else {
autoip_arp_probe(netif);
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_tmr() PROBING Sent Probe\n"));
netif->autoip->sent_num++;
/* calculate time to wait to next probe */
netif->autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) %
((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) +
PROBE_MIN * AUTOIP_TICKS_PER_SECOND);
}
}
break;
case AUTOIP_STATE_ANNOUNCING:
if (netif->autoip->ttw > 0) {
netif->autoip->ttw--;
} else {
if (netif->autoip->sent_num == 0) {
/* We are here the first time, so we waited ANNOUNCE_WAIT seconds
* Now we can bind to an IP address and use it.
*
* autoip_bind calls netif_set_addr. This triggers a gratuitous ARP
* which counts as an announcement.
*/
autoip_bind(netif);
} else {
autoip_arp_announce(netif);
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_tmr() ANNOUNCING Sent Announce\n"));
}
netif->autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND;
netif->autoip->sent_num++;
if (netif->autoip->sent_num >= ANNOUNCE_NUM) {
netif->autoip->state = AUTOIP_STATE_BOUND;
netif->autoip->sent_num = 0;
netif->autoip->ttw = 0;
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_tmr(): changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(&netif->autoip->llipaddr), ip4_addr2_16(&netif->autoip->llipaddr),
ip4_addr3_16(&netif->autoip->llipaddr), ip4_addr4_16(&netif->autoip->llipaddr)));
}
}
break;
default:
/* nothing to do in other states */
break;
}
}
/* proceed to next network interface */
netif = netif->next;
}
}
/**
* Handles every incoming ARP Packet, called by etharp_arp_input.
*
* @param netif network interface to use for autoip processing
* @param hdr Incoming ARP packet
*/
void
autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr)
{
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n"));
if ((netif->autoip != NULL) && (netif->autoip->state != AUTOIP_STATE_OFF)) {
/* when ip.src == llipaddr && hw.src != netif->hwaddr
*
* when probing ip.dst == llipaddr && hw.src != netif->hwaddr
* we have a conflict and must solve it
*/
ip4_addr_t sipaddr, dipaddr;
struct eth_addr netifaddr;
ETHADDR16_COPY(netifaddr.addr, netif->hwaddr);
/* Copy struct ip4_addr2 to aligned ip4_addr, to support compilers without
* structure packing (not using structure copy which breaks strict-aliasing rules).
*/
IPADDR2_COPY(&sipaddr, &hdr->sipaddr);
IPADDR2_COPY(&dipaddr, &hdr->dipaddr);
if ((netif->autoip->state == AUTOIP_STATE_PROBING) ||
((netif->autoip->state == AUTOIP_STATE_ANNOUNCING) &&
(netif->autoip->sent_num == 0))) {
/* RFC 3927 Section 2.2.1:
* from beginning to after ANNOUNCE_WAIT
* seconds we have a conflict if
* ip.src == llipaddr OR
* ip.dst == llipaddr && hw.src != own hwaddr
*/
if ((ip4_addr_cmp(&sipaddr, &netif->autoip->llipaddr)) ||
(ip4_addr_cmp(&dipaddr, &netif->autoip->llipaddr) &&
!eth_addr_cmp(&netifaddr, &hdr->shwaddr))) {
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING,
("autoip_arp_reply(): Probe Conflict detected\n"));
autoip_restart(netif);
}
} else {
/* RFC 3927 Section 2.5:
* in any state we have a conflict if
* ip.src == llipaddr && hw.src != own hwaddr
*/
if (ip4_addr_cmp(&sipaddr, &netif->autoip->llipaddr) &&
!eth_addr_cmp(&netifaddr, &hdr->shwaddr)) {
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING,
("autoip_arp_reply(): Conflicting ARP-Packet detected\n"));
autoip_handle_arp_conflict(netif);
}
}
}
}
/** check if AutoIP supplied netif->ip_addr
*
* @param netif the netif to check
* @return 1 if AutoIP supplied netif->ip_addr (state BOUND),
* 0 otherwise
*/
u8_t
autoip_supplied_address(struct netif *netif)
{
if ((netif != NULL) && (netif->autoip != NULL)) {
if (netif->autoip->state == AUTOIP_STATE_BOUND) {
return 1;
}
}
return 0;
}
#endif /* LWIP_IPV4 && LWIP_AUTOIP */

components/lwip/core/ipv4/dhcp.c (Executable file, 1941 lines): file diff suppressed because it is too large

391
components/lwip/core/ipv4/icmp.c Executable file
View File

@@ -0,0 +1,391 @@
/**
* @file
* ICMP - Internet Control Message Protocol
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
/* Some ICMP messages should be passed to the transport protocols. This
is not implemented. */
#include "lwip/opt.h"
#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */
#include "lwip/icmp.h"
#include "lwip/inet_chksum.h"
#include "lwip/ip.h"
#include "lwip/def.h"
#include "lwip/stats.h"
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
/** Small optimization: set to 0 if incoming PBUF_POOL pbuf always can be
* used to modify and send a response packet (and to 1 if this is not the case,
* e.g. when the link header is stripped off when receiving) */
#ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN
#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1
#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */
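/* Hedged configuration sketch: a port whose driver always delivers echo
 * requests in pbufs that still have headroom for the link header could set
 * this to 0 in lwipopts.h and skip the copy below. That property of the
 * driver is an assumption to be verified per port. */
#if 0 /* illustrative lwipopts.h fragment */
#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN   0
#endif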
/* The amount of data from the original packet to return in a dest-unreachable */
#define ICMP_DEST_UNREACH_DATASIZE 8
static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code);
/**
* Processes ICMP input packets, called from ip_input().
*
* Currently only processes icmp echo requests and sends
* out the echo response.
*
* @param p the icmp echo request packet, p->payload pointing to the icmp header
* @param inp the netif on which this packet was received
*/
void
icmp_input(struct pbuf *p, struct netif *inp)
{
u8_t type;
#ifdef LWIP_DEBUG
u8_t code;
#endif /* LWIP_DEBUG */
struct icmp_echo_hdr *iecho;
const struct ip_hdr *iphdr_in;
s16_t hlen;
const ip4_addr_t* src;
ICMP_STATS_INC(icmp.recv);
MIB2_STATS_INC(mib2.icmpinmsgs);
iphdr_in = ip4_current_header();
hlen = IPH_HL(iphdr_in) * 4;
if (p->len < sizeof(u16_t)*2) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len));
goto lenerr;
}
type = *((u8_t *)p->payload);
#ifdef LWIP_DEBUG
code = *(((u8_t *)p->payload)+1);
#endif /* LWIP_DEBUG */
switch (type) {
case ICMP_ER:
/* This is OK, echo reply might have been parsed by a raw PCB
(as obviously, an echo request has been sent, too). */
MIB2_STATS_INC(mib2.icmpinechoreps);
break;
case ICMP_ECHO:
MIB2_STATS_INC(mib2.icmpinechos);
src = ip4_current_dest_addr();
/* multicast destination address? */
if (ip4_addr_ismulticast(ip4_current_dest_addr())) {
#if LWIP_MULTICAST_PING
/* For multicast, use address of receiving interface as source address */
src = netif_ip4_addr(inp);
#else /* LWIP_MULTICAST_PING */
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n"));
goto icmperr;
#endif /* LWIP_MULTICAST_PING */
}
/* broadcast destination address? */
if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) {
#if LWIP_BROADCAST_PING
/* For broadcast, use address of receiving interface as source address */
src = netif_ip4_addr(inp);
#else /* LWIP_BROADCAST_PING */
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n"));
goto icmperr;
#endif /* LWIP_BROADCAST_PING */
}
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n"));
if (p->tot_len < sizeof(struct icmp_echo_hdr)) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n"));
goto lenerr;
}
#if CHECKSUM_CHECK_ICMP
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) {
if (inet_chksum_pbuf(p) != 0) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n"));
pbuf_free(p);
ICMP_STATS_INC(icmp.chkerr);
MIB2_STATS_INC(mib2.icmpinerrors);
return;
}
}
#endif
#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN
if (pbuf_header(p, (PBUF_IP_HLEN + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN))) {
/* p is not big enough to contain link headers
* allocate a new one and copy p into it
*/
struct pbuf *r;
/* allocate new packet buffer with space for link headers */
r = pbuf_alloc(PBUF_LINK, p->tot_len + hlen, PBUF_RAM);
if (r == NULL) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n"));
goto icmperr;
}
LWIP_ASSERT("check that first pbuf can hold struct the ICMP header",
(r->len >= hlen + sizeof(struct icmp_echo_hdr)));
/* copy the ip header */
MEMCPY(r->payload, iphdr_in, hlen);
/* switch r->payload back to icmp header */
if (pbuf_header(r, -hlen)) {
LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0);
pbuf_free(r);
goto icmperr;
}
/* copy the rest of the packet without ip header */
if (pbuf_copy(r, p) != ERR_OK) {
LWIP_ASSERT("icmp_input: copying to new pbuf failed\n", 0);
pbuf_free(r);
goto icmperr;
}
/* free the original p */
pbuf_free(p);
/* we now have an identical copy of p that has room for link headers */
p = r;
} else {
/* restore p->payload to point to icmp header */
if (pbuf_header(p, -(s16_t)(PBUF_IP_HLEN + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN))) {
LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0);
goto icmperr;
}
}
#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */
/* At this point, all checks are OK. */
/* We generate an answer by switching the dest and src ip addresses,
* setting the icmp type to ECHO_RESPONSE and updating the checksum. */
iecho = (struct icmp_echo_hdr *)p->payload;
if (pbuf_header(p, hlen)) {
LWIP_ASSERT("Can't move over header in packet", 0);
} else {
err_t ret;
struct ip_hdr *iphdr = (struct ip_hdr*)p->payload;
ip4_addr_copy(iphdr->src, *src);
ip4_addr_copy(iphdr->dest, *ip4_current_src_addr());
ICMPH_TYPE_SET(iecho, ICMP_ER);
#if CHECKSUM_GEN_ICMP
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) {
/* adjust the checksum */
if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) {
iecho->chksum += PP_HTONS(ICMP_ECHO << 8) + 1;
} else {
iecho->chksum += PP_HTONS(ICMP_ECHO << 8);
}
}
#if LWIP_CHECKSUM_CTRL_PER_NETIF
else {
iecho->chksum = 0;
}
#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */
#else /* CHECKSUM_GEN_ICMP */
iecho->chksum = 0;
#endif /* CHECKSUM_GEN_ICMP */
/* Set the correct TTL and recalculate the header checksum. */
IPH_TTL_SET(iphdr, ICMP_TTL);
IPH_CHKSUM_SET(iphdr, 0);
#if CHECKSUM_GEN_IP
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) {
IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));
}
#endif /* CHECKSUM_GEN_IP */
ICMP_STATS_INC(icmp.xmit);
/* increase number of messages attempted to send */
MIB2_STATS_INC(mib2.icmpoutmsgs);
/* increase number of echo replies attempted to send */
MIB2_STATS_INC(mib2.icmpoutechoreps);
/* send an ICMP packet */
ret = ip4_output_if(p, src, IP_HDRINCL,
ICMP_TTL, 0, IP_PROTO_ICMP, inp);
if (ret != ERR_OK) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret)));
}
}
break;
default:
if (type == ICMP_DUR) {
MIB2_STATS_INC(mib2.icmpindestunreachs);
} else if (type == ICMP_TE) {
MIB2_STATS_INC(mib2.icmpintimeexcds);
} else if (type == ICMP_PP) {
MIB2_STATS_INC(mib2.icmpinparmprobs);
} else if (type == ICMP_SQ) {
MIB2_STATS_INC(mib2.icmpinsrcquenchs);
} else if (type == ICMP_RD) {
MIB2_STATS_INC(mib2.icmpinredirects);
} else if (type == ICMP_TS) {
MIB2_STATS_INC(mib2.icmpintimestamps);
} else if (type == ICMP_TSR) {
MIB2_STATS_INC(mib2.icmpintimestampreps);
} else if (type == ICMP_AM) {
MIB2_STATS_INC(mib2.icmpinaddrmasks);
} else if (type == ICMP_AMR) {
MIB2_STATS_INC(mib2.icmpinaddrmaskreps);
}
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n",
(s16_t)type, (s16_t)code));
ICMP_STATS_INC(icmp.proterr);
ICMP_STATS_INC(icmp.drop);
}
pbuf_free(p);
return;
lenerr:
pbuf_free(p);
ICMP_STATS_INC(icmp.lenerr);
MIB2_STATS_INC(mib2.icmpinerrors);
return;
#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING
icmperr:
pbuf_free(p);
ICMP_STATS_INC(icmp.err);
MIB2_STATS_INC(mib2.icmpinerrors);
return;
#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */
}
/**
* Send an icmp 'destination unreachable' packet, called from ip_input() if
* the transport layer protocol is unknown and from udp_input() if the local
* port is not bound.
*
* @param p the input packet for which the 'unreachable' should be sent,
* p->payload pointing to the IP header
* @param t type of the 'unreachable' packet
*/
void
icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t)
{
MIB2_STATS_INC(mib2.icmpoutdestunreachs);
icmp_send_response(p, ICMP_DUR, t);
}
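/* Usage sketch, assuming a transport input handler that found no receiver
 * for the packet; p->payload must point at the offending IP header, as
 * udp_input() arranges before calling here. The helper name is hypothetical
 * and the block is compiled out. */
#if 0
static void example_reject_unknown_port(struct pbuf *p)
{
  icmp_dest_unreach(p, ICMP_DUR_PORT);  /* reply "port unreachable" */
  pbuf_free(p);                         /* the caller still owns and frees p */
}
#endif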
#if IP_FORWARD || IP_REASSEMBLY
/**
* Send a 'time exceeded' packet, called from ip_forward() if TTL is 0.
*
* @param p the input packet for which the 'time exceeded' should be sent,
* p->payload pointing to the IP header
* @param t type of the 'time exceeded' packet
*/
void
icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t)
{
MIB2_STATS_INC(mib2.icmpouttimeexcds);
icmp_send_response(p, ICMP_TE, t);
}
#endif /* IP_FORWARD || IP_REASSEMBLY */
/**
* Send an icmp packet in response to an incoming packet.
*
* @param p the input packet for which the 'unreachable' should be sent,
* p->payload pointing to the IP header
* @param type Type of the ICMP header
* @param code Code of the ICMP header
*/
static void
icmp_send_response(struct pbuf *p, u8_t type, u8_t code)
{
struct pbuf *q;
struct ip_hdr *iphdr;
/* we can use the echo header here */
struct icmp_echo_hdr *icmphdr;
ip4_addr_t iphdr_src;
struct netif *netif;
/* increase number of messages attempted to send */
MIB2_STATS_INC(mib2.icmpoutmsgs);
/* ICMP header + IP header + 8 bytes of data */
q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE,
PBUF_RAM);
if (q == NULL) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMP packet.\n"));
MIB2_STATS_INC(mib2.icmpouterrors);
return;
}
LWIP_ASSERT("check that first pbuf can hold icmp message",
(q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE)));
iphdr = (struct ip_hdr *)p->payload;
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded from "));
ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src);
LWIP_DEBUGF(ICMP_DEBUG, (" to "));
ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest);
LWIP_DEBUGF(ICMP_DEBUG, ("\n"));
icmphdr = (struct icmp_echo_hdr *)q->payload;
icmphdr->type = type;
icmphdr->code = code;
icmphdr->id = 0;
icmphdr->seqno = 0;
/* copy fields from original packet */
SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload,
IP_HLEN + ICMP_DEST_UNREACH_DATASIZE);
ip4_addr_copy(iphdr_src, iphdr->src);
#ifdef LWIP_HOOK_IP4_ROUTE_SRC
{
ip4_addr_t iphdr_dst;
ip4_addr_copy(iphdr_dst, iphdr->dest);
netif = ip4_route_src(&iphdr_src, &iphdr_dst);
}
#else
netif = ip4_route(&iphdr_src);
#endif
if (netif != NULL) {
/* calculate checksum */
icmphdr->chksum = 0;
#if CHECKSUM_GEN_ICMP
IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) {
icmphdr->chksum = inet_chksum(icmphdr, q->len);
}
#endif
ICMP_STATS_INC(icmp.xmit);
ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif);
}
pbuf_free(q);
}
#endif /* LWIP_IPV4 && LWIP_ICMP */

832
components/lwip/core/ipv4/igmp.c Executable file
View File

@@ -0,0 +1,832 @@
/**
* @file
* IGMP - Internet Group Management Protocol
*
*/
/*
* Copyright (c) 2002 CITEL Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file is a contribution to the lwIP TCP/IP stack.
* The Swedish Institute of Computer Science and Adam Dunkels
* are specifically granted permission to redistribute this
* source code.
*/
/*-------------------------------------------------------------
Note 1)
Although the RFC requires V1 AND V2 capability,
we will only support V2 since V1 is now very old (August 1989).
V1 can be added later if required;
a debug print and a statistic have been implemented to
flag any V1 queries that are seen.
-------------------------------------------------------------
-------------------------------------------------------------
Note 2)
A query for a specific group address (as opposed to ALLHOSTS)
has now been implemented, although it is unclear whether it is required;
a debug print and a statistic have been implemented to
flag such queries.
-------------------------------------------------------------
-------------------------------------------------------------
Note 3)
The router alert option (RFC 2113) is added to outgoing packets
but is not checked rigorously on incoming packets.
-------------------------------------------------------------
Steve Reynolds
------------------------------------------------------------*/
/*-----------------------------------------------------------------------------
* RFC 988 - Host extensions for IP multicasting - V0
* RFC 1054 - Host extensions for IP multicasting -
* RFC 1112 - Host extensions for IP multicasting - V1
* RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard)
* RFC 3376 - Internet Group Management Protocol, Version 3 - V3
* RFC 4604 - Using Internet Group Management Protocol Version 3... - V3+
* RFC 2113 - IP Router Alert Option -
*----------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------
* Includes
*----------------------------------------------------------------------------*/
#include "lwip/opt.h"
#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */
#include "lwip/igmp.h"
#include "lwip/debug.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/ip.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/stats.h"
#include "string.h"
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
/*
* IGMP constants
*/
#define IGMP_TTL 1
#define IGMP_MINLEN 8
#define ROUTER_ALERT 0x9404U
#define ROUTER_ALERTLEN 4
/*
* IGMP message types, including version number.
*/
#define IGMP_MEMB_QUERY 0x11 /* Membership query */
#define IGMP_V1_MEMB_REPORT 0x12 /* Ver. 1 membership report */
#define IGMP_V2_MEMB_REPORT 0x16 /* Ver. 2 membership report */
#define IGMP_LEAVE_GROUP 0x17 /* Leave-group message */
/* Group membership states */
#define IGMP_GROUP_NON_MEMBER 0
#define IGMP_GROUP_DELAYING_MEMBER 1
#define IGMP_GROUP_IDLE_MEMBER 2
/**
* IGMP packet format.
*/
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct igmp_msg {
PACK_STRUCT_FLD_8(u8_t igmp_msgtype);
PACK_STRUCT_FLD_8(u8_t igmp_maxresp);
PACK_STRUCT_FIELD(u16_t igmp_checksum);
PACK_STRUCT_FLD_S(ip4_addr_p_t igmp_group_address);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/epstruct.h"
#endif
static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr);
static err_t igmp_remove_group(struct igmp_group *group);
static void igmp_timeout( struct igmp_group *group);
static void igmp_start_timer(struct igmp_group *group, u8_t max_time);
static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp);
static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif);
static void igmp_send(struct igmp_group *group, u8_t type);
static struct igmp_group* igmp_group_list;
static ip4_addr_t allsystems;
static ip4_addr_t allrouters;
/**
* Initialize the IGMP module
*/
void
igmp_init(void)
{
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n"));
IP4_ADDR(&allsystems, 224, 0, 0, 1);
IP4_ADDR(&allrouters, 224, 0, 0, 2);
}
/**
* Start IGMP processing on interface
*
* @param netif network interface on which start IGMP processing
*/
err_t
igmp_start(struct netif *netif)
{
struct igmp_group* group;
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void*)netif));
group = igmp_lookup_group(netif, &allsystems);
if (group != NULL) {
group->group_state = IGMP_GROUP_IDLE_MEMBER;
group->use++;
/* Allow the igmp messages at the MAC level */
if (netif->igmp_mac_filter != NULL) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD "));
ip4_addr_debug_print_val(IGMP_DEBUG, allsystems);
LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif));
netif->igmp_mac_filter(netif, &allsystems, IGMP_ADD_MAC_FILTER);
}
return ERR_OK;
}
return ERR_MEM;
}
/**
* Stop IGMP processing on interface
*
* @param netif network interface on which stop IGMP processing
*/
err_t
igmp_stop(struct netif *netif)
{
struct igmp_group *group = igmp_group_list;
struct igmp_group *prev = NULL;
struct igmp_group *next;
/* look for groups joined on this interface further down the list */
while (group != NULL) {
next = group->next;
/* is it a group joined on this interface? */
if (group->netif == netif) {
/* is it the first group of the list? */
if (group == igmp_group_list) {
igmp_group_list = next;
}
/* is there a "previous" group defined? */
if (prev != NULL) {
prev->next = next;
}
/* disable the group at the MAC level */
if (netif->igmp_mac_filter != NULL) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL "));
ip4_addr_debug_print(IGMP_DEBUG, &group->group_address);
LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif));
netif->igmp_mac_filter(netif, &(group->group_address), IGMP_DEL_MAC_FILTER);
}
/* free group */
memp_free(MEMP_IGMP_GROUP, group);
} else {
/* change the "previous" */
prev = group;
}
/* move to "next" */
group = next;
}
return ERR_OK;
}
/**
* Report IGMP memberships for this interface
*
* @param netif network interface on which report IGMP memberships
*/
void
igmp_report_groups(struct netif *netif)
{
struct igmp_group *group = igmp_group_list;
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void*)netif));
while (group != NULL) {
if ((group->netif == netif) && (!(ip4_addr_cmp(&(group->group_address), &allsystems)))) {
igmp_delaying_member(group, IGMP_JOIN_DELAYING_MEMBER_TMR);
}
group = group->next;
}
}
/**
* Search for a group in the global igmp_group_list
*
* @param ifp the network interface for which to look
* @param addr the group ip address to search for
* @return a struct igmp_group* if the group has been found,
* NULL if the group wasn't found.
*/
struct igmp_group *
igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr)
{
struct igmp_group *group = igmp_group_list;
while (group != NULL) {
if ((group->netif == ifp) && (ip4_addr_cmp(&(group->group_address), addr))) {
return group;
}
group = group->next;
}
/* to be clearer, we return NULL here instead of
* 'group' (which is also NULL at this point).
*/
return NULL;
}
/**
* Search for a specific igmp group and create a new one if not found.
*
* @param ifp the network interface for which to look
* @param addr the group ip address to search
* @return a struct igmp_group*,
* NULL on memory error.
*/
struct igmp_group *
igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr)
{
struct igmp_group *group;
/* Search if the group already exists */
group = igmp_lookfor_group(ifp, addr);
if (group != NULL) {
/* Group already exists. */
return group;
}
/* Group doesn't exist yet, create a new one */
group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP);
if (group != NULL) {
group->netif = ifp;
ip4_addr_set(&(group->group_address), addr);
group->timer = 0; /* Not running */
group->group_state = IGMP_GROUP_NON_MEMBER;
group->last_reporter_flag = 0;
group->use = 0;
group->next = igmp_group_list;
igmp_group_list = group;
}
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group?"":"impossible to ")));
ip4_addr_debug_print(IGMP_DEBUG, addr);
LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void*)ifp));
return group;
}
/**
* Remove a group in the global igmp_group_list
*
* @param group the group to remove from the global igmp_group_list
* @return ERR_OK if group was removed from the list, an err_t otherwise
*/
static err_t
igmp_remove_group(struct igmp_group *group)
{
err_t err = ERR_OK;
/* Is it the first group? */
if (igmp_group_list == group) {
igmp_group_list = group->next;
} else {
/* look for group further down the list */
struct igmp_group *tmpGroup;
for (tmpGroup = igmp_group_list; tmpGroup != NULL; tmpGroup = tmpGroup->next) {
if (tmpGroup->next == group) {
tmpGroup->next = group->next;
break;
}
}
/* Group not found in the global igmp_group_list */
if (tmpGroup == NULL) {
err = ERR_ARG;
}
}
/* free group */
memp_free(MEMP_IGMP_GROUP, group);
return err;
}
/**
* Called from ip_input() if a new IGMP packet is received.
*
* @param p received igmp packet, p->payload pointing to the igmp header
* @param inp network interface on which the packet was received
* @param dest destination ip address of the igmp packet
*/
void
igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest)
{
struct igmp_msg* igmp;
struct igmp_group* group;
struct igmp_group* groupref;
IGMP_STATS_INC(igmp.recv);
/* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */
if (p->len < IGMP_MINLEN) {
pbuf_free(p);
IGMP_STATS_INC(igmp.lenerr);
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n"));
return;
}
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from "));
ip4_addr_debug_print(IGMP_DEBUG, &(ip4_current_header()->src));
LWIP_DEBUGF(IGMP_DEBUG, (" to address "));
ip4_addr_debug_print(IGMP_DEBUG, &(ip4_current_header()->dest));
LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void*)inp));
/* Now calculate and check the checksum */
igmp = (struct igmp_msg *)p->payload;
if (inet_chksum(igmp, p->len)) {
pbuf_free(p);
IGMP_STATS_INC(igmp.chkerr);
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n"));
return;
}
/* Packet is ok so find an existing group */
group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */
/* If no matching group was found, the packet is not for us */
if (!group) {
pbuf_free(p);
IGMP_STATS_INC(igmp.drop);
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n"));
return;
}
/* NOW ACT ON THE INCOMING MESSAGE TYPE... */
switch (igmp->igmp_msgtype) {
case IGMP_MEMB_QUERY:
/* IGMP_MEMB_QUERY to the "all systems" address ? */
if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) {
/* THIS IS THE GENERAL QUERY */
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
if (igmp->igmp_maxresp == 0) {
IGMP_STATS_INC(igmp.rx_v1);
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time== 0 - this is V1 and not implemented - treat as v2\n"));
igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR;
} else {
IGMP_STATS_INC(igmp.rx_general);
}
groupref = igmp_group_list;
while (groupref) {
/* Do not send messages on the all systems group address! */
if ((groupref->netif == inp) && (!(ip4_addr_cmp(&(groupref->group_address), &allsystems)))) {
igmp_delaying_member(groupref, igmp->igmp_maxresp);
}
groupref = groupref->next;
}
} else {
/* IGMP_MEMB_QUERY to a specific group ? */
if (!ip4_addr_isany(&igmp->igmp_group_address)) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group "));
ip4_addr_debug_print(IGMP_DEBUG, &igmp->igmp_group_address);
if (ip4_addr_cmp(dest, &allsystems)) {
ip4_addr_t groupaddr;
LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
/* we first need to re-look for the group since we used dest last time */
ip4_addr_copy(groupaddr, igmp->igmp_group_address);
group = igmp_lookfor_group(inp, &groupaddr);
} else {
LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
}
if (group != NULL) {
IGMP_STATS_INC(igmp.rx_group);
igmp_delaying_member(group, igmp->igmp_maxresp);
} else {
IGMP_STATS_INC(igmp.drop);
}
} else {
IGMP_STATS_INC(igmp.proterr);
}
}
break;
case IGMP_V2_MEMB_REPORT:
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n"));
IGMP_STATS_INC(igmp.rx_report);
if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) {
/* This is on a specific group we have already looked up */
group->timer = 0; /* stopped */
group->group_state = IGMP_GROUP_IDLE_MEMBER;
group->last_reporter_flag = 0;
}
break;
default:
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n",
igmp->igmp_msgtype, group->group_state, (void*)&group, (void*)group->netif));
IGMP_STATS_INC(igmp.proterr);
break;
}
pbuf_free(p);
return;
}
/**
* Join a group on one network interface, or on all IGMP-enabled interfaces
* if ifaddr is the any-address.
*
* @param ifaddr ip address of the network interface which should join a new group
* @param groupaddr the ip address of the group to join
* @return ERR_OK if group was joined on the netif(s), an err_t otherwise
*/
err_t
igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr)
{
err_t err = ERR_VAL; /* no matching interface */
struct netif *netif;
/* make sure it is multicast address */
LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
/* loop through netif's */
netif = netif_list;
while (netif != NULL) {
/* Should we join this interface ? */
if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) {
err = igmp_joingroup_netif(netif, groupaddr);
if (err != ERR_OK) {
/* Return an error even if some network interfaces are joined */
/** @todo undo any other netif already joined */
return err;
}
}
/* proceed to next network interface */
netif = netif->next;
}
return err;
}
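/* Usage sketch, assuming the application joins the (arbitrary) example group
 * 239.1.1.1 from the tcpip thread; passing the any-address as ifaddr selects
 * every IGMP-enabled interface, as the loop above shows. Compiled out,
 * example_join_group() is a hypothetical helper. */
#if 0
static err_t example_join_group(void)
{
  ip4_addr_t iface, group;
  ip4_addr_set_any(&iface);           /* any-address: match all IGMP netifs */
  IP4_ADDR(&group, 239, 1, 1, 1);     /* arbitrary example group address */
  return igmp_joingroup(&iface, &group);
}
#endif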
/**
* Join a group on one network interface.
*
* @param netif the network interface which should join a new group
* @param groupaddr the ip address of the group to join
* @return ERR_OK if group was joined on the netif, an err_t otherwise
*/
err_t
igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr)
{
struct igmp_group *group;
/* make sure it is multicast address */
LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
/* make sure it is an igmp-enabled netif */
LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;);
/* find group or create a new one if not found */
group = igmp_lookup_group(netif, groupaddr);
if (group != NULL) {
/* This should create a new group, check the state to make sure */
if (group->group_state != IGMP_GROUP_NON_MEMBER) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n"));
} else {
/* OK - it was new group */
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: "));
ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
LWIP_DEBUGF(IGMP_DEBUG, ("\n"));
/* If first use of the group, allow the group at the MAC level */
if ((group->use==0) && (netif->igmp_mac_filter != NULL)) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD "));
ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif));
netif->igmp_mac_filter(netif, groupaddr, IGMP_ADD_MAC_FILTER);
}
IGMP_STATS_INC(igmp.tx_join);
igmp_send(group, IGMP_V2_MEMB_REPORT);
igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR);
/* Need to work out where this timer comes from */
group->group_state = IGMP_GROUP_DELAYING_MEMBER;
}
/* Increment group use */
group->use++;
/* Join on this interface */
return ERR_OK;
} else {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n"));
return ERR_MEM;
}
}
/**
* Leave a group on one network interface, or on all IGMP-enabled interfaces
* if ifaddr is the any-address.
*
* @param ifaddr ip address of the network interface which should leave a group
* @param groupaddr the ip address of the group to leave
* @return ERR_OK if group was left on the netif(s), an err_t otherwise
*/
err_t
igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr)
{
err_t err = ERR_VAL; /* no matching interface */
struct netif *netif;
/* make sure it is multicast address */
LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
/* loop through netif's */
netif = netif_list;
while (netif != NULL) {
/* Should we leave this interface ? */
if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) {
err_t res = igmp_leavegroup_netif(netif, groupaddr);
if (err != ERR_OK) {
/* Store this result if we have not yet gotten a success */
err = res;
}
}
/* proceed to next network interface */
netif = netif->next;
}
return err;
}
/**
* Leave a group on one network interface.
*
* @param netif the network interface which should leave a group
* @param groupaddr the ip address of the group to leave
* @return ERR_OK if group was left on the netif, an err_t otherwise
*/
err_t
igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr)
{
struct igmp_group *group;
/* make sure it is multicast address */
LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
/* make sure it is an igmp-enabled netif */
LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;);
/* find group */
group = igmp_lookfor_group(netif, groupaddr);
if (group != NULL) {
/* Only send a leave if the flag is set according to the state diagram */
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: "));
ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
LWIP_DEBUGF(IGMP_DEBUG, ("\n"));
/* If there is no other use of the group */
if (group->use <= 1) {
/* If we are the last reporter for this group */
if (group->last_reporter_flag) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n"));
IGMP_STATS_INC(igmp.tx_leave);
igmp_send(group, IGMP_LEAVE_GROUP);
}
/* Disable the group at the MAC level */
if (netif->igmp_mac_filter != NULL) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL "));
ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif));
netif->igmp_mac_filter(netif, groupaddr, IGMP_DEL_MAC_FILTER);
}
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: remove group: "));
ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
LWIP_DEBUGF(IGMP_DEBUG, ("\n"));
/* Free the group */
igmp_remove_group(group);
} else {
/* Decrement group use */
group->use--;
}
return ERR_OK;
} else {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n"));
return ERR_VAL;
}
}
/**
* The igmp timer function (both for NO_SYS=1 and =0)
* Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default).
*/
void
igmp_tmr(void)
{
struct igmp_group *group = igmp_group_list;
while (group != NULL) {
if (group->timer > 0) {
group->timer--;
if (group->timer == 0) {
igmp_timeout(group);
}
}
group = group->next;
}
}
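/* Timing sketch for a NO_SYS=1 main loop; example_poll_igmp() and its tick
 * bookkeeping are hypothetical. Threaded builds already drive this from the
 * stack's timeout machinery, so no application call is needed there. */
#if 0
static u32_t last_igmp_tick;
static void example_poll_igmp(u32_t now_ms)
{
  if ((u32_t)(now_ms - last_igmp_tick) >= IGMP_TMR_INTERVAL) {
    last_igmp_tick = now_ms;
    igmp_tmr();   /* advances every DELAYING_MEMBER timer by one 100 ms step */
  }
}
#endif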
/**
* Called if a timeout for one group is reached.
* Sends a report for this group.
*
* @param group an igmp_group for which a timeout is reached
*/
static void
igmp_timeout(struct igmp_group *group)
{
/* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group
(unless it is the allsystems group) */
if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) &&
(!(ip4_addr_cmp(&(group->group_address), &allsystems)))) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address "));
ip4_addr_debug_print(IGMP_DEBUG, &(group->group_address));
LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void*)group->netif));
IGMP_STATS_INC(igmp.tx_report);
igmp_send(group, IGMP_V2_MEMB_REPORT);
}
}
/**
* Start a timer for an igmp group
*
* @param group the igmp_group for which to start a timer
* @param max_time the time in multiples of IGMP_TMR_INTERVAL (decrease with
* every call to igmp_tmr())
*/
static void
igmp_start_timer(struct igmp_group *group, u8_t max_time)
{
#ifdef LWIP_RAND
group->timer = max_time > 2 ? (LWIP_RAND() % max_time) : 1;
#else /* LWIP_RAND */
/* ATTENTION: use this only if absolutely necessary! */
group->timer = max_time / 2;
#endif /* LWIP_RAND */
if (group->timer == 0) {
group->timer = 1;
}
}
/**
* Delaying membership report for a group if necessary
*
* @param group the igmp_group for which "delaying" membership report
* @param maxresp query delay
*/
static void
igmp_delaying_member(struct igmp_group *group, u8_t maxresp)
{
if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) ||
((group->group_state == IGMP_GROUP_DELAYING_MEMBER) &&
((group->timer == 0) || (maxresp < group->timer)))) {
igmp_start_timer(group, maxresp);
group->group_state = IGMP_GROUP_DELAYING_MEMBER;
}
}
/**
* Sends an IP packet on a network interface. This function constructs the IP header
* and calculates the IP header checksum. If the source IP address is NULL,
* the IP address of the outgoing network interface is filled in as source address.
*
* @param p the packet to send (p->payload points to the data, e.g. next
protocol header; if dest == IP_HDRINCL, p already includes an IP
header and p->payload points to that IP header)
* @param src the source IP address to send from (if src == IP_ADDR_ANY, the
* IP address of the netif used to send is used as source address)
* @param dest the destination IP address to send the packet to
* @param netif the netif on which to send this packet
*        (the TTL is fixed to IGMP_TTL and the protocol to IP_PROTO_IGMP)
* @return ERR_OK if the packet was sent OK
* ERR_BUF if p doesn't have enough space for IP/LINK headers
* returns errors returned by netif->output
*/
static err_t
igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif)
{
/* This is the "router alert" option */
u16_t ra[2];
ra[0] = PP_HTONS(ROUTER_ALERT);
ra[1] = 0x0000; /* Router shall examine packet */
IGMP_STATS_INC(igmp.xmit);
return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN);
}
/**
* Send an igmp packet to a specific group.
*
* @param group the group to which to send the packet
* @param type the type of igmp packet to send
*/
static void
igmp_send(struct igmp_group *group, u8_t type)
{
struct pbuf* p = NULL;
struct igmp_msg* igmp = NULL;
ip4_addr_t src = *IP4_ADDR_ANY;
ip4_addr_t* dest = NULL;
/* IP header + "router alert" option + IGMP header */
p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM);
if (p) {
igmp = (struct igmp_msg *)p->payload;
LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg",
(p->len >= sizeof(struct igmp_msg)));
ip4_addr_copy(src, *netif_ip4_addr(group->netif));
if (type == IGMP_V2_MEMB_REPORT) {
dest = &(group->group_address);
ip4_addr_copy(igmp->igmp_group_address, group->group_address);
group->last_reporter_flag = 1; /* Remember we were the last to report */
} else {
if (type == IGMP_LEAVE_GROUP) {
dest = &allrouters;
ip4_addr_copy(igmp->igmp_group_address, group->group_address);
}
}
if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) {
igmp->igmp_msgtype = type;
igmp->igmp_maxresp = 0;
igmp->igmp_checksum = 0;
igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN);
igmp_ip_output_if(p, &src, dest, group->netif);
}
pbuf_free(p);
} else {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n"));
IGMP_STATS_INC(igmp.memerr);
}
}
#endif /* LWIP_IPV4 && LWIP_IGMP */

1085
components/lwip/core/ipv4/ip4.c Executable file

File diff suppressed because it is too large Load Diff

372
components/lwip/core/ipv4/ip4_addr.c Executable file
View File

@@ -0,0 +1,372 @@
/**
* @file
* This is the IPv4 address tools implementation.
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_IPV4
#include "lwip/ip_addr.h"
#include "lwip/netif.h"
/* used by IP_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */
#ifdef LWIP_ESP8266
//TO_DO
//const ip_addr_t ip_addr_any ICACHE_RODATA_ATTR STORE_ATTR = IPADDR4_INIT(IPADDR_ANY);
//const ip_addr_t ip_addr_broadcast ICACHE_RODATA_ATTR STORE_ATTR = IPADDR4_INIT(IPADDR_BROADCAST);
const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY);
const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST);
#else
const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY);
const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST);
#endif
/**
* Determine if an address is a broadcast address on a network interface
*
* @param addr address to be checked
* @param netif the network interface against which the address is checked
* @return returns non-zero if the address is a broadcast address
*/
u8_t
ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif)
{
ip4_addr_t ipaddr;
ip4_addr_set_u32(&ipaddr, addr);
/* all ones (broadcast) or all zeroes (old skool broadcast) */
if ((~addr == IPADDR_ANY) ||
(addr == IPADDR_ANY)) {
return 1;
/* no broadcast support on this network interface? */
} else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) {
/* the given address cannot be a broadcast address
* nor can we check against any broadcast addresses */
return 0;
/* address matches network interface address exactly? => no broadcast */
} else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) {
return 0;
/* on the same (sub) network... */
} else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif))
/* ...and host identifier bits are all ones? =>... */
&& ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) ==
(IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) {
/* => network broadcast address */
return 1;
} else {
return 0;
}
}
/** Checks if a netmask is valid (starting with ones, then only zeros)
*
* @param netmask the IPv4 netmask to check (in network byte order!)
* @return 1 if the netmask is valid, 0 if it is not
*/
u8_t
ip4_addr_netmask_valid(u32_t netmask)
{
u32_t mask;
u32_t nm_hostorder = lwip_htonl(netmask);
/* first, check for the first zero */
for (mask = 1UL << 31 ; mask != 0; mask >>= 1) {
if ((nm_hostorder & mask) == 0) {
break;
}
}
/* then check that there is no one */
for (; mask != 0; mask >>= 1) {
if ((nm_hostorder & mask) != 0) {
/* there is a one after the first zero -> invalid */
return 0;
}
}
/* no one after the first zero -> valid */
return 1;
}
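/* Worked example for the check above (masks passed in network byte order,
 * as required): 255.255.255.0 is contiguous and valid, while 255.0.255.0 has
 * a one after the first zero and is rejected. Compiled out; illustration only. */
#if 0
static void example_netmask_checks(void)
{
  ip4_addr_t m;
  IP4_ADDR(&m, 255, 255, 255, 0);   /* contiguous ones, then zeros -> valid */
  LWIP_ASSERT("contiguous mask", ip4_addr_netmask_valid(ip4_addr_get_u32(&m)) == 1);
  IP4_ADDR(&m, 255, 0, 255, 0);     /* a one after the first zero -> invalid */
  LWIP_ASSERT("mask with a hole", ip4_addr_netmask_valid(ip4_addr_get_u32(&m)) == 0);
}
#endif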
/* Here for now until needed in other places in lwIP */
#ifndef isprint
#define in_range(c, lo, up) ((u8_t)c >= lo && (u8_t)c <= up)
#define isprint(c) in_range(c, 0x20, 0x7f)
#define isdigit(c) in_range(c, '0', '9')
#define isxdigit(c) (isdigit(c) || in_range(c, 'a', 'f') || in_range(c, 'A', 'F'))
#define islower(c) in_range(c, 'a', 'z')
#define isspace(c) (c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || c == '\v')
#endif
/**
* Ascii internet address interpretation routine.
* The value returned is in network order.
*
* @param cp IP address in ascii representation (e.g. "127.0.0.1")
* @return ip address in network order
*/
u32_t
ipaddr_addr(const char *cp)
{
ip4_addr_t val;
if (ip4addr_aton(cp, &val)) {
return ip4_addr_get_u32(&val);
}
return (IPADDR_NONE);
}
/**
* Check whether "cp" is a valid ascii representation
* of an Internet address and convert to a binary address.
* Returns 1 if the address is valid, 0 if not.
* This replaces inet_addr, the return value from which
* cannot distinguish between failure and a local broadcast address.
*
* @param cp IP address in ascii representation (e.g. "127.0.0.1")
* @param addr pointer to which to save the ip address in network order
* @return 1 if cp could be converted to addr, 0 on failure
*/
int
ip4addr_aton(const char *cp, ip4_addr_t *addr)
{
u32_t val;
u8_t base;
char c;
u32_t parts[4];
u32_t *pp = parts;
#ifdef LWIP_ESP8266
//#if 0
char ch;
unsigned long cutoff;
int cutlim;
#endif
c = *cp;
for (;;) {
/*
* Collect number up to ``.''.
* Values are specified as for C:
* 0x=hex, 0=octal, 1-9=decimal.
*/
if (!isdigit(c)) {
return 0;
}
val = 0;
base = 10;
if (c == '0') {
c = *++cp;
if (c == 'x' || c == 'X') {
base = 16;
c = *++cp;
} else {
base = 8;
}
}
#ifdef LWIP_ESP8266
//#if 0
cutoff = (unsigned long)0xffffffff / (unsigned long)base;
cutlim = (unsigned long)0xffffffff % (unsigned long)base;
for (;;) {
if (isdigit(c)) {
ch = (int)(c - '0');
if (val > cutoff || (val == cutoff && ch > cutlim))
return (0);
val = (val * base) + (int)(c - '0');
c = *++cp;
} else if (base == 16 && isxdigit(c)) {
ch = (int)(c + 10 - (islower(c) ? 'a' : 'A'));
if (val > cutoff || (val == cutoff && ch > cutlim))
return (0);
val = (val << 4) | (int)(c + 10 - (islower(c) ? 'a' : 'A'));
c = *++cp;
} else
break;
}
#else
for (;;) {
if (isdigit(c)) {
val = (val * base) + (int)(c - '0');
c = *++cp;
} else if (base == 16 && isxdigit(c)) {
val = (val << 4) | (int)(c + 10 - (islower(c) ? 'a' : 'A'));
c = *++cp;
} else {
break;
}
}
#endif
if (c == '.') {
/*
* Internet format:
* a.b.c.d
* a.b.c (with c treated as 16 bits)
* a.b (with b treated as 24 bits)
*/
if (pp >= parts + 3) {
return 0;
}
*pp++ = val;
c = *++cp;
} else {
break;
}
}
/*
* Check for trailing characters.
*/
if (c != '\0' && !isspace(c)) {
return 0;
}
/*
* Concoct the address according to
* the number of parts specified.
*/
switch (pp - parts + 1) {
case 0:
return 0; /* initial nondigit */
case 1: /* a -- 32 bits */
break;
case 2: /* a.b -- 8.24 bits */
if (val > 0xffffffUL) {
return 0;
}
if (parts[0] > 0xff) {
return 0;
}
val |= parts[0] << 24;
break;
case 3: /* a.b.c -- 8.8.16 bits */
if (val > 0xffff) {
return 0;
}
if ((parts[0] > 0xff) || (parts[1] > 0xff)) {
return 0;
}
val |= (parts[0] << 24) | (parts[1] << 16);
break;
case 4: /* a.b.c.d -- 8.8.8.8 bits */
if (val > 0xff) {
return 0;
}
if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) {
return 0;
}
val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8);
break;
default:
LWIP_ASSERT("unhandled", 0);
break;
}
if (addr) {
ip4_addr_set_u32(addr, htonl(val));
}
return 1;
}
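/* Format examples for the parser above; the address values are arbitrary.
 * Each call returns 1 and fills 'a' in network byte order. Compiled out,
 * example_aton_formats() is a hypothetical helper. */
#if 0
static void example_aton_formats(void)
{
  ip4_addr_t a;
  ip4addr_aton("192.168.0.1", &a);   /* a.b.c.d */
  ip4addr_aton("192.168.1", &a);     /* a.b.c: the last part fills 16 bits */
  ip4addr_aton("0x7f.1", &a);        /* hex parts are accepted -> 127.0.0.1 */
}
#endif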
/**
* Convert numeric IP address into decimal dotted ASCII representation.
* returns ptr to static buffer; not reentrant!
*
* @param addr ip address in network order to convert
* @return pointer to a global static (!) buffer that holds the ASCII
* representation of addr
*/
char*
ip4addr_ntoa(const ip4_addr_t *addr)
{
static char str[IP4ADDR_STRLEN_MAX];
return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX);
}
/**
* Same as ip4addr_ntoa, but reentrant since a user-supplied buffer is used.
*
* @param addr ip address in network order to convert
* @param buf target buffer where the string is stored
* @param buflen length of buf
* @return either pointer to buf which now holds the ASCII
* representation of addr or NULL if buf was too small
*/
char*
ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen)
{
u32_t s_addr;
char inv[3];
char *rp;
u8_t *ap;
u8_t rem;
u8_t n;
u8_t i;
int len = 0;
s_addr = ip4_addr_get_u32(addr);
rp = buf;
ap = (u8_t *)&s_addr;
for (n = 0; n < 4; n++) {
i = 0;
do {
rem = *ap % (u8_t)10;
*ap /= (u8_t)10;
inv[i++] = '0' + rem;
} while (*ap);
while (i--) {
if (len++ >= buflen) {
return NULL;
}
*rp++ = inv[i];
}
if (len++ >= buflen) {
return NULL;
}
*rp++ = '.';
ap++;
}
*--rp = 0;
return buf;
}
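/* Usage sketch: the _r variant is safe from several contexts because the
 * caller supplies the buffer, whereas ip4addr_ntoa() above shares one static
 * buffer and must not be used concurrently. Compiled out; the netif argument
 * is hypothetical. */
#if 0
static void example_print_addr(const struct netif *netif)
{
  char buf[IP4ADDR_STRLEN_MAX];
  if (ip4addr_ntoa_r(netif_ip4_addr(netif), buf, sizeof(buf)) != NULL) {
    /* buf now holds the dotted-decimal form, e.g. "192.168.0.1" */
  }
}
#endif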
#endif /* LWIP_IPV4 */

902
components/lwip/core/ipv4/ip4_frag.c Executable file
View File

@@ -0,0 +1,902 @@
/**
* @file
* This is the IPv4 packet segmentation and reassembly implementation.
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Jani Monoses <jani@iv.ro>
* Simon Goldschmidt
* original reassembly code by Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_IPV4
#include "lwip/ip_frag.h"
#include "lwip/def.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/stats.h"
#include "lwip/icmp.h"
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
#if IP_REASSEMBLY
/**
* The IP reassembly code currently has the following limitations:
* - IP header options are not supported
* - fragments must not overlap (e.g. due to different routes),
* currently, overlapping or duplicate fragments are thrown away
* if IP_REASS_CHECK_OVERLAP=1 (the default)!
*
* @todo: work with IP header options
*/
/** Setting this to 0, you can turn off checking the fragments for overlapping
* regions. The code gets a little smaller. Only use this if you know that
* overlapping won't occur on your network! */
#ifndef IP_REASS_CHECK_OVERLAP
#define IP_REASS_CHECK_OVERLAP 1
#endif /* IP_REASS_CHECK_OVERLAP */
/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is
* full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller.
* Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA
* is set to 1, so that only one datagram can be reassembled at a time. */
#ifndef IP_REASS_FREE_OLDEST
#define IP_REASS_FREE_OLDEST 1
#endif /* IP_REASS_FREE_OLDEST */
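/* Hedged lwipopts.h sketch: a memory-tight port that trusts its network not
 * to deliver overlapping fragments could shrink the code with the settings
 * below. Both values are assumptions about the deployment, not defaults. */
#if 0 /* illustrative lwipopts.h fragment */
#define IP_REASS_CHECK_OVERLAP   0   /* skip the overlap checks */
#define IP_REASS_FREE_OLDEST     0   /* rely on the reassembly timeout only */
#endif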
#define IP_REASS_FLAG_LASTFRAG 0x01
/** This is a helper struct which holds the starting
* offset and the ending offset of this fragment to
* easily chain the fragments.
* It has the same packing requirements as the IP header, since it replaces
* the IP header in memory in incoming fragments (after copying it) to keep
* track of the various fragments. (-> If the IP header doesn't need packing,
* this struct doesn't need packing either.)
*/
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct ip_reass_helper {
PACK_STRUCT_FIELD(struct pbuf *next_pbuf);
PACK_STRUCT_FIELD(u16_t start);
PACK_STRUCT_FIELD(u16_t end);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/epstruct.h"
#endif
#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \
(ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \
ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \
IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0
/* global variables */
static struct ip_reassdata *reassdatagrams;
static u16_t ip_reass_pbufcount;
/* function prototypes */
static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
/**
* Reassembly timer base function
* for both NO_SYS == 0 and 1 (!).
*
* Should be called every 1000 msec (defined by IP_TMR_INTERVAL).
*/
void
ip_reass_tmr(void)
{
struct ip_reassdata *r, *prev = NULL;
r = reassdatagrams;
while (r != NULL) {
/* Decrement the timer. Once it reaches 0,
* clean up the incomplete fragment assembly */
if (r->timer > 0) {
r->timer--;
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n",(u16_t)r->timer));
prev = r;
r = r->next;
} else {
/* reassembly timed out */
struct ip_reassdata *tmp;
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n"));
tmp = r;
/* get the next pointer before freeing */
r = r->next;
/* free the helper struct and all enqueued pbufs */
ip_reass_free_complete_datagram(tmp, prev);
}
}
}
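/* Timing sketch for a NO_SYS=1 port: call once per IP_TMR_INTERVAL (1000 ms)
 * from the main loop; every_1000ms() is a hypothetical hook. Threaded builds
 * drive this from the stack's timeout list, so no application call is needed. */
#if 0
static void every_1000ms(void)
{
  ip_reass_tmr();   /* ages partial datagrams; frees them when the timer hits 0 */
}
#endif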
/**
* Free a datagram (struct ip_reassdata) and all its pbufs.
* Updates the total count of enqueued pbufs (ip_reass_pbufcount),
* SNMP counters and sends an ICMP time exceeded packet.
*
* @param ipr datagram to free
* @param prev the previous datagram in the linked list
* @return the number of pbufs freed
*/
static int
ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
u16_t pbufs_freed = 0;
u8_t clen;
struct pbuf *p;
struct ip_reass_helper *iprh;
LWIP_ASSERT("prev != ipr", prev != ipr);
if (prev != NULL) {
LWIP_ASSERT("prev->next == ipr", prev->next == ipr);
}
MIB2_STATS_INC(mib2.ipreasmfails);
#if LWIP_ICMP
iprh = (struct ip_reass_helper *)ipr->p->payload;
if (iprh->start == 0) {
/* The first fragment was received, send ICMP time exceeded. */
/* First, de-queue the first pbuf from r->p. */
p = ipr->p;
ipr->p = iprh->next_pbuf;
/* Then, copy the original header into it. */
SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN);
icmp_time_exceeded(p, ICMP_TE_FRAG);
clen = pbuf_clen(p);
LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
pbufs_freed += clen;
pbuf_free(p);
}
#endif /* LWIP_ICMP */
/* First, free all received pbufs. The individual pbufs need to be released
separately as they have not yet been chained */
p = ipr->p;
while (p != NULL) {
struct pbuf *pcur;
iprh = (struct ip_reass_helper *)p->payload;
pcur = p;
/* get the next pointer before freeing */
p = iprh->next_pbuf;
clen = pbuf_clen(pcur);
LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
pbufs_freed += clen;
pbuf_free(pcur);
}
/* Then, unchain the struct ip_reassdata from the list and free it. */
ip_reass_dequeue_datagram(ipr, prev);
LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= pbufs_freed);
ip_reass_pbufcount -= pbufs_freed;
return pbufs_freed;
}
#if IP_REASS_FREE_OLDEST
/**
* Free the oldest datagram to make room for enqueueing new fragments.
* The datagram 'fraghdr' belongs to is not freed!
*
* @param fraghdr IP header of the current fragment
* @param pbufs_needed number of pbufs needed to enqueue
* (used for freeing other datagrams if not enough space)
* @return the number of pbufs freed
*/
static int
ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed)
{
/* @todo Can't we simply remove the last datagram in the
* linked list behind reassdatagrams?
*/
struct ip_reassdata *r, *oldest, *prev, *oldest_prev;
int pbufs_freed = 0, pbufs_freed_current;
int other_datagrams;
/* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs,
* but don't free the datagram that 'fraghdr' belongs to! */
do {
oldest = NULL;
prev = NULL;
oldest_prev = NULL;
other_datagrams = 0;
r = reassdatagrams;
while (r != NULL) {
if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) {
/* Not the same datagram as fraghdr */
other_datagrams++;
if (oldest == NULL) {
oldest = r;
oldest_prev = prev;
} else if (r->timer <= oldest->timer) {
/* older than the previous oldest */
oldest = r;
oldest_prev = prev;
}
}
if (r->next != NULL) {
prev = r;
}
r = r->next;
}
if (oldest != NULL) {
pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev);
pbufs_freed += pbufs_freed_current;
}
} while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1));
return pbufs_freed;
}
#endif /* IP_REASS_FREE_OLDEST */
/**
* Enqueues a new fragment into the fragment queue
* @param fraghdr points to the new fragment's IP header
* @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space)
* @return A pointer to the queue location into which the fragment was enqueued
*/
static struct ip_reassdata*
ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen)
{
struct ip_reassdata* ipr;
#if ! IP_REASS_FREE_OLDEST
LWIP_UNUSED_ARG(clen);
#endif
/* No matching previous fragment found, allocate a new reassdata struct */
ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
if (ipr == NULL) {
#if IP_REASS_FREE_OLDEST
if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) {
ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
}
if (ipr == NULL)
#endif /* IP_REASS_FREE_OLDEST */
{
IPFRAG_STATS_INC(ip_frag.memerr);
LWIP_DEBUGF(IP_REASS_DEBUG,("Failed to alloc reassdata struct\n"));
return NULL;
}
}
memset(ipr, 0, sizeof(struct ip_reassdata));
ipr->timer = IP_REASS_MAXAGE;
/* enqueue the new structure to the front of the list */
ipr->next = reassdatagrams;
reassdatagrams = ipr;
/* copy the ip header for later tests and input */
/* @todo: no ip options supported? */
SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN);
return ipr;
}
/**
* Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs.
* @param ipr points to the queue entry to dequeue
*/
static void
ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
/* dequeue the reass struct */
if (reassdatagrams == ipr) {
/* it was the first in the list */
reassdatagrams = ipr->next;
} else {
/* it wasn't the first, so it must have a valid 'prev' */
LWIP_ASSERT("sanity check linked list", prev != NULL);
prev->next = ipr->next;
}
/* now we can free the ip_reassdata struct */
memp_free(MEMP_REASSDATA, ipr);
}
/**
* Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list
 * will grow over time as new pbufs are received.
* Also checks that the datagram passes basic continuity checks (if the last
* fragment was received at least once).
 * @param ipr points to the reassembly data (holding the 'root' pbuf chain) for the current datagram being assembled.
* @param new_p points to the pbuf for the current fragment
* @return 0 if invalid, >0 otherwise
*/
static int
ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p)
{
struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL;
struct pbuf *q;
u16_t offset,len;
struct ip_hdr *fraghdr;
int valid = 1;
/* Extract length and fragment offset from current fragment */
fraghdr = (struct ip_hdr*)new_p->payload;
len = ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;
offset = (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;
/* overwrite the fragment's ip header from the pbuf with our helper struct,
* and setup the embedded helper structure. */
/* make sure the struct ip_reass_helper fits into the IP header */
LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN",
sizeof(struct ip_reass_helper) <= IP_HLEN);
iprh = (struct ip_reass_helper*)new_p->payload;
iprh->next_pbuf = NULL;
iprh->start = offset;
iprh->end = offset + len;
/* Iterate through until we either get to the end of the list (append),
* or we find one with a larger offset (insert). */
for (q = ipr->p; q != NULL;) {
iprh_tmp = (struct ip_reass_helper*)q->payload;
if (iprh->start < iprh_tmp->start) {
/* the new pbuf should be inserted before this */
iprh->next_pbuf = q;
if (iprh_prev != NULL) {
/* not the fragment with the lowest offset */
#if IP_REASS_CHECK_OVERLAP
if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) {
/* fragment overlaps with previous or following, throw away */
goto freepbuf;
}
#endif /* IP_REASS_CHECK_OVERLAP */
iprh_prev->next_pbuf = new_p;
} else {
/* fragment with the lowest offset */
ipr->p = new_p;
}
break;
} else if (iprh->start == iprh_tmp->start) {
/* received the same datagram twice: no need to keep the datagram */
goto freepbuf;
#if IP_REASS_CHECK_OVERLAP
} else if (iprh->start < iprh_tmp->end) {
/* overlap: no need to keep the new datagram */
goto freepbuf;
#endif /* IP_REASS_CHECK_OVERLAP */
} else {
/* Check if the fragments received so far have no holes. */
if (iprh_prev != NULL) {
if (iprh_prev->end != iprh_tmp->start) {
/* There is a fragment missing between the current
* and the previous fragment */
valid = 0;
}
}
}
q = iprh_tmp->next_pbuf;
iprh_prev = iprh_tmp;
}
/* If q is NULL, then we made it to the end of the list. Determine what to do now */
if (q == NULL) {
if (iprh_prev != NULL) {
      /* this is (for now) the fragment with the highest offset:
* chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
#endif /* IP_REASS_CHECK_OVERLAP */
iprh_prev->next_pbuf = new_p;
if (iprh_prev->end != iprh->start) {
valid = 0;
}
} else {
#if IP_REASS_CHECK_OVERLAP
LWIP_ASSERT("no previous fragment, this must be the first fragment!",
ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
/* this is the first fragment we ever received for this ip datagram */
ipr->p = new_p;
}
}
/* At this point, the validation part begins: */
/* If we already received the last fragment */
if ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0) {
/* and had no holes so far */
if (valid) {
/* then check if the rest of the fragments is here */
/* Check if the queue starts with the first datagram */
if ((ipr->p == NULL) || (((struct ip_reass_helper*)ipr->p->payload)->start != 0)) {
valid = 0;
} else {
/* and check that there are no holes after this datagram */
iprh_prev = iprh;
q = iprh->next_pbuf;
while (q != NULL) {
iprh = (struct ip_reass_helper*)q->payload;
if (iprh_prev->end != iprh->start) {
valid = 0;
break;
}
iprh_prev = iprh;
q = iprh->next_pbuf;
}
        /* if still valid, all fragments have been received
         * (because the fragment with MF == 0 has already arrived) */
if (valid) {
LWIP_ASSERT("sanity check", ipr->p != NULL);
LWIP_ASSERT("sanity check",
((struct ip_reass_helper*)ipr->p->payload) != iprh);
LWIP_ASSERT("validate_datagram:next_pbuf!=NULL",
iprh->next_pbuf == NULL);
LWIP_ASSERT("validate_datagram:datagram end!=datagram len",
iprh->end == ipr->datagram_len);
}
}
}
/* If valid is 0 here, there are some fragments missing in the middle
* (since MF == 0 has already arrived). Such datagrams simply time out if
* no more fragments are received... */
return valid;
}
  /* If we come here, not all fragments have been received yet. */
return 0; /* not yet valid! */
#if IP_REASS_CHECK_OVERLAP
freepbuf:
ip_reass_pbufcount -= pbuf_clen(new_p);
pbuf_free(new_p);
return 0;
#endif /* IP_REASS_CHECK_OVERLAP */
}
/**
* Reassembles incoming IP fragments into an IP datagram.
*
* @param p points to a pbuf chain of the fragment
 * @return NULL if reassembly is incomplete, or a pbuf holding the fully reassembled datagram (IP header included) otherwise
*/
struct pbuf *
ip4_reass(struct pbuf *p)
{
struct pbuf *r;
struct ip_hdr *fraghdr;
struct ip_reassdata *ipr;
struct ip_reass_helper *iprh;
u16_t offset, len;
u8_t clen;
IPFRAG_STATS_INC(ip_frag.recv);
MIB2_STATS_INC(mib2.ipreasmreqds);
fraghdr = (struct ip_hdr*)p->payload;
if ((IPH_HL(fraghdr) * 4) != IP_HLEN) {
LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: IP options currently not supported!\n"));
IPFRAG_STATS_INC(ip_frag.err);
goto nullreturn;
}
offset = (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;
len = ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;
/* Check if we are allowed to enqueue more datagrams. */
clen = pbuf_clen(p);
if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
#if IP_REASS_FREE_OLDEST
if (!ip_reass_remove_oldest_datagram(fraghdr, clen) ||
((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS))
#endif /* IP_REASS_FREE_OLDEST */
{
/* No datagram could be freed and still too many pbufs enqueued */
LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n",
ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS));
IPFRAG_STATS_INC(ip_frag.memerr);
/* @todo: send ICMP time exceeded here? */
/* drop this pbuf */
goto nullreturn;
}
}
/* Look for the datagram the fragment belongs to in the current datagram queue,
* remembering the previous in the queue for later dequeueing. */
for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) {
/* Check if the incoming fragment matches the one currently present
in the reassembly buffer. If so, we proceed with copying the
fragment into the buffer. */
if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) {
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n",
ntohs(IPH_ID(fraghdr))));
IPFRAG_STATS_INC(ip_frag.cachehit);
break;
}
}
if (ipr == NULL) {
/* Enqueue a new datagram into the datagram queue */
ipr = ip_reass_enqueue_new_datagram(fraghdr, clen);
/* Bail if unable to enqueue */
if (ipr == NULL) {
goto nullreturn;
}
} else {
if (((ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) &&
((ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) {
/* ipr->iphdr is not the header from the first fragment, but fraghdr is
* -> copy fraghdr into ipr->iphdr since we want to have the header
* of the first fragment (for ICMP time exceeded and later, for copying
* all options, if supported)*/
SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN);
}
}
  /* Track the number of pbufs currently 'in flight', in order to limit
the number of fragments that may be enqueued at any one time */
ip_reass_pbufcount += clen;
  /* At this point, we have either created a new entry or are pointing
* to an existing one */
/* check for 'no more fragments', and update queue entry*/
if ((IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0) {
ipr->flags |= IP_REASS_FLAG_LASTFRAG;
ipr->datagram_len = offset + len;
LWIP_DEBUGF(IP_REASS_DEBUG,
("ip4_reass: last fragment seen, total len %"S16_F"\n",
ipr->datagram_len));
}
/* find the right place to insert this pbuf */
/* @todo: trim pbufs if fragments are overlapping */
if (ip_reass_chain_frag_into_datagram_and_validate(ipr, p)) {
struct ip_reassdata *ipr_prev;
    /* the last fragment (the one with the 'more fragments' flag cleared) has been
     * received at least once AND all fragments have arrived */
ipr->datagram_len += IP_HLEN;
/* save the second pbuf before copying the header over the pointer */
r = ((struct ip_reass_helper*)ipr->p->payload)->next_pbuf;
/* copy the original ip header back to the first pbuf */
fraghdr = (struct ip_hdr*)(ipr->p->payload);
SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN);
IPH_LEN_SET(fraghdr, htons(ipr->datagram_len));
IPH_OFFSET_SET(fraghdr, 0);
IPH_CHKSUM_SET(fraghdr, 0);
/* @todo: do we need to set/calculate the correct checksum? */
#if CHECKSUM_GEN_IP
IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) {
IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN));
}
#endif /* CHECKSUM_GEN_IP */
p = ipr->p;
/* chain together the pbufs contained within the reass_data list. */
while (r != NULL) {
iprh = (struct ip_reass_helper*)r->payload;
/* hide the ip header for every succeeding fragment */
pbuf_header(r, -IP_HLEN);
pbuf_cat(p, r);
r = iprh->next_pbuf;
}
/* find the previous entry in the linked list */
if (ipr == reassdatagrams) {
ipr_prev = NULL;
} else {
for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
if (ipr_prev->next == ipr) {
break;
}
}
}
    /* release the resources allocated for the fragment queue entry */
ip_reass_dequeue_datagram(ipr, ipr_prev);
/* and adjust the number of pbufs currently queued for reassembly. */
ip_reass_pbufcount -= pbuf_clen(p);
MIB2_STATS_INC(mib2.ipreasmoks);
/* Return the pbuf chain */
return p;
}
/* the datagram is not (yet?) reassembled completely */
LWIP_DEBUGF(IP_REASS_DEBUG,("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount));
return NULL;
nullreturn:
LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: nullreturn\n"));
IPFRAG_STATS_INC(ip_frag.drop);
pbuf_free(p);
return NULL;
}
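/* Illustrative usage sketch (not part of the original file): how a caller such
 * as ip4_input() is expected to hand fragments to ip4_reass(). 'iphdr' and 'p'
 * stand for the already validated IP header and the received pbuf; the snippet
 * is kept inside '#if 0' so it serves as documentation only. */
#if 0
if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) {
  /* the packet is a fragment: try to reassemble it */
  p = ip4_reass(p);
  if (p == NULL) {
    /* reassembly is not complete yet; ip4_reass() has kept or freed the pbuf */
    return;
  }
  /* p now holds the complete datagram: re-read the header and continue */
  iphdr = (struct ip_hdr *)p->payload;
}
#endif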
#endif /* IP_REASSEMBLY */
#if IP_FRAG
#if IP_FRAG_USES_STATIC_BUF
static u8_t buf[LWIP_MEM_ALIGN_SIZE(IP_FRAG_MAX_MTU + MEM_ALIGNMENT - 1)];
#else /* IP_FRAG_USES_STATIC_BUF */
#if !LWIP_NETIF_TX_SINGLE_PBUF
/** Allocate a new struct pbuf_custom_ref */
static struct pbuf_custom_ref*
ip_frag_alloc_pbuf_custom_ref(void)
{
return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF);
}
/** Free a struct pbuf_custom_ref */
static void
ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p)
{
LWIP_ASSERT("p != NULL", p != NULL);
memp_free(MEMP_FRAG_PBUF, p);
}
/** Free-callback function to free a 'struct pbuf_custom_ref', called by
* pbuf_free. */
static void
ipfrag_free_pbuf_custom(struct pbuf *p)
{
struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p;
LWIP_ASSERT("pcr != NULL", pcr != NULL);
LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p);
if (pcr->original != NULL) {
pbuf_free(pcr->original);
}
ip_frag_free_pbuf_custom_ref(pcr);
}
#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
#endif /* IP_FRAG_USES_STATIC_BUF */
/**
* Fragment an IP datagram if too large for the netif.
*
* Chop the datagram in MTU sized chunks and send them in order
* by using a fixed size static memory buffer (PBUF_REF) or
* point PBUF_REFs into p (depending on IP_FRAG_USES_STATIC_BUF).
*
* @param p ip packet to send
* @param netif the netif on which to send
* @param dest destination ip address to which to send
*
 * @return ERR_OK if sent successfully, an err_t error code otherwise
*/
err_t
ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest)
{
struct pbuf *rambuf;
#if IP_FRAG_USES_STATIC_BUF
struct pbuf *header;
#else
#if !LWIP_NETIF_TX_SINGLE_PBUF
struct pbuf *newpbuf;
#endif
struct ip_hdr *original_iphdr;
#endif
struct ip_hdr *iphdr;
u16_t nfb;
u16_t left, cop;
u16_t mtu = netif->mtu;
u16_t ofo, omf;
u16_t last;
u16_t poff = IP_HLEN;
u16_t tmp;
#if !IP_FRAG_USES_STATIC_BUF && !LWIP_NETIF_TX_SINGLE_PBUF
u16_t newpbuflen = 0;
u16_t left_to_copy;
#endif
/* Get a RAM based MTU sized pbuf */
#if IP_FRAG_USES_STATIC_BUF
/* When using a static buffer, we use a PBUF_REF, which we will
* use to reference the packet (without link header).
 * Layer and length are irrelevant.
*/
rambuf = pbuf_alloc(PBUF_LINK, 0, PBUF_REF);
if (rambuf == NULL) {
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc(PBUF_LINK, 0, PBUF_REF) failed\n"));
goto memerr;
}
rambuf->tot_len = rambuf->len = mtu;
rambuf->payload = LWIP_MEM_ALIGN((void *)buf);
/* Copy the IP header in it */
iphdr = (struct ip_hdr *)rambuf->payload;
SMEMCPY(iphdr, p->payload, IP_HLEN);
#else /* IP_FRAG_USES_STATIC_BUF */
original_iphdr = (struct ip_hdr *)p->payload;
iphdr = original_iphdr;
#endif /* IP_FRAG_USES_STATIC_BUF */
/* Save original offset */
tmp = ntohs(IPH_OFFSET(iphdr));
ofo = tmp & IP_OFFMASK;
omf = tmp & IP_MF;
left = p->tot_len - IP_HLEN;
nfb = (mtu - IP_HLEN) / 8;
while (left) {
last = (left <= mtu - IP_HLEN);
/* Set new offset and MF flag */
tmp = omf | (IP_OFFMASK & (ofo));
if (!last) {
tmp = tmp | IP_MF;
}
/* Fill this fragment */
cop = last ? left : nfb * 8;
#if IP_FRAG_USES_STATIC_BUF
poff += pbuf_copy_partial(p, (u8_t*)iphdr + IP_HLEN, cop, poff);
#else /* IP_FRAG_USES_STATIC_BUF */
#if LWIP_NETIF_TX_SINGLE_PBUF
rambuf = pbuf_alloc(PBUF_IP, cop, PBUF_RAM);
if (rambuf == NULL) {
goto memerr;
}
LWIP_ASSERT("this needs a pbuf in one piece!",
(rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
poff += pbuf_copy_partial(p, rambuf->payload, cop, poff);
/* make room for the IP header */
if (pbuf_header(rambuf, IP_HLEN)) {
pbuf_free(rambuf);
goto memerr;
}
/* fill in the IP header */
SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
iphdr = (struct ip_hdr*)rambuf->payload;
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
/* When not using a static buffer, create a chain of pbufs.
* The first will be a PBUF_RAM holding the link and IP header.
* The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
* but limited to the size of an mtu.
*/
rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM);
if (rambuf == NULL) {
goto memerr;
}
LWIP_ASSERT("this needs a pbuf in one piece!",
(p->len >= (IP_HLEN)));
SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
iphdr = (struct ip_hdr *)rambuf->payload;
/* Can just adjust p directly for needed offset. */
p->payload = (u8_t *)p->payload + poff;
p->len -= poff;
left_to_copy = cop;
while (left_to_copy) {
struct pbuf_custom_ref *pcr;
newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len;
/* Is this pbuf already empty? */
if (!newpbuflen) {
p = p->next;
continue;
}
pcr = ip_frag_alloc_pbuf_custom_ref();
if (pcr == NULL) {
pbuf_free(rambuf);
goto memerr;
}
/* Mirror this pbuf, although we might not need all of it. */
newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen);
if (newpbuf == NULL) {
ip_frag_free_pbuf_custom_ref(pcr);
pbuf_free(rambuf);
goto memerr;
}
pbuf_ref(p);
pcr->original = p;
pcr->pc.custom_free_function = ipfrag_free_pbuf_custom;
/* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
* so that it is removed when pbuf_dechain is later called on rambuf.
*/
pbuf_cat(rambuf, newpbuf);
left_to_copy -= newpbuflen;
if (left_to_copy) {
p = p->next;
}
}
poff = newpbuflen;
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
#endif /* IP_FRAG_USES_STATIC_BUF */
/* Correct header */
IPH_OFFSET_SET(iphdr, htons(tmp));
IPH_LEN_SET(iphdr, htons(cop + IP_HLEN));
IPH_CHKSUM_SET(iphdr, 0);
#if CHECKSUM_GEN_IP
IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) {
IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));
}
#endif /* CHECKSUM_GEN_IP */
#if IP_FRAG_USES_STATIC_BUF
if (last) {
pbuf_realloc(rambuf, left + IP_HLEN);
}
/* This part is ugly: we alloc a RAM based pbuf for
* the link level header for each chunk and then
* free it. A PBUF_ROM style pbuf for which pbuf_header
* worked would make things simpler.
*/
header = pbuf_alloc(PBUF_LINK, 0, PBUF_RAM);
if (header != NULL) {
pbuf_chain(header, rambuf);
netif->output(netif, header, dest);
IPFRAG_STATS_INC(ip_frag.xmit);
MIB2_STATS_INC(mib2.ipfragcreates);
pbuf_free(header);
} else {
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc() for header failed\n"));
pbuf_free(rambuf);
goto memerr;
}
#else /* IP_FRAG_USES_STATIC_BUF */
/* No need for separate header pbuf - we allowed room for it in rambuf
* when allocated.
*/
netif->output(netif, rambuf, dest);
IPFRAG_STATS_INC(ip_frag.xmit);
/* Unfortunately we can't reuse rambuf - the hardware may still be
* using the buffer. Instead we free it (and the ensuing chain) and
* recreate it next time round the loop. If we're lucky the hardware
* will have already sent the packet, the free will really free, and
* there will be zero memory penalty.
*/
pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
left -= cop;
ofo += nfb;
}
#if IP_FRAG_USES_STATIC_BUF
pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
MIB2_STATS_INC(mib2.ipfragoks);
return ERR_OK;
memerr:
MIB2_STATS_INC(mib2.ipfragfails);
return ERR_MEM;
}
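/* Illustrative usage sketch (not part of the original file): where ip4_frag()
 * sits in the output path. A sender such as ip4_output_if() would do roughly
 * the following once the IP header has been prepended to 'p'; kept inside
 * '#if 0' as documentation only. */
#if 0
/* don't fragment if the interface has its MTU set to 0 (loopback) */
if (netif->mtu && (p->tot_len > netif->mtu)) {
  return ip4_frag(p, netif, dest);
}
/* small enough for the link: hand it to the driver directly */
return netif->output(netif, p, dest);
#endif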
#endif /* IP_FRAG */
#endif /* LWIP_IPV4 */


@@ -0,0 +1 @@
IPv6 support in lwIP is very experimental.


@@ -0,0 +1,50 @@
/**
* @file
*
* DHCPv6.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */
#include "lwip/ip6_addr.h"
#include "lwip/def.h"
#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */


@@ -0,0 +1,159 @@
/**
* @file
*
* Ethernet output for IPv6. Uses ND tables for link-layer addressing.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#if LWIP_IPV6 && LWIP_ETHERNET
#include "lwip/ethip6.h"
#include "lwip/nd6.h"
#include "lwip/pbuf.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/icmp6.h"
#include "netif/ethernet.h"
#include <string.h>
/**
* Send an IPv6 packet on the network using netif->linkoutput
* The ethernet header is filled in before sending.
*
 * @param netif the lwIP network interface on which to send the packet
 * @param p the packet to send, p->payload pointing to the (uninitialized) ethernet header
 * @param src the source MAC address to be copied into the ethernet header
 * @param dst the destination MAC address to be copied into the ethernet header
* @return ERR_OK if the packet was sent, any other err_t on failure
*/
static err_t
ethip6_send(struct netif *netif, struct pbuf *p, struct eth_addr *src, struct eth_addr *dst)
{
struct eth_hdr *ethhdr = (struct eth_hdr *)p->payload;
LWIP_ASSERT("netif->hwaddr_len must be 6 for ethip6!",
(netif->hwaddr_len == 6));
SMEMCPY(&ethhdr->dest, dst, 6);
SMEMCPY(&ethhdr->src, src, 6);
ethhdr->type = PP_HTONS(ETHTYPE_IPV6);
LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("ethip6_send: sending packet %p\n", (void *)p));
/* send the packet */
return netif->linkoutput(netif, p);
}
/**
* Resolve and fill-in Ethernet address header for outgoing IPv6 packet.
*
* For IPv6 multicast, corresponding Ethernet addresses
* are selected and the packet is transmitted on the link.
*
 * For unicast addresses, the ND6 neighbor cache is consulted for the next-hop
 * link-layer address; if it is not yet resolved, the packet is queued until
 * neighbor discovery completes.
*
* @TODO anycast addresses
*
* @param netif The lwIP network interface which the IP packet will be sent on.
* @param q The pbuf(s) containing the IP packet to be sent.
* @param ip6addr The IP address of the packet destination.
*
 * @return
 * - ERR_BUF if no room could be made for the Ethernet header,
 * - ERR_MEM if no next-hop neighbor entry could be obtained,
 * - otherwise the return value of ethip6_send() or nd6_queue_packet().
*/
err_t
ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr)
{
struct eth_addr dest;
s8_t i;
/* make room for Ethernet header - should not fail */
if (pbuf_header(q, sizeof(struct eth_hdr)) != 0) {
/* bail out */
LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
("etharp_output: could not allocate room for header.\n"));
return ERR_BUF;
}
/* multicast destination IP address? */
if (ip6_addr_ismulticast(ip6addr)) {
    /* Map the IPv6 multicast address to an Ethernet multicast address
       (fixed 33:33 prefix plus the low 32 bits, per RFC 2464). */
dest.addr[0] = 0x33;
dest.addr[1] = 0x33;
dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0];
dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1];
dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2];
dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3];
/* Send out. */
return ethip6_send(netif, q, (struct eth_addr*)(netif->hwaddr), &dest);
}
/* We have a unicast destination IP address */
/* TODO anycast? */
/* Get next hop record. */
i = nd6_get_next_hop_entry(ip6addr, netif);
if (i < 0) {
/* failed to get a next hop neighbor record. */
return ERR_MEM;
}
/* Now that we have a destination record, send or queue the packet. */
if (neighbor_cache[i].state == ND6_STALE) {
/* Switch to delay state. */
neighbor_cache[i].state = ND6_DELAY;
neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME;
}
/* TODO should we send or queue if PROBE? send for now, to let unicast NS pass. */
if ((neighbor_cache[i].state == ND6_REACHABLE) ||
(neighbor_cache[i].state == ND6_DELAY) ||
(neighbor_cache[i].state == ND6_PROBE)) {
/* Send out. */
SMEMCPY(dest.addr, neighbor_cache[i].lladdr, 6);
return ethip6_send(netif, q, (struct eth_addr*)(netif->hwaddr), &dest);
}
/* We should queue packet on this interface. */
pbuf_header(q, -(s16_t)SIZEOF_ETH_HDR);
return nd6_queue_packet(i, q);
}
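/* Illustrative sketch (not part of the original file): an Ethernet netif init
 * callback is expected to register this function as the IPv6 output hook.
 * 'my_netif_init' and 'my_linkoutput' are hypothetical names; kept inside
 * '#if 0' as documentation only. */
#if 0
static err_t
my_netif_init(struct netif *netif)
{
  netif->output_ip6 = ethip6_output;  /* IPv6 output resolved via the ND6 cache */
  netif->linkoutput = my_linkoutput;  /* hypothetical driver transmit function */
  netif->mtu = 1500;
  netif->flags |= NETIF_FLAG_ETHARP | NETIF_FLAG_BROADCAST;
  return ERR_OK;
}
#endif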
#endif /* LWIP_IPV6 && LWIP_ETHERNET */

353
components/lwip/core/ipv6/icmp6.c Executable file

@@ -0,0 +1,353 @@
/**
* @file
*
* IPv6 version of ICMP, as per RFC 4443.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
#include "lwip/icmp6.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"
#include "lwip/inet_chksum.h"
#include "lwip/pbuf.h"
#include "lwip/netif.h"
#include "lwip/nd6.h"
#include "lwip/mld6.h"
#include "lwip/ip.h"
#include "lwip/stats.h"
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
#ifndef LWIP_ICMP6_DATASIZE
#define LWIP_ICMP6_DATASIZE 8
#endif
#if LWIP_ICMP6_DATASIZE == 0
#define LWIP_ICMP6_DATASIZE 8
#endif
/* Forward declarations */
static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type);
/**
* Process an input ICMPv6 message. Called by ip6_input.
*
* Will generate a reply for echo requests. Other messages are forwarded
* to nd6_input, or mld6_input.
*
 * @param p the icmpv6 packet, p->payload pointing to the icmpv6 header
* @param inp the netif on which this packet was received
*/
void
icmp6_input(struct pbuf *p, struct netif *inp)
{
struct icmp6_hdr *icmp6hdr;
struct pbuf * r;
const ip6_addr_t * reply_src;
ICMP6_STATS_INC(icmp6.recv);
/* Check that ICMPv6 header fits in payload */
if (p->len < sizeof(struct icmp6_hdr)) {
/* drop short packets */
pbuf_free(p);
ICMP6_STATS_INC(icmp6.lenerr);
ICMP6_STATS_INC(icmp6.drop);
return;
}
icmp6hdr = (struct icmp6_hdr *)p->payload;
#if CHECKSUM_CHECK_ICMP6
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) {
if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(),
ip6_current_dest_addr()) != 0) {
/* Checksum failed */
pbuf_free(p);
ICMP6_STATS_INC(icmp6.chkerr);
ICMP6_STATS_INC(icmp6.drop);
return;
}
}
#endif /* CHECKSUM_CHECK_ICMP6 */
switch (icmp6hdr->type) {
case ICMP6_TYPE_NA: /* Neighbor advertisement */
case ICMP6_TYPE_NS: /* Neighbor solicitation */
case ICMP6_TYPE_RA: /* Router advertisement */
case ICMP6_TYPE_RD: /* Redirect */
case ICMP6_TYPE_PTB: /* Packet too big */
nd6_input(p, inp);
return;
break;
case ICMP6_TYPE_RS:
#if LWIP_IPV6_FORWARD
/* TODO implement router functionality */
#endif
break;
#if LWIP_IPV6_MLD
case ICMP6_TYPE_MLQ:
case ICMP6_TYPE_MLR:
case ICMP6_TYPE_MLD:
mld6_input(p, inp);
return;
break;
#endif
case ICMP6_TYPE_EREQ:
#if !LWIP_MULTICAST_PING
/* multicast destination address? */
if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
/* drop */
pbuf_free(p);
ICMP6_STATS_INC(icmp6.drop);
return;
}
#endif /* LWIP_MULTICAST_PING */
/* Allocate reply. */
r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM);
if (r == NULL) {
/* drop */
pbuf_free(p);
ICMP6_STATS_INC(icmp6.memerr);
return;
}
/* Copy echo request. */
if (pbuf_copy(r, p) != ERR_OK) {
/* drop */
pbuf_free(p);
pbuf_free(r);
ICMP6_STATS_INC(icmp6.err);
return;
}
/* Determine reply source IPv6 address. */
#if LWIP_MULTICAST_PING
if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr()));
if (reply_src == NULL) {
/* drop */
pbuf_free(p);
pbuf_free(r);
ICMP6_STATS_INC(icmp6.rterr);
return;
}
}
else
#endif /* LWIP_MULTICAST_PING */
{
reply_src = ip6_current_dest_addr();
}
/* Set fields in reply. */
((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP;
((struct icmp6_echo_hdr *)(r->payload))->chksum = 0;
#if CHECKSUM_GEN_ICMP6
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) {
((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r,
IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr());
}
#endif /* CHECKSUM_GEN_ICMP6 */
/* Send reply. */
ICMP6_STATS_INC(icmp6.xmit);
ip6_output_if(r, reply_src, ip6_current_src_addr(),
LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp);
pbuf_free(r);
break;
default:
ICMP6_STATS_INC(icmp6.proterr);
ICMP6_STATS_INC(icmp6.drop);
break;
}
pbuf_free(p);
}
/**
* Send an icmpv6 'destination unreachable' packet.
*
* @param p the input packet for which the 'unreachable' should be sent,
* p->payload pointing to the IPv6 header
* @param c ICMPv6 code for the unreachable type
*/
void
icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c)
{
icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR);
}
/**
* Send an icmpv6 'packet too big' packet.
*
* @param p the input packet for which the 'packet too big' should be sent,
* p->payload pointing to the IPv6 header
* @param mtu the maximum mtu that we can accept
*/
void
icmp6_packet_too_big(struct pbuf *p, u32_t mtu)
{
icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB);
}
/**
* Send an icmpv6 'time exceeded' packet.
*
* @param p the input packet for which the 'unreachable' should be sent,
* p->payload pointing to the IPv6 header
* @param c ICMPv6 code for the time exceeded type
*/
void
icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c)
{
icmp6_send_response(p, c, 0, ICMP6_TYPE_TE);
}
/**
* Send an icmpv6 'parameter problem' packet.
*
* @param p the input packet for which the 'param problem' should be sent,
* p->payload pointing to the IP header
* @param c ICMPv6 code for the param problem type
* @param pointer the pointer to the byte where the parameter is found
*/
void
icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, u32_t pointer)
{
icmp6_send_response(p, c, pointer, ICMP6_TYPE_PP);
}
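/* Illustrative sketch (not part of the original file): typical callers of the
 * wrappers above. Routing/forwarding code reports errors back to the sender
 * along these lines; kept inside '#if 0' as documentation only. */
#if 0
if (netif == NULL) {
  /* no route to the destination: tell the sender */
  icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE);
} else if (netif->mtu && (p->tot_len > netif->mtu)) {
  /* packet does not fit the outgoing link: ask the sender for smaller packets */
  icmp6_packet_too_big(p, netif->mtu);
}
#endif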
/**
* Send an ICMPv6 packet in response to an incoming packet.
*
* @param p the input packet for which the response should be sent,
* p->payload pointing to the IPv6 header
* @param code Code of the ICMPv6 header
* @param data Additional 32-bit parameter in the ICMPv6 header
* @param type Type of the ICMPv6 header
*/
static void
icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type)
{
struct pbuf *q;
struct icmp6_hdr *icmp6hdr;
const ip6_addr_t *reply_src;
ip6_addr_t *reply_dest;
ip6_addr_t reply_src_local, reply_dest_local;
struct ip6_hdr *ip6hdr;
struct netif *netif;
/* ICMPv6 header + IPv6 header + data */
q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE,
PBUF_RAM);
if (q == NULL) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMPv6 packet.\n"));
ICMP6_STATS_INC(icmp6.memerr);
return;
}
LWIP_ASSERT("check that first pbuf can hold icmp 6message",
(q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE)));
icmp6hdr = (struct icmp6_hdr *)q->payload;
icmp6hdr->type = type;
icmp6hdr->code = code;
icmp6hdr->data = data;
/* copy fields from original packet */
SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload,
IP6_HLEN + LWIP_ICMP6_DATASIZE);
/* Get the destination address and netif for this ICMP message. */
if ((ip_current_netif() == NULL) ||
((code == ICMP6_TE_FRAG) && (type == ICMP6_TYPE_TE))) {
/* Special case, as ip6_current_xxx is either NULL, or points
* to a different packet than the one that expired.
* We must use the addresses that are stored in the expired packet. */
ip6hdr = (struct ip6_hdr *)p->payload;
/* copy from packed address to aligned address */
ip6_addr_copy(reply_dest_local, ip6hdr->src);
ip6_addr_copy(reply_src_local, ip6hdr->dest);
reply_dest = &reply_dest_local;
reply_src = &reply_src_local;
netif = ip6_route(reply_src, reply_dest);
if (netif == NULL) {
/* drop */
pbuf_free(q);
ICMP6_STATS_INC(icmp6.rterr);
return;
}
}
else {
netif = ip_current_netif();
reply_dest = ip6_current_src_addr();
/* Select an address to use as source. */
reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest));
if (reply_src == NULL) {
/* drop */
pbuf_free(q);
ICMP6_STATS_INC(icmp6.rterr);
return;
}
}
/* calculate checksum */
icmp6hdr->chksum = 0;
#if CHECKSUM_GEN_ICMP6
IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len,
reply_src, reply_dest);
}
#endif /* CHECKSUM_GEN_ICMP6 */
ICMP6_STATS_INC(icmp6.xmit);
ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif);
pbuf_free(q);
}
#endif /* LWIP_ICMP6 && LWIP_IPV6 */


@@ -0,0 +1,53 @@
/**
* @file
*
* INET v6 addresses.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
#include "lwip/def.h"
#include "lwip/inet.h"
/** This variable is initialized by the system to contain the wildcard IPv6 address.
*/
const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
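/* Illustrative sketch (not part of the original file): in6addr_any is what a
 * BSD-style application passes to bind() in order to accept traffic on every
 * local IPv6 address. 'sock' is a hypothetical socket descriptor; kept inside
 * '#if 0' as documentation only. */
#if 0
struct sockaddr_in6 sa;
memset(&sa, 0, sizeof(sa));
sa.sin6_family = AF_INET6;
sa.sin6_port = htons(80);
sa.sin6_addr = in6addr_any;
bind(sock, (struct sockaddr *)&sa, sizeof(sa));
#endif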
#endif /* LWIP_IPV6 */

1104
components/lwip/core/ipv6/ip6.c Executable file

File diff suppressed because it is too large


@@ -0,0 +1,292 @@
/**
* @file
*
* IPv6 addresses.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
* Functions for handling IPv6 addresses.
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
#include "lwip/ip_addr.h"
#include "lwip/def.h"
/* used by IP6_ADDR_ANY(6) in ip6_addr.h */
const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul);
#ifndef isprint
#define in_range(c, lo, up) ((u8_t)c >= lo && (u8_t)c <= up)
#define isprint(c) in_range(c, 0x20, 0x7f)
#define isdigit(c) in_range(c, '0', '9')
#define isxdigit(c) (isdigit(c) || in_range(c, 'a', 'f') || in_range(c, 'A', 'F'))
#define islower(c) in_range(c, 'a', 'z')
#define isspace(c) (c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || c == '\v')
#define xchar(i) ((i) < 10 ? '0' + (i) : 'A' + (i) - 10)
#endif
/**
* Check whether "cp" is a valid ascii representation
* of an IPv6 address and convert to a binary address.
* Returns 1 if the address is valid, 0 if not.
*
* @param cp IPv6 address in ascii representation (e.g. "FF01::1")
* @param addr pointer to which to save the ip address in network order
* @return 1 if cp could be converted to addr, 0 on failure
*/
int
ip6addr_aton(const char *cp, ip6_addr_t *addr)
{
u32_t addr_index, zero_blocks, current_block_index, current_block_value;
const char * s;
/* Count the number of colons, to count the number of blocks in a "::" sequence
zero_blocks may be 1 even if there are no :: sequences */
zero_blocks = 8;
for (s = cp; *s != 0; s++) {
if (*s == ':') {
zero_blocks--;
} else if (!isxdigit(*s)) {
break;
}
}
/* parse each block */
addr_index = 0;
current_block_index = 0;
current_block_value = 0;
for (s = cp; *s != 0; s++) {
if (*s == ':') {
if (addr) {
if (current_block_index & 0x1) {
addr->addr[addr_index++] |= current_block_value;
}
else {
addr->addr[addr_index] = current_block_value << 16;
}
}
current_block_index++;
current_block_value = 0;
if (current_block_index > 7) {
/* address too long! */
return 0;
}
if (s[1] == ':') {
if (s[2] == ':') {
/* invalid format: three successive colons */
return 0;
}
s++;
/* "::" found, set zeros */
while (zero_blocks > 0) {
zero_blocks--;
if (current_block_index & 0x1) {
addr_index++;
} else {
if (addr) {
addr->addr[addr_index] = 0;
}
}
current_block_index++;
if (current_block_index > 7) {
/* address too long! */
return 0;
}
}
}
} else if (isxdigit(*s)) {
/* add current digit */
current_block_value = (current_block_value << 4) +
(isdigit(*s) ? *s - '0' :
10 + (islower(*s) ? *s - 'a' : *s - 'A'));
} else {
/* unexpected digit, space? CRLF? */
break;
}
}
if (addr) {
if (current_block_index & 0x1) {
addr->addr[addr_index++] |= current_block_value;
}
else {
addr->addr[addr_index] = current_block_value << 16;
}
}
/* convert to network byte order. */
if (addr) {
for (addr_index = 0; addr_index < 4; addr_index++) {
addr->addr[addr_index] = htonl(addr->addr[addr_index]);
}
}
if (current_block_index != 7) {
return 0;
}
return 1;
}
/**
* Convert numeric IPv6 address into ASCII representation.
* returns ptr to static buffer; not reentrant!
*
* @param addr ip6 address in network order to convert
* @return pointer to a global static (!) buffer that holds the ASCII
* representation of addr
*/
char *
ip6addr_ntoa(const ip6_addr_t *addr)
{
static char str[40];
return ip6addr_ntoa_r(addr, str, 40);
}
/**
 * Same as ip6addr_ntoa, but reentrant since a user-supplied buffer is used.
*
* @param addr ip6 address in network order to convert
* @param buf target buffer where the string is stored
* @param buflen length of buf
* @return either pointer to buf which now holds the ASCII
* representation of addr or NULL if buf was too small
*/
char *
ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen)
{
u32_t current_block_index, current_block_value, next_block_value;
s32_t i;
u8_t zero_flag, empty_block_flag;
i = 0;
  empty_block_flag = 0; /* used to indicate a zero chain for "::" */
for (current_block_index = 0; current_block_index < 8; current_block_index++) {
/* get the current 16-bit block */
current_block_value = htonl(addr->addr[current_block_index >> 1]);
if ((current_block_index & 0x1) == 0) {
current_block_value = current_block_value >> 16;
}
current_block_value &= 0xffff;
/* Check for empty block. */
if (current_block_value == 0) {
if (current_block_index == 7) {
/* special case, we must render a ':' for the last block. */
buf[i++] = ':';
if (i >= buflen) {
return NULL;
}
break;
}
if (empty_block_flag == 0) {
        /* generate an empty block "::", but only if there is more than one contiguous
         * zero block, according to the formatting suggestions in RFC 5952. */
next_block_value = htonl(addr->addr[(current_block_index + 1) >> 1]);
if ((current_block_index & 0x1) == 0x01) {
next_block_value = next_block_value >> 16;
}
next_block_value &= 0xffff;
if (next_block_value == 0) {
empty_block_flag = 1;
buf[i++] = ':';
if (i >= buflen) {
return NULL;
}
continue; /* move on to next block. */
}
} else if (empty_block_flag == 1) {
/* move on to next block. */
continue;
}
} else if (empty_block_flag == 1) {
/* Set this flag value so we don't produce multiple empty blocks. */
empty_block_flag = 2;
}
if (current_block_index > 0) {
buf[i++] = ':';
if (i >= buflen) {
return NULL;
}
}
if ((current_block_value & 0xf000) == 0) {
zero_flag = 1;
} else {
buf[i++] = xchar(((current_block_value & 0xf000) >> 12));
zero_flag = 0;
if (i >= buflen) {
return NULL;
}
}
if (((current_block_value & 0xf00) == 0) && (zero_flag)) {
/* do nothing */
} else {
buf[i++] = xchar(((current_block_value & 0xf00) >> 8));
zero_flag = 0;
if (i >= buflen) {
return NULL;
}
}
if (((current_block_value & 0xf0) == 0) && (zero_flag)) {
/* do nothing */
}
else {
buf[i++] = xchar(((current_block_value & 0xf0) >> 4));
zero_flag = 0;
if (i >= buflen) {
return NULL;
}
}
buf[i++] = xchar((current_block_value & 0xf));
if (i >= buflen) {
return NULL;
}
}
buf[i] = 0;
return buf;
}
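/* Illustrative sketch (not part of the original file): round-tripping an
 * address through the two conversion routines above; kept inside '#if 0' as
 * documentation only. */
#if 0
ip6_addr_t addr;
char buf[40];
if (ip6addr_aton("FF01::1", &addr)) {
  /* ip6addr_ntoa_r() compresses the run of zero blocks to "::",
     so this prints "FF01::1" */
  LWIP_PLATFORM_DIAG(("%s\n", ip6addr_ntoa_r(&addr, buf, sizeof(buf))));
}
#endif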
#endif /* LWIP_IPV6 */


@@ -0,0 +1,778 @@
/**
* @file
*
* IPv6 fragmentation and reassembly.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#include "lwip/ip6_frag.h"
#include "lwip/ip6.h"
#include "lwip/icmp6.h"
#include "lwip/nd6.h"
#include "lwip/ip.h"
#include "lwip/pbuf.h"
#include "lwip/memp.h"
#include "lwip/stats.h"
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */
/** Setting this to 0, you can turn off checking the fragments for overlapping
* regions. The code gets a little smaller. Only use this if you know that
* overlapping won't occur on your network! */
#ifndef IP_REASS_CHECK_OVERLAP
#define IP_REASS_CHECK_OVERLAP 1
#endif /* IP_REASS_CHECK_OVERLAP */
/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is
* full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller.
* Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA
* is set to 1, so one datagram can be reassembled at a time, only. */
#ifndef IP_REASS_FREE_OLDEST
#define IP_REASS_FREE_OLDEST 1
#endif /* IP_REASS_FREE_OLDEST */
#if IPV6_FRAG_COPYHEADER
#define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN))
#endif
#define IP_REASS_FLAG_LASTFRAG 0x01
/** This is a helper struct which holds the starting
* offset and the ending offset of this fragment to
* easily chain the fragments.
* It has the same packing requirements as the IPv6 header, since it replaces
* the Fragment Header in memory in incoming fragments to keep
* track of the various fragments.
*/
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct ip6_reass_helper {
PACK_STRUCT_FIELD(struct pbuf *next_pbuf);
PACK_STRUCT_FIELD(u16_t start);
PACK_STRUCT_FIELD(u16_t end);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/epstruct.h"
#endif
/* static variables */
static struct ip6_reassdata *reassdatagrams;
static u16_t ip6_reass_pbufcount;
/* Forward declarations. */
static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr);
#if IP_REASS_FREE_OLDEST
static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed);
#endif /* IP_REASS_FREE_OLDEST */
void
ip6_reass_tmr(void)
{
struct ip6_reassdata *r, *tmp;
#if !IPV6_FRAG_COPYHEADER
LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1",
sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN);
#endif /* !IPV6_FRAG_COPYHEADER */
r = reassdatagrams;
while (r != NULL) {
/* Decrement the timer. Once it reaches 0,
* clean up the incomplete fragment assembly */
if (r->timer > 0) {
r->timer--;
r = r->next;
} else {
/* reassembly timed out */
tmp = r;
/* get the next pointer before freeing */
r = r->next;
/* free the helper struct and all enqueued pbufs */
ip6_reass_free_complete_datagram(tmp);
}
}
}
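/* Illustrative sketch (not part of the original file): the stack's timeout
 * machinery is expected to call ip6_reass_tmr() roughly once per second so the
 * 'timer' countdown above ages out stale reassembly state. 'reass_timer_cb' is
 * a hypothetical wrapper; kept inside '#if 0' as documentation only. */
#if 0
static void
reass_timer_cb(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  ip6_reass_tmr();
  sys_timeout(1000, reass_timer_cb, NULL); /* re-arm with a 1000 ms period */
}
#endif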
/**
* Free a datagram (struct ip6_reassdata) and all its pbufs.
* Updates the total count of enqueued pbufs (ip6_reass_pbufcount),
 * and, if the first fragment has been received, sends an ICMPv6 time exceeded packet.
*
* @param ipr datagram to free
*/
static void
ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr)
{
struct ip6_reassdata *prev;
u16_t pbufs_freed = 0;
u8_t clen;
struct pbuf *p;
struct ip6_reass_helper *iprh;
#if LWIP_ICMP6
iprh = (struct ip6_reass_helper *)ipr->p->payload;
if (iprh->start == 0) {
/* The first fragment was received, send ICMP time exceeded. */
    /* First, de-queue the first pbuf from ipr->p. */
p = ipr->p;
ipr->p = iprh->next_pbuf;
/* Then, move back to the original ipv6 header (we are now pointing to Fragment header).
This cannot fail since we already checked when receiving this fragment. */
if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)IPV6_FRAG_HDRREF(ipr->iphdr)))) {
LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0);
}
else {
icmp6_time_exceeded(p, ICMP6_TE_FRAG);
}
clen = pbuf_clen(p);
LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
pbufs_freed += clen;
pbuf_free(p);
}
#endif /* LWIP_ICMP6 */
/* First, free all received pbufs. The individual pbufs need to be released
separately as they have not yet been chained */
p = ipr->p;
while (p != NULL) {
struct pbuf *pcur;
iprh = (struct ip6_reass_helper *)p->payload;
pcur = p;
/* get the next pointer before freeing */
p = iprh->next_pbuf;
clen = pbuf_clen(pcur);
LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
pbufs_freed += clen;
pbuf_free(pcur);
}
/* Then, unchain the struct ip6_reassdata from the list and free it. */
if (ipr == reassdatagrams) {
reassdatagrams = ipr->next;
} else {
prev = reassdatagrams;
while (prev != NULL) {
if (prev->next == ipr) {
break;
}
prev = prev->next;
}
if (prev != NULL) {
prev->next = ipr->next;
}
}
memp_free(MEMP_IP6_REASSDATA, ipr);
/* Finally, update number of pbufs in reassembly queue */
LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed);
ip6_reass_pbufcount -= pbufs_freed;
}
#if IP_REASS_FREE_OLDEST
/**
* Free the oldest datagram to make room for enqueueing new fragments.
* The datagram ipr is not freed!
*
* @param ipr ip6_reassdata for the current fragment
* @param pbufs_needed number of pbufs needed to enqueue
* (used for freeing other datagrams if not enough space)
*/
static void
ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed)
{
struct ip6_reassdata *r, *oldest;
/* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs,
* but don't free the current datagram! */
do {
r = oldest = reassdatagrams;
while (r != NULL) {
if (r != ipr) {
if (r->timer <= oldest->timer) {
/* older than the previous oldest */
oldest = r;
}
}
r = r->next;
}
if (oldest == ipr) {
/* nothing to free, ipr is the only element on the list */
return;
}
if (oldest != NULL) {
ip6_reass_free_complete_datagram(oldest);
}
} while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL));
}
#endif /* IP_REASS_FREE_OLDEST */
/**
* Reassembles incoming IPv6 fragments into an IPv6 datagram.
*
 * @param p points to the pbuf holding the IPv6 Fragment Header (the payload
 *          length is derived from the enclosing IPv6 header)
* @return NULL if reassembly is incomplete, pbuf pointing to
* IPv6 Header if reassembly is complete
*/
struct pbuf *
ip6_reass(struct pbuf *p)
{
struct ip6_reassdata *ipr, *ipr_prev;
struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL;
struct ip6_frag_hdr * frag_hdr;
u16_t offset, len;
u8_t clen, valid = 1;
struct pbuf *q;
IP6_FRAG_STATS_INC(ip6_frag.recv);
LWIP_ASSERT("ip6_frag_hdr must be in the first pbuf, not chained",
(const void*)ip6_current_header() == ((u8_t*)p->payload) - IP6_HLEN);
frag_hdr = (struct ip6_frag_hdr *) p->payload;
clen = pbuf_clen(p);
offset = ntohs(frag_hdr->_fragment_offset);
/* Calculate fragment length from IPv6 payload length.
* Adjust for headers before Fragment Header.
* And finally adjust by Fragment Header length. */
len = ntohs(ip6_current_header()->_plen);
len -= (u16_t)(((u8_t*)p->payload - (const u8_t*)ip6_current_header()) - IP6_HLEN);
len -= IP6_FRAG_HLEN;
/* Look for the datagram the fragment belongs to in the current datagram queue,
* remembering the previous in the queue for later dequeueing. */
for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) {
/* Check if the incoming fragment matches the one currently present
in the reassembly buffer. If so, we proceed with copying the
fragment into the buffer. */
if ((frag_hdr->_identification == ipr->identification) &&
ip6_addr_cmp(ip6_current_src_addr(), &(IPV6_FRAG_HDRREF(ipr->iphdr)->src)) &&
ip6_addr_cmp(ip6_current_dest_addr(), &(IPV6_FRAG_HDRREF(ipr->iphdr)->dest))) {
IP6_FRAG_STATS_INC(ip6_frag.cachehit);
break;
}
ipr_prev = ipr;
}
if (ipr == NULL) {
/* Enqueue a new datagram into the datagram queue */
ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA);
if (ipr == NULL) {
#if IP_REASS_FREE_OLDEST
/* Make room and try again. */
ip6_reass_remove_oldest_datagram(ipr, clen);
ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA);
if (ipr != NULL) {
/* re-search ipr_prev since it might have been removed */
for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
if (ipr_prev->next == ipr) {
break;
}
}
} else
#endif /* IP_REASS_FREE_OLDEST */
{
IP6_FRAG_STATS_INC(ip6_frag.memerr);
IP6_FRAG_STATS_INC(ip6_frag.drop);
goto nullreturn;
}
}
memset(ipr, 0, sizeof(struct ip6_reassdata));
ipr->timer = IP_REASS_MAXAGE;
/* enqueue the new structure to the front of the list */
ipr->next = reassdatagrams;
reassdatagrams = ipr;
/* Use the current IPv6 header for src/dest address reference.
* Eventually, we will replace it when we get the first fragment
* (it might be this one, in any case, it is done later). */
#if IPV6_FRAG_COPYHEADER
MEMCPY(&ipr->iphdr, ip6_current_header(), IP6_HLEN);
#else /* IPV6_FRAG_COPYHEADER */
    /* need to use the non-const pointer here: */
ipr->iphdr = ip_data.current_ip6_header;
#endif /* IPV6_FRAG_COPYHEADER */
/* copy the fragmented packet id. */
ipr->identification = frag_hdr->_identification;
/* copy the nexth field */
ipr->nexth = frag_hdr->_nexth;
}
/* Check if we are allowed to enqueue more datagrams. */
if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
#if IP_REASS_FREE_OLDEST
ip6_reass_remove_oldest_datagram(ipr, clen);
if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) {
/* re-search ipr_prev since it might have been removed */
for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
if (ipr_prev->next == ipr) {
break;
}
}
} else
#endif /* IP_REASS_FREE_OLDEST */
{
/* @todo: send ICMPv6 time exceeded here? */
/* drop this pbuf */
IP6_FRAG_STATS_INC(ip6_frag.memerr);
IP6_FRAG_STATS_INC(ip6_frag.drop);
goto nullreturn;
}
}
/* Overwrite Fragment Header with our own helper struct. */
#if IPV6_FRAG_COPYHEADER
if (IPV6_FRAG_REQROOM > 0) {
/* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4).
This cannot fail since we already checked when receiving this fragment. */
err_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM);
LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == ERR_OK);
}
#else /* IPV6_FRAG_COPYHEADER */
LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1",
sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN);
#endif /* IPV6_FRAG_COPYHEADER */
iprh = (struct ip6_reass_helper *)p->payload;
iprh->next_pbuf = NULL;
iprh->start = (offset & IP6_FRAG_OFFSET_MASK);
iprh->end = (offset & IP6_FRAG_OFFSET_MASK) + len;
/* find the right place to insert this pbuf */
/* Iterate through until we either get to the end of the list (append),
* or we find one with a larger offset (insert). */
for (q = ipr->p; q != NULL;) {
iprh_tmp = (struct ip6_reass_helper*)q->payload;
if (iprh->start < iprh_tmp->start) {
#if IP_REASS_CHECK_OVERLAP
if (iprh->end > iprh_tmp->start) {
/* fragment overlaps with following, throw away */
IP6_FRAG_STATS_INC(ip6_frag.proterr);
IP6_FRAG_STATS_INC(ip6_frag.drop);
goto nullreturn;
}
if (iprh_prev != NULL) {
if (iprh->start < iprh_prev->end) {
/* fragment overlaps with previous, throw away */
IP6_FRAG_STATS_INC(ip6_frag.proterr);
IP6_FRAG_STATS_INC(ip6_frag.drop);
goto nullreturn;
}
}
#endif /* IP_REASS_CHECK_OVERLAP */
/* the new pbuf should be inserted before this */
iprh->next_pbuf = q;
if (iprh_prev != NULL) {
/* not the fragment with the lowest offset */
iprh_prev->next_pbuf = p;
} else {
/* fragment with the lowest offset */
ipr->p = p;
}
break;
} else if (iprh->start == iprh_tmp->start) {
/* received the same datagram twice: no need to keep the datagram */
IP6_FRAG_STATS_INC(ip6_frag.drop);
goto nullreturn;
#if IP_REASS_CHECK_OVERLAP
} else if (iprh->start < iprh_tmp->end) {
/* overlap: no need to keep the new datagram */
IP6_FRAG_STATS_INC(ip6_frag.proterr);
IP6_FRAG_STATS_INC(ip6_frag.drop);
goto nullreturn;
#endif /* IP_REASS_CHECK_OVERLAP */
} else {
/* Check if the fragments received so far have no gaps. */
if (iprh_prev != NULL) {
if (iprh_prev->end != iprh_tmp->start) {
/* There is a fragment missing between the current
* and the previous fragment */
valid = 0;
}
}
}
q = iprh_tmp->next_pbuf;
iprh_prev = iprh_tmp;
}
/* If q is NULL, then we made it to the end of the list. Determine what to do now */
if (q == NULL) {
if (iprh_prev != NULL) {
/* this is (for now) the fragment with the highest offset:
* chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
#endif /* IP_REASS_CHECK_OVERLAP */
iprh_prev->next_pbuf = p;
if (iprh_prev->end != iprh->start) {
valid = 0;
}
} else {
#if IP_REASS_CHECK_OVERLAP
LWIP_ASSERT("no previous fragment, this must be the first fragment!",
ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
/* this is the first fragment we ever received for this ip datagram */
ipr->p = p;
}
}
/* Track the number of pbufs currently 'in-flight', in order to limit
the number of fragments that may be enqueued at any one time */
ip6_reass_pbufcount += clen;
/* Remember IPv6 header if this is the first fragment. */
if (iprh->start == 0) {
#if IPV6_FRAG_COPYHEADER
if (iprh->next_pbuf != NULL) {
MEMCPY(&ipr->iphdr, ip6_current_header(), IP6_HLEN);
}
#else /* IPV6_FRAG_COPYHEADER */
/* need to use the non-const pointer here: */
ipr->iphdr = ip_data.current_ip6_header;
#endif /* IPV6_FRAG_COPYHEADER */
}
/* If this is the last fragment, calculate total packet length. */
if ((offset & IP6_FRAG_MORE_FLAG) == 0) {
ipr->datagram_len = iprh->end;
}
/* Additional validity tests: we have received first and last fragment. */
iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload;
if (iprh_tmp->start != 0) {
valid = 0;
}
if (ipr->datagram_len == 0) {
valid = 0;
}
/* Final validity test: no gaps between current and last fragment. */
iprh_prev = iprh;
q = iprh->next_pbuf;
while ((q != NULL) && valid) {
iprh = (struct ip6_reass_helper*)q->payload;
if (iprh_prev->end != iprh->start) {
valid = 0;
break;
}
iprh_prev = iprh;
q = iprh->next_pbuf;
}
if (valid) {
/* All fragments have been received */
struct ip6_hdr* iphdr_ptr;
/* chain together the pbufs contained within the ip6_reassdata list. */
iprh = (struct ip6_reass_helper*) ipr->p->payload;
while (iprh != NULL) {
struct pbuf* next_pbuf = iprh->next_pbuf;
if (next_pbuf != NULL) {
/* Save next helper struct (will be hidden in next step). */
iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload;
/* hide the fragment header for every succeeding fragment */
pbuf_header(next_pbuf, -IP6_FRAG_HLEN);
#if IPV6_FRAG_COPYHEADER
if (IPV6_FRAG_REQROOM > 0) {
/* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */
err_t hdrerr = pbuf_header(next_pbuf, -(s16_t)(IPV6_FRAG_REQROOM));
LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == ERR_OK);
}
#endif
pbuf_cat(ipr->p, next_pbuf);
}
else {
iprh_tmp = NULL;
}
iprh = iprh_tmp;
}
#if IPV6_FRAG_COPYHEADER
if (IPV6_FRAG_REQROOM > 0) {
/* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */
err_t hdrerr = pbuf_header(ipr->p, -(s16_t)(IPV6_FRAG_REQROOM));
LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == ERR_OK);
}
iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->p->payload - IP6_HLEN);
MEMCPY(iphdr_ptr, &ipr->iphdr, IP6_HLEN);
#else
iphdr_ptr = ipr->iphdr;
#endif
/* Adjust datagram length by adding header lengths. */
ipr->datagram_len += (u16_t)(((u8_t*)ipr->p->payload - (u8_t*)iphdr_ptr)
+ IP6_FRAG_HLEN
- IP6_HLEN);
/* Set payload length in ip header. */
iphdr_ptr->_plen = htons(ipr->datagram_len);
/* Get the first pbuf. */
p = ipr->p;
/* Restore Fragment Header in first pbuf. Mark as "single fragment"
* packet. Restore nexth. */
frag_hdr = (struct ip6_frag_hdr *) p->payload;
frag_hdr->_nexth = ipr->nexth;
frag_hdr->reserved = 0;
frag_hdr->_fragment_offset = 0;
frag_hdr->_identification = 0;
/* release the resources allocated for the fragment queue entry */
if (reassdatagrams == ipr) {
/* it was the first in the list */
reassdatagrams = ipr->next;
} else {
/* it wasn't the first, so it must have a valid 'prev' */
LWIP_ASSERT("sanity check linked list", ipr_prev != NULL);
ipr_prev->next = ipr->next;
}
memp_free(MEMP_IP6_REASSDATA, ipr);
/* adjust the number of pbufs currently queued for reassembly. */
ip6_reass_pbufcount -= pbuf_clen(p);
/* Move pbuf back to IPv6 header.
This cannot fail since we already checked when receiving this fragment. */
if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) {
LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0);
pbuf_free(p);
return NULL;
}
/* Return the pbuf chain */
return p;
}
/* the datagram is not (yet?) reassembled completely */
return NULL;
nullreturn:
pbuf_free(p);
return NULL;
}
#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */
#if LWIP_IPV6 && LWIP_IPV6_FRAG
/** Allocate a new struct pbuf_custom_ref */
static struct pbuf_custom_ref*
ip6_frag_alloc_pbuf_custom_ref(void)
{
return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF);
}
/** Free a struct pbuf_custom_ref */
static void
ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p)
{
LWIP_ASSERT("p != NULL", p != NULL);
memp_free(MEMP_FRAG_PBUF, p);
}
/** Free-callback function to free a 'struct pbuf_custom_ref', called by
* pbuf_free. */
static void
ip6_frag_free_pbuf_custom(struct pbuf *p)
{
struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p;
LWIP_ASSERT("pcr != NULL", pcr != NULL);
LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p);
if (pcr->original != NULL) {
pbuf_free(pcr->original);
}
ip6_frag_free_pbuf_custom_ref(pcr);
}
/**
* Fragment an IPv6 datagram if too large for the netif or path MTU.
*
* Chop the datagram into MTU-sized chunks and send them in order
* by pointing PBUF_REFs into p
*
* @param p ipv6 packet to send
* @param netif the netif on which to send
* @param dest destination ipv6 address to which to send
*
* @return ERR_OK if sent successfully, err_t otherwise
*/
err_t
ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest)
{
struct ip6_hdr *original_ip6hdr;
struct ip6_hdr *ip6hdr;
struct ip6_frag_hdr * frag_hdr;
struct pbuf *rambuf;
struct pbuf *newpbuf;
static u32_t identification;
u16_t nfb;
u16_t left, cop;
u16_t mtu;
u16_t fragment_offset = 0;
u16_t last;
u16_t poff = IP6_HLEN;
u16_t newpbuflen = 0;
u16_t left_to_copy;
identification++;
original_ip6hdr = (struct ip6_hdr *)p->payload;
mtu = nd6_get_destination_mtu(dest, netif);
/* TODO we assume there are no options in the unfragmentable part (IPv6 header). */
left = p->tot_len - IP6_HLEN;
nfb = (mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK;
while (left) {
last = (left <= nfb);
/* Fill this fragment */
cop = last ? left : nfb;
/* When not using a static buffer, create a chain of pbufs.
* The first will be a PBUF_RAM holding the link, IPv6, and Fragment header.
* The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
* but limited to the size of an mtu.
*/
rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM);
if (rambuf == NULL) {
IP6_FRAG_STATS_INC(ip6_frag.memerr);
return ERR_MEM;
}
LWIP_ASSERT("this needs a pbuf in one piece!",
(p->len >= (IP6_HLEN + IP6_FRAG_HLEN)));
SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN);
ip6hdr = (struct ip6_hdr *)rambuf->payload;
frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN);
/* Can just adjust p directly for needed offset. */
p->payload = (u8_t *)p->payload + poff;
p->len -= poff;
p->tot_len -= poff;
left_to_copy = cop;
while (left_to_copy) {
struct pbuf_custom_ref *pcr;
newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len;
/* Is this pbuf already empty? */
if (!newpbuflen) {
p = p->next;
continue;
}
pcr = ip6_frag_alloc_pbuf_custom_ref();
if (pcr == NULL) {
pbuf_free(rambuf);
IP6_FRAG_STATS_INC(ip6_frag.memerr);
return ERR_MEM;
}
/* Mirror this pbuf, although we might not need all of it. */
newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen);
if (newpbuf == NULL) {
ip6_frag_free_pbuf_custom_ref(pcr);
pbuf_free(rambuf);
IP6_FRAG_STATS_INC(ip6_frag.memerr);
return ERR_MEM;
}
pbuf_ref(p);
pcr->original = p;
pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom;
/* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
* so that it is removed when pbuf_dechain is later called on rambuf.
*/
pbuf_cat(rambuf, newpbuf);
left_to_copy -= newpbuflen;
if (left_to_copy) {
p = p->next;
}
}
poff = newpbuflen;
/* Set headers */
frag_hdr->_nexth = original_ip6hdr->_nexth;
frag_hdr->reserved = 0;
frag_hdr->_fragment_offset = htons((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG));
frag_hdr->_identification = htonl(identification);
IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT);
IP6H_PLEN_SET(ip6hdr, cop + IP6_FRAG_HLEN);
/* No need for separate header pbuf - we allowed room for it in rambuf
* when allocated.
*/
IP6_FRAG_STATS_INC(ip6_frag.xmit);
netif->output_ip6(netif, rambuf, dest);
/* Unfortunately we can't reuse rambuf - the hardware may still be
* using the buffer. Instead we free it (and the ensuing chain) and
* recreate it next time round the loop. If we're lucky the hardware
* will have already sent the packet, the free will really free, and
* there will be zero memory penalty.
*/
pbuf_free(rambuf);
left -= cop;
fragment_offset += cop;
}
return ERR_OK;
}
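/* Illustrative sketch, not part of the original file: how a caller might pick
 * between direct transmission and fragmentation. The helper name
 * ip6_frag_example_output() is hypothetical; nd6_get_destination_mtu(),
 * netif->output_ip6() and ip6_frag() are the APIs already used above. */
err_t
ip6_frag_example_output(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest)
{
  u16_t mtu = nd6_get_destination_mtu(dest, netif);
  if (p->tot_len <= mtu) {
    /* the datagram fits: hand it to the driver directly */
    return netif->output_ip6(netif, p, dest);
  }
  /* too large: chop it into MTU-sized fragments and send them in order */
  return ip6_frag(p, netif, dest);
}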
#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */

593
components/lwip/core/ipv6/mld6.c Executable file
View File

@@ -0,0 +1,593 @@
/**
* @file
*
* Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710.
* No support for MLDv2.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
/* Based on igmp.c implementation of igmp v2 protocol */
#include "lwip/opt.h"
#if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */
#include "lwip/mld6.h"
#include "lwip/icmp6.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"
#include "lwip/ip.h"
#include "lwip/inet_chksum.h"
#include "lwip/pbuf.h"
#include "lwip/netif.h"
#include "lwip/memp.h"
#include "lwip/stats.h"
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
/*
* MLD constants
*/
#define MLD6_HL 1
#define MLD6_JOIN_DELAYING_MEMBER_TMR_MS (500)
#define MLD6_GROUP_NON_MEMBER 0
#define MLD6_GROUP_DELAYING_MEMBER 1
#define MLD6_GROUP_IDLE_MEMBER 2
/* The list of joined groups. */
static struct mld_group* mld_group_list;
/* Forward declarations. */
static struct mld_group * mld6_new_group(struct netif *ifp, const ip6_addr_t *addr);
static err_t mld6_free_group(struct mld_group *group);
static void mld6_delayed_report(struct mld_group *group, u16_t maxresp);
static void mld6_send(struct mld_group *group, u8_t type);
/**
* Stop MLD processing on interface
*
* @param netif network interface on which to stop MLD processing
*/
err_t
mld6_stop(struct netif *netif)
{
struct mld_group *group = mld_group_list;
struct mld_group *prev = NULL;
struct mld_group *next;
/* look for groups joined on this interface further down the list */
while (group != NULL) {
next = group->next;
/* is it a group joined on this interface? */
if (group->netif == netif) {
/* is it the first group of the list? */
if (group == mld_group_list) {
mld_group_list = next;
}
/* is there a "previous" group defined? */
if (prev != NULL) {
prev->next = next;
}
/* disable the group at the MAC level */
if (netif->mld_mac_filter != NULL) {
netif->mld_mac_filter(netif, &(group->group_address), MLD6_DEL_MAC_FILTER);
}
/* free group */
memp_free(MEMP_MLD6_GROUP, group);
} else {
/* change the "previous" */
prev = group;
}
/* move to "next" */
group = next;
}
return ERR_OK;
}
/**
* Report MLD memberships for this interface
*
* @param netif network interface on which to report MLD memberships
*/
void
mld6_report_groups(struct netif *netif)
{
struct mld_group *group = mld_group_list;
while (group != NULL) {
if (group->netif == netif) {
mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS);
}
group = group->next;
}
}
/**
* Search for a group that is joined on a netif
*
* @param ifp the network interface for which to look
* @param addr the group ipv6 address to search for
* @return a struct mld_group* if the group has been found,
* NULL if the group wasn't found.
*/
struct mld_group *
mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr)
{
struct mld_group *group = mld_group_list;
while (group != NULL) {
if ((group->netif == ifp) && (ip6_addr_cmp(&(group->group_address), addr))) {
return group;
}
group = group->next;
}
return NULL;
}
/**
* create a new group
*
* @param ifp the network interface for which to create the group
* @param addr the ipv6 address of the new group
* @return a struct mld_group*,
* NULL on memory error.
*/
static struct mld_group *
mld6_new_group(struct netif *ifp, const ip6_addr_t *addr)
{
struct mld_group *group;
group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP);
if (group != NULL) {
group->netif = ifp;
ip6_addr_set(&(group->group_address), addr);
group->timer = 0; /* Not running */
group->group_state = MLD6_GROUP_IDLE_MEMBER;
group->last_reporter_flag = 0;
group->use = 0;
group->next = mld_group_list;
mld_group_list = group;
}
return group;
}
/**
* Remove a group from the mld_group_list and free it
*
* @param group the group to remove
* @return ERR_OK if group was removed from the list, an err_t otherwise
*/
static err_t
mld6_free_group(struct mld_group *group)
{
err_t err = ERR_OK;
/* Is it the first group? */
if (mld_group_list == group) {
mld_group_list = group->next;
} else {
/* look for group further down the list */
struct mld_group *tmpGroup;
for (tmpGroup = mld_group_list; tmpGroup != NULL; tmpGroup = tmpGroup->next) {
if (tmpGroup->next == group) {
tmpGroup->next = group->next;
break;
}
}
/* Group not found */
if (tmpGroup == NULL) {
err = ERR_ARG;
}
}
/* free group */
memp_free(MEMP_MLD6_GROUP, group);
return err;
}
/**
* Process an input MLD message. Called by icmp6_input.
*
* @param p the mld packet, p->payload pointing to the icmpv6 header
* @param inp the netif on which this packet was received
*/
void
mld6_input(struct pbuf *p, struct netif *inp)
{
struct mld_header * mld_hdr;
struct mld_group* group;
MLD6_STATS_INC(mld6.recv);
/* Check that mld header fits in packet. */
if (p->len < sizeof(struct mld_header)) {
/* TODO debug message */
pbuf_free(p);
MLD6_STATS_INC(mld6.lenerr);
MLD6_STATS_INC(mld6.drop);
return;
}
mld_hdr = (struct mld_header *)p->payload;
switch (mld_hdr->type) {
case ICMP6_TYPE_MLQ: /* Multicast listener query. */
/* Is it a general query? */
if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) &&
ip6_addr_isany(&(mld_hdr->multicast_address))) {
MLD6_STATS_INC(mld6.rx_general);
/* Report all groups, except all nodes group, and if-local groups. */
group = mld_group_list;
while (group != NULL) {
if ((group->netif == inp) &&
(!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) &&
(!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) {
mld6_delayed_report(group, mld_hdr->max_resp_delay);
}
group = group->next;
}
} else {
/* Have we joined this group?
* We use IP6 destination address to have a memory aligned copy.
* mld_hdr->multicast_address should be the same. */
MLD6_STATS_INC(mld6.rx_group);
group = mld6_lookfor_group(inp, ip6_current_dest_addr());
if (group != NULL) {
/* Schedule a report. */
mld6_delayed_report(group, mld_hdr->max_resp_delay);
}
}
break; /* ICMP6_TYPE_MLQ */
case ICMP6_TYPE_MLR: /* Multicast listener report. */
/* Have we joined this group?
* We use IP6 destination address to have a memory aligned copy.
* mld_hdr->multicast_address should be the same. */
MLD6_STATS_INC(mld6.rx_report);
group = mld6_lookfor_group(inp, ip6_current_dest_addr());
if (group != NULL) {
/* If we are waiting to report, cancel it. */
if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) {
group->timer = 0; /* stopped */
group->group_state = MLD6_GROUP_IDLE_MEMBER;
group->last_reporter_flag = 0;
}
}
break; /* ICMP6_TYPE_MLR */
case ICMP6_TYPE_MLD: /* Multicast listener done. */
/* Do nothing, router will query us. */
break; /* ICMP6_TYPE_MLD */
default:
MLD6_STATS_INC(mld6.proterr);
MLD6_STATS_INC(mld6.drop);
break;
}
pbuf_free(p);
}
/**
* Join a group on a network interface.
*
* @param srcaddr ipv6 address of the network interface which should
* join a new group. If IP6_ADDR_ANY, join on all netifs
* @param groupaddr the ipv6 address of the group to join
* @return ERR_OK if group was joined on the netif(s), an err_t otherwise
*/
err_t
mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr)
{
err_t err = ERR_VAL; /* no matching interface */
struct netif *netif;
/* loop through netif's */
netif = netif_list;
while (netif != NULL) {
/* Should we join this interface ? */
if (ip6_addr_isany(srcaddr) ||
netif_get_ip6_addr_match(netif, srcaddr) >= 0) {
err = mld6_joingroup_netif(netif, groupaddr);
if (err != ERR_OK) {
return err;
}
}
/* proceed to next network interface */
netif = netif->next;
}
return err;
}
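/* Illustrative usage sketch, not part of the original file: joining a
 * link-scope multicast group (ff02::1:3 is just an example) from application
 * code. The function name is hypothetical; passing IP6_ADDR_ANY6 as srcaddr
 * joins on all netifs, as documented above. */
void
mld6_example_join(void)
{
  ip6_addr_t group;
  /* ff02::1:3, stored in network byte order via PP_HTONL() */
  IP6_ADDR(&group, PP_HTONL(0xff020000UL), 0, 0, PP_HTONL(0x00010003UL));
  if (mld6_joingroup(IP6_ADDR_ANY6, &group) != ERR_OK) {
    /* no interface could join the group */
  }
}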
/**
* Join a group on a network interface.
*
* @param netif the network interface which should join a new group.
* @param groupaddr the ipv6 address of the group to join
* @return ERR_OK if group was joined on the netif, an err_t otherwise
*/
err_t
mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr)
{
struct mld_group *group;
/* find group or create a new one if not found */
group = mld6_lookfor_group(netif, groupaddr);
if (group == NULL) {
/* Joining a new group. Create a new group entry. */
group = mld6_new_group(netif, groupaddr);
if (group == NULL) {
return ERR_MEM;
}
/* Activate this address on the MAC layer. */
if (netif->mld_mac_filter != NULL) {
netif->mld_mac_filter(netif, groupaddr, MLD6_ADD_MAC_FILTER);
}
/* Report our membership. */
MLD6_STATS_INC(mld6.tx_report);
mld6_send(group, ICMP6_TYPE_MLR);
mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS);
}
/* Increment group use */
group->use++;
return ERR_OK;
}
/**
* Leave a group on a network interface.
*
* @param srcaddr ipv6 address of the network interface which should
* leave the group. If IP6_ADDR_ANY, leave on all netifs
* @param groupaddr the ipv6 address of the group to leave
* @return ERR_OK if group was left on the netif(s), an err_t otherwise
*/
err_t
mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr)
{
err_t err = ERR_VAL; /* no matching interface */
struct netif *netif;
/* loop through netif's */
netif = netif_list;
while (netif != NULL) {
/* Should we leave this interface ? */
if (ip6_addr_isany(srcaddr) ||
netif_get_ip6_addr_match(netif, srcaddr) >= 0) {
err_t res = mld6_leavegroup_netif(netif, groupaddr);
if (err != ERR_OK) {
/* Store this result if we have not yet gotten a success */
err = res;
}
}
/* proceed to next network interface */
netif = netif->next;
}
return err;
}
/**
* Leave a group on a network interface.
*
* @param netif the network interface which should leave the group.
* @param groupaddr the ipv6 address of the group to leave
* @return ERR_OK if group was left on the netif, an err_t otherwise
*/
err_t
mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr)
{
struct mld_group *group;
/* find group */
group = mld6_lookfor_group(netif, groupaddr);
if (group != NULL) {
/* Leave if there is no other use of the group */
if (group->use <= 1) {
/* If we are the last reporter for this group */
if (group->last_reporter_flag) {
MLD6_STATS_INC(mld6.tx_leave);
mld6_send(group, ICMP6_TYPE_MLD);
}
/* Disable the group at the MAC level */
if (netif->mld_mac_filter != NULL) {
netif->mld_mac_filter(netif, groupaddr, MLD6_DEL_MAC_FILTER);
}
/* Free the group */
mld6_free_group(group);
} else {
/* Decrement group use */
group->use--;
}
/* Left group */
return ERR_OK;
}
/* Group not found */
return ERR_VAL;
}
/**
* Periodic timer for mld processing. Must be called every
* MLD6_TMR_INTERVAL milliseconds (100).
*
* When a delaying member expires, a membership report is sent.
*/
void
mld6_tmr(void)
{
struct mld_group *group = mld_group_list;
while (group != NULL) {
if (group->timer > 0) {
group->timer--;
if (group->timer == 0) {
/* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */
if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) {
MLD6_STATS_INC(mld6.tx_report);
mld6_send(group, ICMP6_TYPE_MLR);
group->group_state = MLD6_GROUP_IDLE_MEMBER;
}
}
}
group = group->next;
}
}
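/* Illustrative sketch, not part of the original file: lwIP's timer module
 * normally calls mld6_tmr() for you; a port driving it manually could re-arm
 * itself with sys_timeout() (declared in lwip/timers.h in this tree). The
 * callback name is hypothetical. */
static void
mld6_example_timer_cb(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  mld6_tmr();
  sys_timeout(MLD6_TMR_INTERVAL, mld6_example_timer_cb, NULL);
}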
/**
* Schedule a delayed membership report for a group
*
* @param group the mld_group for which "delaying" membership report
* should be sent
* @param maxresp the max resp delay provided in the query
*/
static void
mld6_delayed_report(struct mld_group *group, u16_t maxresp)
{
/* Convert maxresp from milliseconds to tmr ticks */
maxresp = maxresp / MLD6_TMR_INTERVAL;
if (maxresp == 0) {
maxresp = 1;
}
#ifdef LWIP_RAND
/* Randomize maxresp. (if LWIP_RAND is supported) */
maxresp = LWIP_RAND() % maxresp;
if (maxresp == 0) {
maxresp = 1;
}
#endif /* LWIP_RAND */
/* Apply timer value if no report has been scheduled already. */
if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) ||
((group->group_state == MLD6_GROUP_DELAYING_MEMBER) &&
((group->timer == 0) || (maxresp < group->timer)))) {
group->timer = maxresp;
group->group_state = MLD6_GROUP_DELAYING_MEMBER;
}
}
/**
* Send a MLD message (report or done).
*
* An IPv6 hop-by-hop options header with a router alert option
* is prepended.
*
* @param group the group to report or quit
* @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done)
*/
static void
mld6_send(struct mld_group *group, u8_t type)
{
struct mld_header * mld_hdr;
struct pbuf * p;
const ip6_addr_t * src_addr;
/* Allocate a packet. Size is MLD header + IPv6 Hop-by-hop options header. */
p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + sizeof(struct ip6_hbh_hdr), PBUF_RAM);
if (p == NULL) {
MLD6_STATS_INC(mld6.memerr);
return;
}
/* Move to make room for Hop-by-hop options header. */
if (pbuf_header(p, -IP6_HBH_HLEN)) {
pbuf_free(p);
MLD6_STATS_INC(mld6.lenerr);
return;
}
/* Select our source address. */
if (!ip6_addr_isvalid(netif_ip6_addr_state(group->netif, 0))) {
/* This is a special case, when we are performing duplicate address detection.
* We must join the multicast group, but we don't have a valid address yet. */
src_addr = IP6_ADDR_ANY6;
} else {
/* Use link-local address as source address. */
src_addr = netif_ip6_addr(group->netif, 0);
}
/* MLD message header pointer. */
mld_hdr = (struct mld_header *)p->payload;
/* Set fields. */
mld_hdr->type = type;
mld_hdr->code = 0;
mld_hdr->chksum = 0;
mld_hdr->max_resp_delay = 0;
mld_hdr->reserved = 0;
ip6_addr_set(&(mld_hdr->multicast_address), &(group->group_address));
#if CHECKSUM_GEN_ICMP6
IF__NETIF_CHECKSUM_ENABLED(group->netif, NETIF_CHECKSUM_GEN_ICMP6) {
mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len,
src_addr, &(group->group_address));
}
#endif /* CHECKSUM_GEN_ICMP6 */
/* Add hop-by-hop headers options: router alert with MLD value. */
ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD);
/* Send the packet out. */
MLD6_STATS_INC(mld6.xmit);
ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address),
MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, group->netif);
pbuf_free(p);
}
#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */

1846
components/lwip/core/ipv6/nd6.c Executable file

File diff suppressed because it is too large Load Diff

686
components/lwip/core/mem.c Executable file
View File

@@ -0,0 +1,686 @@
/**
* @file
* Dynamic memory manager
*
* This is a lightweight replacement for the standard C library malloc().
*
* If you want to use the standard C library malloc() instead, define
* MEM_LIBC_MALLOC to 1 in your lwipopts.h
*
* To let mem_malloc() use pools (prevents fragmentation and is much faster than
* a heap but might waste some memory), define MEM_USE_POOLS to 1, define
* MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
* of pools like this (more pools can be added between _START and _END):
*
* Define three pools with sizes 256, 512, and 1512 bytes
* LWIP_MALLOC_MEMPOOL_START
* LWIP_MALLOC_MEMPOOL(20, 256)
* LWIP_MALLOC_MEMPOOL(10, 512)
* LWIP_MALLOC_MEMPOOL(5, 1512)
* LWIP_MALLOC_MEMPOOL_END
*/
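/* Illustrative configuration sketch, not part of the original file. Assuming
 * the pool-based mem_malloc() described above is wanted, the options could
 * look like this (pool counts/sizes are examples; the custom-pools switch is
 * spelled MEMP_USE_CUSTOM_POOLS in lwIP's opt.h/memp_std.h):
 *
 *   // lwipopts.h
 *   #define MEM_USE_POOLS          1
 *   #define MEMP_USE_CUSTOM_POOLS  1
 *
 *   // lwippools.h
 *   LWIP_MALLOC_MEMPOOL_START
 *   LWIP_MALLOC_MEMPOOL(20, 256)
 *   LWIP_MALLOC_MEMPOOL(10, 512)
 *   LWIP_MALLOC_MEMPOOL(5, 1512)
 *   LWIP_MALLOC_MEMPOOL_END
 */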
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
* Simon Goldschmidt
*
*/
#include "lwip/opt.h"
#if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
#if MEM_USE_POOLS
#if MEMP_MEM_MALLOC
#error MEM_USE_POOLS and MEMP_MEM_MALLOC cannot be used together
#endif
/* lwIP heap implemented with different sized pools */
/**
* Allocate memory: determine the smallest pool that is big enough
* to contain an element of 'size' and get an element from that pool.
*
* @param size the size in bytes of the memory needed
* @return a pointer to the allocated memory or NULL if the pool is empty
*/
void *
mem_malloc(mem_size_t size)
{
void *ret;
struct memp_malloc_helper *element;
memp_t poolnr;
mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
#if MEM_USE_POOLS_TRY_BIGGER_POOL
again:
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
/* is this pool big enough to hold an element of the required size
plus a struct memp_malloc_helper that saves the pool this element came from? */
if (required_size <= memp_pools[poolnr]->size) {
break;
}
}
if (poolnr > MEMP_POOL_LAST) {
LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
return NULL;
}
element = (struct memp_malloc_helper*)memp_malloc(poolnr);
if (element == NULL) {
/* No need to DEBUGF or ASSERT: This error is already
taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
/** Try a bigger pool if this one is empty! */
if (poolnr < MEMP_POOL_LAST) {
poolnr++;
goto again;
}
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
return NULL;
}
/* save the pool number this element came from */
element->poolnr = poolnr;
/* and return a pointer to the memory directly after the struct memp_malloc_helper */
ret = (u8_t*)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
#if MEMP_OVERFLOW_CHECK
/* initialize unused memory */
element->size = size;
memset((u8_t*)ret + size, 0xcd, memp_pools[poolnr]->size - size);
#endif /* MEMP_OVERFLOW_CHECK */
return ret;
}
/**
* Free memory previously allocated by mem_malloc. Loads the pool number
* and calls memp_free with that pool number to put the element back into
* its pool
*
* @param rmem the memory element to free
*/
void
mem_free(void *rmem)
{
struct memp_malloc_helper *hmem;
LWIP_ASSERT("rmem != NULL", (rmem != NULL));
LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
/* get the original struct memp_malloc_helper */
hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));
LWIP_ASSERT("hmem != NULL", (hmem != NULL));
LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
#if MEMP_OVERFLOW_CHECK
{
u16_t i;
LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
hmem->size <= memp_pools[hmem->poolnr]->size);
/* check that unused memory remained untouched */
for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
u8_t data = *((u8_t*)rmem + i);
LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
}
}
#endif /* MEMP_OVERFLOW_CHECK */
/* and put it in the pool we saved earlier */
memp_free(hmem->poolnr, hmem);
}
#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */
/**
* The heap is made up as a list of structs of this type.
* This does not have to be aligned since for getting its size,
* we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
*/
struct mem {
/** index (-> ram[next]) of the next struct */
mem_size_t next;
/** index (-> ram[prev]) of the previous struct */
mem_size_t prev;
/** 1: this area is used; 0: this area is unused */
u8_t used;
};
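/* Illustrative note, not part of the original file: right after mem_init()
 * (see below) the heap consists of a single free block followed by the
 * permanently "used" end marker, roughly:
 *
 *   &ram[0]                : { next = MEM_SIZE_ALIGNED, prev = 0, used = 0 }
 *   &ram[MEM_SIZE_ALIGNED] : { next = prev = MEM_SIZE_ALIGNED, used = 1 }  <- ram_end
 *
 * mem_malloc() splits the free block as needed, while mem_free() and
 * plug_holes() merge adjacent free blocks back together. */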
/** All allocated blocks will be MIN_SIZE bytes big, at least!
* MIN_SIZE can be overridden to suit your needs. Smaller values save space,
* larger values could prevent very small blocks from fragmenting the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE 12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
/** If you want to relocate the heap to external memory, simply define
* LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
* If so, make sure the memory at that location is big enough (see below on
* how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap. we need one struct mem at the end and some room for alignment */
u8_t ram_heap[MEM_SIZE_ALIGNED + (2U*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT];
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */
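/* Illustrative sketch, not part of the original file: relocating the heap to
 * a dedicated region via lwipopts.h/cc.h. The symbol name is hypothetical:
 *
 *   extern unsigned char my_lwip_heap[];   // e.g. placed by the linker script
 *   #define LWIP_RAM_HEAP_POINTER my_lwip_heap
 *
 * The region must provide at least MEM_SIZE_ALIGNED + 2*SIZEOF_STRUCT_MEM +
 * MEM_ALIGNMENT bytes, matching the ram_heap declaration above. */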
/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;
/** concurrent access protection */
#if !NO_SYS
static sys_mutex_t mem_mutex;
#endif
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
static volatile u8_t mem_free_count;
/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc)
#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
/* Protect the heap only by using a semaphore */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
/**
* "Plug holes" by combining adjacent empty struct mems.
* After this function is through, there should not exist
* one empty struct mem pointing to another empty struct mem.
*
* @param mem this points to a struct mem which just has been freed
* @internal this function is only called by mem_free() and mem_trim()
*
* This assumes access to the heap is protected by the calling function
* already.
*/
static void
plug_holes(struct mem *mem)
{
struct mem *nmem;
struct mem *pmem;
LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);
/* plug hole forward */
LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);
nmem = (struct mem *)(void *)&ram[mem->next];
if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
/* if mem->next is unused and not end of ram, combine mem and mem->next */
if (lfree == nmem) {
lfree = mem;
}
mem->next = nmem->next;
((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
}
/* plug hole backward */
pmem = (struct mem *)(void *)&ram[mem->prev];
if (pmem != mem && pmem->used == 0) {
/* if mem->prev is unused, combine mem and mem->prev */
if (lfree == mem) {
lfree = pmem;
}
pmem->next = mem->next;
((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
}
}
/**
* Zero the heap and initialize start, end and lowest-free
*/
void
mem_init(void)
{
struct mem *mem;
LWIP_ASSERT("Sanity check alignment",
(SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
/* align the heap */
ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
/* initialize the start of the heap */
mem = (struct mem *)(void *)ram;
mem->next = MEM_SIZE_ALIGNED;
mem->prev = 0;
mem->used = 0;
/* initialize the end of the heap */
ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
ram_end->used = 1;
ram_end->next = MEM_SIZE_ALIGNED;
ram_end->prev = MEM_SIZE_ALIGNED;
/* initialize the lowest-free pointer to the start of the heap */
lfree = (struct mem *)(void *)ram;
MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
if (sys_mutex_new(&mem_mutex) != ERR_OK) {
LWIP_ASSERT("failed to create mem_mutex", 0);
}
}
/**
* Put a struct mem back on the heap
*
* @param rmem is the data portion of a struct mem as returned by a previous
* call to mem_malloc()
*/
void
mem_free(void *rmem)
{
struct mem *mem;
LWIP_MEM_FREE_DECL_PROTECT();
if (rmem == NULL) {
LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
return;
}
LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);
LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
(u8_t *)rmem < (u8_t *)ram_end);
if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
SYS_ARCH_DECL_PROTECT(lev);
LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
/* protect mem stats from concurrent access */
SYS_ARCH_PROTECT(lev);
MEM_STATS_INC(illegal);
SYS_ARCH_UNPROTECT(lev);
return;
}
/* protect the heap from concurrent access */
LWIP_MEM_FREE_PROTECT();
/* Get the corresponding struct mem ... */
mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
/* ... which has to be in a used state ... */
LWIP_ASSERT("mem_free: mem->used", mem->used);
/* ... and is now unused. */
mem->used = 0;
if (mem < lfree) {
/* the newly freed struct is now the lowest */
lfree = mem;
}
MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));
/* finally, see if prev or next are free also */
plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
LWIP_MEM_FREE_UNPROTECT();
}
/**
* Shrink memory returned by mem_malloc().
*
* @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
* @param newsize required size after shrinking (needs to be smaller than or
* equal to the previous size)
* @return for compatibility reasons: is always == rmem, at the moment
* or NULL if newsize is > old size, in which case rmem is NOT touched
* or freed!
*/
void *
mem_trim(void *rmem, mem_size_t newsize)
{
mem_size_t size;
mem_size_t ptr, ptr2;
struct mem *mem, *mem2;
/* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
LWIP_MEM_FREE_DECL_PROTECT();
/* Expand the size of the allocated memory region so that we can
adjust for alignment. */
newsize = LWIP_MEM_ALIGN_SIZE(newsize);
if (newsize < MIN_SIZE_ALIGNED) {
/* every data block must be at least MIN_SIZE_ALIGNED long */
newsize = MIN_SIZE_ALIGNED;
}
if (newsize > MEM_SIZE_ALIGNED) {
return NULL;
}
LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
(u8_t *)rmem < (u8_t *)ram_end);
if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
SYS_ARCH_DECL_PROTECT(lev);
LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
/* protect mem stats from concurrent access */
SYS_ARCH_PROTECT(lev);
MEM_STATS_INC(illegal);
SYS_ARCH_UNPROTECT(lev);
return rmem;
}
/* Get the corresponding struct mem ... */
mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
/* ... and its offset pointer */
ptr = (mem_size_t)((u8_t *)mem - ram);
size = mem->next - ptr - SIZEOF_STRUCT_MEM;
LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
if (newsize > size) {
/* not supported */
return NULL;
}
if (newsize == size) {
/* No change in size, simply return */
return rmem;
}
/* protect the heap from concurrent access */
LWIP_MEM_FREE_PROTECT();
mem2 = (struct mem *)(void *)&ram[mem->next];
if (mem2->used == 0) {
/* The next struct is unused, we can simply move it a little */
mem_size_t next;
/* remember the old next pointer */
next = mem2->next;
/* create new struct mem which is moved directly after the shrunk mem */
ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
if (lfree == mem2) {
lfree = (struct mem *)(void *)&ram[ptr2];
}
mem2 = (struct mem *)(void *)&ram[ptr2];
mem2->used = 0;
/* restore the next pointer */
mem2->next = next;
/* link it back to mem */
mem2->prev = ptr;
/* link mem to it */
mem->next = ptr2;
/* last thing to restore linked list: as we have moved mem2,
* let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
* the end of the heap */
if (mem2->next != MEM_SIZE_ALIGNED) {
((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
}
MEM_STATS_DEC_USED(used, (size - newsize));
/* no need to plug holes, we've already done that */
} else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
/* Next struct is used but there's room for another struct mem with
* at least MIN_SIZE_ALIGNED of data.
* Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
* ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
* @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
* region that couldn't hold data, but when mem->next gets freed,
* the 2 regions would be combined, resulting in more free memory */
ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
mem2 = (struct mem *)(void *)&ram[ptr2];
if (mem2 < lfree) {
lfree = mem2;
}
mem2->used = 0;
mem2->next = mem->next;
mem2->prev = ptr;
mem->next = ptr2;
if (mem2->next != MEM_SIZE_ALIGNED) {
((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
}
MEM_STATS_DEC_USED(used, (size - newsize));
/* the original mem->next is used, so no need to plug holes! */
}
/* else {
next struct mem is used but size between mem and mem2 is not big enough
to create another struct mem
-> don't do anything.
-> the remaining space stays unused since it is too small
} */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
LWIP_MEM_FREE_UNPROTECT();
return rmem;
}
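/* Illustrative usage sketch, not part of the original file: allocate a
 * worst-case buffer, then return the unused tail once the real size is known.
 * The function name and sizes are examples only. */
void
mem_trim_example(void)
{
  char *buf = (char *)mem_malloc(512);
  if (buf != NULL) {
    mem_size_t used = 128; /* e.g. number of bytes actually filled in */
    /* mem_trim() only shrinks; on success it returns the same pointer */
    buf = (char *)mem_trim(buf, used);
    /* ... use buf ... */
    mem_free(buf);
  }
}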
/**
* Adam's mem_malloc() plus solution for bug #17922
* Allocate a block of memory with a minimum of 'size' bytes.
*
* @param size is the minimum size of the requested block in bytes.
* @return pointer to allocated memory or NULL if no free memory was found.
*
* Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
*/
void *
mem_malloc(mem_size_t size)
{
mem_size_t ptr, ptr2;
struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
LWIP_MEM_ALLOC_DECL_PROTECT();
if (size == 0) {
return NULL;
}
/* Expand the size of the allocated memory region so that we can
adjust for alignment. */
size = LWIP_MEM_ALIGN_SIZE(size);
if (size < MIN_SIZE_ALIGNED) {
/* every data block must be at least MIN_SIZE_ALIGNED long */
size = MIN_SIZE_ALIGNED;
}
if (size > MEM_SIZE_ALIGNED) {
return NULL;
}
/* protect the heap from concurrent access */
sys_mutex_lock(&mem_mutex);
LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
/* run as long as a mem_free disturbed mem_malloc or mem_trim */
do {
local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
/* Scan through the heap searching for a free block that is big enough,
* beginning with the lowest free block.
*/
for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
ptr = ((struct mem *)(void *)&ram[ptr])->next) {
mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_free_count = 0;
LWIP_MEM_ALLOC_UNPROTECT();
/* allow mem_free or mem_trim to run */
LWIP_MEM_ALLOC_PROTECT();
if (mem_free_count != 0) {
/* If mem_free or mem_trim have run, we have to restart since they
could have altered our current struct mem. */
local_mem_free_count = 1;
break;
}
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
if ((!mem->used) &&
(mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
/* mem is not used and at least perfect fit is possible:
* mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
/* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
* at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
* -> split large block, create empty remainder,
* remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
* mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
* struct mem would fit in but no data between mem2 and mem2->next
* @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
* region that couldn't hold data, but when mem->next gets freed,
* the 2 regions would be combined, resulting in more free memory
*/
ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
/* create mem2 struct */
mem2 = (struct mem *)(void *)&ram[ptr2];
mem2->used = 0;
mem2->next = mem->next;
mem2->prev = ptr;
/* and insert it between mem and mem->next */
mem->next = ptr2;
mem->used = 1;
if (mem2->next != MEM_SIZE_ALIGNED) {
((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
}
MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
} else {
/* (a mem2 struct does not fit into the user data space of mem and mem->next will always
* be used at this point: if not, we have 2 unused structs in a row; plug_holes should have
* taken care of this).
* -> near fit or exact fit: do not split, no mem2 creation
* also can't move mem->next directly behind mem, since mem->next
* will always be used at this point!
*/
mem->used = 1;
MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
}
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_malloc_adjust_lfree:
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
if (mem == lfree) {
struct mem *cur = lfree;
/* Find next free block after mem and update lowest free pointer */
while (cur->used && cur != ram_end) {
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_free_count = 0;
LWIP_MEM_ALLOC_UNPROTECT();
/* prevent high interrupt latency... */
LWIP_MEM_ALLOC_PROTECT();
if (mem_free_count != 0) {
/* If mem_free or mem_trim have run, we have to restart since they
could have altered our current struct mem or lfree. */
goto mem_malloc_adjust_lfree;
}
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
cur = (struct mem *)(void *)&ram[cur->next];
}
lfree = cur;
LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
}
LWIP_MEM_ALLOC_UNPROTECT();
sys_mutex_unlock(&mem_mutex);
LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
(mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
LWIP_ASSERT("mem_malloc: sanity check alignment",
(((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
return (u8_t *)mem + SIZEOF_STRUCT_MEM;
}
}
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
/* if we got interrupted by a mem_free, try again */
} while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
MEM_STATS_INC(err);
LWIP_MEM_ALLOC_UNPROTECT();
sys_mutex_unlock(&mem_mutex);
return NULL;
}
#endif /* MEM_USE_POOLS */
/**
* Contiguously allocates enough space for count objects that are size bytes
* of memory each and returns a pointer to the allocated memory.
*
* The allocated memory is filled with bytes of value zero.
*
* @param count number of objects to allocate
* @param size size of the objects to allocate
* @return pointer to allocated memory / NULL pointer if there is an error
*/
void *mem_calloc(mem_size_t count, mem_size_t size)
{
void *p;
/* allocate 'count' objects of size 'size' */
p = mem_malloc(count * size);
if (p) {
/* zero the memory */
memset(p, 0, (size_t)count * (size_t)size);
}
return p;
}
#endif /* !MEM_LIBC_MALLOC */

436
components/lwip/core/memp.c Executable file
View File

@@ -0,0 +1,436 @@
/**
* @file
* Dynamic pool memory manager
*
* lwIP has dedicated pools for many structures (netconn, protocol control blocks,
* packet buffers, ...). All these pools are managed here.
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#include "lwip/memp.h"
#include "lwip/pbuf.h"
#include "lwip/udp.h"
#include "lwip/raw.h"
#include "lwip/igmp.h"
#include "lwip/api.h"
#include "lwip/priv/api_msg.h"
#include "lwip/sockets.h"
#include "lwip/sys.h"
#include "lwip/timers.h"
#include "lwip/stats.h"
#include "netif/etharp.h"
#include "lwip/ip_frag.h"
#include "lwip/dns.h"
#include "lwip/netdb.h"
#include "netif/ppp/ppp.h"
#include "netif/ppp/pppos.h"
#include "netif/ppp/pppoe.h"
#include "netif/ppp/pppol2tp.h"
#include "lwip/nd6.h"
#include "lwip/ip6_frag.h"
#include "lwip/mld6.h"
#include "lwip/tcp.h"
#include "lwip/tcpip.h"
#include "lwip/priv/tcp_priv.h"
#include "lwip/priv/tcpip_priv.h"
#include "lwip/netifapi.h"
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
#define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc)
#include "lwip/priv/memp_std.h"
const struct memp_desc* const memp_pools[MEMP_MAX] = {
#define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name,
#include "lwip/priv/memp_std.h"
};
#if !MEMP_MEM_MALLOC /* don't build if not configured for use in lwipopts.h */
#if MEMP_SANITY_CHECK
/**
* Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm".
*/
static int
memp_sanity(const struct memp_desc *desc)
{
struct memp *t, *h;
t = *desc->tab;
if (t != NULL) {
for (h = t->next; (t != NULL) && (h != NULL); t = t->next,
h = ((h->next != NULL) ? h->next->next : NULL)) {
if (t == h) {
return 0;
}
}
}
return 1;
}
#endif /* MEMP_SANITY_CHECK*/
#if MEMP_OVERFLOW_CHECK
/**
* Check if a memp element was victim of an overflow
* (e.g. the restricted area after it has been altered)
*
* @param p the memp element to check
* @param desc the pool p comes from
*/
static void
memp_overflow_check_element_overflow(struct memp *p, const struct memp_desc *desc)
{
u16_t k;
u8_t *m;
#if MEMP_SANITY_REGION_AFTER_ALIGNED > 0
m = (u8_t*)p + MEMP_SIZE + desc->size;
for (k = 0; k < MEMP_SANITY_REGION_AFTER_ALIGNED; k++) {
if (m[k] != 0xcd) {
char errstr[128] = "detected memp overflow in pool ";
strcat(errstr, desc->desc);
LWIP_ASSERT(errstr, 0);
}
}
#endif
}
/**
* Check if a memp element was victim of an underflow
* (e.g. the restricted area before it has been altered)
*
* @param p the memp element to check
* @param desc the pool p comes from
*/
static void
memp_overflow_check_element_underflow(struct memp *p, const struct memp_desc *desc)
{
u16_t k;
u8_t *m;
#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0
m = (u8_t*)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED;
for (k = 0; k < MEMP_SANITY_REGION_BEFORE_ALIGNED; k++) {
if (m[k] != 0xcd) {
char errstr[128] = "detected memp underflow in pool ";
strcat(errstr, desc->desc);
LWIP_ASSERT(errstr, 0);
}
}
#endif
}
/**
* Do an overflow check for all elements in every pool.
*
* @see memp_overflow_check_element_overflow/underflow for a description of the check
*/
static void
memp_overflow_check_all(void)
{
u16_t i, j;
struct memp *p;
for (i = 0; i < MEMP_MAX; ++i) {
p = (struct memp *)(size_t)(memp_pools[i]->base);
for (j = 0; j < memp_pools[i]->num; ++j) {
memp_overflow_check_element_overflow(p, memp_pools[i]);
memp_overflow_check_element_underflow(p, memp_pools[i]);
p = (struct memp*)(size_t)((u8_t*)p + MEMP_SIZE + memp_pools[i]->size + MEMP_SANITY_REGION_AFTER_ALIGNED);
}
}
}
/**
* Initialize the restricted areas of all memp elements in the given pool.
*/
static void
memp_overflow_init(const struct memp_desc *desc)
{
u16_t i;
struct memp *p;
u8_t *m;
p = (struct memp *)(size_t)(desc->base);
for (i = 0; i < desc->num; ++i) {
#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0
m = (u8_t*)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED;
memset(m, 0xcd, MEMP_SANITY_REGION_BEFORE_ALIGNED);
#endif
#if MEMP_SANITY_REGION_AFTER_ALIGNED > 0
m = (u8_t*)p + MEMP_SIZE + desc->size;
memset(m, 0xcd, MEMP_SANITY_REGION_AFTER_ALIGNED);
#endif
p = (struct memp*)(size_t)((u8_t*)p + MEMP_SIZE + desc->size + MEMP_SANITY_REGION_AFTER_ALIGNED);
}
}
#endif /* MEMP_OVERFLOW_CHECK */
void
memp_init_pool(const struct memp_desc *desc)
{
int i;
struct memp *memp;
*desc->tab = NULL;
memp = (struct memp*)LWIP_MEM_ALIGN(desc->base);
/* create a linked list of memp elements */
for (i = 0; i < desc->num; ++i) {
memp->next = *desc->tab;
*desc->tab = memp;
memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size
#if MEMP_OVERFLOW_CHECK
+ MEMP_SANITY_REGION_AFTER_ALIGNED
#endif
);
}
#if MEMP_OVERFLOW_CHECK
memp_overflow_init(desc);
#endif /* MEMP_OVERFLOW_CHECK */
}
/**
* Initialize this module.
*
* Carves out memp_memory into linked lists for each pool-type.
*/
void
memp_init(void)
{
u16_t i;
for (i = 0; i < MEMP_MAX; ++i) {
MEMP_STATS_AVAIL(used, i, 0);
MEMP_STATS_AVAIL(max, i, 0);
MEMP_STATS_AVAIL(err, i, 0);
MEMP_STATS_AVAIL(avail, i, memp_pools[i]->num);
}
/* for every pool: */
for (i = 0; i < MEMP_MAX; ++i) {
memp_init_pool(memp_pools[i]);
}
#if MEMP_OVERFLOW_CHECK
/* check everything a first time to see if it worked */
memp_overflow_check_all();
#endif /* MEMP_OVERFLOW_CHECK */
}
void *
#if !MEMP_OVERFLOW_CHECK
memp_malloc_pool(const struct memp_desc *desc)
#else
memp_malloc_pool_fn(const struct memp_desc *desc, const char* file, const int line)
#endif
{
struct memp *memp;
SYS_ARCH_DECL_PROTECT(old_level);
SYS_ARCH_PROTECT(old_level);
memp = *desc->tab;
#if MEMP_OVERFLOW_CHECK == 1
memp_overflow_check_element_overflow(memp, desc);
memp_overflow_check_element_underflow(memp, desc);
#endif /* MEMP_OVERFLOW_CHECK */
if (memp != NULL) {
*desc->tab = memp->next;
#if MEMP_OVERFLOW_CHECK
memp->next = NULL;
memp->file = file;
memp->line = line;
#endif /* MEMP_OVERFLOW_CHECK */
LWIP_ASSERT("memp_malloc: memp properly aligned",
((mem_ptr_t)memp % MEM_ALIGNMENT) == 0);
memp = (struct memp*)(void *)((u8_t*)memp + MEMP_SIZE);
}
SYS_ARCH_UNPROTECT(old_level);
return memp;
}
/**
* Get an element from a specific pool.
*
* @param type the pool to get an element from
*
* the debug version has two more parameters:
* @param file file name calling this function
* @param line number of line where this function is called
*
* @return a pointer to the allocated memory or a NULL pointer on error
*/
void *
#if !MEMP_OVERFLOW_CHECK
memp_malloc(memp_t type)
#else
memp_malloc_fn(memp_t type, const char* file, const int line)
#endif
{
void *memp;
SYS_ARCH_DECL_PROTECT(old_level);
LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;);
SYS_ARCH_PROTECT(old_level);
#if MEMP_OVERFLOW_CHECK >= 2
memp_overflow_check_all();
#endif /* MEMP_OVERFLOW_CHECK >= 2 */
#if !MEMP_OVERFLOW_CHECK
memp = memp_malloc_pool(memp_pools[type]);
#else
memp = memp_malloc_pool_fn(memp_pools[type], file, line);
#endif
if (memp != NULL) {
MEMP_STATS_INC(used, type);
if(MEMP_STATS_GET(used, type) > MEMP_STATS_GET(max, type)) {
MEMP_STATS_AVAIL(max, type, MEMP_STATS_GET(used, type));
}
} else {
LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", memp_pools[type]->desc));
MEMP_STATS_INC(err, type);
}
SYS_ARCH_UNPROTECT(old_level);
return memp;
}
static void
#ifdef LWIP_HOOK_MEMP_AVAILABLE
do_memp_free_pool(const struct memp_desc* desc, void *mem, struct memp **old_first)
#else
do_memp_free_pool(const struct memp_desc* desc, void *mem)
#endif
{
struct memp *memp;
SYS_ARCH_DECL_PROTECT(old_level);
LWIP_ASSERT("memp_free: mem properly aligned",
((mem_ptr_t)mem % MEM_ALIGNMENT) == 0);
memp = (struct memp *)(void *)((u8_t*)mem - MEMP_SIZE);
SYS_ARCH_PROTECT(old_level);
#if MEMP_OVERFLOW_CHECK == 1
memp_overflow_check_element_overflow(memp, desc);
memp_overflow_check_element_underflow(memp, desc);
#endif /* MEMP_OVERFLOW_CHECK */
memp->next = *desc->tab;
#ifdef LWIP_HOOK_MEMP_AVAILABLE
if (old_first)
*old_first = *desc->tab;
#endif
*desc->tab = memp;
#if MEMP_SANITY_CHECK
LWIP_ASSERT("memp sanity", memp_sanity(desc));
#endif /* MEMP_SANITY_CHECK */
SYS_ARCH_UNPROTECT(old_level);
}
void
memp_free_pool(const struct memp_desc* desc, void *mem)
{
if ((desc == NULL) || (mem == NULL)) {
return;
}
#ifdef LWIP_HOOK_MEMP_AVAILABLE
do_memp_free_pool(desc, mem, NULL);
#else
do_memp_free_pool(desc, mem);
#endif
}
/**
* Put an element back into its pool.
*
* @param type the pool where to put mem
* @param mem the memp element to free
*/
void
memp_free(memp_t type, void *mem)
{
#ifdef LWIP_HOOK_MEMP_AVAILABLE
struct memp *old_first;
#endif
SYS_ARCH_DECL_PROTECT(old_level);
LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;);
SYS_ARCH_PROTECT(old_level);
#if MEMP_OVERFLOW_CHECK >= 2
memp_overflow_check_all();
#endif /* MEMP_OVERFLOW_CHECK >= 2 */
MEMP_STATS_DEC(used, type);
#ifdef LWIP_HOOK_MEMP_AVAILABLE
do_memp_free_pool(memp_pools[type], mem, &old_first);
#else
do_memp_free_pool(memp_pools[type], mem);
#endif
SYS_ARCH_UNPROTECT(old_level);
#ifdef LWIP_HOOK_MEMP_AVAILABLE
if (old_first == NULL) {
LWIP_HOOK_MEMP_AVAILABLE(type);
}
#endif
}
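/*
 * Usage sketch (illustrative, not part of the original file): taking an
 * element from a pool and putting it back. MEMP_RAW_PCB is only an example
 * pool type and assumes LWIP_RAW is enabled; any other memp_t value from
 * memp_std.h is used the same way.
 */
#if 0 /* example only */
static void memp_usage_example(void)
{
  /* grab one element from the pool; NULL means the pool is exhausted */
  void *elem = memp_malloc(MEMP_RAW_PCB);
  if (elem != NULL) {
    /* ... use the element as a struct raw_pcb ... */
    /* always return it to the same pool it was taken from */
    memp_free(MEMP_RAW_PCB, elem);
  }
}
#endif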
#endif /* MEMP_MEM_MALLOC */

1090
components/lwip/core/netif.c Executable file

File diff suppressed because it is too large Load Diff

1396
components/lwip/core/pbuf.c Executable file

File diff suppressed because it is too large Load Diff

483
components/lwip/core/raw.c Executable file
View File

@@ -0,0 +1,483 @@
/**
* @file
* Implementation of raw protocol PCBs for low-level handling of
* different types of protocols besides (or overriding) those
* already available in lwIP.
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */
#include "lwip/def.h"
#include "lwip/memp.h"
#include "lwip/ip_addr.h"
#include "lwip/netif.h"
#include "lwip/raw.h"
#include "lwip/stats.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"
#include "lwip/inet_chksum.h"
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
/** The list of RAW PCBs */
static struct raw_pcb *raw_pcbs;
static u8_t
raw_input_match(struct raw_pcb *pcb, u8_t broadcast)
{
LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */
/* Dual-stack: PCBs listening to any IP type also listen to any IP address */
if(IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
#if LWIP_IPV4 && IP_SOF_BROADCAST_RECV
if((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) {
return 0;
}
#endif /* LWIP_IPV4 && IP_SOF_BROADCAST_RECV */
return 1;
}
/* Only need to check PCB if incoming IP version matches PCB IP version */
if(IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) {
#if LWIP_IPV4
/* Special case: IPv4 broadcast: receive all broadcasts
* Note: broadcast variable can only be 1 if it is an IPv4 broadcast */
if(broadcast != 0) {
#if IP_SOF_BROADCAST_RECV
if(ip_get_option(pcb, SOF_BROADCAST))
#endif /* IP_SOF_BROADCAST_RECV */
{
if(ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) {
return 1;
}
}
} else
#endif /* LWIP_IPV4 */
/* Handle IPv4 and IPv6: catch all or exact match */
if(ip_addr_isany(&pcb->local_ip) ||
ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) {
return 1;
}
}
return 0;
}
/**
* Determine if an incoming IP packet is covered by a RAW PCB
* and if so, pass it to a user-provided receive callback function.
*
* Given an incoming IP datagram (as a chain of pbufs) this function
* finds a corresponding RAW PCB and calls the corresponding receive
* callback function.
*
* @param p pbuf to be demultiplexed to a RAW PCB.
* @param inp network interface on which the datagram was received.
* @return - 1 if the packet has been eaten by a RAW PCB receive
* callback function. The caller MAY NOT reference the
* packet any longer, and MAY NOT call pbuf_free().
* @return - 0 if packet is not eaten (pbuf is still referenced by the
* caller).
*
*/
u8_t
raw_input(struct pbuf *p, struct netif *inp)
{
struct raw_pcb *pcb, *prev;
s16_t proto;
u8_t eaten = 0;
u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif());
LWIP_UNUSED_ARG(inp);
#if LWIP_IPV6
#if LWIP_IPV4
if (IP_HDR_GET_VERSION(p->payload) == 6)
#endif /* LWIP_IPV4 */
{
struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload;
proto = IP6H_NEXTH(ip6hdr);
}
#if LWIP_IPV4
else
#endif /* LWIP_IPV4 */
#endif /* LWIP_IPV6 */
#if LWIP_IPV4
{
proto = IPH_PROTO((struct ip_hdr *)p->payload);
}
#endif /* LWIP_IPV4 */
prev = NULL;
pcb = raw_pcbs;
/* loop through all raw pcbs until the packet is eaten by one */
/* this allows multiple pcbs to match against the packet by design */
while ((eaten == 0) && (pcb != NULL)) {
if ((pcb->protocol == proto) && raw_input_match(pcb, broadcast)) {
/* receive callback function available? */
if (pcb->recv != NULL) {
#ifndef LWIP_NOASSERT
void* old_payload = p->payload;
#endif
/* the receive callback function did not eat the packet? */
eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr());
if (eaten != 0) {
/* receive function ate the packet */
p = NULL;
eaten = 1;
if (prev != NULL) {
/* move the pcb to the front of raw_pcbs so that it is
found faster next time */
prev->next = pcb->next;
pcb->next = raw_pcbs;
raw_pcbs = pcb;
}
} else {
/* sanity-check that the receive callback did not alter the pbuf */
LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet",
p->payload == old_payload);
}
}
/* no receive callback function was set for this raw PCB */
}
/* drop the packet */
prev = pcb;
pcb = pcb->next;
}
return eaten;
}
/**
* Bind a RAW PCB.
*
* @param pcb RAW PCB to be bound with a local address ipaddr.
* @param ipaddr local IP address to bind with. Use IP_ADDR_ANY to
* bind to all local interfaces.
*
* @return lwIP error code.
* - ERR_OK. Successful. No error occurred.
* - ERR_USE. The specified IP address is already bound to by
* another RAW PCB.
*
* @see raw_disconnect()
*/
err_t
raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr)
{
if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr)) {
return ERR_VAL;
}
ip_addr_set_ipaddr(&pcb->local_ip, ipaddr);
return ERR_OK;
}
/**
* Connect a RAW PCB. This function is required by upper layers
* of lwIP. When using the raw API you could use raw_sendto() instead.
*
* This will associate the RAW PCB with the remote address.
*
* @param pcb RAW PCB to be connected with remote address ipaddr and port.
* @param ipaddr remote IP address to connect with.
*
* @return lwIP error code
*
* @see raw_disconnect() and raw_sendto()
*/
err_t
raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr)
{
if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr)) {
return ERR_VAL;
}
ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr);
return ERR_OK;
}
/**
* Set the callback function for received packets that match the
* raw PCB's protocol and binding.
*
* The callback function MUST either
* - eat the packet by calling pbuf_free() and returning non-zero. The
* packet will not be passed to other raw PCBs or other protocol layers.
* - not free the packet, and return zero. The packet will be matched
* against further PCBs and/or forwarded to other protocol layers.
*
* @return non-zero if the packet was free()d, zero if the packet remains
* available for others.
*/
void
raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg)
{
/* remember recv() callback and user data */
pcb->recv = recv;
pcb->recv_arg = recv_arg;
}
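/*
 * Callback sketch (illustrative, not part of the original file): a receive
 * callback that eats every matching packet, following the contract described
 * above. The name ping_recv is made up for the example; the prototype is
 * that of raw_recv_fn.
 */
#if 0 /* example only */
static u8_t
ping_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *addr)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(addr);
  /* eat the packet: free it and return non-zero so no other PCB sees it */
  pbuf_free(p);
  return 1;
  /* returning 0 without freeing p would leave it to further PCBs/layers */
}
#endif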
/**
* Send the raw IP packet to the given address. Note that you cannot modify
* the IP headers (this is inconsistent with the receive callback, where you
* do get the IP headers); you can only specify the IP payload here. Changing
* this requires some more changes in lwIP (a raw_send() variant that takes
* the headers would then be possible).
*
* @param pcb the raw pcb used to send
* @param p the IP payload to send
* @param ipaddr the destination address of the IP packet
*
*/
err_t
raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr)
{
err_t err;
struct netif *netif;
const ip_addr_t *src_ip;
struct pbuf *q; /* q will be sent down the stack */
s16_t header_size;
const ip_addr_t *dst_ip = ipaddr;
if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) {
return ERR_VAL;
}
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n"));
header_size = (
#if LWIP_IPV4 && LWIP_IPV6
IP_IS_V6(ipaddr) ? IP6_HLEN : IP_HLEN);
#elif LWIP_IPV4
IP_HLEN);
#else
IP6_HLEN);
#endif
/* not enough space to add an IP header to first pbuf in given p chain? */
if (pbuf_header(p, header_size)) {
/* allocate header in new pbuf */
q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM);
/* new header pbuf could not be allocated? */
if (q == NULL) {
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n"));
return ERR_MEM;
}
if (p->tot_len != 0) {
/* chain header q in front of given pbuf p */
pbuf_chain(q, p);
}
/* { first pbuf q points to header pbuf } */
LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p));
} else {
/* first pbuf q equals given pbuf */
q = p;
if (pbuf_header(q, -header_size)) {
LWIP_ASSERT("Can't restore header we just removed!", 0);
return ERR_MEM;
}
}
netif = ip_route(&pcb->local_ip, dst_ip);
if (netif == NULL) {
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to "));
ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, dst_ip);
/* free any temporary header pbuf allocated by pbuf_header() */
if (q != p) {
pbuf_free(q);
}
return ERR_RTE;
}
#if IP_SOF_BROADCAST
if (!IP_IS_V6(ipaddr))
{
/* broadcast filter? */
if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(ipaddr, netif)) {
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb));
/* free any temporary header pbuf allocated by pbuf_header() */
if (q != p) {
pbuf_free(q);
}
return ERR_VAL;
}
}
#endif /* IP_SOF_BROADCAST */
if (ip_addr_isany(&pcb->local_ip)) {
/* use outgoing network interface IP address as source address */
src_ip = ip_netif_get_local_ip(netif, dst_ip);
#if LWIP_IPV6
if (src_ip == NULL) {
if (q != p) {
pbuf_free(q);
}
return ERR_RTE;
}
#endif /* LWIP_IPV6 */
} else {
/* use RAW PCB local IP address as source address */
src_ip = &pcb->local_ip;
}
#if LWIP_IPV6
/* If requested, based on the IPV6_CHECKSUM socket option per RFC3542,
compute the checksum and update the checksum in the payload. */
if (IP_IS_V6(dst_ip) && pcb->chksum_reqd) {
u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(dst_ip));
LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2));
SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t));
}
#endif
NETIF_SET_HWADDRHINT(netif, &pcb->addr_hint);
err = ip_output_if(q, src_ip, dst_ip, pcb->ttl, pcb->tos, pcb->protocol, netif);
NETIF_SET_HWADDRHINT(netif, NULL);
/* did we chain a header earlier? */
if (q != p) {
/* free the header */
pbuf_free(q);
}
return err;
}
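/*
 * Usage sketch (illustrative, not part of the original file): sending a raw
 * IP payload. Only the payload is handed in; raw_sendto() takes care of the
 * IP header as shown above. The 8-byte size and zero fill are arbitrary
 * example values.
 */
#if 0 /* example only */
static err_t
raw_sendto_example(struct raw_pcb *pcb, const ip_addr_t *dst)
{
  err_t err = ERR_MEM;
  struct pbuf *p = pbuf_alloc(PBUF_IP, 8, PBUF_RAM);
  if (p != NULL) {
    memset(p->payload, 0, p->len);  /* fill in the protocol payload */
    err = raw_sendto(pcb, p, dst);
    pbuf_free(p);                   /* the caller keeps ownership of p */
  }
  return err;
}
#endif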
/**
* Send the raw IP packet to the address given by raw_connect()
*
* @param pcb the raw pcb used to send
* @param p the IP payload to send
*
*/
err_t
raw_send(struct raw_pcb *pcb, struct pbuf *p)
{
return raw_sendto(pcb, p, &pcb->remote_ip);
}
/**
* Remove a RAW PCB.
*
* @param pcb RAW PCB to be removed. The PCB is removed from the list of
* RAW PCBs and the data structure is freed from memory.
*
* @see raw_new()
*/
void
raw_remove(struct raw_pcb *pcb)
{
struct raw_pcb *pcb2;
/* pcb to be removed is first in list? */
if (raw_pcbs == pcb) {
/* make list start at 2nd pcb */
raw_pcbs = raw_pcbs->next;
/* pcb not 1st in list */
} else {
for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) {
/* find pcb in raw_pcbs list */
if (pcb2->next != NULL && pcb2->next == pcb) {
/* remove pcb from list */
pcb2->next = pcb->next;
break;
}
}
}
memp_free(MEMP_RAW_PCB, pcb);
}
/**
* Create a RAW PCB.
*
* @return The RAW PCB which was created. NULL if the PCB data structure
* could not be allocated.
*
* @param proto the protocol number of the IP payload (e.g. IP_PROTO_ICMP)
*
* @see raw_remove()
*/
struct raw_pcb *
raw_new(u8_t proto)
{
struct raw_pcb *pcb;
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n"));
pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB);
/* could allocate RAW PCB? */
if (pcb != NULL) {
/* initialize PCB to all zeroes */
memset(pcb, 0, sizeof(struct raw_pcb));
pcb->protocol = proto;
pcb->ttl = RAW_TTL;
pcb->next = raw_pcbs;
raw_pcbs = pcb;
}
return pcb;
}
/**
* Create a RAW PCB for IPv6.
*
* @return The RAW PCB which was created. NULL if the PCB data structure
* could not be allocated.
*
* @param type IP address type, see IPADDR_TYPE_XX definitions.
* @param proto the protocol number (next header) of the IPv6 packet payload
* (e.g. IP6_NEXTH_ICMP6)
*
* @see raw_remove()
*/
struct raw_pcb *
raw_new_ip_type(u8_t type, u8_t proto)
{
struct raw_pcb *pcb;
pcb = raw_new(proto);
#if LWIP_IPV4 && LWIP_IPV6
if(pcb != NULL) {
IP_SET_TYPE_VAL(pcb->local_ip, type);
IP_SET_TYPE_VAL(pcb->remote_ip, type);
}
#else /* LWIP_IPV4 && LWIP_IPV6 */
LWIP_UNUSED_ARG(type);
#endif /* LWIP_IPV4 && LWIP_IPV6 */
return pcb;
}
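/*
 * Setup sketch (illustrative, not part of the original file): creating an
 * ICMP raw PCB and wiring in a receive callback. ping_recv refers to the
 * hypothetical callback sketched after raw_recv() above; IP_PROTO_ICMP and
 * IP_ADDR_ANY are standard lwIP definitions.
 */
#if 0 /* example only */
static struct raw_pcb *
raw_setup_example(void)
{
  struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
  if (pcb != NULL) {
    raw_recv(pcb, ping_recv, NULL);  /* register the receive callback */
    raw_bind(pcb, IP_ADDR_ANY);      /* accept packets on all local addresses */
  }
  return pcb;                        /* NULL if the MEMP_RAW_PCB pool was empty */
}
#endif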
#endif /* LWIP_RAW */

188
components/lwip/core/stats.c Executable file
View File

@@ -0,0 +1,188 @@
/**
* @file
* Statistics module
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_STATS /* don't build if not configured for use in lwipopts.h */
#include "lwip/def.h"
#include "lwip/stats.h"
#include "lwip/mem.h"
#include "lwip/debug.h"
#include <string.h>
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
struct stats_ lwip_stats;
#if defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY
#if MEMP_STATS
static const char * memp_names[] = {
#define LWIP_MEMPOOL(name,num,size,desc) desc,
#include "lwip/priv/memp_std.h"
};
#endif /* MEMP_STATS */
#endif /* LWIP_DEBUG || LWIP_STATS_DISPLAY */
void
stats_init(void)
{
#ifdef LWIP_DEBUG
#if MEMP_STATS
int i;
for (i = 0; i < MEMP_MAX; i++) {
lwip_stats.memp[i].name = memp_names[i];
}
#endif /* MEMP_STATS */
#if MEM_STATS
lwip_stats.mem.name = "MEM";
#endif /* MEM_STATS */
#endif /* LWIP_DEBUG */
}
#if LWIP_STATS_DISPLAY
void
stats_display_proto(struct stats_proto *proto, const char *name)
{
LWIP_PLATFORM_DIAG(("\n%s\n\t", name));
LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit));
LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv));
LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw));
LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop));
LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr));
LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr));
LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr));
LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr));
LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr));
LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr));
LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err));
LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit));
}
#if IGMP_STATS || MLD6_STATS
void
stats_display_igmp(struct stats_igmp *igmp, const char *name)
{
LWIP_PLATFORM_DIAG(("\n%s\n\t", name));
LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit));
LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv));
LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop));
LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr));
LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr));
LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr));
LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr));
LWIP_PLATFORM_DIAG(("rx_v1: %"STAT_COUNTER_F"\n\t", igmp->rx_v1));
LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group));
LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", igmp->rx_general));
LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report));
LWIP_PLATFORM_DIAG(("tx_join: %"STAT_COUNTER_F"\n\t", igmp->tx_join));
LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave));
LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n\t", igmp->tx_report));
}
#endif /* IGMP_STATS || MLD6_STATS */
#if MEM_STATS || MEMP_STATS
void
stats_display_mem(struct stats_mem *mem, const char *name)
{
LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name));
LWIP_PLATFORM_DIAG(("avail: %"U32_F"\n\t", (u32_t)mem->avail));
LWIP_PLATFORM_DIAG(("used: %"U32_F"\n\t", (u32_t)mem->used));
LWIP_PLATFORM_DIAG(("max: %"U32_F"\n\t", (u32_t)mem->max));
LWIP_PLATFORM_DIAG(("err: %"U32_F"\n", (u32_t)mem->err));
}
#if MEMP_STATS
void
stats_display_memp(struct stats_mem *mem, int index)
{
if (index < MEMP_MAX) {
stats_display_mem(mem, memp_names[index]);
}
}
#endif /* MEMP_STATS */
#endif /* MEM_STATS || MEMP_STATS */
#if SYS_STATS
void
stats_display_sys(struct stats_sys *sys)
{
LWIP_PLATFORM_DIAG(("\nSYS\n\t"));
LWIP_PLATFORM_DIAG(("sem.used: %"U32_F"\n\t", (u32_t)sys->sem.used));
LWIP_PLATFORM_DIAG(("sem.max: %"U32_F"\n\t", (u32_t)sys->sem.max));
LWIP_PLATFORM_DIAG(("sem.err: %"U32_F"\n\t", (u32_t)sys->sem.err));
LWIP_PLATFORM_DIAG(("mutex.used: %"U32_F"\n\t", (u32_t)sys->mutex.used));
LWIP_PLATFORM_DIAG(("mutex.max: %"U32_F"\n\t", (u32_t)sys->mutex.max));
LWIP_PLATFORM_DIAG(("mutex.err: %"U32_F"\n\t", (u32_t)sys->mutex.err));
LWIP_PLATFORM_DIAG(("mbox.used: %"U32_F"\n\t", (u32_t)sys->mbox.used));
LWIP_PLATFORM_DIAG(("mbox.max: %"U32_F"\n\t", (u32_t)sys->mbox.max));
LWIP_PLATFORM_DIAG(("mbox.err: %"U32_F"\n\t", (u32_t)sys->mbox.err));
}
#endif /* SYS_STATS */
void
stats_display(void)
{
s16_t i;
LINK_STATS_DISPLAY();
ETHARP_STATS_DISPLAY();
IPFRAG_STATS_DISPLAY();
IP6_FRAG_STATS_DISPLAY();
IP_STATS_DISPLAY();
ND6_STATS_DISPLAY();
IP6_STATS_DISPLAY();
IGMP_STATS_DISPLAY();
MLD6_STATS_DISPLAY();
ICMP_STATS_DISPLAY();
ICMP6_STATS_DISPLAY();
UDP_STATS_DISPLAY();
TCP_STATS_DISPLAY();
MEM_STATS_DISPLAY();
for (i = 0; i < MEMP_MAX; i++) {
MEMP_STATS_DISPLAY(i);
}
SYS_STATS_DISPLAY();
}
#endif /* LWIP_STATS_DISPLAY */
#endif /* LWIP_STATS */

68
components/lwip/core/sys.c Executable file
View File

@@ -0,0 +1,68 @@
/**
* @file
* lwIP Operating System abstraction
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#include "lwip/sys.h"
/* Most of the functions defined in sys.h must be implemented in the
* architecture-dependent file sys_arch.c */
#if !NO_SYS
#ifndef sys_msleep
/**
* Sleep for some ms. Timeouts are NOT processed while sleeping.
*
* @param ms number of milliseconds to sleep
*/
void
sys_msleep(u32_t ms)
{
if (ms > 0) {
sys_sem_t delaysem;
err_t err = sys_sem_new(&delaysem, 0);
if (err == ERR_OK) {
sys_arch_sem_wait(&delaysem, ms);
sys_sem_free(&delaysem);
}
}
}
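/*
 * Usage sketch (illustrative, not part of the original file): a plain delay
 * from an application thread. Since timeouts are not processed while
 * sleeping, this should not be called from a timeout handler or from the
 * tcpip thread.
 */
#if 0 /* example only */
static void delay_example(void)
{
  sys_msleep(100);  /* block the calling thread for roughly 100 ms */
}
#endif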
#endif /* sys_msleep */
#endif /* !NO_SYS */

2155
components/lwip/core/tcp.c Executable file

File diff suppressed because it is too large Load Diff

1802
components/lwip/core/tcp_in.c Executable file

File diff suppressed because it is too large Load Diff

1614
components/lwip/core/tcp_out.c Executable file

File diff suppressed because it is too large Load Diff

614
components/lwip/core/timers.c Executable file
View File

@@ -0,0 +1,614 @@
/**
* @file
* Stack-internal timers implementation.
* This file includes timer callbacks for stack-internal timers as well as
* functions to set up or stop timers and check for expired timers.
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
* Simon Goldschmidt
*
*/
#include "lwip/opt.h"
#include "lwip/timers.h"
#include "lwip/priv/tcp_priv.h"
#if LWIP_TIMERS
#include "lwip/def.h"
#include "lwip/memp.h"
#include "lwip/priv/tcpip_priv.h"
#include "lwip/ip_frag.h"
#include "netif/etharp.h"
#include "lwip/dhcp.h"
#include "lwip/autoip.h"
#include "lwip/igmp.h"
#include "lwip/dns.h"
#include "lwip/nd6.h"
#include "lwip/ip6_frag.h"
#include "lwip/mld6.h"
#include "lwip/sys.h"
#include "lwip/pbuf.h"
#ifdef MEMLEAK_DEBUG
static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__;
#endif
/** The one and only timeout list */
static struct sys_timeo *next_timeout;
#if NO_SYS
static u32_t timeouts_last_time;
#endif /* NO_SYS */
#if LWIP_TCP
/** global variable that shows if the tcp timer is currently scheduled or not */
static int tcpip_tcp_timer_active;
/**
* Timer callback function that calls tcp_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
tcpip_tcp_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
/* call TCP timer handler */
tcp_tmr();
/* timer still needed? */
if (tcp_active_pcbs || tcp_tw_pcbs) {
/* restart timer */
sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL);
} else {
/* disable timer */
tcpip_tcp_timer_active = 0;
}
}
/**
* Called from TCP_REG when registering a new PCB:
* the reason is to have the TCP timer only running when
* there are active (or time-wait) PCBs.
*/
void
tcp_timer_needed(void)
{
/* timer is off but needed again? */
if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) {
/* enable and start timer */
tcpip_tcp_timer_active = 1;
sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL);
}
}
#endif /* LWIP_TCP */
#if LWIP_IPV4
#if IP_REASSEMBLY
/**
* Timer callback function that calls ip_reass_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
ip_reass_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: ip_reass_tmr()\n"));
ip_reass_tmr();
sys_timeout(IP_TMR_INTERVAL, ip_reass_timer, NULL);
}
#endif /* IP_REASSEMBLY */
#if LWIP_ARP
/**
* Timer callback function that calls etharp_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
arp_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: etharp_tmr()\n"));
etharp_tmr();
sys_timeout(ARP_TMR_INTERVAL, arp_timer, NULL);
}
#endif /* LWIP_ARP */
#if LWIP_DHCP
/**
* Timer callback function that calls dhcp_coarse_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
dhcp_timer_coarse(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: dhcp_coarse_tmr()\n"));
dhcp_coarse_tmr();
#ifdef LWIP_ESP8266
extern void dhcps_coarse_tmr(void);
dhcps_coarse_tmr();
#endif
sys_timeout(DHCP_COARSE_TIMER_MSECS, dhcp_timer_coarse, NULL);
}
/**
* Timer callback function that calls dhcp_fine_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
dhcp_timer_fine(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: dhcp_fine_tmr()\n"));
dhcp_fine_tmr();
sys_timeout(DHCP_FINE_TIMER_MSECS, dhcp_timer_fine, NULL);
}
#endif /* LWIP_DHCP */
#if LWIP_AUTOIP
/**
* Timer callback function that calls autoip_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
autoip_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: autoip_tmr()\n"));
autoip_tmr();
sys_timeout(AUTOIP_TMR_INTERVAL, autoip_timer, NULL);
}
#endif /* LWIP_AUTOIP */
#if LWIP_IGMP
/**
* Timer callback function that calls igmp_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
igmp_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: igmp_tmr()\n"));
igmp_tmr();
sys_timeout(IGMP_TMR_INTERVAL, igmp_timer, NULL);
}
#endif /* LWIP_IGMP */
#endif /* LWIP_IPV4 */
#if LWIP_DNS
/**
* Timer callback function that calls dns_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
dns_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: dns_tmr()\n"));
dns_tmr();
sys_timeout(DNS_TMR_INTERVAL, dns_timer, NULL);
}
#endif /* LWIP_DNS */
#if LWIP_IPV6
/**
* Timer callback function that calls nd6_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
nd6_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: nd6_tmr()\n"));
nd6_tmr();
sys_timeout(ND6_TMR_INTERVAL, nd6_timer, NULL);
}
#if LWIP_IPV6_REASS
/**
* Timer callback function that calls ip6_reass_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
ip6_reass_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: ip6_reass_tmr()\n"));
ip6_reass_tmr();
sys_timeout(IP6_REASS_TMR_INTERVAL, ip6_reass_timer, NULL);
}
#endif /* LWIP_IPV6_REASS */
#if LWIP_IPV6_MLD
/**
* Timer callback function that calls mld6_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
mld6_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: mld6_tmr()\n"));
mld6_tmr();
sys_timeout(MLD6_TMR_INTERVAL, mld6_timer, NULL);
}
#endif /* LWIP_IPV6_MLD */
#endif /* LWIP_IPV6 */
/** Initialize this module */
void sys_timeouts_init(void)
{
#if LWIP_IPV4
#if IP_REASSEMBLY
sys_timeout(IP_TMR_INTERVAL, ip_reass_timer, NULL);
#endif /* IP_REASSEMBLY */
#if LWIP_ARP
sys_timeout(ARP_TMR_INTERVAL, arp_timer, NULL);
#endif /* LWIP_ARP */
#if LWIP_DHCP
#ifdef LWIP_ESP8266
// DHCP_MAXRTX = 0;
#endif
sys_timeout(DHCP_COARSE_TIMER_MSECS, dhcp_timer_coarse, NULL);
sys_timeout(DHCP_FINE_TIMER_MSECS, dhcp_timer_fine, NULL);
#endif /* LWIP_DHCP */
#if LWIP_AUTOIP
sys_timeout(AUTOIP_TMR_INTERVAL, autoip_timer, NULL);
#endif /* LWIP_AUTOIP */
#if LWIP_IGMP
sys_timeout(IGMP_TMR_INTERVAL, igmp_timer, NULL);
#endif /* LWIP_IGMP */
#endif /* LWIP_IPV4 */
#if LWIP_DNS
sys_timeout(DNS_TMR_INTERVAL, dns_timer, NULL);
#endif /* LWIP_DNS */
#if LWIP_IPV6
sys_timeout(ND6_TMR_INTERVAL, nd6_timer, NULL);
#if LWIP_IPV6_REASS
sys_timeout(IP6_REASS_TMR_INTERVAL, ip6_reass_timer, NULL);
#endif /* LWIP_IPV6_REASS */
#if LWIP_IPV6_MLD
sys_timeout(MLD6_TMR_INTERVAL, mld6_timer, NULL);
#endif /* LWIP_IPV6_MLD */
#endif /* LWIP_IPV6 */
#if NO_SYS
/* Initialise timestamp for sys_check_timeouts */
timeouts_last_time = sys_now();
#endif
}
/**
* Create a one-shot timer (aka timeout). Timeouts are processed in the
* following cases:
* - while waiting for a message using sys_timeouts_mbox_fetch()
* - by calling sys_check_timeouts() (NO_SYS==1 only)
*
* @param msecs time in milliseconds after which the timer should expire
* @param handler callback function to call when msecs have elapsed
* @param arg argument to pass to the callback function
*/
#if LWIP_DEBUG_TIMERNAMES
void
sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char* handler_name)
#else /* LWIP_DEBUG_TIMERNAMES */
#ifdef LWIP_ESP8266
u32_t LwipTimOutLim = 0; // For light sleep: minimum timeout enforced below; limit is 3000 ms
#endif
void
sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg)
#endif /* LWIP_DEBUG_TIMERNAMES */
{
struct sys_timeo *timeout, *t;
#if NO_SYS
u32_t now, diff;
#endif
timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT);
if (timeout == NULL) {
LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL);
return;
}
#if NO_SYS
now = sys_now();
if (next_timeout == NULL) {
diff = 0;
timeouts_last_time = now;
} else {
diff = now - timeouts_last_time;
}
#endif
timeout->next = NULL;
timeout->h = handler;
timeout->arg = arg;
#ifdef LWIP_ESP8266
if(msecs < LwipTimOutLim)
msecs = LwipTimOutLim;
#endif
#if NO_SYS
timeout->time = msecs + diff;
#else
timeout->time = msecs;
#endif
#if LWIP_DEBUG_TIMERNAMES
timeout->handler_name = handler_name;
LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p msecs=%"U32_F" handler=%s arg=%p\n",
(void *)timeout, msecs, handler_name, (void *)arg));
#endif /* LWIP_DEBUG_TIMERNAMES */
if (next_timeout == NULL) {
next_timeout = timeout;
return;
}
if (next_timeout->time > msecs) {
next_timeout->time -= msecs;
timeout->next = next_timeout;
next_timeout = timeout;
} else {
for (t = next_timeout; t != NULL; t = t->next) {
timeout->time -= t->time;
if (t->next == NULL || t->next->time > timeout->time) {
if (t->next != NULL) {
t->next->time -= timeout->time;
}
timeout->next = t->next;
t->next = timeout;
break;
}
}
}
}
/**
* Go through timeout list (for this task only) and remove the first matching
* entry (subsequent entries remain untouched), even though the timeout has not
* triggered yet.
*
* @param handler callback function that would be called by the timeout
* @param arg callback argument that would be passed to handler
*/
void
sys_untimeout(sys_timeout_handler handler, void *arg)
{
struct sys_timeo *prev_t, *t;
if (next_timeout == NULL) {
return;
}
for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) {
if ((t->h == handler) && (t->arg == arg)) {
/* We have a match */
/* Unlink from previous in list */
if (prev_t == NULL) {
next_timeout = t->next;
} else {
prev_t->next = t->next;
}
/* If not the last one, add time of this one back to next */
if (t->next != NULL) {
t->next->time += t->time;
}
memp_free(MEMP_SYS_TIMEOUT, t);
return;
}
}
return;
}
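/*
 * Usage sketch (illustrative, not part of the original file): an
 * application-level periodic timer, using the same re-arm pattern as the
 * stack timers above, plus cancellation with sys_untimeout(). The name
 * my_periodic_fn and the 1000 ms interval are made up for the example.
 */
#if 0 /* example only */
static void
my_periodic_fn(void *arg)
{
  /* ... do the periodic work ... */
  sys_timeout(1000, my_periodic_fn, arg);  /* re-arm: sys_timeout is one-shot */
}

static void
my_periodic_start_stop(void *arg)
{
  sys_timeout(1000, my_periodic_fn, arg);  /* first expiry in about 1000 ms */
  /* ... later, when the timer is no longer needed ... */
  sys_untimeout(my_periodic_fn, arg);      /* remove the pending timeout */
}
#endif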
#if NO_SYS
/** Handle timeouts for NO_SYS==1 (i.e. without using
* tcpip_thread/sys_timeouts_mbox_fetch()). Uses sys_now() to call timeout
* handler functions when timeouts expire.
*
* Must be called periodically from your main loop.
*/
void
sys_check_timeouts(void)
{
if (next_timeout) {
struct sys_timeo *tmptimeout;
u32_t diff;
sys_timeout_handler handler;
void *arg;
u8_t had_one;
u32_t now;
now = sys_now();
/* this cares for wraparounds */
diff = now - timeouts_last_time;
do {
#if PBUF_POOL_FREE_OOSEQ
PBUF_CHECK_FREE_OOSEQ();
#endif /* PBUF_POOL_FREE_OOSEQ */
had_one = 0;
tmptimeout = next_timeout;
if (tmptimeout && (tmptimeout->time <= diff)) {
/* timeout has expired */
had_one = 1;
timeouts_last_time += tmptimeout->time;
diff -= tmptimeout->time;
next_timeout = tmptimeout->next;
handler = tmptimeout->h;
arg = tmptimeout->arg;
#if LWIP_DEBUG_TIMERNAMES
if (handler != NULL) {
LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s arg=%p\n",
tmptimeout->handler_name, arg));
}
#endif /* LWIP_DEBUG_TIMERNAMES */
memp_free(MEMP_SYS_TIMEOUT, tmptimeout);
if (handler != NULL) {
handler(arg);
}
}
/* repeat until all expired timers have been called */
} while (had_one);
}
}
/** Set back the timestamp of the last call to sys_check_timeouts()
* This is necessary if sys_check_timeouts() hasn't been called for a long
* time (e.g. while saving energy) to prevent all timer functions of that
* period being called.
*/
void
sys_restart_timeouts(void)
{
timeouts_last_time = sys_now();
}
/** Return the time left before the next timeout is due. If no timeouts are
* enqueued, returns 0xffffffff
*/
u32_t
sys_timeouts_sleeptime(void)
{
u32_t diff;
if (next_timeout == NULL) {
return 0xffffffff;
}
diff = sys_now() - timeouts_last_time;
if (diff > next_timeout->time) {
return 0;
} else {
return next_timeout->time - diff;
}
}
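/*
 * Main-loop sketch (illustrative, not part of the original file) for
 * NO_SYS==1: call sys_check_timeouts() regularly and, optionally, use
 * sys_timeouts_sleeptime() to decide how long the CPU may idle between
 * polls. my_poll_driver() and my_idle() stand in for application code.
 */
#if 0 /* example only */
extern void my_poll_driver(void);
extern void my_idle(u32_t max_ms);

static void
no_sys_main_loop(void)
{
  while (1) {
    my_poll_driver();                   /* feed received frames into the stack */
    sys_check_timeouts();               /* run any timeouts that have expired */
    my_idle(sys_timeouts_sleeptime());  /* idle at most until the next timeout */
  }
}
#endif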
#else /* NO_SYS */
/**
* Wait (forever) for a message to arrive in an mbox.
* While waiting, timeouts are processed.
*
* @param mbox the mbox to fetch the message from
* @param msg the place to store the message
*/
void
sys_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg)
{
u32_t time_needed;
struct sys_timeo *tmptimeout;
sys_timeout_handler handler;
void *arg;
again:
if (!next_timeout) {
time_needed = sys_arch_mbox_fetch(mbox, msg, 0);
} else {
if (next_timeout->time > 0) {
time_needed = sys_arch_mbox_fetch(mbox, msg, next_timeout->time);
} else {
time_needed = SYS_ARCH_TIMEOUT;
}
if (time_needed == SYS_ARCH_TIMEOUT) {
/* If time == SYS_ARCH_TIMEOUT, a timeout occurred before a message
could be fetched. We should now call the timeout handler and
deallocate the memory allocated for the timeout. */
tmptimeout = next_timeout;
next_timeout = tmptimeout->next;
handler = tmptimeout->h;
arg = tmptimeout->arg;
#if LWIP_DEBUG_TIMERNAMES
if (handler != NULL) {
LWIP_DEBUGF(TIMERS_DEBUG, ("stmf calling h=%s arg=%p\n",
tmptimeout->handler_name, arg));
}
#endif /* LWIP_DEBUG_TIMERNAMES */
memp_free(MEMP_SYS_TIMEOUT, tmptimeout);
if (handler != NULL) {
/* For LWIP_TCPIP_CORE_LOCKING, lock the core before calling the
timeout handler function. */
LOCK_TCPIP_CORE();
handler(arg);
UNLOCK_TCPIP_CORE();
}
LWIP_TCPIP_THREAD_ALIVE();
/* We try again to fetch a message from the mbox. */
goto again;
} else {
/* If time != SYS_ARCH_TIMEOUT, a message was received before the timeout
occurred. The time variable is set to the number of
milliseconds we waited for the message. */
if (time_needed < next_timeout->time) {
next_timeout->time -= time_needed;
} else {
next_timeout->time = 0;
}
}
}
}
#endif /* NO_SYS */
#else /* LWIP_TIMERS */
/* Satisfy the TCP code which calls this function */
void
tcp_timer_needed(void)
{
}
#endif /* LWIP_TIMERS */

1198
components/lwip/core/udp.c Executable file

File diff suppressed because it is too large Load Diff