From 70fc69bc5a54d9776ace7c99d46eb533f8fb6e89 Mon Sep 17 00:00:00 2001
From: "Lee A. Roberts"
Date: Thu, 28 Feb 2013 04:37:27 +0000
Subject: [PATCH 1/4] sctp: fix association hangs due to off-by-one errors
 in sctp_tsnmap_grow()

In sctp_tsnmap_mark(), correct off-by-one error when calculating the
size value for sctp_tsnmap_grow().

In sctp_tsnmap_grow(), correct off-by-one error when copying and
resizing the tsnmap.  If max_tsn_seen is in the LSB of the word, this
bit can be lost, causing the corresponding packet to be transmitted
again and to be entered as a duplicate into the SCTP
reassembly/ordering queues.  Change parameter name from "gap"
(zero-based index) to "size" (one-based) to enhance code readability.

Signed-off-by: Lee A. Roberts
Acked-by: Vlad Yasevich
Acked-by: Neil Horman
---
 net/sctp/tsnmap.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 5f25e0c92c31e4..396c45174e5b69 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -51,7 +51,7 @@ static void sctp_tsnmap_update(struct sctp_tsnmap *map);
 static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
                                      __u16 len, __u16 *start, __u16 *end);
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
 
 /* Initialize a block of memory as a tsnmap.  */
 struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
 
         gap = tsn - map->base_tsn;
 
-        if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
+        if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
                 return -ENOMEM;
 
         if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
         return ngaps;
 }
 
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
 {
         unsigned long *new;
         unsigned long inc;
         u16 len;
 
-        if (gap >= SCTP_TSN_MAP_SIZE)
+        if (size > SCTP_TSN_MAP_SIZE)
                 return 0;
 
-        inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+        inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
         len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
 
         new = kzalloc(len>>3, GFP_ATOMIC);
         if (!new)
                 return 0;
 
-        bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
+        bitmap_copy(new, map->tsn_map,
+                    map->max_tsn_seen - map->cumulative_tsn_ack_point);
         kfree(map->tsn_map);
         map->tsn_map = new;
         map->len = len;
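
The lost-bit failure described above comes from the word granularity of
bitmap_copy(): it copies BITS_TO_LONGS(nbits) whole longs, so a bit whose
zero-based index equals nbits survives only when nbits is not a multiple of
BITS_PER_LONG.  The user-space sketch below reproduces that effect;
bitmap_copy_sketch() mimics the kernel helper's whole-word copy, and every
other name in it is invented for the example.

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Like the kernel's bitmap_copy(): whole longs, never a partial word. */
static void bitmap_copy_sketch(unsigned long *dst, const unsigned long *src,
                               unsigned int nbits)
{
        memcpy(dst, src, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}

int main(void)
{
        unsigned long src[2] = { 0, 0 }, dst[2] = { 0, 0 };
        unsigned int idx = BITS_PER_LONG;       /* LSB of the second word */

        src[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);

        /* Old code: nbits == idx, a multiple of the word size, so the
         * word holding bit "idx" is never copied and the bit is lost.
         */
        bitmap_copy_sketch(dst, src, idx);
        printf("nbits = idx:     bit kept? %d\n",
               !!(dst[idx / BITS_PER_LONG] & (1UL << (idx % BITS_PER_LONG))));

        /* Fixed code: nbits == idx + 1 copies one more word. */
        bitmap_copy_sketch(dst, src, idx + 1);
        printf("nbits = idx + 1: bit kept? %d\n",
               !!(dst[idx / BITS_PER_LONG] & (1UL << (idx % BITS_PER_LONG))));
        return 0;
}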
From e67f85ecd83de66d4f25f2e0f90bb0d01a52ddd8 Mon Sep 17 00:00:00 2001
From: "Lee A. Roberts"
Date: Thu, 28 Feb 2013 04:37:28 +0000
Subject: [PATCH 2/4] sctp: fix association hangs due to reneging packets
 below the cumulative TSN ACK point

In sctp_ulpq_renege_list(), do not renege packets below the cumulative
TSN ACK point.

Signed-off-by: Lee A. Roberts
Acked-by: Vlad Yasevich
Acked-by: Neil Horman
---
 net/sctp/ulpqueue.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65bf2..63afddcbcd2cae 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -969,11 +969,16 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 
         tsnmap = &ulpq->asoc->peer.tsn_map;
 
-        while ((skb = __skb_dequeue_tail(list)) != NULL) {
-                freed += skb_headlen(skb);
+        while ((skb = skb_peek_tail(list)) != NULL) {
                 event = sctp_skb2event(skb);
                 tsn = event->tsn;
 
+                /* Don't renege below the Cumulative TSN ACK Point. */
+                if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+                        break;
+
+                __skb_unlink(skb, list);
+                freed += skb_headlen(skb);
                 sctp_ulpevent_free(event);
                 sctp_tsnmap_renege(tsnmap, tsn);
                 if (freed >= needed)

From 95ac7b859f508b1b3e6adf7dce307864e4384a69 Mon Sep 17 00:00:00 2001
From: "Lee A. Roberts"
Date: Thu, 28 Feb 2013 04:37:29 +0000
Subject: [PATCH 3/4] sctp: fix association hangs due to errors when reneging
 events from the ordering queue

In sctp_ulpq_renege_list(), events being reneged from the ordering
queue may correspond to multiple TSNs.  Identify all affected packets;
sum the freed space and renege each TSN from the tsnmap.

Signed-off-by: Lee A. Roberts
Acked-by: Vlad Yasevich
Acked-by: Neil Horman
---
 net/sctp/ulpqueue.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 63afddcbcd2cae..f221fbbc80ac29 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -962,8 +962,8 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                 struct sk_buff_head *list, __u16 needed)
 {
         __u16 freed = 0;
-        __u32 tsn;
-        struct sk_buff *skb;
+        __u32 tsn, last_tsn;
+        struct sk_buff *skb, *flist, *last;
         struct sctp_ulpevent *event;
         struct sctp_tsnmap *tsnmap;
 
@@ -977,10 +977,28 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                 if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
                         break;
 
-                __skb_unlink(skb, list);
+                /* Events in ordering queue may have multiple fragments
+                 * corresponding to additional TSNs.  Sum the total
+                 * freed space; find the last TSN.
+                 */
                 freed += skb_headlen(skb);
+                flist = skb_shinfo(skb)->frag_list;
+                for (last = flist; flist; flist = flist->next) {
+                        last = flist;
+                        freed += skb_headlen(last);
+                }
+                if (last)
+                        last_tsn = sctp_skb2event(last)->tsn;
+                else
+                        last_tsn = tsn;
+
+                /* Unlink the event, then renege all applicable TSNs. */
+                __skb_unlink(skb, list);
                 sctp_ulpevent_free(event);
-                sctp_tsnmap_renege(tsnmap, tsn);
+                while (TSN_lte(tsn, last_tsn)) {
+                        sctp_tsnmap_renege(tsnmap, tsn);
+                        tsn++;
+                }
                 if (freed >= needed)
                         return freed;
         }
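
Patches 2 and 3 both lean on TSN_lte(), the wraparound-safe serial-number
comparison, first to stop reneging at the Cumulative TSN ACK Point and then
to renege every TSN covered by a multi-fragment event.  A minimal user-space
sketch of that pattern follows; tsn_lte() mirrors the kernel macro's
modulo-2^32 semantics, while renege_range() and show() are invented for the
example.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* "a <= b" on 32-bit serial numbers, tolerant of 2^32 wraparound. */
static int tsn_lte(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) <= 0;
}

/* Renege every TSN in [tsn, last_tsn], as patch 3 does per event. */
static void renege_range(uint32_t tsn, uint32_t last_tsn,
                         void (*renege)(uint32_t))
{
        while (tsn_lte(tsn, last_tsn))
                renege(tsn++);
}

static void show(uint32_t tsn)
{
        printf("renege TSN %" PRIu32 "\n", tsn);
}

int main(void)
{
        /* Four TSNs, straddling the 2^32 wrap. */
        renege_range(0xfffffffeu, 0x1u, show);
        return 0;
}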
From d003b41b801124b96337973b01eada6a83673d23 Mon Sep 17 00:00:00 2001
From: "Lee A. Roberts"
Date: Thu, 28 Feb 2013 04:37:30 +0000
Subject: [PATCH 4/4] sctp: fix association hangs due to partial delivery
 errors

In sctp_ulpq_tail_data(), use return values 0 and 1 to indicate whether
a complete event (with MSG_EOR set) was delivered.  A return value of
-ENOMEM continues to indicate that an out-of-memory condition was
encountered.

In sctp_ulpq_retrieve_partial() and sctp_ulpq_retrieve_first(), correct
the message reassembly logic for SCTP partial delivery.  Change the
logic to ensure that as much data as possible is sent with the initial
partial delivery and that subsequent partial deliveries contain all
available data.

In sctp_ulpq_partial_delivery(), attempt partial delivery only if the
data on the head of the reassembly queue is at or before the cumulative
TSN ACK point.

In sctp_ulpq_renege(), use the modified return values from
sctp_ulpq_tail_data() to choose whether to attempt partial delivery or
to drain the reassembly queue as a means to reduce memory pressure.
Remove the call to sctp_tsnmap_mark(), as this is handled correctly by
the call to sctp_ulpq_tail_data().

Signed-off-by: Lee A. Roberts
Acked-by: Vlad Yasevich
Acked-by: Neil Horman
---
 net/sctp/ulpqueue.c | 54 ++++++++++++++++++++++++++++++++++++---------
 1 file changed, 43 insertions(+), 11 deletions(-)

diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index f221fbbc80ac29..0fd5b3d2df0315 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
         struct sk_buff_head temp;
         struct sctp_ulpevent *event;
+        int event_eor = 0;
 
         /* Create an event from the incoming chunk. */
         event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
         /* Send event to the ULP.  'event' is the sctp_ulpevent for
          * very first SKB on the 'temp' list.
          */
-        if (event)
+        if (event) {
+                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                 sctp_ulpq_tail_event(ulpq, event);
+        }
 
-        return 0;
+        return event_eor;
 }
 
 /* Add a new event for propagation to the ULP.  */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
                 ctsn = cevent->tsn;
 
                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+                case SCTP_DATA_FIRST_FRAG:
+                        if (!first_frag)
+                                return NULL;
+                        goto done;
                 case SCTP_DATA_MIDDLE_FRAG:
                         if (!first_frag) {
                                 first_frag = pos;
                                 next_tsn = ctsn + 1;
                                 last_frag = pos;
-                        } else if (next_tsn == ctsn)
+                        } else if (next_tsn == ctsn) {
                                 next_tsn++;
-                        else
+                                last_frag = pos;
+                        } else
                                 goto done;
                         break;
                 case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
                         } else
                                 goto done;
                         break;
+
+                case SCTP_DATA_LAST_FRAG:
+                        if (!first_frag)
+                                return NULL;
+                        else
+                                goto done;
+                        break;
+
                 default:
                         return NULL;
                 }
@@ -1025,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
         struct sctp_ulpevent *event;
         struct sctp_association *asoc;
         struct sctp_sock *sp;
+        __u32 ctsn;
+        struct sk_buff *skb;
 
         asoc = ulpq->asoc;
         sp = sctp_sk(asoc->base.sk);
 
         /* If the association is already in Partial Delivery mode
-         * we have noting to do.
+         * we have nothing to do.
          */
         if (ulpq->pd_mode)
                 return;
 
+        /* Data must be at or below the Cumulative TSN ACK Point to
+         * start partial delivery.
+         */
+        skb = skb_peek(&asoc->ulpq.reasm);
+        if (skb != NULL) {
+                ctsn = sctp_skb2event(skb)->tsn;
+                if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+                        return;
+        }
+
         /* If the user enabled fragment interleave socket option,
          * multiple associations can enter partial delivery.
          * Otherwise, we can only enter partial delivery if the
@@ -1077,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
         }
         /* If able to free enough room, accept this chunk. */
         if (chunk && (freed >= needed)) {
-                __u32 tsn;
-                tsn = ntohl(chunk->subh.data_hdr->tsn);
-                sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-                sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-                sctp_ulpq_partial_delivery(ulpq, gfp);
+                int retval;
+                retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+                /*
+                 * Enter partial delivery if chunk has not been
+                 * delivered; otherwise, drain the reassembly queue.
+                 */
+                if (retval <= 0)
+                        sctp_ulpq_partial_delivery(ulpq, gfp);
+                else if (retval == 1)
+                        sctp_ulpq_reasm_drain(ulpq);
         }
 
         sk_mem_reclaim(asoc->base.sk);
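
Patch 4's renege path hinges on the new return-value contract of
sctp_ulpq_tail_data(): negative for -ENOMEM, 0 when data was queued but no
complete message (MSG_EOR) reached the ULP, and 1 when a complete message
was delivered.  The user-space sketch below shows the caller's branching;
tail_data_stub() and renege_accept() are stand-ins invented for the example.

#include <stdio.h>
#include <errno.h>

/* Stand-in for sctp_ulpq_tail_data() under the new contract:
 *   < 0  -ENOMEM, the chunk could not be turned into an event
 *     0  data queued, but no complete message (MSG_EOR) delivered
 *     1  a complete message was delivered to the ULP
 */
static int tail_data_stub(int delivered_complete_msg, int oom)
{
        if (oom)
                return -ENOMEM;
        return delivered_complete_msg ? 1 : 0;
}

/* Mirrors the branch added to sctp_ulpq_renege() by patch 4. */
static void renege_accept(int delivered_complete_msg, int oom)
{
        int retval = tail_data_stub(delivered_complete_msg, oom);

        if (retval <= 0)        /* nothing complete went up: try PD */
                puts("attempt partial delivery");
        else                    /* MSG_EOR went up: drain reassembly */
                puts("drain the reassembly queue");
}

int main(void)
{
        renege_accept(0, 0);    /* 0: partial delivery path */
        renege_accept(1, 0);    /* 1: drain path */
        renege_accept(0, 1);    /* -ENOMEM: also tries partial delivery */
        return 0;
}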