From f5bd5aff76177bc7d5b17fbeae133937892bc9bf Mon Sep 17 00:00:00 2001 From: Brian Date: Thu, 27 Feb 2020 13:06:02 -0700 Subject: [PATCH] Multi ABD Type Adding the mutli abd type, which allows for linear and scatter abd's to be chained together into a single abd. Signed-off-by: Brian Authored-by: Mark Maybee --- include/sys/abd.h | 25 +- module/os/freebsd/zfs/abd.c | 443 ++++++++++++++++++++---- module/os/linux/zfs/abd.c | 587 ++++++++++++++++++++++++++++---- module/os/linux/zfs/vdev_disk.c | 50 +-- module/zfs/vdev_queue.c | 48 ++- 5 files changed, 956 insertions(+), 197 deletions(-) diff --git a/include/sys/abd.h b/include/sys/abd.h index 82b73589bbef..90bf27ba4d5f 100644 --- a/include/sys/abd.h +++ b/include/sys/abd.h @@ -36,17 +36,21 @@ extern "C" { #endif typedef enum abd_flags { - ABD_FLAG_LINEAR = 1 << 0, /* is buffer linear (or scattered)? */ - ABD_FLAG_OWNER = 1 << 1, /* does it own its data buffers? */ - ABD_FLAG_META = 1 << 2, /* does this represent FS metadata? */ - ABD_FLAG_MULTI_ZONE = 1 << 3, /* pages split over memory zones */ - ABD_FLAG_MULTI_CHUNK = 1 << 4, /* pages split over multiple chunks */ - ABD_FLAG_LINEAR_PAGE = 1 << 5, /* linear but allocd from page */ + ABD_FLAG_LINEAR = 1 << 0, /* is buffer linear (or scattered)? */ + ABD_FLAG_OWNER = 1 << 1, /* does it own its data buffers? */ + ABD_FLAG_META = 1 << 2, /* does this represent FS metadata? */ + ABD_FLAG_MULTI_ZONE = 1 << 3, /* pages split over memory zones */ + ABD_FLAG_MULTI_CHUNK = 1 << 4, /* pages split over multiple chunks */ + ABD_FLAG_LINEAR_PAGE = 1 << 5, /* linear but allocd from page */ + ABD_FLAG_MULTI_LIST = 1 << 6, /* mult ABDs chained together */ + ABD_FLAG_MULTI_FREE = 1 << 7, /* mult ABD is responsible for mem */ + ABD_FLAG_ZEROS = 1 << 8, /* ABD for zero-filled buffer */ } abd_flags_t; typedef struct abd { abd_flags_t abd_flags; uint_t abd_size; /* excludes scattered abd_offset */ + list_node_t multi_link; struct abd *abd_parent; zfs_refcount_t abd_children; union { @@ -64,6 +68,9 @@ typedef struct abd { void *abd_buf; struct scatterlist *abd_sgl; /* for LINEAR_PAGE */ } abd_linear; + struct abd_multi { + list_t abd_chain; + } abd_multi; } abd_u; } abd_t; @@ -91,11 +98,14 @@ abd_is_linear_page(abd_t *abd) abd_t *abd_alloc(size_t, boolean_t); abd_t *abd_alloc_linear(size_t, boolean_t); +abd_t *abd_alloc_multi(void); abd_t *abd_alloc_for_io(size_t, boolean_t); abd_t *abd_alloc_sametype(abd_t *, size_t); +void abd_add_child(abd_t *, abd_t *, boolean_t); void abd_free(abd_t *); abd_t *abd_get_offset(abd_t *, size_t); abd_t *abd_get_offset_size(abd_t *, size_t, size_t); +abd_t *abd_get_zeros(size_t); abd_t *abd_get_from_buf(void *, size_t); void abd_put(abd_t *); @@ -126,8 +136,7 @@ int abd_cmp_buf_off(abd_t *, const void *, size_t, size_t); void abd_zero_off(abd_t *, size_t, size_t); #if defined(_KERNEL) -unsigned int abd_scatter_bio_map_off(struct bio *, abd_t *, unsigned int, - size_t); +unsigned int abd_bio_map_off(struct bio *, abd_t *, unsigned int, size_t); unsigned long abd_nr_pages_off(abd_t *, unsigned int, size_t); #endif diff --git a/module/os/freebsd/zfs/abd.c b/module/os/freebsd/zfs/abd.c index 888a113a4291..193dbe97f2d5 100644 --- a/module/os/freebsd/zfs/abd.c +++ b/module/os/freebsd/zfs/abd.c @@ -133,6 +133,14 @@ static abd_stats_t abd_stats = { #define ABDSTAT_BUMP(stat) ABDSTAT_INCR(stat, 1) #define ABDSTAT_BUMPDOWN(stat) ABDSTAT_INCR(stat, -1) +#define ABD_MULTI(abd) (abd->abd_u.abd_multi) + +static inline boolean_t +abd_is_multi(abd_t *abd) +{ + return ((abd->abd_flags & 
ABD_FLAG_MULTI_LIST) != 0); +} + /* * It is possible to make all future ABDs be linear by setting this to B_FALSE. * Otherwise, ABDs are allocated scattered by default unless the caller uses @@ -161,6 +169,15 @@ SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_chunk_size, CTLFLAG_RDTUN, kmem_cache_t *abd_chunk_cache; static kstat_t *abd_ksp; +/* + * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are + * just a single zero'd page. This allows us to conserve memory by + * only using a single zero page for the scatterlist. + */ +abd_t *abd_zero_scatter = NULL; +static char *abd_zero_buf = NULL; + +#define ABD_ZERO_PAGE (abd_zero_buf) extern inline boolean_t abd_is_linear(abd_t *abd); extern inline void abd_copy(abd_t *dabd, abd_t *sabd, size_t size); @@ -183,6 +200,83 @@ abd_free_chunk(void *c) kmem_cache_free(abd_chunk_cache, c); } +static inline size_t +abd_chunkcnt_for_bytes(size_t size) +{ + return (P2ROUNDUP(size, zfs_abd_chunk_size) / zfs_abd_chunk_size); +} + +static inline size_t +abd_scatter_chunkcnt(abd_t *abd) +{ + ASSERT(!abd_is_linear(abd)); + return (abd_chunkcnt_for_bytes( + abd->abd_u.abd_scatter.abd_offset + abd->abd_size)); +} + +static inline abd_t * +abd_alloc_struct(size_t chunkcnt) +{ + size_t size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]); + abd_t *abd = kmem_alloc(size, KM_PUSHPAGE); + ASSERT3P(abd, !=, NULL); + list_link_init(&abd->multi_link); + ABDSTAT_INCR(abdstat_struct_size, size); + + return (abd); +} + +static inline void +abd_free_struct(abd_t *abd) +{ + size_t chunkcnt = abd_is_linear(abd) ? 0 : abd_scatter_chunkcnt(abd); + int size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]); + kmem_free(abd, size); + ABDSTAT_INCR(abdstat_struct_size, -size); +} + +/* + * Allocating scatter ABD of size SPA_MAXBLOCKSIZE, where + * each page in the scatterlist will be set to ABD_ZERO_PAGE. 
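+ * With the default 4 KB chunk size this works out to
+ * 16 MB / 4 KB = 4096 chunk pointers, all aliasing the single zeroed
+ * buffer abd_zero_buf, so a full-size ABD of zeros costs one chunk of
+ * data rather than SPA_MAXBLOCKSIZE.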
+ */ +static void +abd_alloc_zero_scatter(void) +{ + size_t n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE); + abd_zero_buf = kmem_zalloc(zfs_abd_chunk_size, KM_SLEEP); + abd_zero_scatter = abd_alloc_struct(n); + + abd_zero_scatter->abd_flags = ABD_FLAG_OWNER; + abd_zero_scatter->abd_flags |= ABD_FLAG_ZEROS; + abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE; + abd_zero_scatter->abd_parent = NULL; + zfs_refcount_create(&abd_zero_scatter->abd_children); + + abd_zero_scatter->abd_u.abd_scatter.abd_offset = 0; + abd_zero_scatter->abd_u.abd_scatter.abd_chunk_size = + zfs_abd_chunk_size; + + for (int i = 0; i < n; i++) { + abd_zero_scatter->abd_u.abd_scatter.abd_chunks[i] = + ABD_ZERO_PAGE; + } + + ABDSTAT_BUMP(abdstat_scatter_cnt); + ABDSTAT_INCR(abdstat_scatter_data_size, zfs_abd_chunk_size); +} + +static void +abd_free_zero_scatter(void) +{ + zfs_refcount_destroy(&abd_zero_scatter->abd_children); + ABDSTAT_BUMPDOWN(abdstat_scatter_cnt); + ABDSTAT_INCR(abdstat_scatter_data_size, -(int)zfs_abd_chunk_size); + + abd_free_struct(abd_zero_scatter); + abd_zero_scatter = NULL; + kmem_free(abd_zero_buf, zfs_abd_chunk_size); +} + void abd_init(void) { @@ -195,11 +289,15 @@ abd_init(void) abd_ksp->ks_data = &abd_stats; kstat_install(abd_ksp); } + + abd_alloc_zero_scatter(); } void abd_fini(void) { + abd_free_zero_scatter(); + if (abd_ksp != NULL) { kstat_delete(abd_ksp); abd_ksp = NULL; @@ -209,31 +307,24 @@ abd_fini(void) abd_chunk_cache = NULL; } -static inline size_t -abd_chunkcnt_for_bytes(size_t size) -{ - return (P2ROUNDUP(size, zfs_abd_chunk_size) / zfs_abd_chunk_size); -} - -static inline size_t -abd_scatter_chunkcnt(abd_t *abd) -{ - ASSERT(!abd_is_linear(abd)); - return (abd_chunkcnt_for_bytes( - abd->abd_u.abd_scatter.abd_offset + abd->abd_size)); -} - -static inline void +static void abd_verify(abd_t *abd) { ASSERT3U(abd->abd_size, >, 0); ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE); ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR | - ABD_FLAG_OWNER | ABD_FLAG_META)); + ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_FREE | + ABD_FLAG_ZEROS)); IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER)); IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER); if (abd_is_linear(abd)) { ASSERT3P(abd->abd_u.abd_linear.abd_buf, !=, NULL); + } else if (abd_is_multi(abd)) { + for (abd_t *cabd = list_head(&ABD_MULTI(abd).abd_chain); + cabd != NULL; + cabd = list_next(&ABD_MULTI(abd).abd_chain, cabd)) { + abd_verify(cabd); + } } else { ASSERT3U(abd->abd_u.abd_scatter.abd_offset, <, zfs_abd_chunk_size); @@ -245,26 +336,6 @@ abd_verify(abd_t *abd) } } -static inline abd_t * -abd_alloc_struct(size_t chunkcnt) -{ - size_t size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]); - abd_t *abd = kmem_alloc(size, KM_PUSHPAGE); - ASSERT3P(abd, !=, NULL); - ABDSTAT_INCR(abdstat_struct_size, size); - - return (abd); -} - -static inline void -abd_free_struct(abd_t *abd) -{ - size_t chunkcnt = abd_is_linear(abd) ? 0 : abd_scatter_chunkcnt(abd); - int size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]); - kmem_free(abd, size); - ABDSTAT_INCR(abdstat_struct_size, -size); -} - /* * Allocate an ABD, along with its own underlying data buffers. Use this if you * don't care whether the ABD is linear or not. 
@@ -305,6 +376,13 @@ abd_alloc(size_t size, boolean_t is_metadata) return (abd); } +abd_t * +abd_get_zeros(size_t size) +{ + ASSERT3U(size, <=, SPA_MAXBLOCKSIZE); + return (abd_get_offset_size(abd_zero_scatter, 0, size)); +} + static void abd_free_scatter(abd_t *abd) { @@ -370,6 +448,28 @@ abd_free_linear(abd_t *abd) abd_free_struct(abd); } +static void +abd_free_multi(abd_t *abd) +{ + ASSERT(abd_is_multi(abd)); + abd_t *cabd; + + while ((cabd = list_head(&ABD_MULTI(abd).abd_chain)) != NULL) { + list_remove(&ABD_MULTI(abd).abd_chain, cabd); + abd->abd_size -= cabd->abd_size; + if (cabd->abd_flags & ABD_FLAG_MULTI_FREE) { + if (cabd->abd_flags & ABD_FLAG_OWNER) + abd_free(cabd); + else + abd_put(cabd); + } + } + ASSERT3U(abd->abd_size, ==, 0); + list_destroy(&ABD_MULTI(abd).abd_chain); + zfs_refcount_destroy(&abd->abd_children); + abd_free_struct(abd); +} + /* * Free an ABD. Only use this on ABDs allocated with abd_alloc() or * abd_alloc_linear(). @@ -380,6 +480,8 @@ abd_free(abd_t *abd) if (abd == NULL) return; + ASSERT(!abd_is_multi(abd)); + ASSERT(!list_link_active(&abd->multi_link)); abd_verify(abd); ASSERT3P(abd->abd_parent, ==, NULL); ASSERT(abd->abd_flags & ABD_FLAG_OWNER); @@ -420,6 +522,87 @@ abd_alloc_for_io(size_t size, boolean_t is_metadata) return (abd_alloc_linear(size, is_metadata)); } +/* + * Create an ABD that will be the head of a list of ABD's. This is used + * to "chain" scatter/gather lists together when constructing aggregated + * IO's. To free this abd, abd_put() must be called. + */ +abd_t * +abd_alloc_multi(void) +{ + abd_t *abd; + + abd = abd_alloc_struct(0); + abd->abd_flags = ABD_FLAG_MULTI_LIST; + abd->abd_size = 0; + abd->abd_parent = NULL; + list_create(&ABD_MULTI(abd).abd_chain, + sizeof (abd_t), offsetof(abd_t, multi_link)); + zfs_refcount_create(&abd->abd_children); + return (abd); +} + +/* + * Add a child ABD to a chained list of ABD's. + */ +void +abd_add_child(abd_t *pabd, abd_t *cabd, boolean_t multi_mem_manage) +{ + ASSERT(abd_is_multi(pabd)); + abd_t *child_abd = NULL; + + if (list_link_active(&cabd->multi_link)) { + /* + * If the child ABD is already part of another + * multilist ABD then we must allocate a new + * ABD to use a seperate link. We mark the newly + * allocated ABD with ABD_FLAG_MULTI_FREE, before + * adding it to the multilist, to make the multilist + * aware that it is it's responsibility to call + * abd_put(). We use abd_get_offset() in order to + * just allocate a new ABD but avoid copying the data + * over into the newly allocated ABD. + * + * Cases were a ABD maybe part of multiple + * multilist ABD's are ditto blocks and when + * vdev_label_write() is called (see vdev_label.c). + */ + child_abd = abd_get_offset(cabd, 0); + child_abd->abd_flags |= ABD_FLAG_MULTI_FREE; + } else { + child_abd = cabd; + if (multi_mem_manage) + child_abd->abd_flags |= ABD_FLAG_MULTI_FREE; + } + ASSERT3P(child_abd, !=, NULL); + ASSERT3U(child_abd->abd_size, >, 0); + + list_insert_tail(&ABD_MULTI(pabd).abd_chain, child_abd); + pabd->abd_size += child_abd->abd_size; +} + +/* + * Locate the child abd for the supplied offset. + * Return a new offset relative to the child. 
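+ * The offset must lie within the chained ABD; the walk asserts if it
+ * runs past the last child.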
+ */ +static abd_t * +abd_find_child_off(abd_t *abd, size_t *off) +{ + ASSERT(abd_is_multi(abd)); + abd_t *cabd; + + for (cabd = list_head(&ABD_MULTI(abd).abd_chain); cabd != NULL; + cabd = list_next(&ABD_MULTI(abd).abd_chain, cabd)) { + + if (*off >= cabd->abd_size) + *off -= cabd->abd_size; + else + break; + } + ASSERT(cabd != NULL); + return (cabd); +} + /* * Allocate a new ABD to point to offset off of sabd. It shares the underlying * buffer data with sabd. Use abd_put() to free. sabd must not be freed while @@ -446,6 +629,22 @@ abd_get_offset_impl(abd_t *sabd, size_t off, size_t size) abd->abd_u.abd_linear.abd_buf = (char *)sabd->abd_u.abd_linear.abd_buf + off; + } else if (abd_is_multi(sabd)) { + size_t left = size; + abd = abd_alloc_multi(); + + for (abd_t *cabd = abd_find_child_off(sabd, &off); + cabd != NULL && left > 0; + cabd = list_next(&ABD_MULTI(sabd).abd_chain, cabd)) { + abd_t *nabd = cabd; + int csize = MIN(left, cabd->abd_size - off); + + nabd = abd_get_offset_impl(cabd, off, csize); + abd_add_child(abd, nabd, B_TRUE); + left -= csize; + off = 0; + } + ASSERT3U(left, ==, 0); } else { size_t new_offset = sabd->abd_u.abd_scatter.abd_offset + off; size_t chunkcnt = abd_scatter_chunkcnt(sabd) - @@ -534,13 +733,20 @@ abd_put(abd_t *abd) if (abd == NULL) return; abd_verify(abd); - ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER)); if (abd->abd_parent != NULL) { (void) zfs_refcount_remove_many(&abd->abd_parent->abd_children, abd->abd_size, abd); } + if (abd_is_multi(abd)) { + abd_free_multi(abd); + return; + } + + ASSERT(!list_link_active(&abd->multi_link)); + ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER)); + zfs_refcount_destroy(&abd->abd_children); abd_free_struct(abd); } @@ -683,6 +889,7 @@ abd_iter_scatter_chunk_index(struct abd_iter *aiter) static void abd_iter_init(struct abd_iter *aiter, abd_t *abd) { + ASSERT(!abd_is_multi(abd)); abd_verify(abd); aiter->iter_abd = abd; aiter->iter_pos = 0; @@ -690,6 +897,16 @@ abd_iter_init(struct abd_iter *aiter, abd_t *abd) aiter->iter_mapsize = 0; } +/* + * This is just a helper function to see if we have exhausted the + * abd_iter and reached the end. + */ +static boolean_t +abd_iter_at_end(struct abd_iter *aiter) +{ + return (aiter->iter_pos == aiter->iter_abd->abd_size); +} + /* * Advance the iterator by a certain amount. Cannot be called when a chunk is * in use. This can be safely called when the aiter has already exhausted, in @@ -702,12 +919,55 @@ abd_iter_advance(struct abd_iter *aiter, size_t amount) ASSERT0(aiter->iter_mapsize); /* There's nothing left to advance to, so do nothing */ - if (aiter->iter_pos == aiter->iter_abd->abd_size) + if (abd_iter_at_end(aiter)) return; aiter->iter_pos += amount; } +/* + * Initializes an abd_iter based on whether the abd is a chain of ABD's + * or just a single ABD. + */ +static inline abd_t * +abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off) +{ + abd_t *cabd = NULL; + + if (abd_is_multi(abd)) { + cabd = abd_find_child_off(abd, &off); + if (cabd) { + abd_iter_init(aiter, cabd); + abd_iter_advance(aiter, off); + } + } else { + abd_iter_init(aiter, abd); + abd_iter_advance(aiter, off); + } + return (cabd); +} + +/* + * Advances an abd_iter. We have to be careful with chains of ABD's as + * advancing could mean that we are at the end of a particular ABD and + * must grab the next one from the chain. 
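+ * For chained ABDs this returns the child the iterator now references,
+ * or NULL once the chain is exhausted.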
+ */ +static inline abd_t * +abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter, + size_t len) +{ + abd_iter_advance(aiter, len); + if (abd_is_multi(abd) && abd_iter_at_end(aiter)) { + ASSERT3P(cabd, !=, NULL); + cabd = list_next(&ABD_MULTI(abd).abd_chain, cabd); + if (cabd) { + abd_iter_init(aiter, cabd); + abd_iter_advance(aiter, 0); + } + } + return (cabd); +} + /* * Map the current chunk into aiter. This can be safely called when the aiter * has already exhausted, in which case this does nothing. @@ -726,7 +986,7 @@ abd_iter_map(struct abd_iter *aiter) aiter->iter_abd->abd_u.abd_scatter.abd_chunk_size); /* There's nothing left to iterate over, so do nothing */ - if (aiter->iter_pos == aiter->iter_abd->abd_size) + if (abd_iter_at_end(aiter)) return; if (abd_is_linear(aiter->iter_abd)) { @@ -751,7 +1011,7 @@ static void abd_iter_unmap(struct abd_iter *aiter) { /* There's nothing left to unmap, so do nothing */ - if (aiter->iter_pos == aiter->iter_abd->abd_size) + if (abd_iter_at_end(aiter)) return; ASSERT3P(aiter->iter_mapaddr, !=, NULL); @@ -767,14 +1027,20 @@ abd_iterate_func(abd_t *abd, size_t off, size_t size, { int ret = 0; struct abd_iter aiter; + boolean_t abd_multi; + abd_t *c_abd; abd_verify(abd); ASSERT3U(off + size, <=, abd->abd_size); - abd_iter_init(&aiter, abd); - abd_iter_advance(&aiter, off); + abd_multi = abd_is_multi(abd); + c_abd = abd_init_abd_iter(abd, &aiter, off); while (size > 0) { + /* If we are at the end of multi chain abd we are done */ + if (abd_multi && !c_abd) + break; + abd_iter_map(&aiter); size_t len = MIN(aiter.iter_mapsize, size); @@ -788,7 +1054,7 @@ abd_iterate_func(abd_t *abd, size_t off, size_t size, break; size -= len; - abd_iter_advance(&aiter, len); + c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len); } return (ret); @@ -895,6 +1161,8 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, { int ret = 0; struct abd_iter daiter, saiter; + boolean_t dabd_is_multi, sabd_is_multi; + abd_t *c_dabd, *c_sabd; abd_verify(dabd); abd_verify(sabd); @@ -902,12 +1170,17 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, ASSERT3U(doff + size, <=, dabd->abd_size); ASSERT3U(soff + size, <=, sabd->abd_size); - abd_iter_init(&daiter, dabd); - abd_iter_init(&saiter, sabd); - abd_iter_advance(&daiter, doff); - abd_iter_advance(&saiter, soff); + dabd_is_multi = abd_is_multi(dabd); + sabd_is_multi = abd_is_multi(sabd); + c_dabd = abd_init_abd_iter(dabd, &daiter, doff); + c_sabd = abd_init_abd_iter(sabd, &saiter, soff); while (size > 0) { + /* if we are at the end of a multi abd chain we are done */ + if ((dabd_is_multi && !c_dabd) || + (sabd_is_multi && !c_sabd)) + break; + abd_iter_map(&daiter); abd_iter_map(&saiter); @@ -926,8 +1199,10 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, break; size -= len; - abd_iter_advance(&daiter, len); - abd_iter_advance(&saiter, len); + c_dabd = + abd_advance_abd_iter(dabd, c_dabd, &daiter, len); + c_sabd = + abd_advance_abd_iter(sabd, c_sabd, &saiter, len); } return (ret); @@ -987,29 +1262,47 @@ abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd, struct abd_iter caiters[3]; struct abd_iter daiter = {0}; void *caddrs[3]; + abd_t *c_cabds[3]; + abd_t *c_dabd = NULL; + boolean_t cabds_is_multi[3]; + boolean_t dabd_is_multi = B_FALSE; ASSERT3U(parity, <=, 3); - for (i = 0; i < parity; i++) - abd_iter_init(&caiters[i], cabds[i]); + for (i = 0; i < parity; i++) { + cabds_is_multi[i] = abd_is_multi(cabds[i]); + c_cabds[i] = abd_init_abd_iter(cabds[i], 
&caiters[i], 0); + } - if (dabd) - abd_iter_init(&daiter, dabd); + if (dabd) { + dabd_is_multi = abd_is_multi(dabd); + c_dabd = abd_init_abd_iter(dabd, &daiter, 0); + } ASSERT3S(dsize, >=, 0); critical_enter(); while (csize > 0) { - len = csize; - - if (dabd && dsize > 0) - abd_iter_map(&daiter); + /* if we are at the end of a multi abd chain we are done */ + if (dabd_is_multi && !c_dabd) + break; for (i = 0; i < parity; i++) { + /* + * If we are at the end of a multi abd chain we are + * done. + */ + if (cabds_is_multi[i] && !c_cabds[i]) + break; abd_iter_map(&caiters[i]); caddrs[i] = caiters[i].iter_mapaddr; } + len = csize; + + if (dabd && dsize > 0) + abd_iter_map(&daiter); + switch (parity) { case 3: len = MIN(caiters[2].iter_mapsize, len); @@ -1041,12 +1334,16 @@ abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd, for (i = parity-1; i >= 0; i--) { abd_iter_unmap(&caiters[i]); - abd_iter_advance(&caiters[i], len); + c_cabds[i] = + abd_advance_abd_iter(cabds[i], c_cabds[i], + &caiters[i], len); } if (dabd && dsize > 0) { abd_iter_unmap(&daiter); - abd_iter_advance(&daiter, dlen); + c_dabd = + abd_advance_abd_iter(dabd, c_dabd, &daiter, + dlen); dsize -= dlen; } @@ -1080,18 +1377,34 @@ abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds, struct abd_iter citers[3]; struct abd_iter xiters[3]; void *caddrs[3], *xaddrs[3]; + boolean_t cabds_is_multi[3]; + boolean_t tabds_is_multi[3]; + abd_t *c_cabds[3]; + abd_t *c_tabds[3]; ASSERT3U(parity, <=, 3); for (i = 0; i < parity; i++) { - abd_iter_init(&citers[i], cabds[i]); - abd_iter_init(&xiters[i], tabds[i]); + cabds_is_multi[i] = abd_is_multi(cabds[i]); + tabds_is_multi[i] = abd_is_multi(tabds[i]); + c_cabds[i] = + abd_init_abd_iter(cabds[i], &citers[i], 0); + c_tabds[i] = + abd_init_abd_iter(tabds[i], &xiters[i], 0); } critical_enter(); while (tsize > 0) { for (i = 0; i < parity; i++) { + /* + * If we are at the end of a multi abd chain we + * are done. + */ + if (cabds_is_multi[i] && !c_cabds[i]) + break; + if (tabds_is_multi[i] && !c_tabds[i]) + break; abd_iter_map(&citers[i]); abd_iter_map(&xiters[i]); caddrs[i] = citers[i].iter_mapaddr; @@ -1123,8 +1436,12 @@ abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds, for (i = parity-1; i >= 0; i--) { abd_iter_unmap(&xiters[i]); abd_iter_unmap(&citers[i]); - abd_iter_advance(&xiters[i], len); - abd_iter_advance(&citers[i], len); + c_tabds[i] = + abd_advance_abd_iter(tabds[i], c_tabds[i], + &xiters[i], len); + c_cabds[i] = + abd_advance_abd_iter(cabds[i], c_cabds[i], + &citers[i], len); } tsize -= len; diff --git a/module/os/linux/zfs/abd.c b/module/os/linux/zfs/abd.c index bc6f81000d48..5b0adc62a141 100644 --- a/module/os/linux/zfs/abd.c +++ b/module/os/linux/zfs/abd.c @@ -207,6 +207,14 @@ static abd_stats_t abd_stats = { #define abd_for_each_sg(abd, sg, n, i) \ for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i) +#define ABD_MULTI(abd) (abd->abd_u.abd_multi) + +static inline boolean_t +abd_is_multi(abd_t *abd) +{ + return ((abd->abd_flags & ABD_FLAG_MULTI_LIST) != 0); +} + /* see block comment above for description */ int zfs_abd_scatter_enabled = B_TRUE; unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1; @@ -235,6 +243,13 @@ unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1; */ int zfs_abd_scatter_min_size = 512 * 3; +/* + * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are + * just a single zero'd page. This allows us to conserve memory by + * only using a single zero page for the scatterlist. 
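+ * In kernel context that page is the global ZERO_PAGE(0); in user
+ * space a private zeroed page is allocated instead.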
+ */ +abd_t *abd_zero_scatter = NULL; + static kmem_cache_t *abd_cache = NULL; static kstat_t *abd_ksp; @@ -244,6 +259,24 @@ abd_chunkcnt_for_bytes(size_t size) return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE); } +static inline abd_t * +abd_alloc_struct(void) +{ + abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE); + ASSERT3P(abd, !=, NULL); + list_link_init(&abd->multi_link); + ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t)); + + return (abd); +} + +static inline void +abd_free_struct(abd_t *abd) +{ + kmem_cache_free(abd_cache, abd); + ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t)); +} + #ifdef _KERNEL /* * Mark zfs data pages so they can be excluded from kernel crash dumps @@ -441,11 +474,24 @@ abd_alloc_pages(abd_t *abd, size_t size) } #endif /* !CONFIG_HIGHMEM */ +/* + * This must be called if any of the sg_table allocation functions + * are called. + */ +static void +abd_free_sg_table(abd_t *abd) +{ + struct sg_table table; + + table.sgl = ABD_SCATTER(abd).abd_sgl; + table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents; + sg_free_table(&table); +} + static void abd_free_pages(abd_t *abd) { struct scatterlist *sg = NULL; - struct sg_table table; struct page *page; int nr_pages = ABD_SCATTER(abd).abd_nents; int order, i = 0; @@ -464,14 +510,60 @@ abd_free_pages(abd_t *abd) ASSERT3U(sg->length, <=, PAGE_SIZE << order); ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]); } + abd_free_sg_table(abd); +} - table.sgl = ABD_SCATTER(abd).abd_sgl; - table.nents = table.orig_nents = nr_pages; - sg_free_table(&table); +#define ABD_ZERO_PAGE (ZERO_PAGE(0)) + +/* + * Allocating scatter ABD of size SPA_MAXBLOCKSIZE, where + * each page in the scatterlist will be set to ABD_ZERO_PAGE. + */ +static void +abd_alloc_zero_scatter(void) +{ + struct scatterlist *sg = NULL; + struct sg_table table; + gfp_t gfp = __GFP_NOWARN | GFP_NOIO; + int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE); + int i = 0; + + while (sg_alloc_table(&table, nr_pages, gfp)) { + ABDSTAT_BUMP(abdstat_scatter_sg_table_retry); + schedule_timeout_interruptible(1); + } + ASSERT3U(table.nents, ==, nr_pages); + + abd_zero_scatter = abd_alloc_struct(); + abd_zero_scatter->abd_flags = ABD_FLAG_OWNER; + ABD_SCATTER(abd_zero_scatter).abd_offset = 0; + ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl; + ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages; + abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE; + abd_zero_scatter->abd_parent = NULL; + abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK + | ABD_FLAG_ZEROS; + zfs_refcount_create(&abd_zero_scatter->abd_children); + + abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) { + sg_set_page(sg, ABD_ZERO_PAGE, PAGESIZE, 0); + } + + ABDSTAT_BUMP(abdstat_scatter_cnt); + ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE); + ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk); } #else /* _KERNEL */ +/* + * In the kernel the macro ZERO_PAGE(0) returns a global shared page + * that is always zero. In the case of user space we will just + * return the allocated zero'd page abd_zero_buf. + */ +static char *abd_zero_buf = NULL; +#define ABD_ZERO_PAGE ((struct page *)abd_zero_buf) + #ifndef PAGE_SHIFT #define PAGE_SHIFT (highbit64(PAGESIZE)-1) #endif @@ -498,6 +590,18 @@ sg_init_table(struct scatterlist *sg, int nr) sg[nr - 1].end = 1; } +/* + * This must be called if any of the sg_table allocation functions + * are called. 
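+ * In user space the scatterlist is a plain vmem allocation, so this
+ * simply frees that array.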
+ */ +static void +abd_free_sg_table(abd_t *abd) +{ + int nents = ABD_SCATTER(abd).abd_nents; + vmem_free(ABD_SCATTER(abd).abd_sgl, + nents * sizeof (struct scatterlist)); +} + #define for_each_sg(sgl, sg, nr, i) \ for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg)) @@ -556,12 +660,59 @@ abd_free_pages(abd_t *abd) umem_free(p, PAGESIZE); } } + abd_free_sg_table(abd); +} + +static void +abd_alloc_zero_scatter(void) +{ + unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE); + struct scatterlist *sg; + int i; + + abd_zero_buf = umem_zalloc(PAGESIZE, KM_SLEEP); + abd_zero_scatter = abd_alloc_struct(); + abd_zero_scatter->abd_flags = ABD_FLAG_OWNER; + abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK + | ABD_FLAG_ZEROS; + ABD_SCATTER(abd_zero_scatter).abd_offset = 0; + ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages; + abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE; + abd_zero_scatter->abd_parent = NULL; + zfs_refcount_create(&abd_zero_scatter->abd_children); + ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages * + sizeof (struct scatterlist), KM_SLEEP); + + sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages); + + abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) { + sg_set_page(sg, ABD_ZERO_PAGE, PAGESIZE, 0); + } - vmem_free(ABD_SCATTER(abd).abd_sgl, n * sizeof (struct scatterlist)); + ABDSTAT_BUMP(abdstat_scatter_cnt); + ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE); + ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk); } #endif /* _KERNEL */ +static void +abd_free_zero_scatter(void) +{ + zfs_refcount_destroy(&abd_zero_scatter->abd_children); + ABDSTAT_BUMPDOWN(abdstat_scatter_cnt); + ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE); + ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk); + + abd_free_sg_table(abd_zero_scatter); + abd_free_struct(abd_zero_scatter); + abd_zero_scatter = NULL; +#if !defined(_KERNEL) + umem_free(abd_zero_buf, PAGESIZE); +#endif /* _KERNEL */ +} + + void abd_init(void) { @@ -582,11 +733,15 @@ abd_init(void) abd_ksp->ks_data = &abd_stats; kstat_install(abd_ksp); } + + abd_alloc_zero_scatter(); } void abd_fini(void) { + abd_free_zero_scatter(); + if (abd_ksp != NULL) { kstat_delete(abd_ksp); abd_ksp = NULL; @@ -598,18 +753,25 @@ abd_fini(void) } } -static inline void +static void abd_verify(abd_t *abd) { ASSERT3U(abd->abd_size, >, 0); ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE); ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR | ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE | - ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE)); + ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_MULTI_LIST | + ABD_FLAG_MULTI_FREE | ABD_FLAG_ZEROS)); IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER)); IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER); if (abd_is_linear(abd)) { ASSERT3P(abd->abd_u.abd_linear.abd_buf, !=, NULL); + } else if (abd_is_multi(abd)) { + for (abd_t *cabd = list_head(&ABD_MULTI(abd).abd_chain); + cabd != NULL; + cabd = list_next(&ABD_MULTI(abd).abd_chain, cabd)) { + abd_verify(cabd); + } } else { size_t n; int i = 0; @@ -625,24 +787,6 @@ abd_verify(abd_t *abd) } } -static inline abd_t * -abd_alloc_struct(void) -{ - abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE); - - ASSERT3P(abd, !=, NULL); - ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t)); - - return (abd); -} - -static inline void -abd_free_struct(abd_t *abd) -{ - kmem_cache_free(abd_cache, abd); - ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t)); -} - /* * Allocate an ABD, 
along with its own underlying data buffers. Use this if you * don't care whether the ABD is linear or not. @@ -676,6 +820,16 @@ abd_alloc(size_t size, boolean_t is_metadata) return (abd); } +abd_t * +abd_get_zeros(size_t size) +{ + ASSERT3U(size, <=, SPA_MAXBLOCKSIZE); + ASSERT3U(size, >, 0); // ADDED + abd_t *abd = abd_get_offset_size(abd_zero_scatter, 0, size); + ASSERT3U(abd->abd_size, >, 0); // ADDED + return (abd); +} + static void abd_free_scatter(abd_t *abd) { @@ -749,6 +903,28 @@ abd_free_linear(abd_t *abd) abd_free_struct(abd); } +static void +abd_free_multi(abd_t *abd) +{ + ASSERT(abd_is_multi(abd)); + abd_t *cabd; + + while ((cabd = list_head(&ABD_MULTI(abd).abd_chain)) != NULL) { + list_remove(&ABD_MULTI(abd).abd_chain, cabd); + abd->abd_size -= cabd->abd_size; + if (cabd->abd_flags & ABD_FLAG_MULTI_FREE) { + if (cabd->abd_flags & ABD_FLAG_OWNER) + abd_free(cabd); + else + abd_put(cabd); + } + } + ASSERT3U(abd->abd_size, ==, 0); + list_destroy(&ABD_MULTI(abd).abd_chain); + zfs_refcount_destroy(&abd->abd_children); + abd_free_struct(abd); +} + /* * Free an ABD. Only use this on ABDs allocated with abd_alloc() or * abd_alloc_linear(). @@ -756,6 +932,8 @@ abd_free_linear(abd_t *abd) void abd_free(abd_t *abd) { + ASSERT(!abd_is_multi(abd)); + ASSERT(!list_link_active(&abd->multi_link)); abd_verify(abd); ASSERT3P(abd->abd_parent, ==, NULL); ASSERT(abd->abd_flags & ABD_FLAG_OWNER); @@ -799,21 +977,104 @@ abd_alloc_sametype(abd_t *sabd, size_t size) abd_t * abd_alloc_for_io(size_t size, boolean_t is_metadata) { + ASSERT3U(size, >, 0); // ADDED return (abd_alloc(size, is_metadata)); } +/* + * Create an ABD that will be the head of a list of ABD's. This is used + * to "chain" scatter/gather lists together when constructing aggregated + * IO's. To free this abd, abd_put() must be called. + */ +abd_t * +abd_alloc_multi(void) +{ + abd_t *abd; + + abd = abd_alloc_struct(); + abd->abd_flags = ABD_FLAG_MULTI_LIST; + abd->abd_size = 0; + abd->abd_parent = NULL; + list_create(&ABD_MULTI(abd).abd_chain, + sizeof (abd_t), offsetof(abd_t, multi_link)); + zfs_refcount_create(&abd->abd_children); + return (abd); +} + +/* + * Add a child ABD to a chained list of ABD's. + */ +void +abd_add_child(abd_t *pabd, abd_t *cabd, boolean_t multi_mem_manage) +{ + ASSERT(abd_is_multi(pabd)); + abd_t *child_abd = NULL; + + if (list_link_active(&cabd->multi_link)) { + /* + * If the child ABD is already part of another + * multilist ABD then we must allocate a new + * ABD to use a seperate link. We mark the newly + * allocated ABD with ABD_FLAG_MULTI_FREE, before + * adding it to the multilist, to make the multilist + * aware that it is it's responsibility to call + * abd_put(). We use abd_get_offset() in order to + * just allocate a new ABD but avoid copying the data + * over into the newly allocated ABD. + * + * Cases were a ABD maybe part of multiple + * multilist ABD's are ditto blocks and when + * vdev_label_write() is called (see vdev_label.c). + */ + child_abd = abd_get_offset(cabd, 0); + child_abd->abd_flags |= ABD_FLAG_MULTI_FREE; + } else { + child_abd = cabd; + if (multi_mem_manage) + child_abd->abd_flags |= ABD_FLAG_MULTI_FREE; + } + ASSERT3P(child_abd, !=, NULL); + ASSERT3U(child_abd->abd_size, >, 0); // ADDED + + list_insert_tail(&ABD_MULTI(pabd).abd_chain, child_abd); + pabd->abd_size += child_abd->abd_size; +} + +/* + * Locate the child abd for the supplied offset. + * Return a new offset relative to the child. 
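+ * Callers must pass an offset that falls within the chained ABD's
+ * total size.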
+ */ +static abd_t * +abd_find_child_off(abd_t *abd, size_t *off) +{ + ASSERT(abd_is_multi(abd)); + abd_t *cabd; + + for (cabd = list_head(&ABD_MULTI(abd).abd_chain); cabd != NULL; + cabd = list_next(&ABD_MULTI(abd).abd_chain, cabd)) { + + if (*off >= cabd->abd_size) + *off -= cabd->abd_size; + else + break; + } + ASSERT(cabd != NULL); + return (cabd); +} + /* * Allocate a new ABD to point to offset off of sabd. It shares the underlying * buffer data with sabd. Use abd_put() to free. sabd must not be freed while * any derived ABDs exist. */ -static inline abd_t * +static abd_t * abd_get_offset_impl(abd_t *sabd, size_t off, size_t size) { - abd_t *abd; + abd_t *abd = NULL; abd_verify(sabd); ASSERT3U(off, <=, sabd->abd_size); + ASSERT3U(size, >, 0); // ADDED if (abd_is_linear(sabd)) { abd = abd_alloc_struct(); @@ -827,6 +1088,22 @@ abd_get_offset_impl(abd_t *sabd, size_t off, size_t size) abd->abd_u.abd_linear.abd_buf = (char *)sabd->abd_u.abd_linear.abd_buf + off; + } else if (abd_is_multi(sabd)) { + size_t left = size; + abd = abd_alloc_multi(); + + for (abd_t *cabd = abd_find_child_off(sabd, &off); + cabd != NULL && left > 0; + cabd = list_next(&ABD_MULTI(sabd).abd_chain, cabd)) { + abd_t *nabd = cabd; + int csize = MIN(left, cabd->abd_size - off); + + nabd = abd_get_offset_impl(cabd, off, csize); + abd_add_child(abd, nabd, B_TRUE); + left -= csize; + off = 0; + } + ASSERT3U(left, ==, 0); } else { int i = 0; struct scatterlist *sg = NULL; @@ -856,7 +1133,6 @@ abd_get_offset_impl(abd_t *sabd, size_t off, size_t size) abd->abd_parent = sabd; zfs_refcount_create(&abd->abd_children); (void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd); - return (abd); } @@ -874,7 +1150,7 @@ abd_t * abd_get_offset_size(abd_t *sabd, size_t off, size_t size) { ASSERT3U(off + size, <=, sabd->abd_size); - + ASSERT3U(size, >, 0); // ADDED return (abd_get_offset_impl(sabd, off, size)); } @@ -912,13 +1188,20 @@ void abd_put(abd_t *abd) { abd_verify(abd); - ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER)); if (abd->abd_parent != NULL) { (void) zfs_refcount_remove_many(&abd->abd_parent->abd_children, abd->abd_size, abd); } + if (abd_is_multi(abd)) { + abd_free_multi(abd); + return; + } + + ASSERT(!list_link_active(&abd->multi_link)); + ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER)); + zfs_refcount_destroy(&abd->abd_children); abd_free_struct(abd); } @@ -952,7 +1235,6 @@ abd_borrow_buf(abd_t *abd, size_t n) buf = zio_buf_alloc(n); } (void) zfs_refcount_add_many(&abd->abd_children, n, buf); - return (buf); } @@ -1059,8 +1341,9 @@ struct abd_iter { * Initialize the abd_iter. */ static void -abd_iter_init(struct abd_iter *aiter, abd_t *abd, int km_type) +abd_iter_init(struct abd_iter *aiter, abd_t *abd) { + ASSERT(!abd_is_multi(abd)); abd_verify(abd); aiter->iter_abd = abd; aiter->iter_mapaddr = NULL; @@ -1075,6 +1358,16 @@ abd_iter_init(struct abd_iter *aiter, abd_t *abd, int km_type) } } +/* + * This is just a helper function to see if we have exhausted the + * abd_iter and reached the end. + */ +static boolean_t +abd_iter_at_end(struct abd_iter *aiter) +{ + return (aiter->iter_pos == aiter->iter_abd->abd_size); +} + /* * Advance the iterator by a certain amount. Cannot be called when a chunk is * in use. 
This can be safely called when the aiter has already exhausted, in @@ -1087,7 +1380,7 @@ abd_iter_advance(struct abd_iter *aiter, size_t amount) ASSERT0(aiter->iter_mapsize); /* There's nothing left to advance to, so do nothing */ - if (aiter->iter_pos == aiter->iter_abd->abd_size) + if (abd_iter_at_end(aiter)) return; aiter->iter_pos += amount; @@ -1104,6 +1397,49 @@ abd_iter_advance(struct abd_iter *aiter, size_t amount) } } +/* + * Initializes an abd_iter based on whether the abd is a chain of ABD's + * or just a single ABD. + */ +static inline abd_t * +abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off) +{ + abd_t *cabd = NULL; + + if (abd_is_multi(abd)) { + cabd = abd_find_child_off(abd, &off); + if (cabd) { + abd_iter_init(aiter, cabd); + abd_iter_advance(aiter, off); + } + } else { + abd_iter_init(aiter, abd); + abd_iter_advance(aiter, off); + } + return (cabd); +} + +/* + * Advances an abd_iter. We have to be careful with chains of ABD's as + * advancing could mean that we are at the end of a particular ABD and + * must grab the next one from the chain. + */ +static inline abd_t * +abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter, + size_t len) +{ + abd_iter_advance(aiter, len); + if (abd_is_multi(abd) && abd_iter_at_end(aiter)) { + ASSERT3P(cabd, !=, NULL); + cabd = list_next(&ABD_MULTI(abd).abd_chain, cabd); + if (cabd) { + abd_iter_init(aiter, cabd); + abd_iter_advance(aiter, 0); + } + } + return (cabd); +} + /* * Map the current chunk into aiter. This can be safely called when the aiter * has already exhausted, in which case this does nothing. @@ -1118,7 +1454,7 @@ abd_iter_map(struct abd_iter *aiter) ASSERT0(aiter->iter_mapsize); /* There's nothing left to iterate over, so do nothing */ - if (aiter->iter_pos == aiter->iter_abd->abd_size) + if (abd_iter_at_end(aiter)) return; if (abd_is_linear(aiter->iter_abd)) { @@ -1146,7 +1482,7 @@ static void abd_iter_unmap(struct abd_iter *aiter) { /* There's nothing left to unmap, so do nothing */ - if (aiter->iter_pos == aiter->iter_abd->abd_size) + if (abd_iter_at_end(aiter)) return; if (!abd_is_linear(aiter->iter_abd)) { @@ -1168,14 +1504,20 @@ abd_iterate_func(abd_t *abd, size_t off, size_t size, { int ret = 0; struct abd_iter aiter; + boolean_t abd_multi; + abd_t *c_abd; abd_verify(abd); ASSERT3U(off + size, <=, abd->abd_size); - abd_iter_init(&aiter, abd, 0); - abd_iter_advance(&aiter, off); + abd_multi = abd_is_multi(abd); + c_abd = abd_init_abd_iter(abd, &aiter, off); while (size > 0) { + /* If we are at the end of multi chain abd we are done */ + if (abd_multi && !c_abd) + break; + abd_iter_map(&aiter); size_t len = MIN(aiter.iter_mapsize, size); @@ -1189,7 +1531,7 @@ abd_iterate_func(abd_t *abd, size_t off, size_t size, break; size -= len; - abd_iter_advance(&aiter, len); + c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len); } return (ret); @@ -1296,6 +1638,8 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, { int ret = 0; struct abd_iter daiter, saiter; + boolean_t dabd_is_multi, sabd_is_multi; + abd_t *c_dabd, *c_sabd; abd_verify(dabd); abd_verify(sabd); @@ -1303,12 +1647,17 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, ASSERT3U(doff + size, <=, dabd->abd_size); ASSERT3U(soff + size, <=, sabd->abd_size); - abd_iter_init(&daiter, dabd, 0); - abd_iter_init(&saiter, sabd, 1); - abd_iter_advance(&daiter, doff); - abd_iter_advance(&saiter, soff); + dabd_is_multi = abd_is_multi(dabd); + sabd_is_multi = abd_is_multi(sabd); + c_dabd = 
abd_init_abd_iter(dabd, &daiter, doff); + c_sabd = abd_init_abd_iter(sabd, &saiter, soff); while (size > 0) { + /* if we are at the end of a multi abd chain we are done */ + if ((dabd_is_multi && !c_dabd) || + (sabd_is_multi && !c_sabd)) + break; + abd_iter_map(&daiter); abd_iter_map(&saiter); @@ -1327,8 +1676,10 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, break; size -= len; - abd_iter_advance(&daiter, len); - abd_iter_advance(&saiter, len); + c_dabd = + abd_advance_abd_iter(dabd, c_dabd, &daiter, len); + c_sabd = + abd_advance_abd_iter(sabd, c_sabd, &saiter, len); } return (ret); @@ -1389,29 +1740,47 @@ abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd, struct abd_iter daiter = {0}; void *caddrs[3]; unsigned long flags; + abd_t *c_cabds[3]; + abd_t *c_dabd = NULL; + boolean_t cabds_is_multi[3]; + boolean_t dabd_is_multi = B_FALSE; ASSERT3U(parity, <=, 3); - for (i = 0; i < parity; i++) - abd_iter_init(&caiters[i], cabds[i], i); + for (i = 0; i < parity; i++) { + cabds_is_multi[i] = abd_is_multi(cabds[i]); + c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], 0); + } - if (dabd) - abd_iter_init(&daiter, dabd, i); + if (dabd) { + dabd_is_multi = abd_is_multi(dabd); + c_dabd = abd_init_abd_iter(dabd, &daiter, 0); + } ASSERT3S(dsize, >=, 0); local_irq_save(flags); while (csize > 0) { - len = csize; - - if (dabd && dsize > 0) - abd_iter_map(&daiter); + /* if we are at the end of a multi abd chain we are done */ + if (dabd_is_multi && !c_dabd) + break; for (i = 0; i < parity; i++) { + /* + * If we are at the end of a multi abd chain we are + * done. + */ + if (cabds_is_multi[i] && !c_cabds[i]) + break; abd_iter_map(&caiters[i]); caddrs[i] = caiters[i].iter_mapaddr; } + len = csize; + + if (dabd && dsize > 0) + abd_iter_map(&daiter); + switch (parity) { case 3: len = MIN(caiters[2].iter_mapsize, len); @@ -1445,12 +1814,16 @@ abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd, for (i = parity-1; i >= 0; i--) { abd_iter_unmap(&caiters[i]); - abd_iter_advance(&caiters[i], len); + c_cabds[i] = + abd_advance_abd_iter(cabds[i], c_cabds[i], + &caiters[i], len); } if (dabd && dsize > 0) { abd_iter_unmap(&daiter); - abd_iter_advance(&daiter, dlen); + c_dabd = + abd_advance_abd_iter(dabd, c_dabd, &daiter, + dlen); dsize -= dlen; } @@ -1485,18 +1858,34 @@ abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds, struct abd_iter xiters[3]; void *caddrs[3], *xaddrs[3]; unsigned long flags; + boolean_t cabds_is_multi[3]; + boolean_t tabds_is_multi[3]; + abd_t *c_cabds[3]; + abd_t *c_tabds[3]; ASSERT3U(parity, <=, 3); for (i = 0; i < parity; i++) { - abd_iter_init(&citers[i], cabds[i], 2*i); - abd_iter_init(&xiters[i], tabds[i], 2*i+1); + cabds_is_multi[i] = abd_is_multi(cabds[i]); + tabds_is_multi[i] = abd_is_multi(tabds[i]); + c_cabds[i] = + abd_init_abd_iter(cabds[i], &citers[i], 0); + c_tabds[i] = + abd_init_abd_iter(tabds[i], &xiters[i], 0); } local_irq_save(flags); while (tsize > 0) { for (i = 0; i < parity; i++) { + /* + * If we are at the end of a multi abd chain we + * are done. 
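+			 * Each chain is advanced by the same mapped
+			 * length at the bottom of the outer loop.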
+ */ + if (cabds_is_multi[i] && !c_cabds[i]) + break; + if (tabds_is_multi[i] && !c_tabds[i]) + break; abd_iter_map(&citers[i]); abd_iter_map(&xiters[i]); caddrs[i] = citers[i].iter_mapaddr; @@ -1530,8 +1919,12 @@ abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds, for (i = parity-1; i >= 0; i--) { abd_iter_unmap(&xiters[i]); abd_iter_unmap(&citers[i]); - abd_iter_advance(&xiters[i], len); - abd_iter_advance(&citers[i], len); + c_tabds[i] = + abd_advance_abd_iter(tabds[i], c_tabds[i], + &xiters[i], len); + c_cabds[i] = + abd_advance_abd_iter(cabds[i], c_cabds[i], + &citers[i], len); } tsize -= len; @@ -1550,6 +1943,10 @@ abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off) { unsigned long pos; + while (abd_is_multi(abd)) + abd = abd_find_child_off(abd, &off); + + ASSERT(!abd_is_multi(abd)); if (abd_is_linear(abd)) pos = (unsigned long)abd_to_buf(abd) + off; else @@ -1559,22 +1956,88 @@ abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off) (pos >> PAGE_SHIFT); } +static unsigned int +bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size) +{ + unsigned int offset, size, i; + struct page *page; + + offset = offset_in_page(buf_ptr); + for (i = 0; i < bio->bi_max_vecs; i++) { + size = PAGE_SIZE - offset; + + if (bio_size <= 0) + break; + + if (size > bio_size) + size = bio_size; + + if (is_vmalloc_addr(buf_ptr)) + page = vmalloc_to_page(buf_ptr); + else + page = virt_to_page(buf_ptr); + + /* + * Some network related block device uses tcp_sendpage, which + * doesn't behave well when using 0-count page, this is a + * safety net to catch them. + */ + ASSERT3S(page_count(page), >, 0); + + if (bio_add_page(bio, page, size, offset) != size) + break; + + buf_ptr += size; + bio_size -= size; + offset = 0; + } + + return (bio_size); +} + /* - * bio_map for scatter ABD. + * bio_map for multi_list ABD. + */ +static unsigned int +abd_multi_bio_map_off(struct bio *bio, abd_t *abd, + unsigned int io_size, size_t off) +{ + ASSERT(abd_is_multi(abd)); + + for (abd_t *cabd = abd_find_child_off(abd, &off); + cabd != NULL; cabd = list_next(&ABD_MULTI(abd).abd_chain, cabd)) { + int remainder, size = MIN(io_size, cabd->abd_size - off); + remainder = abd_bio_map_off(bio, cabd, size, off); + io_size -= (size - remainder); + if (io_size == 0 || remainder > 0) + return (io_size); + off = 0; + } + ASSERT(io_size == 0); + return (io_size); +} + +/* + * bio_map for ABD. 
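+ * Dispatches to the linear, chained (multi), or scatter mapping path.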
* @off is the offset in @abd * Remaining IO size is returned */ unsigned int -abd_scatter_bio_map_off(struct bio *bio, abd_t *abd, +abd_bio_map_off(struct bio *bio, abd_t *abd, unsigned int io_size, size_t off) { int i; struct abd_iter aiter; - ASSERT(!abd_is_linear(abd)); ASSERT3U(io_size, <=, abd->abd_size - off); + if (abd_is_linear(abd)) + return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size)); + + ASSERT(!abd_is_linear(abd)); + if (abd_is_multi(abd)) + return (abd_multi_bio_map_off(bio, abd, io_size, off)); - abd_iter_init(&aiter, abd, 0); + abd_iter_init(&aiter, abd); abd_iter_advance(&aiter, off); for (i = 0; i < bio->bi_max_vecs; i++) { diff --git a/module/os/linux/zfs/vdev_disk.c b/module/os/linux/zfs/vdev_disk.c index 66e408c6c98c..b514df3bc172 100644 --- a/module/os/linux/zfs/vdev_disk.c +++ b/module/os/linux/zfs/vdev_disk.c @@ -396,54 +396,6 @@ BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error) rc = vdev_disk_dio_put(dr); } -static unsigned int -bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size) -{ - unsigned int offset, size, i; - struct page *page; - - offset = offset_in_page(bio_ptr); - for (i = 0; i < bio->bi_max_vecs; i++) { - size = PAGE_SIZE - offset; - - if (bio_size <= 0) - break; - - if (size > bio_size) - size = bio_size; - - if (is_vmalloc_addr(bio_ptr)) - page = vmalloc_to_page(bio_ptr); - else - page = virt_to_page(bio_ptr); - - /* - * Some network related block device uses tcp_sendpage, which - * doesn't behave well when using 0-count page, this is a - * safety net to catch them. - */ - ASSERT3S(page_count(page), >, 0); - - if (bio_add_page(bio, page, size, offset) != size) - break; - - bio_ptr += size; - bio_size -= size; - offset = 0; - } - - return (bio_size); -} - -static unsigned int -bio_map_abd_off(struct bio *bio, abd_t *abd, unsigned int size, size_t off) -{ - if (abd_is_linear(abd)) - return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, size)); - - return (abd_scatter_bio_map_off(bio, abd, size, off)); -} - static inline void vdev_submit_bio_impl(struct bio *bio) { @@ -603,7 +555,7 @@ __vdev_disk_physio(struct block_device *bdev, zio_t *zio, bio_set_op_attrs(dr->dr_bio[i], rw, flags); /* Remaining size is returned to become the new size */ - bio_size = bio_map_abd_off(dr->dr_bio[i], zio->io_abd, + bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd, bio_size, abd_offset); /* Advance in buffer and construct another bio if needed */ diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c index e156e2b0139f..75da519f908e 100644 --- a/module/zfs/vdev_queue.c +++ b/module/zfs/vdev_queue.c @@ -535,16 +535,17 @@ vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio) static void vdev_queue_agg_io_done(zio_t *aio) { - if (aio->io_type == ZIO_TYPE_READ) { + abd_put(aio->io_abd); + if (aio->io_type == ZIO_TYPE_WRITE) { zio_t *pio; zio_link_t *zl = NULL; while ((pio = zio_walk_parents(aio, &zl)) != NULL) { - abd_copy_off(pio->io_abd, aio->io_abd, - 0, pio->io_offset - aio->io_offset, pio->io_size); + if (pio->io_flags & ZIO_FLAG_NODATA) { + abd_put(pio->io_abd); + pio->io_abd = NULL; + } } } - - abd_free(aio->io_abd); } /* @@ -568,6 +569,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio) boolean_t stretch = B_FALSE; avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type); enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT; + uint64_t next_offset; abd_t *abd; maxblocksize = spa_maxblocksize(vq->vq_vdev->vdev_spa); @@ -695,7 +697,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio) size = IO_SPAN(first, last); 
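+	/*
+	 * The aggregate I/O no longer copies each child into one large
+	 * buffer; a chained (multi) ABD built below references the
+	 * children's buffers directly, with read gaps filled by freshly
+	 * allocated buffers and NODATA write gaps by views of the shared
+	 * zero ABD.
+	 */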
ASSERT3U(size, <=, maxblocksize); - abd = abd_alloc_for_io(size, B_TRUE); + abd = abd_alloc_multi(); if (abd == NULL) return (NULL); @@ -706,12 +708,37 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio) aio->io_timestamp = first->io_timestamp; nio = first; + next_offset = first->io_offset; do { dio = nio; nio = AVL_NEXT(t, dio); zio_add_child(dio, aio); vdev_queue_io_remove(vq, dio); + + if (dio->io_offset != next_offset) { + /* allocate a buffer for a read gap */ + ASSERT3U(dio->io_type, ==, ZIO_TYPE_READ); + ASSERT3U(dio->io_offset, >, next_offset); + abd = abd_alloc_for_io( + dio->io_offset - next_offset, B_TRUE); + abd_add_child(aio->io_abd, abd, B_TRUE); + } else if (dio->io_flags & ZIO_FLAG_NODATA) { + /* allocate a buffer for a write gap */ + ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE); + ASSERT3P(dio->io_abd, ==, NULL); + dio->io_abd = abd_get_zeros(dio->io_size); + } + if (dio->io_size != dio->io_abd->abd_size) { + /* abd size not the same as IO size */ + ASSERT3U(dio->io_abd->abd_size, >, dio->io_size); + abd = abd_get_offset_size(dio->io_abd, 0, dio->io_size); + abd_add_child(aio->io_abd, abd, B_TRUE); + } else { + abd_add_child(aio->io_abd, dio->io_abd, B_FALSE); + } + next_offset = dio->io_offset + dio->io_size; } while (dio != last); + ASSERT3U(aio->io_abd->abd_size, ==, aio->io_size); /* * We need to drop the vdev queue's lock during zio_execute() to @@ -723,15 +750,6 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio) while ((dio = zio_walk_parents(aio, &zl)) != NULL) { ASSERT3U(dio->io_type, ==, aio->io_type); - if (dio->io_flags & ZIO_FLAG_NODATA) { - ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE); - abd_zero_off(aio->io_abd, - dio->io_offset - aio->io_offset, dio->io_size); - } else if (dio->io_type == ZIO_TYPE_WRITE) { - abd_copy_off(aio->io_abd, dio->io_abd, - dio->io_offset - aio->io_offset, 0, dio->io_size); - } - zio_vdev_io_bypass(dio); zio_execute(dio); }
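For illustration, a minimal sketch of how the interfaces added above compose. This is not part of the patch: the helper name is hypothetical, error handling is omitted, and it assumes only the functions introduced here (abd_alloc_multi(), abd_add_child(), abd_get_zeros(), abd_put()) with children of non-zero size.

/*
 * Build an aggregate ABD from two existing buffers plus a zero-filled
 * gap, mirroring how vdev_queue_aggregate() now chains child ABDs
 * instead of copying them into one large allocation.
 */
static abd_t *
example_build_aggregate(abd_t *first, abd_t *second, size_t gap_size)
{
	abd_t *aggregate = abd_alloc_multi();

	/* The chain does not own this child; the caller still frees it. */
	abd_add_child(aggregate, first, B_FALSE);

	/*
	 * Zero-filled view for the gap; B_TRUE makes the chain release it
	 * (via abd_put()) when the aggregate itself is put.
	 */
	abd_add_child(aggregate, abd_get_zeros(gap_size), B_TRUE);

	abd_add_child(aggregate, second, B_FALSE);

	/* aggregate->abd_size is now the sum of the three child sizes. */
	return (aggregate);
}

The aggregate is released with abd_put() once the I/O completes; children added with B_FALSE remain owned by their original allocators.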