--- zzzz-none-000/linux-3.10.107/drivers/md/dm-io.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/md/dm-io.c	2021-02-04 17:41:59.000000000 +0000
@@ -20,8 +20,6 @@
 #define DM_MSG_PREFIX "io"
 
 #define DM_IO_MAX_REGIONS	BITS_PER_LONG
-#define MIN_IOS		16
-#define MIN_BIOS	16
 
 struct dm_io_client {
 	mempool_t *pool;
@@ -35,7 +33,6 @@
 struct io {
 	unsigned long error_bits;
 	atomic_t count;
-	struct completion *wait;
 	struct dm_io_client *client;
 	io_notify_fn callback;
 	void *context;
@@ -51,24 +48,24 @@
 struct dm_io_client *dm_io_client_create(void)
 {
 	struct dm_io_client *client;
+	unsigned min_ios = dm_get_reserved_bio_based_ios();
 
 	client = kmalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
 		return ERR_PTR(-ENOMEM);
 
-	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
+	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
 	if (!client->pool)
 		goto bad;
 
-	client->bios = bioset_create(MIN_BIOS, 0);
+	client->bios = bioset_create(min_ios, 0);
 	if (!client->bios)
 		goto bad;
 
 	return client;
 
    bad:
-	if (client->pool)
-		mempool_destroy(client->pool);
+	mempool_destroy(client->pool);
 	kfree(client);
 	return ERR_PTR(-ENOMEM);
 }
@@ -113,36 +110,36 @@
  * We need an io object to keep track of the number of bios that
  * have been dispatched for a particular io.
  *---------------------------------------------------------------*/
+static void complete_io(struct io *io)
+{
+	unsigned long error_bits = io->error_bits;
+	io_notify_fn fn = io->callback;
+	void *context = io->context;
+
+	if (io->vma_invalidate_size)
+		invalidate_kernel_vmap_range(io->vma_invalidate_address,
+					     io->vma_invalidate_size);
+
+	mempool_free(io, io->client->pool);
+	fn(error_bits, context);
+}
+
 static void dec_count(struct io *io, unsigned int region, int error)
 {
 	if (error)
 		set_bit(region, &io->error_bits);
 
-	if (atomic_dec_and_test(&io->count)) {
-		if (io->vma_invalidate_size)
-			invalidate_kernel_vmap_range(io->vma_invalidate_address,
-						     io->vma_invalidate_size);
-
-		if (io->wait)
-			complete(io->wait);
-
-		else {
-			unsigned long r = io->error_bits;
-			io_notify_fn fn = io->callback;
-			void *context = io->context;
-
-			mempool_free(io, io->client->pool);
-			fn(r, context);
-		}
-	}
+	if (atomic_dec_and_test(&io->count))
+		complete_io(io);
 }
 
-static void endio(struct bio *bio, int error)
+static void endio(struct bio *bio)
 {
 	struct io *io;
 	unsigned region;
+	int error;
 
-	if (error && bio_data_dir(bio) == READ)
+	if (bio->bi_error && bio_data_dir(bio) == READ)
 		zero_fill_bio(bio);
 
 	/*
@@ -150,6 +147,7 @@
 	 */
 	retrieve_io_and_region_from_bio(bio, &io, &region);
 
+	error = bio->bi_error;
 	bio_put(bio);
 
 	dec_count(io, region, error);
@@ -203,26 +201,28 @@
 /*
  * Functions for getting the pages from a bvec.
  */
-static void bvec_get_page(struct dpages *dp,
-		  struct page **p, unsigned long *len, unsigned *offset)
+static void bio_get_page(struct dpages *dp, struct page **p,
+			 unsigned long *len, unsigned *offset)
 {
-	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
+	struct bio_vec *bvec = dp->context_ptr;
 	*p = bvec->bv_page;
-	*len = bvec->bv_len;
-	*offset = bvec->bv_offset;
+	*len = bvec->bv_len - dp->context_u;
+	*offset = bvec->bv_offset + dp->context_u;
 }
 
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
 {
-	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
+	struct bio_vec *bvec = dp->context_ptr;
 	dp->context_ptr = bvec + 1;
+	dp->context_u = 0;
 }
 
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
 {
-	dp->get_page = bvec_get_page;
-	dp->next_page = bvec_next_page;
-	dp->context_ptr = bvec;
+	dp->get_page = bio_get_page;
+	dp->next_page = bio_next_page;
+	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+	dp->context_u = bio->bi_iter.bi_bvec_done;
 }
 
 /*
@@ -290,9 +290,16 @@
 	struct request_queue *q = bdev_get_queue(where->bdev);
 	unsigned short logical_block_size = queue_logical_block_size(q);
 	sector_t num_sectors;
+	unsigned int uninitialized_var(special_cmd_max_sectors);
 
-	/* Reject unsupported discard requests */
-	if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+	/*
+	 * Reject unsupported discard and write same requests.
+	 */
+	if (rw & REQ_DISCARD)
+		special_cmd_max_sectors = q->limits.max_discard_sectors;
+	else if (rw & REQ_WRITE_SAME)
+		special_cmd_max_sectors = q->limits.max_write_same_sectors;
+	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
 		dec_count(io, region, -EOPNOTSUPP);
 		return;
 	}
@@ -308,18 +315,18 @@
 		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
 			num_bvecs = 1;
 		else
-			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
 
 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
-		bio->bi_sector = where->sector + (where->count - remaining);
+		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
 		bio->bi_end_io = endio;
 		store_io_and_region_in_bio(bio, io, region);
 
 		if (rw & REQ_DISCARD) {
-			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
-			bio->bi_size = num_sectors << SECTOR_SHIFT;
+			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
+			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 			remaining -= num_sectors;
 		} else if (rw & REQ_WRITE_SAME) {
 			/*
@@ -327,8 +334,8 @@
 			 */
 			dp->get_page(dp, &page, &len, &offset);
 			bio_add_page(bio, page, logical_block_size, offset);
-			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
-			bio->bi_size = num_sectors << SECTOR_SHIFT;
+			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
+			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 
 			offset = 0;
 			remaining -= num_sectors;
@@ -381,41 +388,51 @@
 	dec_count(io, 0, 0);
 }
 
+struct sync_io {
+	unsigned long error_bits;
+	struct completion wait;
+};
+
+static void sync_io_complete(unsigned long error, void *context)
+{
+	struct sync_io *sio = context;
+
+	sio->error_bits = error;
+	complete(&sio->wait);
+}
+
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
 {
-	/*
-	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
-	 * align it on our own.
-	 * volatile prevents the optimizer from removing or reusing
-	 * "io_" field from the stack frame (allowed in ANSI C).
-	 */
-	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
-	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
-	DECLARE_COMPLETION_ONSTACK(wait);
+	struct io *io;
+	struct sync_io sio;
 
 	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
 		WARN_ON(1);
 		return -EIO;
 	}
 
+	init_completion(&sio.wait);
+
+	io = mempool_alloc(client->pool, GFP_NOIO);
 	io->error_bits = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
-	io->wait = &wait;
 	io->client = client;
+	io->callback = sync_io_complete;
+	io->context = &sio;
 
 	io->vma_invalidate_address = dp->vma_invalidate_address;
 	io->vma_invalidate_size = dp->vma_invalidate_size;
 
 	dispatch_io(rw, num_regions, where, dp, io, 1);
 
-	wait_for_completion_io(&wait);
+	wait_for_completion_io(&sio.wait);
 
 	if (error_bits)
-		*error_bits = io->error_bits;
+		*error_bits = sio.error_bits;
 
-	return io->error_bits ? -EIO : 0;
+	return sio.error_bits ? -EIO : 0;
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
@@ -433,7 +450,6 @@
 	io = mempool_alloc(client->pool, GFP_NOIO);
 	io->error_bits = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
-	io->wait = NULL;
 	io->client = client;
 	io->callback = fn;
 	io->context = context;
@@ -458,8 +474,8 @@
 		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
 		break;
 
-	case DM_IO_BVEC:
-		bvec_dp_init(dp, io_req->mem.ptr.bvec);
+	case DM_IO_BIO:
+		bio_dp_init(dp, io_req->mem.ptr.bio);
 		break;
 
 	case DM_IO_VMA:
@@ -486,9 +502,9 @@
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
- * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
+ * If you fail to do one of these, the IO will be submitted to the disk after
+ * q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
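The most substantive change above is in sync_io(): the on-stack, manually aligned struct io and its dedicated io->wait completion are gone, replaced by a mempool-allocated io that goes through the same callback path as async_io(), with a small on-stack struct sync_io capturing the error bits and signalling a completion. Below is a minimal userspace sketch of that sync-over-async pattern, using pthreads in place of the kernel's completion API; every name in it is illustrative, not taken from dm-io.

/*
 * Illustrative sketch only: a synchronous wrapper built on an
 * asynchronous callback path, mirroring sync_io()/sync_io_complete().
 */
#include <pthread.h>
#include <stdio.h>

typedef void (*notify_fn)(unsigned long error, void *context);

struct sync_ctx {			/* plays the role of struct sync_io */
	unsigned long error_bits;
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

struct job {				/* plays the role of struct io */
	notify_fn callback;
	void *context;
};

static void *worker(void *arg)		/* stands in for the block layer */
{
	struct job *job = arg;

	job->callback(0 /* no error bits */, job->context);
	return NULL;
}

/* Generic completion callback, like sync_io_complete(). */
static void sync_complete(unsigned long error, void *context)
{
	struct sync_ctx *ctx = context;

	pthread_mutex_lock(&ctx->lock);
	ctx->error_bits = error;
	ctx->done = 1;
	pthread_cond_signal(&ctx->cond);
	pthread_mutex_unlock(&ctx->lock);
}

/* Synchronous wrapper: dispatch asynchronously, then wait. */
static unsigned long do_sync(void)
{
	struct sync_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	struct job job = { .callback = sync_complete, .context = &ctx };
	pthread_t t;

	pthread_create(&t, NULL, worker, &job);

	pthread_mutex_lock(&ctx.lock);
	while (!ctx.done)
		pthread_cond_wait(&ctx.cond, &ctx.lock);
	pthread_mutex_unlock(&ctx.lock);

	pthread_join(t, NULL);
	return ctx.error_bits;
}

int main(void)
{
	printf("error_bits = %lu\n", do_sync());
	return 0;
}

Routing the synchronous case through the common callback means complete_io() becomes the single teardown point for an io, and dec_count() no longer needs the io->wait special case.

The bvec_dp_init() to bio_dp_init() rename follows the immutable-biovec conversion: the bio's iterator may begin partway through a bio_vec, so dp->context_u is seeded from bio->bi_iter.bi_bvec_done and the current page is exposed with that many bytes already consumed. A self-contained sketch of just that offset arithmetic, with hypothetical names:

/* Illustrative sketch only: resuming a partially consumed bvec. */
#include <stdio.h>

struct bvec { unsigned offset, len; };

/* Expose the unconsumed tail of the current bvec, as bio_get_page()
 * does using dp->context_u. */
static void get_page(const struct bvec *v, unsigned done,
		     unsigned *len, unsigned *offset)
{
	*len = v->len - done;
	*offset = v->offset + done;
}

int main(void)
{
	struct bvec v = { .offset = 512, .len = 4096 };
	unsigned done = 1024;	/* like bi_bvec_done: bytes already consumed */
	unsigned len, offset;

	get_page(&v, done, &len, &offset);
	printf("len=%u offset=%u\n", len, offset);	/* len=3072 offset=1536 */

	done = 0;		/* bio_next_page(): a fresh bvec starts unconsumed */
	get_page(&v, done, &len, &offset);
	printf("len=%u offset=%u\n", len, offset);	/* len=4096 offset=512 */
	return 0;
}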