--- zzzz-none-000/linux-3.10.107/drivers/s390/block/scm_blk.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/s390/block/scm_blk.c	2021-02-04 17:41:59.000000000 +0000
@@ -10,6 +10,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/genhd.h>
@@ -20,13 +21,18 @@
 
 debug_info_t *scm_debug;
 static int scm_major;
+static mempool_t *aidaw_pool;
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(inactive_requests);
 static unsigned int nr_requests = 64;
+static unsigned int nr_requests_per_io = 8;
 static atomic_t nr_devices = ATOMIC_INIT(0);
 module_param(nr_requests, uint, S_IRUGO);
 MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
 
+module_param(nr_requests_per_io, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
+
 MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("scm:scmdev*");
@@ -36,8 +42,8 @@
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 
 	free_page((unsigned long) scmrq->aob);
-	free_page((unsigned long) scmrq->aidaw);
 	__scm_free_rq_cluster(scmrq);
+	kfree(scmrq->request);
 	kfree(aobrq);
 }
 
@@ -53,6 +59,8 @@
 		__scm_free_rq(scmrq);
 	}
 	spin_unlock_irq(&list_lock);
+
+	mempool_destroy(aidaw_pool);
 }
 
 static int __scm_alloc_rq(void)
@@ -65,17 +73,17 @@
 		return -ENOMEM;
 
 	scmrq = (void *) aobrq->data;
-	scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
 	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
-	if (!scmrq->aob || !scmrq->aidaw) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	if (!scmrq->aob)
+		goto free;
 
-	if (__scm_alloc_rq_cluster(scmrq)) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
+				 GFP_KERNEL);
+	if (!scmrq->request)
+		goto free;
+
+	if (__scm_alloc_rq_cluster(scmrq))
+		goto free;
 
 	INIT_LIST_HEAD(&scmrq->list);
 	spin_lock_irq(&list_lock);
@@ -83,12 +91,19 @@
 	spin_unlock_irq(&list_lock);
 
 	return 0;
+free:
+	__scm_free_rq(scmrq);
+	return -ENOMEM;
 }
 
 static int scm_alloc_rqs(unsigned int nrqs)
 {
 	int ret = 0;
 
+	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
+	if (!aidaw_pool)
+		return -ENOMEM;
+
 	while (nrqs-- && !ret)
 		ret = __scm_alloc_rq();
 
 
@@ -112,75 +127,113 @@
 static void scm_request_done(struct scm_request *scmrq)
 {
 	unsigned long flags;
+	struct msb *msb;
+	u64 aidaw;
+	int i;
+
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+		msb = &scmrq->aob->msb[i];
+		aidaw = msb->data_addr;
+
+		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+		    IS_ALIGNED(aidaw, PAGE_SIZE))
+			mempool_free(virt_to_page(aidaw), aidaw_pool);
+	}
 
 	spin_lock_irqsave(&list_lock, flags);
 	list_add(&scmrq->list, &inactive_requests);
 	spin_unlock_irqrestore(&list_lock, flags);
 }
 
-static int scm_open(struct block_device *blkdev, fmode_t mode)
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
 {
-	return scm_get_ref();
+	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
 }
 
-static void scm_release(struct gendisk *gendisk, fmode_t mode)
+static inline struct aidaw *scm_aidaw_alloc(void)
 {
-	scm_put_ref();
+	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
+
+	return page ? page_address(page) : NULL;
 }
 
-static const struct block_device_operations scm_blk_devops = {
-	.owner = THIS_MODULE,
-	.open = scm_open,
-	.release = scm_release,
-};
+static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
+{
+	unsigned long _aidaw = (unsigned long) aidaw;
+	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
 
-static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
+}
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
 {
-	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+	struct aidaw *aidaw;
+
+	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
+		return scmrq->next_aidaw;
+
+	aidaw = scm_aidaw_alloc();
+	if (aidaw)
+		memset(aidaw, 0, PAGE_SIZE);
+	return aidaw;
 }
 
-static void scm_request_prepare(struct scm_request *scmrq)
+static int scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct aidaw *aidaw = scmrq->aidaw;
-	struct msb *msb = &scmrq->aob->msb[0];
+	int pos = scmrq->aob->request.msb_count;
+	struct msb *msb = &scmrq->aob->msb[pos];
+	struct request *req = scmrq->request[pos];
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct aidaw *aidaw;
+	struct bio_vec bv;
+
+	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
+	if (!aidaw)
+		return -ENOMEM;
 
 	msb->bs = MSB_BS_4K;
-	scmrq->aob->request.msb_count = 1;
-	msb->scm_addr = scmdev->address +
-		((u64) blk_rq_pos(scmrq->request) << 9);
-	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
-		MSB_OC_READ : MSB_OC_WRITE;
+	scmrq->aob->request.msb_count++;
+	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
 	msb->flags |= MSB_FLAG_IDA;
 	msb->data_addr = (u64) aidaw;
 
-	rq_for_each_segment(bv, scmrq->request, iter) {
-		WARN_ON(bv->bv_offset);
-		msb->blk_count += bv->bv_len >> 12;
-		aidaw->data_addr = (u64) page_address(bv->bv_page);
+	rq_for_each_segment(bv, req, iter) {
+		WARN_ON(bv.bv_offset);
+		msb->blk_count += bv.bv_len >> 12;
+		aidaw->data_addr = (u64) page_address(bv.bv_page);
 		aidaw++;
 	}
+
+	scmrq->next_aidaw = aidaw;
+	return 0;
+}
+
+static inline void scm_request_set(struct scm_request *scmrq,
+				   struct request *req)
+{
+	scmrq->request[scmrq->aob->request.msb_count] = req;
 }
 
 static inline void scm_request_init(struct scm_blk_dev *bdev,
-				    struct scm_request *scmrq,
-				    struct request *req)
+				    struct scm_request *scmrq)
 {
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 	struct aob *aob = scmrq->aob;
 
+	memset(scmrq->request, 0,
+	       nr_requests_per_io * sizeof(scmrq->request[0]));
 	memset(aob, 0, sizeof(*aob));
-	memset(scmrq->aidaw, 0, PAGE_SIZE);
 	aobrq->scmdev = bdev->scmdev;
 	aob->request.cmd_code = ARQB_CMD_MOVE;
 	aob->request.data = (u64) aobrq;
-	scmrq->request = req;
 	scmrq->bdev = bdev;
 	scmrq->retries = 4;
 	scmrq->error = 0;
+	/* We don't use all msbs - place aidaws at the end of the aob page. */
+	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
 	scm_request_cluster_init(scmrq);
 }
 
@@ -196,9 +249,12 @@
 void scm_request_requeue(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
 
 	scm_release_cluster(scmrq);
-	blk_requeue_request(bdev->rq, scmrq->request);
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+		blk_requeue_request(bdev->rq, scmrq->request[i]);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 	scm_ensure_queue_restart(bdev);
@@ -207,58 +263,108 @@
 void scm_request_finish(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
 
 	scm_release_cluster(scmrq);
-	blk_end_request_all(scmrq->request, scmrq->error);
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+		blk_end_request_all(scmrq->request[i], scmrq->error);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 }
 
+static int scm_request_start(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	int ret;
+
+	atomic_inc(&bdev->queued_reqs);
+	if (!scmrq->aob->request.msb_count) {
+		scm_request_requeue(scmrq);
+		return -EINVAL;
+	}
+
+	ret = eadm_start_aob(scmrq->aob);
+	if (ret) {
+		SCM_LOG(5, "no subchannel");
+		scm_request_requeue(scmrq);
+	}
+	return ret;
+}
+
 static void scm_blk_request(struct request_queue *rq)
 {
 	struct scm_device *scmdev = rq->queuedata;
 	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
-	struct scm_request *scmrq;
+	struct scm_request *scmrq = NULL;
 	struct request *req;
-	int ret;
 
 	while ((req = blk_peek_request(rq))) {
-		if (req->cmd_type != REQ_TYPE_FS)
+		if (req->cmd_type != REQ_TYPE_FS) {
+			blk_start_request(req);
+			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
+			blk_end_request_all(req, -EIO);
 			continue;
-
-		if (!scm_permit_request(bdev, req)) {
-			scm_ensure_queue_restart(bdev);
-			return;
 		}
-		scmrq = scm_request_fetch();
+
+		if (!scm_permit_request(bdev, req))
+			goto out;
+
 		if (!scmrq) {
-			SCM_LOG(5, "no request");
-			scm_ensure_queue_restart(bdev);
-			return;
+			scmrq = scm_request_fetch();
+			if (!scmrq) {
+				SCM_LOG(5, "no request");
+				goto out;
+			}
+			scm_request_init(bdev, scmrq);
 		}
-		scm_request_init(bdev, scmrq, req);
+		scm_request_set(scmrq, req);
+
 		if (!scm_reserve_cluster(scmrq)) {
 			SCM_LOG(5, "cluster busy");
+			scm_request_set(scmrq, NULL);
+			if (scmrq->aob->request.msb_count)
+				goto out;
+
+			scm_request_done(scmrq);
 			return;
 		}
+
 		if (scm_need_cluster_request(scmrq)) {
-			atomic_inc(&bdev->queued_reqs);
-			blk_start_request(req);
-			scm_initiate_cluster_request(scmrq);
-			return;
+			if (scmrq->aob->request.msb_count) {
+				/* Start cluster requests separately. */
+				scm_request_set(scmrq, NULL);
+				if (scm_request_start(scmrq))
+					return;
+			} else {
+				atomic_inc(&bdev->queued_reqs);
+				blk_start_request(req);
+				scm_initiate_cluster_request(scmrq);
+			}
+			scmrq = NULL;
+			continue;
+		}
+
+		if (scm_request_prepare(scmrq)) {
+			SCM_LOG(5, "aidaw alloc failed");
+			scm_request_set(scmrq, NULL);
+			goto out;
 		}
-		scm_request_prepare(scmrq);
-		atomic_inc(&bdev->queued_reqs);
 		blk_start_request(req);
-		ret = scm_start_aob(scmrq->aob);
-		if (ret) {
-			SCM_LOG(5, "no subchannel");
-			scm_request_requeue(scmrq);
+
+		if (scmrq->aob->request.msb_count < nr_requests_per_io)
+			continue;
+
+		if (scm_request_start(scmrq))
 			return;
-		}
+
+		scmrq = NULL;
 	}
+out:
+	if (scmrq)
+		scm_request_start(scmrq);
+	else
+		scm_ensure_queue_restart(bdev);
 }
 
 static void __scmrq_log_error(struct scm_request *scmrq)
@@ -316,7 +422,7 @@
 	}
 
 restart:
-	if (!scm_start_aob(scmrq->aob))
+	if (!eadm_start_aob(scmrq->aob))
 		return;
 
 requeue:
@@ -359,6 +465,10 @@
 	blk_run_queue(bdev->rq);
 }
 
+static const struct block_device_operations scm_blk_devops = {
+	.owner = THIS_MODULE,
+};
+
 int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 {
 	struct request_queue *rq;
@@ -394,6 +504,7 @@
 	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
 	blk_queue_max_segments(rq, nr_max_blk);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
 	scm_blk_dev_cluster_setup(bdev);
 
 	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
@@ -450,11 +561,19 @@
 	spin_unlock_irqrestore(&bdev->lock, flags);
 }
 
+static bool __init scm_blk_params_valid(void)
+{
+	if (!nr_requests_per_io || nr_requests_per_io > 64)
+		return false;
+
+	return scm_cluster_size_valid();
+}
+
 static int __init scm_blk_init(void)
 {
 	int ret = -EINVAL;
 
-	if (!scm_cluster_size_valid())
+	if (!scm_blk_params_valid())
 		goto out;
 
 	ret = register_blkdev(0, "scm");
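
Note on the aidaw placement arithmetic introduced above (scm_aidaw_bytes()/next_aidaw): aidaws for small requests are carved out of the unused tail of the 4 KB AOB page behind the msb[] array, and each 16-byte aidaw entry describes one 4 KB block of payload, so the remaining capacity is (bytes to end of page / 16) * 4 KB. The stand-alone sketch below only illustrates that arithmetic; the struct layout, offsets and helper names are assumptions made for the example, not the real asm/eadm.h definitions and not part of this patch.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* 16 bytes, same size as the kernel's struct aidaw (layout assumed). */
struct aidaw_sketch {
	uint64_t flags;
	uint64_t data_addr;
};

/*
 * Payload bytes that the aidaw slots between 'aidaw' and the end of its
 * page can still describe - one 4 KB block per 16-byte slot, mirroring
 * what scm_aidaw_bytes() computes in the patch.
 */
static unsigned long aidaw_bytes(struct aidaw_sketch *aidaw)
{
	unsigned long addr = (unsigned long) aidaw;
	unsigned long tail = ALIGN_UP(addr, PAGE_SIZE) - addr;

	return (tail / sizeof(*aidaw)) * PAGE_SIZE;
}

int main(void)
{
	/* Hypothetical AOB page at 0x10000 whose msb[] array ends 0x200 in. */
	struct aidaw_sketch *next = (struct aidaw_sketch *) (0x10000UL + 0x200);

	printf("room for %lu bytes (%lu x 4K blocks)\n",
	       aidaw_bytes(next), aidaw_bytes(next) / PAGE_SIZE);
	/* (4096 - 512) / 16 = 224 slots -> 224 * 4096 = 917504 bytes. */
	return 0;
}

If a request needs more than what the tail of the AOB page can map, scm_aidaw_fetch() falls back to a page from aidaw_pool, which scm_request_done() later returns via mempool_free().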