[PATCH V3 2/4] blk-mq: implement queue quiesce via percpu_ref for BLK_MQ_F_BLOCKING
Ming Lei
ming.lei at redhat.com
Tue Sep 8 05:27:23 EDT 2020
On Tue, Sep 08, 2020 at 05:13:22PM +0800, Chao Leng wrote:
>
>
> On 2020/9/8 16:15, Ming Lei wrote:
> > In case of BLK_MQ_F_BLOCKING, blk-mq uses SRCU to mark the read
> > critical section during request dispatch, and request queue quiesce is
> > then built on top of SRCU. What we want is low added cost in the fast
> > path.
> >
> > With percpu-ref, the implementation is cleaner, simpler, and
> > sufficient for queue quiesce. The main requirement is to make sure all
> > read sections observe QUEUE_FLAG_QUIESCED once blk_mq_quiesce_queue()
> > returns.
> >
> > It also becomes much easier to add an async queue quiesce interface.
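> >
> > For example (a sketch only, not part of this patch, and the helper
> > name below is made up for illustration), an async quiesce could simply
> > set the flag and kill the ref, with completion signalled through the
> > percpu-ref release path that wakes up mq_quiesce_wq:
> >
> >         /* hypothetical async variant, built on q->dispatch_counter */
> >         void blk_mq_quiesce_queue_async(struct request_queue *q)
> >         {
> >                 blk_mq_quiesce_queue_nowait(q);
> >                 if (q->tag_set->flags & BLK_MQ_F_BLOCKING)
> >                         percpu_ref_kill(&q->dispatch_counter);
> >         }
> >
> > A caller can then poll percpu_ref_is_zero(&q->dispatch_counter), or
> > sleep on q->mq_quiesce_wq, to learn when quiesce has completed.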
> >
> > Meanwhile the memory footprint is reduced: a single per-request-queue
> > percpu-ref replaces the per-hctx SRCU structures.
> >
> > From the implementation viewpoint, percpu_ref is not seen to be
> > slower than SRCU in the fast path, and tree SRCU (the default option
> > in most distributions) could actually be slower, since a full memory
> > barrier is required in both lock and unlock, while
> > rcu_read_lock()/rcu_read_unlock() should be much cheaper than
> > smp_mb().
> >
> > 1) percpu_ref just holds rcu_read_lock, then runs a check and an
> > increment/decrement on the percpu variable:
> >
> >         rcu_read_lock()
> >         if (__ref_is_percpu(ref, &percpu_count))
> >                 this_cpu_inc(*percpu_count);
> >         rcu_read_unlock()
> >
> > 2) srcu tree:
> >         idx = READ_ONCE(ssp->srcu_idx) & 0x1;
> >         this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
> >         smp_mb(); /* B */  /* Avoid leaking the critical section. */
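> >
> > For reference, percpu_ref_tryget_live(), which hctx_lock() uses in
> > this patch, looks roughly like the following (simplified from
> > include/linux/percpu-refcount.h). Once percpu_ref_kill() has switched
> > the ref to atomic mode and marked it __PERCPU_REF_DEAD, it returns
> > false, which is exactly how dispatch observes quiesce:
> >
> >         rcu_read_lock();
> >         if (__ref_is_percpu(ref, &percpu_count)) {
> >                 /* fast path: percpu mode, no barrier beyond RCU */
> >                 this_cpu_inc(*percpu_count);
> >                 ret = true;
> >         } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
> >                 /* slow path: atomic mode, but ref not killed yet */
> >                 ret = atomic_long_inc_not_zero(&ref->count);
> >         }
> >         rcu_read_unlock();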
> >
> > Also, from my test on null_blk (blocking mode), percpu-ref was not
> > observed to perform worse than SRCU; see the following test:
> >
> > 1) test steps:
> >
> >         rmmod null_blk > /dev/null 2>&1
> >         modprobe null_blk nr_devices=1 submit_queues=1 blocking=1
> >         fio --bs=4k --size=512G --rw=randread --norandommap --direct=1 --ioengine=libaio \
> >                 --iodepth=64 --runtime=60 --group_reporting=1 --name=nullb0 \
> >                 --filename=/dev/nullb0 --numjobs=32
> >
> > test machine: HP DL380, 16 CPU cores, 2 threads per core, dual
> > socket (two NUMA nodes), Intel(R) Xeon(R) Silver 4110 CPU @ 2.10GHz
> >
> > 2) test result:
> > - srcu quiesce: 6063K IOPS
> > - percpu-ref quiesce: 6113K IOPS
> >
> > Signed-off-by: Ming Lei <ming.lei at redhat.com>
> > Cc: Sagi Grimberg <sagi at grimberg.me>
> > Cc: Bart Van Assche <bvanassche at acm.org>
> > Cc: Johannes Thumshirn <Johannes.Thumshirn at wdc.com>
> > Cc: Chao Leng <lengchao at huawei.com>
> > ---
> >  block/blk-mq-sysfs.c   |   2 -
> >  block/blk-mq.c         | 130 +++++++++++++++++++++--------------------
> >  block/blk-sysfs.c      |   6 +-
> >  include/linux/blk-mq.h |   8 ---
> >  include/linux/blkdev.h |   4 ++
> >  5 files changed, 77 insertions(+), 73 deletions(-)
> >
> > diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
> > index 062229395a50..799db7937105 100644
> > --- a/block/blk-mq-sysfs.c
> > +++ b/block/blk-mq-sysfs.c
> > @@ -38,8 +38,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
> >  
> >          cancel_delayed_work_sync(&hctx->run_work);
> >  
> > -        if (hctx->flags & BLK_MQ_F_BLOCKING)
> > -                cleanup_srcu_struct(hctx->srcu);
> >          blk_free_flush_queue(hctx->fq);
> >          sbitmap_free(&hctx->ctx_map);
> >          free_cpumask_var(hctx->cpumask);
> > diff --git a/block/blk-mq.c b/block/blk-mq.c
> > index 13cc10b89629..60630a720449 100644
> > --- a/block/blk-mq.c
> > +++ b/block/blk-mq.c
> > @@ -220,26 +220,22 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
> >   */
> >  void blk_mq_quiesce_queue(struct request_queue *q)
> >  {
> > -        struct blk_mq_hw_ctx *hctx;
> > -        unsigned int i;
> > -        bool rcu = false;
> > +        bool blocking = !!(q->tag_set->flags & BLK_MQ_F_BLOCKING);
> >  
> >          mutex_lock(&q->mq_quiesce_lock);
> >  
> > -        if (blk_queue_quiesced(q))
> > -                goto exit;
> > -
> > -        blk_mq_quiesce_queue_nowait(q);
> > -
> > -        queue_for_each_hw_ctx(q, hctx, i) {
> > -                if (hctx->flags & BLK_MQ_F_BLOCKING)
> > -                        synchronize_srcu(hctx->srcu);
> > -                else
> > -                        rcu = true;
> > +        if (!blk_queue_quiesced(q)) {
> > +                blk_mq_quiesce_queue_nowait(q);
> > +                if (blocking)
> > +                        percpu_ref_kill(&q->dispatch_counter);
> >          }
> > -        if (rcu)
> > +
> > +        if (blocking)
> > +                wait_event(q->mq_quiesce_wq,
> > +                           percpu_ref_is_zero(&q->dispatch_counter));
> > +        else
> >                  synchronize_rcu();
> > -exit:
> > +
> >          mutex_unlock(&q->mq_quiesce_lock);
> >  }
> >  EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
> > @@ -255,7 +251,12 @@ void blk_mq_unquiesce_queue(struct request_queue *q)
> >  {
> >          mutex_lock(&q->mq_quiesce_lock);
> >  
> > -        blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
> > +        if (blk_queue_quiesced(q)) {
> > +                blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
> > +
> > +                if (q->tag_set->flags & BLK_MQ_F_BLOCKING)
> > +                        percpu_ref_resurrect(&q->dispatch_counter);
> > +        }
> >  
> >          /* dispatch requests which are inserted during quiescing */
> >          blk_mq_run_hw_queues(q, true);
> > @@ -710,24 +711,21 @@ void blk_mq_complete_request(struct request *rq)
> >  }
> >  EXPORT_SYMBOL(blk_mq_complete_request);
> >  
> > -static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
> > -        __releases(hctx->srcu)
> > +static void hctx_unlock(struct blk_mq_hw_ctx *hctx)
> >  {
> > -        if (!(hctx->flags & BLK_MQ_F_BLOCKING))
> > -                rcu_read_unlock();
> > +        if (hctx->flags & BLK_MQ_F_BLOCKING)
> > +                percpu_ref_put(&hctx->queue->dispatch_counter);
> >          else
> > -                srcu_read_unlock(hctx->srcu, srcu_idx);
> > +                rcu_read_unlock();
> >  }
> >  
> > -static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
> > -        __acquires(hctx->srcu)
> > +/* Returning false means that queue is being quiesced */
> > +static inline bool hctx_lock(struct blk_mq_hw_ctx *hctx)
> >  {
> > -        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
> > -                /* shut up gcc false positive */
> > -                *srcu_idx = 0;
> > -                rcu_read_lock();
> > -        } else
> > -                *srcu_idx = srcu_read_lock(hctx->srcu);
> > +        if (hctx->flags & BLK_MQ_F_BLOCKING)
> > +                return percpu_ref_tryget_live(&hctx->queue->dispatch_counter);
> > +        rcu_read_lock();
> > +        return true;
> >  }
> >  
> >  /**
> > @@ -1506,8 +1504,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
> >   */
> >  static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
> >  {
> > -        int srcu_idx;
> > -
> >          /*
> >           * We should be running this queue from one of the CPUs that
> >           * are mapped to it.
> > @@ -1541,9 +1537,10 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
> >  
> >          might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
> >  
> > -        hctx_lock(hctx, &srcu_idx);
> > -        blk_mq_sched_dispatch_requests(hctx);
> > -        hctx_unlock(hctx, srcu_idx);
> > +        if (hctx_lock(hctx)) {
> > +                blk_mq_sched_dispatch_requests(hctx);
> > +                hctx_unlock(hctx);
> > +        }
> >  }
> >  
> >  static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
> > @@ -1655,7 +1652,6 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
> >   */
> >  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
> >  {
> > -        int srcu_idx;
> >          bool need_run;
> >  
> >          /*
> > @@ -1666,10 +1662,12 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
> >           * And queue will be rerun in blk_mq_unquiesce_queue() if it is
> >           * quiesced.
> >           */
> > -        hctx_lock(hctx, &srcu_idx);
> > +        if (!hctx_lock(hctx))
> > +                return;
> > +
> >          need_run = !blk_queue_quiesced(hctx->queue) &&
> >                  blk_mq_hctx_has_pending(hctx);
> > -        hctx_unlock(hctx, srcu_idx);
> > +        hctx_unlock(hctx);
> >  
> >          if (need_run)
> >                  __blk_mq_delay_run_hw_queue(hctx, async, 0);
> > @@ -2009,7 +2007,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
> >          bool run_queue = true;
> >  
> >          /*
> > -         * RCU or SRCU read lock is needed before checking quiesced flag.
> > +         * hctx_lock() is needed before checking quiesced flag.
> >           *
> >           * When queue is stopped or quiesced, ignore 'bypass_insert' from
> >           * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
> > @@ -2057,11 +2055,14 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
> >                  struct request *rq, blk_qc_t *cookie)
> >  {
> >          blk_status_t ret;
> > -        int srcu_idx;
> >  
> >          might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
> >  
> > -        hctx_lock(hctx, &srcu_idx);
> > +        /* Insert request to queue in case of being quiesced */
> > +        if (!hctx_lock(hctx)) {
> > +                blk_mq_sched_insert_request(rq, false, false, false);
> Suggest: use blk_mq_request_bypass_insert; the rq should be dispatched first.
No. When direct issue fails here, the request has never been queued to
the LLD, so we should insert it into the scheduler queue; see commit:

db03f88fae8a ("blk-mq: insert request not through ->queue_rq into sw/scheduler queue")
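
To make that concrete, the fallback reads like this (an annotated sketch
of the hunk quoted above; the comments and the trailing return are mine,
added for explanation):

        /* Insert request to queue in case of being quiesced */
        if (!hctx_lock(hctx)) {
                /*
                 * The dispatch_counter ref is dead, i.e. the queue is
                 * being quiesced, and ->queue_rq() has not been called
                 * for this rq yet. So take the normal insert path via
                 * the sw/scheduler queue rather than
                 * blk_mq_request_bypass_insert(), which is reserved
                 * for requests that have already been passed to
                 * ->queue_rq() once.
                 */
                blk_mq_sched_insert_request(rq, false, false, false);
                return;
        }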
Thanks,
Ming