From 8814ce8a0f680599a837af18aefdec774e5c7b97 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Wed, 7 Mar 2018 17:10:04 -0800
Subject: block: Introduce blk_queue_flag_{set,clear,test_and_{set,clear}}()

Introduce functions that modify the queue flags and that protect
these modifications with the request queue lock. Except for moving one
wake_up_all() call from inside to outside a critical section, this
patch does not change any functionality.

Cc: Christoph Hellwig
Cc: Hannes Reinecke
Cc: Ming Lei
Reviewed-by: Johannes Thumshirn
Reviewed-by: Martin K. Petersen
Signed-off-by: Bart Van Assche
Signed-off-by: Jens Axboe
---
 block/blk-core.c     | 91 +++++++++++++++++++++++++++++++++++++++++++---------
 block/blk-mq.c       | 12 ++-----
 block/blk-settings.c |  6 ++--
 block/blk-sysfs.c    | 22 ++++---------
 block/blk-timeout.c  |  6 ++--
 5 files changed, 88 insertions(+), 49 deletions(-)
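Usage sketch, not part of the patch: a caller that used to open-code the
queue_lock critical section now makes a single helper call. The function
names example_set_nomerges() and example_mark_queue() below are
hypothetical, and QUEUE_FLAG_NOMERGES is an arbitrarily chosen flag;
neither is taken from this series.

  #include <linux/blkdev.h>

  /*
   * Illustrative only: set or clear QUEUE_FLAG_NOMERGES under the
   * queue lock without open-coding spin_lock_irq()/queue_flag_set()/
   * spin_unlock_irq() the way callers did before this patch.
   */
  static void example_set_nomerges(struct request_queue *q, bool on)
  {
  	if (on)
  		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
  	else
  		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
  }

  /*
   * The test_and_set variant returns the previous state, so "mark the
   * queue and learn whether it was already marked" becomes one locked
   * operation instead of a separate test and set.
   */
  static bool example_mark_queue(struct request_queue *q)
  {
  	return blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, q);
  }

Because the helpers take queue_lock with spin_lock_irqsave() internally,
they can be called whether or not the caller already runs with
interrupts disabled, and callers no longer need to keep unrelated work
inside the critical section, which is why blk_clear_preempt_only() in
the diff below can issue its wake_up_all() call outside the lock.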
diff --git a/block/blk-core.c b/block/blk-core.c
index 241b73088617..74c6283f4509 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -71,6 +71,78 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
+/**
+ * blk_queue_flag_set - atomically set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ */
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	queue_flag_set(flag, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_set);
+
+/**
+ * blk_queue_flag_clear - atomically clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ */
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	queue_flag_clear(flag, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_clear);
+
+/**
+ * blk_queue_flag_test_and_set - atomically test and set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ *
+ * Returns the previous value of @flag - 0 if the flag was not set and 1 if
+ * the flag was already set.
+ */
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
+{
+	unsigned long flags;
+	bool res;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	res = queue_flag_test_and_set(flag, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
+
+/**
+ * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ *
+ * Returns the previous value of @flag - 0 if the flag was not set and 1 if
+ * the flag was set.
+ */
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
+{
+	unsigned long flags;
+	bool res;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	res = queue_flag_test_and_clear(flag, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
+
 static void blk_clear_congested(struct request_list *rl, int sync)
 {
 #ifdef CONFIG_CGROUP_WRITEBACK
@@ -361,25 +433,14 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 int blk_set_preempt_only(struct request_queue *q)
 {
-	unsigned long flags;
-	int res;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	return res;
+	return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
 }
 EXPORT_SYMBOL_GPL(blk_set_preempt_only);
 
 void blk_clear_preempt_only(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+	blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
 	wake_up_all(&q->mq_freeze_wq);
-	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
 
@@ -629,9 +690,7 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
-	queue_flag_set(QUEUE_FLAG_DYING, q);
-	spin_unlock_irq(q->queue_lock);
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
 
 	/*
 	 * When queue DYING flag is set, we need to block new req
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e70cc7d48f58..a86899022683 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -194,11 +194,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
  */
 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
@@ -239,11 +235,7 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
  */
 void blk_mq_unquiesce_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
 
 	/* dispatch requests which are inserted during quiescing */
 	blk_mq_run_hw_queues(q, true);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 7f719da0eadd..d1de71124656 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -859,12 +859,10 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-	spin_lock_irq(q->queue_lock);
 	if (queueable)
-		queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
+		blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
 	else
-		queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
-	spin_unlock_irq(q->queue_lock);
+		blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fd71a00c9462..d00d1b0ec109 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -276,12 +276,10 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
 	if (neg)						\
 		val = !val;					\
 								\
-	spin_lock_irq(q->queue_lock);				\
 	if (val)						\
-		queue_flag_set(QUEUE_FLAG_##flag, q);		\
+		blk_queue_flag_set(QUEUE_FLAG_##flag, q);	\
 	else							\
-		queue_flag_clear(QUEUE_FLAG_##flag, q);		\
-	spin_unlock_irq(q->queue_lock);				\
+		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);	\
 	return ret;						\
 }
 
@@ -414,12 +412,10 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	if (ret < 0)
 		return ret;
 
-	spin_lock_irq(q->queue_lock);
 	if (poll_on)
-		queue_flag_set(QUEUE_FLAG_POLL, q);
+		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_POLL, q);
-	spin_unlock_irq(q->queue_lock);
+		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
 
 	return ret;
 }
@@ -487,12 +483,10 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
 	if (set == -1)
 		return -EINVAL;
 
-	spin_lock_irq(q->queue_lock);
 	if (set)
-		queue_flag_set(QUEUE_FLAG_WC, q);
+		blk_queue_flag_set(QUEUE_FLAG_WC, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WC, q);
-	spin_unlock_irq(q->queue_lock);
+		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 
 	return count;
 }
@@ -946,9 +940,7 @@ void blk_unregister_queue(struct gendisk *disk)
 	 */
 	mutex_lock(&q->sysfs_lock);
 
-	spin_lock_irq(q->queue_lock);
-	queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
-	spin_unlock_irq(q->queue_lock);
+	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
 
 	/*
 	 * Remove the sysfs attributes before unregistering the queue data
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a05e3676d24a..34a55250f08a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -57,12 +57,10 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
 		char *p = (char *) buf;
 
 		val = simple_strtoul(p, &p, 10);
-		spin_lock_irq(q->queue_lock);
 		if (val)
-			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+			blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
 		else
-			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
-		spin_unlock_irq(q->queue_lock);
+			blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
 	}
 
 	return count;
-- 