@@ -814,12 +814,21 @@ void blk_queue_exit(struct request_queue *q)
 	percpu_ref_put(&q->q_usage_counter);
 }
 
+static void blk_queue_usage_counter_release_swork(struct swork_event *sev)
+{
+	struct request_queue *q =
+		container_of(sev, struct request_queue, mq_pcpu_wake);
+
+	wake_up_all(&q->mq_freeze_wq);
+}
+
 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 {
 	struct request_queue *q =
 		container_of(ref, struct request_queue, q_usage_counter);
 
-	wake_up_all(&q->mq_freeze_wq);
+	if (wq_has_sleeper(&q->mq_freeze_wq))
+		swork_queue(&q->mq_pcpu_wake);
 }
 
 static void blk_rq_timed_out_timer(unsigned long data)
@@ -896,6 +905,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
+	INIT_SWORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_swork);
 
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
@@ -3623,6 +3633,8 @@ int __init blk_dev_init(void)
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
 
+	BUG_ON(swork_get());
+
 	request_cachep = kmem_cache_create("blkdev_requests",
 			sizeof(struct request), 0, SLAB_PANIC, NULL);
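
For readers unfamiliar with the swork API this patch builds on, here is a minimal standalone sketch of the same deferred-wakeup pattern. It assumes the simple-work-queue interface from the PREEMPT_RT patch set (swork_get(), INIT_SWORK(), swork_queue()) behaves as used in the hunks above; the my_dev structure and function names are hypothetical, purely for illustration.

/*
 * Hypothetical example, not part of the patch: defer wake_up_all()
 * from atomic context to process context via swork, mirroring the
 * blk_queue_usage_counter_release() change above.
 */
#include <linux/kernel.h>	/* container_of() */
#include <linux/wait.h>
#include <linux/swork.h>	/* PREEMPT_RT simple work queue */

struct my_dev {
	wait_queue_head_t	wq;
	struct swork_event	wake_sev;
};

/* Runs in process context on the swork kthread, so sleeping is safe. */
static void my_dev_wake_swork(struct swork_event *sev)
{
	struct my_dev *d = container_of(sev, struct my_dev, wake_sev);

	wake_up_all(&d->wq);
}

static void my_dev_init(struct my_dev *d)
{
	init_waitqueue_head(&d->wq);
	INIT_SWORK(&d->wake_sev, my_dev_wake_swork);
}

/*
 * Callable from atomic context (e.g. a percpu_ref release callback):
 * instead of waking sleepers directly, queue the wakeup so it runs
 * where sleeping locks may be taken.
 */
static void my_dev_notify(struct my_dev *d)
{
	if (wq_has_sleeper(&d->wq))
		swork_queue(&d->wake_sev);
}

As in the blk_dev_init() hunk, a user must bring up the swork machinery once during initialization, e.g. BUG_ON(swork_get()), before swork_queue() can be called.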