Step 1: blk_mq_flush_busy_ctxs first detaches all the rqs sitting in each ctx and splices them onto the temporary list rq_list. Core function:

```c
static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	...
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}
```
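For context, a minimal sketch of how blk_mq_flush_busy_ctxs() drives flush_busy_ctx() across every busy software queue, assuming current mainline naming (ctx_map, sbitmap_for_each_set); details vary by kernel version:

```c
struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	/* Visit every set bit (= every ctx that still holds queued rqs) and
	 * let flush_busy_ctx() splice its rq_lists onto the caller's list. */
	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
```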
```diff
 		if (rq->mq_ctx != this_ctx) {
 			if (this_ctx) {
-				trace_block_unplug(this_q, depth, from_schedule);
+				trace_block_unplug(this_q, depth, !from_schedule);
 				blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_...
```
1. If it is a FUA/flush request, insert the request into the flush queue and call blk_mq_run_hw_queue to start dispatching requests (a sketch of this decision flow follows the list).
2. If the current thread is plugging I/O and the block device has a single hardware queue (nr_hw_queues = 1), insert the request into the current thread's plug list.
3. If an I/O scheduler is configured, call blk_mq_sched_insert_request to insert the request into the scheduler queue (if the scheduler does not implement insert_request...
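A minimal C-style sketch of that three-way decision, written against blk_mq_make_request-era names (op_is_flush, blk_insert_flush, plug->mq_list, blk_mq_sched_insert_request); the exact conditions and call sites differ between kernel versions:

```c
/* Sketch only: condensed from the three cases above, not a verbatim
 * copy of blk_mq_make_request(). */
if (op_is_flush(bio->bi_opf)) {
	/* 1. FUA/flush: hand the request to the flush machinery and
	 *    kick the hardware queue to start dispatch. */
	blk_insert_flush(rq);
	blk_mq_run_hw_queue(rq->mq_hctx, true);
} else if (plug && q->nr_hw_queues == 1) {
	/* 2. Plugging on a single hardware queue: batch the request on
	 *    the per-task plug list; it is flushed later on unplug. */
	list_add_tail(&rq->queuelist, &plug->mq_list);
} else if (q->elevator) {
	/* 3. Scheduler configured: insert through the elevator. */
	blk_mq_sched_insert_request(rq, false, true, true);
}
```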
```c
blk_start_plug(&plug);

if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
	blk_mq_submit_bio(bio);
} else if (likely(bio_queue_enter(bio) == 0)) {
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	disk->fops->submit_bio(bio);
	blk_queue_exit(disk->queue);
}
```
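The else-branch is how bio-based drivers bypass blk-mq: roughly, a disk whose block_device_operations provide ->submit_bio is flagged BD_HAS_SUBMIT_BIO, and the core calls that hook directly. A hypothetical driver-side sketch (the mydrv_* names are invented for illustration; the void-returning ->submit_bio prototype matches recent kernels):

```c
/* Hypothetical bio-based driver callback: handle the bio directly,
 * never entering the blk-mq request path. */
static void mydrv_submit_bio(struct bio *bio)
{
	/* ... process the data segments described by the bio ... */
	bio_endio(bio);		/* complete the bio back to the submitter */
}

static const struct block_device_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= mydrv_submit_bio,
};
```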
```diff
@@ -346,7 +346,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	smp_wmb();
 	req_ref_set(flush_rq, 1);
-	blk_mq_add_to_requeue_list(flush_rq, BLK_MQ_INSERT_AT_HEAD);
+	blk_mq_add_to_requeue_list(flush_rq, 0);
 	blk_mq_kick_requeue_list(...
```
queue_head: a list_head used to link the queue's pending request descriptors into a doubly linked list;
make_request_fn: the callback invoked when a bio is submitted, normally set to blk_mq_make_request;
last_merge: points to the request descriptor in the queue to be considered first for a possible merge;
elevator: pointer to the elevator object (the "elevator" algorithm), which decides the I/O scheduling algorithm used by the I/O scheduler layer;
...
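For orientation, a hedged excerpt-style sketch of where these members sit in struct request_queue (member order and exact types vary widely across versions, and make_request_fn was removed in later kernels):

```c
struct request_queue {
	struct list_head	queue_head;	/* pending request descriptors */
	make_request_fn		*make_request_fn;	/* bio submission hook, e.g. blk_mq_make_request */
	struct request		*last_merge;	/* first candidate considered for merging */
	struct elevator_queue	*elevator;	/* I/O scheduler ("elevator") instance */
	/* ... many more fields ... */
};
```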
bitmap_parselist bitmap_print_to_pagebuf bitmap_to_arr32 bitmap_zalloc blk_alloc_queue blk_cleanup_queue blk_execute_rq blk_execute_rq_nowait blk_finish_plug blk_freeze_queue_start blk_get_queue blk_get_request blk_lookup_devt blk_mq_alloc_request blk_mq_alloc_requ...
> +++ b/block/blk-mq.c
> @@ -1728,9 +1728,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug,
>  		bool from_schedule)
>  		if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
>  			if (this_hctx) {
Example 1: list_move_tail

```c
struct request *blk_do_flush(struct request_queue *q, struct request *rq)
{
	unsigned int fflags = q->flush_flags;	/* may change, cache it */
	bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
	bool do_preflush = has_flush && (rq->cmd_...
```
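Since the example is headed list_move_tail, here is a brief sketch of what that helper does, written as an equivalent reimplementation rather than the kernel's exact body (semantics per include/linux/list.h):

```c
/* Equivalent of list_move_tail(): unlink @entry from whatever list it is
 * currently on and append it at the tail of the list headed by @head. */
static inline void my_list_move_tail(struct list_head *entry,
				     struct list_head *head)
{
	list_del(entry);		/* remove from the old list */
	list_add_tail(entry, head);	/* insert just before @head */
}
```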
> completed before calling the above blk_mq_run_hw_queue. Then queue can
> be freed during the above blk_mq_run_hw_queue().
>
> Fixes the issue by grab .q_usage_counter before calling
> blk_mq_sched_insert_requests() in blk_mq_flush_plug_list(). This way is ...
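A hedged sketch of the idea in that quoted fix: take a reference on the queue's q_usage_counter (a percpu_ref) around the insert so the request_queue cannot be torn down while the plugged requests are being queued; the real patch's placement and error handling differ:

```c
/* Sketch only: pin the queue while inserting the plugged requests. */
percpu_ref_get(&this_q->q_usage_counter);
blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, from_schedule);
percpu_ref_put(&this_q->q_usage_counter);
```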