一是per-core的software queues, 数据类型blk_mq_ctx, 一般叫做ctx; 二是跟硬件队列相关的dispatch queues, 数据类型blk_mq_hw_ctx, 一般叫做hctx。 none iosched: blk-mq最核心的思想都体现在了none iosched上, 虽然none连自己的struct elevator_type都没有. 从逻辑上说, 有个独立的none iosched类型看似是个不错的选择...
dma_drain_needed_fn *dma_drain_needed; const struct blk_mq_ops *mq_ops; /* sw queues */ struct blk_mq_ctx __percpu *queue_ctx; unsigned int nr_queues; unsigned int queue_depth; /* hw dispatch queues */ struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; struct backing_dev_info *backing_dev_info; /...
- struct blk_mq_hw_ctx *hctx; - unsigned int i; + struct blk_mq_hw_ctx *hctx, *next; + int i; cancel_delayed_work_sync(&q->requeue_work); - /* hctx kobj stays in hctx */ - queue_for_each_hw_ctx(q, hctx, i) { - if (!hctx) - continue; + queue_for_each_hw_c...
__ctx->queue = q; /* If the cpu isn't present, the cpu is mapped to first hctx */ if (!cpu_present(i)) continue; hctx = blk_mq_map_queue(q, i); /* * Set local node, IFF we have more than one hw queue. If * not, we remain on the home node of the device ...
System crash in blk_mq_free_request() following NVMe controller reset due to NULL mq_hctx. Example 1: Raw nvme nvme0: I/O 71 QID 1 timeout, aborting nvme nvme0: Abort status: 0x0 nvme nvme0: I/O 71 QID 1 timeout, reset controller nvme nvme0: 2/0/0 default/read/poll queues...
@@ -336,6 +336,18 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, struct blk_mq_hw_ctx *hctx; int i;/* * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and * queue_hw_ctx after freeze the queue. So we could use q_usage_cou...
调用blk_mq_init_allocated_queue初始化分配的请求队列(request_queue),blk-mq的request_queue中包含两层队列,即percpu的软件队列(ctx)和与块设备硬件队列一一对应的硬件派发队列(hctx)。这个初始化过程主要包含下面几步: 1.设置队列的mq_ops(q->mq_ops)为set->ops (例如scsi对应的实现是scsi_mq_ops) ...
根据virtio_mq_ops 的定义,我们现在要调用 virtio_queue_rq。 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct virtio_blk *vblk = hctx->queue->queuedata; struct request *req = bd->rq; ...
blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); } static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct virtio_blk *vblk = hctx->queue->queuedata; ...
> @@ -1728,9 +1728,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, > bool from_schedule) > if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) { > if (this_hctx) { > trace_block_unplug(this_q, depth, ...