/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs      *ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
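
/*
 * Illustrative sketch only (not part of this header's interface): the
 * helpers above are expected to pair up roughly as below when the request
 * map for one hardware queue index is built and torn down. Error handling
 * and the real call sites in blk-mq.c are omitted.
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags);
 *	if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth) < 0) {
 *		blk_mq_free_rq_map(tags);
 *		tags = NULL;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */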

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						blk_qc_t *cookie,
						bool bypass, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,cpu) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     unsigned int cpu)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	if ((flags & REQ_HIPRI) &&
	    q->tag_set->nr_maps > HCTX_TYPE_POLL && 
	    q->tag_set->map[HCTX_TYPE_POLL].nr_queues &&
	    test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		type = HCTX_TYPE_POLL;

	else if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
	         q->tag_set->nr_maps > HCTX_TYPE_READ &&
		 q->tag_set->map[HCTX_TYPE_READ].nr_queues)
		type = HCTX_TYPE_READ;
	
	return blk_mq_map_queue_type(q, type, cpu);
}
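
/*
 * Illustrative sketch (an assumption about typical usage, not taken from
 * this header): a submission path picks the hardware queue for a request by
 * combining its command flags with the submitting CPU, e.g.:
 *
 *	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx->cpu);
 *
 * REQ_HIPRI requests go to the HCTX_TYPE_POLL map when one exists and
 * polling is enabled, reads may go to HCTX_TYPE_READ, and everything else
 * falls back to HCTX_TYPE_DEFAULT.
 */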

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}
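
/*
 * Illustrative sketch (assumed usage): a caller such as the timeout path
 * can test whether the driver still owns a request with, e.g.:
 *
 *	if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
 *		...
 */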

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
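
/*
 * Illustrative sketch: blk_mq_get_ctx()/blk_mq_put_ctx() bracket a region
 * with preemption disabled, so a (hypothetical) caller pairs them like:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... use ctx, must not sleep ...
 *
 *	blk_mq_put_ctx(ctx);
 *
 * The put side only re-enables preemption; since the ctx's are persistent,
 * the pointer itself stays valid afterwards.
 */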

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
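
/*
 * Illustrative sketch (assumed, mirroring typical allocation paths): the
 * input fields are usually filled in with a designated initializer before
 * the tag allocator derives ctx and hctx, e.g.:
 *
 *	struct blk_mq_alloc_data data = {
 *		.q		= q,
 *		.flags		= flags,
 *		.cmd_flags	= op,
 *	};
 */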

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}
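
/*
 * Illustrative sketch: dispatch paths are expected to take a budget before
 * handing a request to the driver and to give it back if the request does
 * not actually get dispatched, roughly:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		return;
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		return;
 *	}
 *	... hand rq to ->queue_rq() ...
 */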

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

#endif