/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};
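
/*
 * Since the value is negative it can never collide with the unsigned
 * hardware status codes, so completion paths can detect cancellation
 * with a plain sign test.  Illustrative sketch, not code from this file:
 *
 *	if (status < 0)
 *		return status;		(NVME_SC_CANCELLED, cancelled by us)
 *	return nvme_error_status(status);
 */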

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
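
/*
 * All three timeouts are module parameters given in seconds; multiplying
 * by HZ converts them to the jiffies the block layer expects.  A transport
 * typically applies them when building its tag set, e.g. (sketch):
 *
 *	set->timeout = NVME_IO_TIMEOUT;
 */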

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that required behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a
	 * vendor-specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to discarded
	 * logical blocks.
	 */
	NVME_QUIRK_DISCARD_ZEROES		= (1 << 2),
};
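
/*
 * Quirks accumulate in ctrl->quirks and are checked with plain bitmask
 * tests.  For example, the stripe-size quirk is honoured during namespace
 * setup roughly like this (sketch):
 *
 *	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
 *		blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
 */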

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};
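
/*
 * All state changes must go through nvme_change_ctrl_state(), which only
 * allows legal transitions.  Its return value lets callers detect a lost
 * race, e.g. in a reset path (sketch):
 *
 *	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 *		return -EBUSY;
 */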

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	struct ida ns_ida;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u16 cntlid;

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
	u16 vid;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	bool subsystem;
	unsigned long quirks;
	struct work_struct scan_work;
	struct work_struct async_event_work;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	struct nvmf_ctrl_options *opts;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;
	int instance;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	int type;
	unsigned long flags;

#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	bool is_fabrics;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*post_scan)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
	int (*delete_ctrl)(struct nvme_ctrl *ctrl);
	const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
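
/*
 * Each transport provides its own nvme_ctrl_ops.  For a memory-mapped
 * transport such as PCIe the register accessors reduce to MMIO, roughly
 * as below (sketch; to_nvme_dev() and ->bar are assumed transport-private
 * details, not part of this header):
 *
 *	static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 *	{
 *		*val = readl(to_nvme_dev(ctrl)->bar + off);
 *		return 0;
 *	}
 */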

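/*
 * Read the controller status register and report whether CSTS.RDY is set,
 * i.e. whether the controller has finished acting on the last CC.EN change.
 */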
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
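	/*
	 * 0x4E564D65 is the ASCII encoding of "NVMe"; the spec requires
	 * exactly this value to be written to NSSR to trigger an NVM
	 * subsystem reset.
	 */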
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

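/*
 * Convert a 512-byte block layer sector number into this namespace's
 * native LBA.  Worked example: with 4096-byte LBAs (lba_shift == 12),
 * sector 8 becomes 8 >> (12 - 9) == LBA 1.
 */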
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

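/*
 * Payload length the driver must map for a request: a discard is
 * translated into a single Dataset Management range descriptor, anything
 * else transfers the request's own data.
 */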
static inline unsigned nvme_map_len(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return sizeof(struct nvme_dsm_range);
	else
		return blk_rq_bytes(rq);
}

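/*
 * Release per-command resources once a request completes; for discards
 * this frees the DSM range buffer stashed in req->completion_data during
 * command setup.
 */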
static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD)
		kfree(req->completion_data);
}

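/*
 * Translate an NVMe completion status into a negative errno.  The 0x7ff
 * mask keeps the status code and status code type while stripping the
 * More and Do Not Retry bits.
 */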
static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

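/*
 * Retry a failed request only if the controller did not set the Do Not
 * Retry bit, the block layer allows retries for this request, and the
 * request's timeout has not already expired.
 */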
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}

void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

#define NVME_NR_AERS	1
void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct request_queue *q,
		char *disk_name)
{
}

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */