/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static void nvme_ns_remove(struct nvme_ns *ns);
static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	/*
	 * Revalidating a dead namespace sets capacity to 0. This will end
	 * buffered writers dirtying pages that can't be synced.
	 */
	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;
	revalidate_disk(ns->disk);
	blk_set_queue_dying(ns->queue);
	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ns->queue);
}

static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		queue_work(nvme_wq, &ctrl->scan_work);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE &&
		    ctrl->state != NVME_CTRL_ADMIN_ONLY)
			ret = -ENETRESET;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret = 0;

	/*
	 * Keep a reference until the work is flushed since ->delete_ctrl
	 * can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	ret = nvme_delete_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->delete_work);
	nvme_put_ctrl(ctrl);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync);

static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

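/*
 * Translate the NVMe status of a completed command into a generic block
 * layer status code.
 */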
static blk_status_t nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	default:
		return BLK_STS_IOERR;
	}
}

static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

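/*
 * Complete a request: retry it, fail it over to another path when multipath
 * applies, or end it with the translated block layer status.
 */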
void nvme_complete_rq(struct request *req)
{
	blk_status_t status = nvme_error_status(req);

	trace_nvme_complete_rq(req);

	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
		if ((req->cmd_flags & REQ_NVME_MPATH) &&
		    blk_path_error(status)) {
			nvme_failover_req(req);
			return;
		}

		if (!blk_queue_dying(req->q)) {
			nvme_req(req)->retries++;
			blk_mq_requeue_request(req, true);
			return;
		}
	}
	blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	nvme_req(req)->status = NVME_SC_ABORT_REQ;
	blk_mq_complete_request(req);

}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

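/*
 * Validate and apply a controller state transition under ctrl->lock.
 * Returns true if the state was changed, false if the transition from the
 * current state is not allowed.
 */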
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_ADMIN_ONLY:
		switch (old_state) {
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_ADMIN_ONLY:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_ADMIN_ONLY:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	list_del_init(&head->entry);
	cleanup_srcu_struct_quiesced(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

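/*
 * Allocate a passthrough request for the given command.  NVME_QID_ANY lets
 * the block layer pick any hardware context, otherwise the request is bound
 * to the queue selected by @qid.  Returns an ERR_PTR on failure.
 */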
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_clear_nvme_request(req);
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

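/*
 * Enable the streams directive and check that the controller exposes enough
 * stream resources to back the block layer write hints, otherwise disable
 * streams again.
 */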
static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		return ret;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		nvme_disable_streams(ctrl);
		return 0;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

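/*
 * Translate a discard request into a DSM deallocate command: one range per
 * bio, passed as a special payload that is freed in nvme_cleanup_cmd().
 */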
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_STS_RESOURCE;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

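/*
 * Build a read/write command from the request: LBA, length, FUA/limited
 * retry flags, an optional write stream, and the protection information
 * settings for namespaces formatted with metadata.
 */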
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		} else if (req_op(req) == REQ_OP_WRITE) {
			t10_pi_prepare(req, ns->pi_type);
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    nvme_req(req)->status == 0) {
		struct nvme_ns *ns = req->rq_disk->private_data;

		t10_pi_complete(req, ns->pi_type,
				blk_rq_bytes(req) >> ns->lba_shift);
	}
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	nvme_clear_nvme_request(req);

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		/* currently only aliased to deallocate for a few ctrls: */
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

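/*
 * Copy user space metadata into a kernel buffer and attach it to @bio as an
 * integrity payload.  The caller copies it back to user space (for reads)
 * and frees it after the command completes.
 */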
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

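/*
 * Execute a user space passthrough command synchronously, mapping the data
 * and optional metadata buffers into the request.
 */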
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		bio->bi_disk = disk;
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
 out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
 out:
	blk_mq_free_request(req);
	return ret;
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

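/*
 * Periodic keep-alive work; the completion handler re-arms it, and an
 * allocation failure triggers a controller reset.
 */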
static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

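/*
 * Fetch the namespace identification descriptor list and copy the EUI-64,
 * NGUID and UUID descriptors into @ids.
 */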
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ids->eui64, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ids->nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
				    NVME_IDENTIFY_DATA_SIZE);
}

static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid)
{
	struct nvme_id_ns *id;
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NULL;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed\n");
		kfree(id);
		return NULL;
	}

	return id;
}

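/*
 * Send a Set Features admin command; the completion result (dword 0) is
 * returned through @result unless submission failed outright.
 */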
static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

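/*
 * Request the given number of I/O queues and clamp *count to what the
 * controller actually grants.  A controller side error is not fatal: the
 * admin queue stays usable and *count is set to 0.
 */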
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)

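/* Enable the async events that both the controller and the driver support. */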
static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);