/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif
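
/*
 * Example (assuming this file is built into the virtio_pci module):
 *
 *	modprobe virtio_pci force_legacy=1
 *
 * forces the legacy (pre-1.0) interface even on transitional devices
 * that also offer the virtio 1 interface.
 */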

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change?  Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

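/*
 * Allocate @nvectors MSI-X vectors for @vdev and claim the first one for
 * configuration-change interrupts.  When @per_vq_vectors is false, a
 * second vector is requested here and shared by all virtqueues; otherwise
 * the caller requests one vector per callback virtqueue afterwards.
 */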
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned flags = PCI_IRQ_MSIX;
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
				     GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
					GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}

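/*
 * Create one virtqueue through the version-specific setup_vq hook and
 * track it in vp_dev->vqs[] (and, when it has a callback, on the list
 * that vp_vring_interrupt() scans).
 */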
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}

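/* Undo vp_setup_vq(): unlink the queue and hand it back to the
 * version-specific del_vq hook. */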
static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		/* may still be NULL if MSI-X setup failed before the
		 * masks were allocated */
		for (i = 0; i < vp_dev->msix_vectors; i++)
			if (vp_dev->msix_affinity_masks[i])
				free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

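/*
 * MSI-X flavour of find_vqs: request the vectors, create each queue, and,
 * when per_vq_vectors is set, wire a dedicated vring_interrupt() handler
 * to every queue that has a callback.
 */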
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], bool per_vq_vectors,
		const bool *ctx,
		struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

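/*
 * INTx flavour of find_vqs: a single shared legacy interrupt line,
 * demultiplexed by vp_interrupt() across config changes and all
 * virtqueues.
 */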
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
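
/*
 * For reference, a driver normally reaches the fallback chain above
 * through the transport-independent virtio_find_vqs() wrapper; a rough,
 * hypothetical driver-side sketch:
 *
 *	vq_callback_t *cbs[] = { rx_done, tx_done };
 *	static const char * const names[] = { "rx", "tx" };
 *	struct virtqueue *vqs[2];
 *	int err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 */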

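/* the config->bus_name() implementation */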
const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Set up the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (cpu == -1)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

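/* the config->get_vq_affinity() implementation: report the affinity of a
 * queue's dedicated vector, or NULL when it does not have one */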
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
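/* Sleep hooks: freeze the virtio device before disabling the PCI device,
 * and re-enable PCI (including bus mastering) before restoring it. */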
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif


/* Qumranet donated their vendor ID for devices 0x1000 through 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback runs. */
	kfree(vp_dev);
}

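/* Bind to a virtio PCI device: prefer the modern (virtio 1) interface and
 * fall back to the legacy one, unless force_legacy reverses that order. */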
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
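	/* Pin the virtio device: dropping the final reference runs
	 * virtio_pci_release_dev() and frees vp_dev, so hold one until the
	 * teardown below is complete. */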
	struct device *dev = get_device(&vp_dev->vdev.dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");