prime_nv_test.c 9.74 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
/* basic set of prime tests between intel and nouveau */

/* test list -
   1. share buffer from intel -> nouveau.
   2. share buffer from nouveau -> intel
   3. share intel->nouveau, map on both, write intel, read nouveau
   4. share intel->nouveau, blit intel fill, readback on nouveau
   test 1 + map buffer, read/write, map other size.
   do some hw actions on the buffer
   some illegal operations -
       close prime fd try and map

   TODO add some nouveau rendering tests
*/


17
#include "igt.h"
18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/ioctl.h>

#include "intel_bufmgr.h"
#include "nouveau.h"

int intel_fd = -1, nouveau_fd = -1;
drm_intel_bufmgr *bufmgr;
struct nouveau_device *ndev;
struct nouveau_client *nclient;
uint32_t devid;
struct intel_batchbuffer *intel_batch;

#define BO_SIZE (256*1024)

/*
 * Scan the first few DRM cards and open the first Intel (0x8086) and
 * first nouveau (0x10de) device found, storing the fds in the globals
 * intel_fd / nouveau_fd (either stays -1 if that vendor is absent).
 * Returns 0 on success, -1 if a matching device node failed to open.
 */
static int find_and_open_devices(void)
{
	int i;
	char path[80];
	struct stat buf;
	FILE *fl;
	char vendor_id[8];
	int venid;
	for (i = 0; i < 9; i++) {
		char *ret;

		sprintf(path, "/sys/class/drm/card%d/device/vendor", i);
		if (stat(path, &buf))
			break;

		fl = fopen(path, "r");
		if (!fl)
			break;

		ret = fgets(vendor_id, 8, fl);
		igt_assert(ret);
		fclose(fl);

		venid = strtoul(vendor_id, NULL, 16);
		sprintf(path, "/dev/dri/card%d", i);
		if (venid == 0x8086) {
			/* open() returns -1 on failure; fd 0 is a valid
			 * descriptor, so testing "!fd" was wrong. */
			intel_fd = open(path, O_RDWR);
			if (intel_fd < 0)
				return -1;
		} else if (venid == 0x10de) {
			nouveau_fd = open(path, O_RDWR);
			if (nouveau_fd < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * prime test 1 -
 * allocate buffer on intel,
 * set prime on buffer,
 * retrieve buffer from nouveau,
 * close prime_fd,
 *  unref buffers
 */
84
static void test_i915_nv_sharing(void)
85 86 87 88 89 90
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);
91
	igt_assert(test_intel_bo);
92 93 94

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

95
	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
96 97 98 99 100 101 102 103 104 105 106 107 108 109
	close(prime_fd);

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/*
 * prime test 2 -
 * allocate buffer on nouveau
 * set prime on buffer,
 * retrieve buffer from intel
 * close prime_fd,
 *  unref buffers
 */
110
static void test_nv_i915_sharing(void)
111 112 113 114 115
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;

116 117 118
	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);
119 120 121

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
122
	igt_assert(test_intel_bo);
123 124 125 126 127 128 129 130 131

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/*
 * allocate intel, give to nouveau, map on nouveau
 * write 0xdeadbeef, non-gtt map on intel, read
 */
132
static void test_nv_write_i915_cpu_mmap_read(void)
133 134 135 136 137 138 139 140 141 142
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t *ptr;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

143
	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
144 145
	close(prime_fd);

146
	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);
147 148 149 150 151
	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

	drm_intel_bo_map(test_intel_bo, 1);
	ptr = test_intel_bo->virtual;
152
	igt_assert(ptr);
153

154
	igt_assert(*ptr == 0xdeadbeef);
155 156 157 158 159 160 161 162
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/*
 * allocate intel, give to nouveau, map on nouveau
 * write 0xdeadbeef, gtt map on intel, read
 */
163
static void test_nv_write_i915_gtt_mmap_read(void)
164 165 166 167 168 169 170 171 172 173
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t *ptr;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

174
	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
175
	close(prime_fd);
176
	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);
177 178 179 180 181
	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

	drm_intel_gem_bo_map_gtt(test_intel_bo);
	ptr = test_intel_bo->virtual;
182 183 184
	igt_assert(ptr);

	igt_assert(*ptr == 0xdeadbeef);
185 186 187 188 189 190 191 192

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/* test drm_intel_bo_map doesn't work properly,
   this tries to map the backing shmem fd, which doesn't exist
   for these objects */
193
static void test_i915_import_cpu_mmap(void)
194 195 196 197 198 199
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;
	uint32_t *ptr;

200 201
	igt_skip("cpu mmap support for imported dma-bufs not yet implemented\n");

202 203 204
	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);
205 206
	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
207
	igt_assert(test_intel_bo);
208

209
	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);
210 211 212 213

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

214 215
	igt_assert(drm_intel_bo_map(test_intel_bo, 0) == 0);
	igt_assert(test_intel_bo->virtual);
216 217
	ptr = test_intel_bo->virtual;

218
	igt_assert(*ptr == 0xdeadbeef);
219 220 221 222 223 224 225
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/* test drm_intel_bo_map_gtt works properly,
   this tries to map the backing shmem fd, which doesn't exist
   for these objects */
226
static void test_i915_import_gtt_mmap(void)
227 228 229 230 231 232
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;
	uint32_t *ptr;

233 234 235
	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);
236 237 238

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
239
	igt_assert(test_intel_bo);
240

241
	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);
242 243 244 245 246

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;
	*(ptr + 1) = 0xa55a55;

247 248
	igt_assert(drm_intel_gem_bo_map_gtt(test_intel_bo) == 0);
	igt_assert(test_intel_bo->virtual);
249 250
	ptr = test_intel_bo->virtual;

251
	igt_assert(*ptr == 0xdeadbeef);
252 253 254 255 256
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/* test 7 - import from nouveau into intel, test pread/pwrite fail */
257
static void test_i915_import_pread_pwrite(void)
258 259 260 261 262 263 264
{
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;
	uint32_t *ptr;
	uint32_t buf[64];

265 266 267
	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);
268 269 270

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
271
	igt_assert(test_intel_bo);
272

273
	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);
274 275 276 277

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

278
	gem_read(intel_fd, test_intel_bo->handle, 0, buf, 256);
279
	igt_assert(buf[0] == 0xdeadbeef);
280 281
	buf[0] = 0xabcdef55;

282
	gem_write(intel_fd, test_intel_bo->handle, 0, buf, 4);
283 284 285

	igt_assert(*ptr == 0xabcdef55);

286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/* Fill the first width*height dwords of an already gtt-mapped BO with
 * val, notifying the kernel of the gtt access first. */
static void
set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
{
	int i, count = width * height;
	uint32_t *vaddr;

	drm_intel_gem_bo_start_gtt_access(bo, true);
	vaddr = bo->virtual;
	for (i = 0; i < count; i++)
		vaddr[i] = val;
}

/* Allocate a width*height dword BO on intel, gtt-map it and fill it
 * with val. The mapping is deliberately left in place (see below). */
static drm_intel_bo *
create_bo(drm_intel_bufmgr *ibufmgr, uint32_t val, int width, int height)
{
        drm_intel_bo *bo;

        bo = drm_intel_bo_alloc(ibufmgr, "bo", 4*width*height, 0);
        igt_assert(bo);

        /* gtt map doesn't have a write parameter, so just keep the mapping
         * around (to avoid the set_domain with the gtt write domain set) and
         * manually tell the kernel when we start access the gtt. */
        /* Check the map result - set_bo dereferences bo->virtual. */
        igt_assert(drm_intel_gem_bo_map_gtt(bo) == 0);

        set_bo(bo, val, width, height);

        return bo;
}

/* use intel hw to fill the BO with a blit from another BO,
   then readback from the nouveau bo, check value is correct */
322
static void test_i915_blt_fill_nv_read(void)
323 324 325 326 327 328 329 330 331 332 333 334
{
	drm_intel_bo *test_intel_bo, *src_bo;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t *ptr;

	src_bo = create_bo(bufmgr, 0xaa55aa55, 256, 1);

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

335
	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
336 337
	close(prime_fd);

338
	intel_copy_bo(intel_batch, test_intel_bo, src_bo, BO_SIZE);
339

340
	igt_assert(nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient) == 0);
341 342 343 344

	drm_intel_bo_map(test_intel_bo, 0);

	ptr = nvbo->map;
345
	igt_assert(*ptr == 0xaa55aa55);
346 347 348 349 350 351 352 353
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
}

/* test 8 use nouveau to do blit */

/* test 9 nouveau copy engine?? */

Daniel Vetter's avatar
Daniel Vetter committed
354
igt_main
355
{
356 357
	igt_fixture {
		igt_assert(find_and_open_devices() == 0);
358

359 360
		igt_require(nouveau_fd != -1);
		igt_require(intel_fd != -1);
361

362 363 364 365 366
		/* set up intel bufmgr */
		bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
		igt_assert(bufmgr);
		/* Do not enable reuse, we share (almost) all buffers. */
		//drm_intel_bufmgr_gem_enable_reuse(bufmgr);
367

368
		/* set up nouveau bufmgr */
369 370
		igt_assert(nouveau_device_wrap(nouveau_fd, 0, &ndev) == 0);
		igt_assert(nouveau_client_new(ndev, &nclient) == 0);
371

372 373 374 375
		/* set up an intel batch buffer */
		devid = intel_get_drm_devid(intel_fd);
		intel_batch = intel_batchbuffer_alloc(bufmgr, devid);
	}
376

377
#define xtest(name) \
Daniel Vetter's avatar
Daniel Vetter committed
378
	igt_subtest(#name) \
379
		test_##name();
380 381 382 383 384 385 386 387 388

	xtest(i915_nv_sharing);
	xtest(nv_i915_sharing);
	xtest(nv_write_i915_cpu_mmap_read);
	xtest(nv_write_i915_gtt_mmap_read);
	xtest(i915_import_cpu_mmap);
	xtest(i915_import_gtt_mmap);
	xtest(i915_import_pread_pwrite);
	xtest(i915_blt_fill_nv_read);
389

390 391
	igt_fixture {
		intel_batchbuffer_free(intel_batch);
392

393 394
		nouveau_device_del(&ndev);
		drm_intel_bufmgr_destroy(bufmgr);
395

396 397 398
		close(intel_fd);
		close(nouveau_fd);
	}
399
}