// SPDX-License-Identifier: GPL-2.0+
/*
 * rcar_du_crtc.c  --  R-Car Display Unit CRTCs
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/sys_soc.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>

#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"

static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}

static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}

static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
}

static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
}

static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
				 u32 clr, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;
	u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
}

static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
{
	int ret;

	ret = clk_prepare_enable(rcrtc->clock);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(rcrtc->extclock);
	if (ret < 0)
		goto error_clock;

	ret = rcar_du_group_get(rcrtc->group);
	if (ret < 0)
		goto error_group;

	return 0;

error_group:
	clk_disable_unprepare(rcrtc->extclock);
error_clock:
	clk_disable_unprepare(rcrtc->clock);
	return ret;
}

static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
{
	rcar_du_group_put(rcrtc->group);

	clk_disable_unprepare(rcrtc->extclock);
	clk_disable_unprepare(rcrtc->clock);
}

/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

struct dpll_info {
	unsigned int output;
	unsigned int fdpll;
	unsigned int n;
	unsigned int m;
};

static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
				 struct dpll_info *dpll,
				 unsigned long input,
				 unsigned long target)
{
	unsigned long best_diff = (unsigned long)-1;
	unsigned long diff;
	unsigned int fdpll;
	unsigned int m;
	unsigned int n;

	/*
	 *   fin                                 fvco        fout       fclkout
	 * in --> [1/M] --> |PD| -> [LPF] -> [VCO] -> [1/P] -+-> [1/FDPLL] -> out
	 *              +-> |  |                             |
	 *              |                                    |
	 *              +---------------- [1/N] <------------+
	 *
	 *	fclkout = fvco / P / FDPLL -- (1)
	 *
	 * fin/M = fvco/P/N
	 *
	 *	fvco = fin * P *  N / M -- (2)
	 *
	 * (1) + (2) indicates
	 *
	 *	fclkout = fin * N / M / FDPLL
	 *
	 * NOTES
	 *	N	: (n + 1)
	 *	M	: (m + 1)
	 *	FDPLL	: (fdpll + 1)
	 *	P	: 2
	 *	2kHz < fvco < 4096MHz
	 *
	 * To minimize the jitter,
	 * N : as large as possible
	 * M : as small as possible
	 */
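	/*
	 * Worked example with hypothetical values, not taken from any real
	 * board: for a 33 MHz external input and a 148.5 MHz target, n = 62,
	 * m = 1, fdpll = 6 is one combination that satisfies every constraint
	 * above: fout = 33 MHz * 63 / 2 = 1039.5 MHz is within the VCO range,
	 * and 1039.5 MHz / 7 hits 148.5 MHz exactly. The loop below may settle
	 * on a different exact match, depending on iteration order.
	 */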
	for (m = 0; m < 4; m++) {
		for (n = 119; n > 38; n--) {
			/*
			 * This code only runs on 64-bit architectures, the
			 * unsigned long type can thus be used for 64-bit
			 * computation. It will still compile without any
			 * warning on 32-bit architectures.
			 *
			 * To optimize calculations, use fout instead of fvco
			 * to verify the VCO frequency constraint.
			 */
			unsigned long fout = input * (n + 1) / (m + 1);

			if (fout < 1000 || fout > 2048 * 1000 * 1000U)
				continue;

			for (fdpll = 1; fdpll < 32; fdpll++) {
				unsigned long output;

				output = fout / (fdpll + 1);
				if (output >= 400 * 1000 * 1000)
					continue;

				diff = abs((long)output - (long)target);
				if (best_diff > diff) {
					best_diff = diff;
					dpll->n = n;
					dpll->m = m;
					dpll->fdpll = fdpll;
					dpll->output = output;
				}

				if (diff == 0)
					goto done;
			}
		}
	}

done:
	dev_dbg(rcrtc->group->dev->dev,
		"output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
		 dpll->output, dpll->fdpll, dpll->n, dpll->m,
		 best_diff);
}

static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.*" },
	{ /* sentinel */ }
};

static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
	const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
	struct rcar_du_device *rcdu = rcrtc->group->dev;
	unsigned long mode_clock = mode->clock * 1000;
	u32 dsmr;
	u32 escr;

	if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
		unsigned long target = mode_clock;
		struct dpll_info dpll = { 0 };
		unsigned long extclk;
		u32 dpllcr;
		u32 div = 0;

		/*
		 * DU channels that have a display PLL can't use the internal
		 * system clock, and have no internal clock divider.
		 */

		if (WARN_ON(!rcrtc->extclock))
			return;

		/*
		 * The H3 ES1.x exhibits dot clock duty cycle stability issues.
		 * We can work around them by configuring the DPLL to twice the
		 * desired frequency, coupled with a /2 post-divider. Restrict
		 * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
		 * no post-divider when a display PLL is present (as shown by
		 * the workaround breaking HDMI output on M3-W during testing).
		 */
		if (soc_device_match(rcar_du_r8a7795_es1)) {
			target *= 2;
			div = 1;
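			/*
			 * E.g. a hypothetical 148.5 MHz mode is then requested
			 * from the DPLL as 297 MHz and brought back down by
			 * the /2 post-divider (ESCR divider value 1).
			 */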
		}

		extclk = clk_get_rate(rcrtc->extclock);
		rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);

		dpllcr = DPLLCR_CODE | DPLLCR_CLKE
		       | DPLLCR_FDPLL(dpll.fdpll)
		       | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
		       | DPLLCR_STBY;

		if (rcrtc->index == 1)
			dpllcr |= DPLLCR_PLCS1
			       |  DPLLCR_INCS_DOTCLKIN1;
		else
			dpllcr |= DPLLCR_PLCS0
			       |  DPLLCR_INCS_DOTCLKIN0;

		rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);

		escr = ESCR_DCLKSEL_DCLKIN | div;
	} else {
		unsigned long clk;
		u32 div;

		/*
		 * Compute the clock divisor and select the internal or external
		 * dot clock based on the requested frequency.
		 */
		clk = clk_get_rate(rcrtc->clock);
		div = DIV_ROUND_CLOSEST(clk, mode_clock);
		div = clamp(div, 1U, 64U) - 1;
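		/*
		 * Worked example with hypothetical rates: a 300 MHz module
		 * clock and a 148.5 MHz mode give DIV_ROUND_CLOSEST() = 2,
		 * div is stored as 1, and the generated dot clock is
		 * clk / (div + 1) = 150 MHz.
		 */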

		escr = ESCR_DCLKSEL_CLKS | div;

		if (rcrtc->extclock) {
			unsigned long extclk;
			unsigned long extrate;
			unsigned long rate;
			u32 extdiv;

			extclk = clk_get_rate(rcrtc->extclock);
			extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock);
			extdiv = clamp(extdiv, 1U, 64U) - 1;

			extrate = extclk / (extdiv + 1);
			rate = clk / (div + 1);

			if (abs((long)extrate - (long)mode_clock) <
			    abs((long)rate - (long)mode_clock))
				escr = ESCR_DCLKSEL_DCLKIN | extdiv;

			dev_dbg(rcrtc->group->dev->dev,
				"mode clock %lu extrate %lu rate %lu ESCR 0x%08x\n",
				mode_clock, extrate, rate, escr);
		}
	}

	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
			    escr);
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);

	/* Signal polarities */
	dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
	     | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
	     | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0)
	     | DSMR_DIPM_DISP | DSMR_CSPM;
	rcar_du_crtc_write(rcrtc, DSMR, dsmr);

	/* Display timings */
	rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
	rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
					mode->hdisplay - 19);
	rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
					mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, HCR,  mode->htotal - 1);
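	/*
	 * Worked example, assuming standard CEA 1920x1080@60 timings
	 * (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200):
	 * the writes above program HDSR = 173, HDER = 2093, HSWR = 43 and
	 * HCR = 2199.
	 */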

	rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
					mode->crtc_vsync_end - 2);
	rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vdisplay - 2);
	rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vsync_start - 1);
	rcar_du_crtc_write(rcrtc, VCR,  mode->crtc_vtotal - 1);

	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
}

void rcar_du_crtc_route_output(struct drm_crtc *crtc,
			       enum rcar_du_output output)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	/*
	 * Store the route from the CRTC output to the DU output. The DU will be
	 * configured when starting the CRTC.
	 */
	rcrtc->outputs |= BIT(output);

	/*
	 * Store RGB routing to DPAD0; the hardware will be configured when
	 * starting the CRTC.
	 */
	if (output == RCAR_DU_OUTPUT_DPAD0)
		rcdu->dpad0_source = rcrtc->index;
}

static unsigned int plane_zpos(struct rcar_du_plane *plane)
{
	return plane->plane.state->normalized_zpos;
}

static const struct rcar_du_format_info *
plane_format(struct rcar_du_plane *plane)
{
	return to_rcar_plane_state(plane->plane.state)->format;
}

static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
	struct rcar_du_device *rcdu = rcrtc->group->dev;
	unsigned int num_planes = 0;
	unsigned int dptsr_planes;
	unsigned int hwplanes = 0;
	unsigned int prio = 0;
	unsigned int i;
	u32 dspr = 0;

	for (i = 0; i < rcrtc->group->num_planes; ++i) {
		struct rcar_du_plane *plane = &rcrtc->group->planes[i];
		unsigned int j;

		if (plane->plane.state->crtc != &rcrtc->crtc ||
		    !plane->plane.state->visible)
			continue;

		/* Insert the plane in the sorted planes array. */
		for (j = num_planes++; j > 0; --j) {
			if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
				break;
			planes[j] = planes[j-1];
		}

		planes[j] = plane;
		prio += plane_format(plane)->planes * 4;
	}

	for (i = 0; i < num_planes; ++i) {
		struct rcar_du_plane *plane = planes[i];
		struct drm_plane_state *state = plane->plane.state;
		unsigned int index = to_rcar_plane_state(state)->hwindex;

		prio -= 4;
		dspr |= (index + 1) << prio;
		hwplanes |= 1 << index;

		if (plane_format(plane)->planes == 2) {
			index = (index + 1) % 8;

			prio -= 4;
			dspr |= (index + 1) << prio;
			hwplanes |= 1 << index;
		}
	}
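	/*
	 * At this point dspr holds one 4-bit field per hardware plane in use,
	 * each containing a 1-based hardware plane index, with lower-zpos
	 * planes in the higher-order fields. Hypothetical example: two
	 * single-plane formats on hardware planes 0 and 1, with plane 0 at
	 * the lower zpos, yield dspr = 0x12.
	 */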

	/* If VSP+DU integration is enabled the plane assignment is fixed. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
		if (rcdu->info->gen < 3) {
			dspr = (rcrtc->index % 2) + 1;
			hwplanes = 1 << (rcrtc->index % 2);
		} else {
			dspr = (rcrtc->index % 2) ? 3 : 1;
			hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
		}
	}

	/*
	 * Update the planes to display timing and dot clock generator
	 * associations.
	 *
	 * Updating the DPTSR register requires restarting the CRTC group,
	 * resulting in visible flicker. To mitigate the issue only update the
	 * association if needed by enabled planes. Planes being disabled will
	 * keep their current association.
	 */
	mutex_lock(&rcrtc->group->lock);

	dptsr_planes = rcrtc->index % 2 ? rcrtc->group->dptsr_planes | hwplanes
		     : rcrtc->group->dptsr_planes & ~hwplanes;

	if (dptsr_planes != rcrtc->group->dptsr_planes) {
		rcar_du_group_write(rcrtc->group, DPTSR,
				    (dptsr_planes << 16) | dptsr_planes);
		rcrtc->group->dptsr_planes = dptsr_planes;

		if (rcrtc->group->used_crtcs)
			rcar_du_group_restart(rcrtc->group);
	}

	/* Restart the group if plane sources have changed. */
	if (rcrtc->group->need_restart)
		rcar_du_group_restart(rcrtc->group);

	mutex_unlock(&rcrtc->group->lock);

	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
			    dspr);
}

/* -----------------------------------------------------------------------------
 * Page Flip
 */

void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct drm_pending_vblank_event *event;
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = rcrtc->event;
	rcrtc->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (event == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_crtc_send_vblank_event(&rcrtc->crtc, event);
	wake_up(&rcrtc->flip_wait);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_crtc_vblank_put(&rcrtc->crtc);
}

static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
{
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&dev->event_lock, flags);
	pending = rcrtc->event != NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return pending;
}

static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	if (wait_event_timeout(rcrtc->flip_wait,
			       !rcar_du_crtc_page_flip_pending(rcrtc),
			       msecs_to_jiffies(50)))
		return;

	dev_warn(rcdu->dev, "page flip timeout\n");

	rcar_du_crtc_finish_page_flip(rcrtc);
}

/* -----------------------------------------------------------------------------
 * Start/Stop and Suspend/Resume
 */

static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
{
	/* Set display off and background to black */
	rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
	rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));

	/* Configure display timings and output routing */
	rcar_du_crtc_set_display_timing(rcrtc);
	rcar_du_group_set_routing(rcrtc->group);

	/* Start with all planes disabled. */
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);

	/* Enable the VSP compositor. */
	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_enable(rcrtc);

	/* Turn vertical blanking interrupt reporting on. */
	drm_crtc_vblank_on(&rcrtc->crtc);
}

static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
	bool interlaced;

	/*
	 * Select master sync mode. This enables display operation in master
	 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
	 * actively driven).
	 */
	interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
	rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
			     (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
			     DSYSR_TVM_MASTER);

	rcar_du_group_start_stop(rcrtc->group, true);
}

static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;
	struct drm_crtc *crtc = &rcrtc->crtc;
	u32 status;

	/* Make sure vblank interrupts are enabled. */
	drm_crtc_vblank_get(crtc);

	/*
	 * Disable planes and calculate how many vertical blanking interrupts we
	 * have to wait for. If a vertical blanking interrupt has been triggered
	 * but not processed yet, we don't know whether it occurred before or
	 * after the planes got disabled. We thus have to wait for two vblank
	 * interrupts in that case.
	 */
	spin_lock_irq(&rcrtc->vblank_lock);
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
	spin_unlock_irq(&rcrtc->vblank_lock);

	if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
				msecs_to_jiffies(100)))
		dev_warn(rcdu->dev, "vertical blanking timeout\n");

	drm_crtc_vblank_put(crtc);
}

static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
	struct drm_crtc *crtc = &rcrtc->crtc;

	/*
	 * Disable all planes and wait for the change to take effect. This is
	 * required as the plane enable registers are updated on vblank, and no
	 * vblank will occur once the CRTC is stopped. Disabling planes when
	 * starting the CRTC thus wouldn't be enough as it would start scanning
	 * out immediately from old frame buffers until the next vblank.
	 *
	 * This increases the CRTC stop delay, especially when multiple CRTCs
	 * are stopped in one operation as we now wait for one vblank per CRTC.
	 * Whether this can be improved needs to be researched.
	 */
	rcar_du_crtc_disable_planes(rcrtc);

	/*
	 * Disable vertical blanking interrupt reporting. We first need to wait
	 * for page flip completion before stopping the CRTC as userspace
	 * expects page flips to eventually complete.
	 */
	rcar_du_crtc_wait_page_flip(rcrtc);
	drm_crtc_vblank_off(crtc);

	/* Disable the VSP compositor. */
	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_disable(rcrtc);

	/*
	 * Select switch sync mode. This stops display operation and configures
	 * the HSYNC and VSYNC signals as inputs.
	 */
	rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);

	rcar_du_group_start_stop(rcrtc->group, false);
}

/* -----------------------------------------------------------------------------
 * CRTC Functions
 */

static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	/*
	 * If the CRTC has already been set up by the .atomic_begin() handler we
	 * can skip the setup stage.
	 */
	if (!rcrtc->initialized) {
		rcar_du_crtc_get(rcrtc);
		rcar_du_crtc_setup(rcrtc);
		rcrtc->initialized = true;
	}

	rcar_du_crtc_start(rcrtc);
}

static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_crtc_state *old_state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_stop(rcrtc);
	rcar_du_crtc_put(rcrtc);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	rcrtc->initialized = false;
	rcrtc->outputs = 0;
}

static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	WARN_ON(!crtc->state->enable);

	/*
	 * If a mode set is in progress we can be called with the CRTC disabled.
	 * We then need to first set up the CRTC in order to configure planes.
	 * The .atomic_enable() handler will notice and skip the CRTC setup.
	 */
	if (!rcrtc->initialized) {
		rcar_du_crtc_get(rcrtc);
		rcar_du_crtc_setup(rcrtc);
		rcrtc->initialized = true;
	}

	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_begin(rcrtc);
}

static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	rcar_du_crtc_update_planes(rcrtc);

	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);
		rcrtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_flush(rcrtc);
}

enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
				   const struct drm_display_mode *mode)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_device *rcdu = rcrtc->group->dev;
	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;

	if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
		return MODE_NO_INTERLACE;

	return MODE_OK;
}

static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
	.atomic_begin = rcar_du_crtc_atomic_begin,
	.atomic_flush = rcar_du_crtc_atomic_flush,
	.atomic_enable = rcar_du_crtc_atomic_enable,
	.atomic_disable = rcar_du_crtc_atomic_disable,
	.mode_valid = rcar_du_crtc_mode_valid,
};

static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;
	const char **sources;
	unsigned int count;
	int i = -1;

	/* CRC available only on Gen3 HW. */
	if (rcdu->info->gen < 3)
		return;

	/* Reserve 1 for "auto" source. */
	count = rcrtc->vsp->num_planes + 1;

	sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
	if (!sources)
		return;

	sources[0] = kstrdup("auto", GFP_KERNEL);
	if (!sources[0])
		goto error;

	for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
		struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
		char name[16];

		sprintf(name, "plane%u", plane->base.id);
		sources[i + 1] = kstrdup(name, GFP_KERNEL);
		if (!sources[i + 1])
			goto error;
	}

	rcrtc->sources = sources;
	rcrtc->sources_count = count;
	return;

error:
	while (i >= 0) {
		kfree(sources[i]);
		i--;
	}
	kfree(sources);
}

static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
{
	unsigned int i;

	if (!rcrtc->sources)
		return;

	for (i = 0; i < rcrtc->sources_count; i++)
		kfree(rcrtc->sources[i]);
	kfree(rcrtc->sources);

	rcrtc->sources = NULL;
	rcrtc->sources_count = 0;
}

static struct drm_crtc_state *
rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct rcar_du_crtc_state *state;
	struct rcar_du_crtc_state *copy;

	if (WARN_ON(!crtc->state))
		return NULL;

	state = to_rcar_crtc_state(crtc->state);
	copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
	if (copy == NULL)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state);

	return &copy->state;
}

static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					      struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_rcar_crtc_state(state));
}

static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_crc_cleanup(rcrtc);

	return drm_crtc_cleanup(crtc);
}

static void rcar_du_crtc_reset(struct drm_crtc *crtc)
{
	struct rcar_du_crtc_state *state;

	if (crtc->state) {
		rcar_du_crtc_atomic_destroy_state(crtc, crtc->state);
		crtc->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return;

	state->crc.source = VSP1_DU_CRC_NONE;
	state->crc.index = 0;

	crtc->state = &state->state;
	crtc->state->crtc = crtc;
}

static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
	rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
	rcrtc->vblank_enable = true;

	return 0;
}

static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
	rcrtc->vblank_enable = false;
}

static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
					 const char *source_name,
					 enum vsp1_du_crc_source *source)
{
	unsigned int index;
	int ret;

	/*
	 * Parse the source name. Supported values are "plane%u" to compute the
	 * CRC on an input plane (%u is the plane ID), and "auto" to compute the
	 * CRC on the composer (VSP) output.
	 */
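	/*
	 * E.g. a hypothetical "plane31" selects the VSP plane whose DRM object
	 * ID is 31; the value returned below is that plane's index within the
	 * VSP, which the caller then uses as the CRC index.
	 */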

	if (!source_name) {
		*source = VSP1_DU_CRC_NONE;
		return 0;
	} else if (!strcmp(source_name, "auto")) {
		*source = VSP1_DU_CRC_OUTPUT;
		return 0;
	} else if (strstarts(source_name, "plane")) {
		unsigned int i;

		*source = VSP1_DU_CRC_PLANE;

		ret = kstrtouint(source_name + strlen("plane"), 10, &index);
		if (ret < 0)
			return ret;

		for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
			if (index == rcrtc->vsp->planes[i].plane.base.id)
				return i;
		}
	}

	return -EINVAL;
}

static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
					  const char *source_name,
					  size_t *values_cnt)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	enum vsp1_du_crc_source source;

	if (rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	*values_cnt = 1;
	return 0;
}

const char *const *rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc,
						size_t *count)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	*count = rcrtc->sources_count;
	return rcrtc->sources;
}

static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
				       const char *source_name)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc_state *crtc_state;
	struct drm_atomic_state *state;
	enum vsp1_du_crc_source source;
	unsigned int index;
	int ret;

	ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
	if (ret < 0)
		return ret;

	index = ret;

	/* Perform an atomic commit to set the CRC source. */
	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		ret = -ENOMEM;
		goto unlock;
	}

	state->acquire_ctx = &ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (!IS_ERR(crtc_state)) {
		struct rcar_du_crtc_state *rcrtc_state;

		rcrtc_state = to_rcar_crtc_state(crtc_state);
		rcrtc_state->crc.source = source;
		rcrtc_state->crc.index = index;

		ret = drm_atomic_commit(state);
	} else {
		ret = PTR_ERR(crtc_state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

unlock:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static const struct drm_crtc_funcs crtc_funcs_gen2 = {
	.reset = rcar_du_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
};

static const struct drm_crtc_funcs crtc_funcs_gen3 = {
	.reset = rcar_du_crtc_reset,
	.destroy = rcar_du_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
	.set_crc_source = rcar_du_crtc_set_crc_source,
	.verify_crc_source = rcar_du_crtc_verify_crc_source,
	.get_crc_sources = rcar_du_crtc_get_crc_sources,
};

/* -----------------------------------------------------------------------------
 * Interrupt Handling
 */

static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
	struct rcar_du_crtc *rcrtc = arg;
	struct rcar_du_device *rcdu = rcrtc->group->dev;
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	spin_lock(&rcrtc->vblank_lock);

	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);

	if (status & DSSR_VBK) {
		/*
		 * Wake up the vblank wait if the counter reaches 0. This must
		 * be protected by the vblank_lock to avoid races in
		 * rcar_du_crtc_disable_planes().
		 */
		if (rcrtc->vblank_count) {
			if (--rcrtc->vblank_count == 0)
				wake_up(&rcrtc->vblank_wait);
		}
	}

	spin_unlock(&rcrtc->vblank_lock);

	if (status & DSSR_VBK) {
		if (rcdu->info->gen < 3) {
			drm_crtc_handle_vblank(&rcrtc->crtc);
			rcar_du_crtc_finish_page_flip(rcrtc);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}

/* -----------------------------------------------------------------------------
 * Initialization
 */

int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
			unsigned int hwindex)
{
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
	};

	struct rcar_du_device *rcdu = rgrp->dev;
	struct platform_device *pdev = to_platform_device(rcdu->dev);
	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[swindex];
	struct drm_crtc *crtc = &rcrtc->crtc;
	struct drm_plane *primary;
	unsigned int irqflags;
	struct clk *clk;
	char clk_name[9];
	char *name;
	int irq;
	int ret;

	/* Get the CRTC clock and the optional external clock. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
		sprintf(clk_name, "du.%u", hwindex);
		name = clk_name;
	} else {
		name = NULL;
	}

	rcrtc->clock = devm_clk_get(rcdu->dev, name);
	if (IS_ERR(rcrtc->clock)) {
		dev_err(rcdu->dev, "no clock for DU channel %u\n", hwindex);
		return PTR_ERR(rcrtc->clock);
	}

	sprintf(clk_name, "dclkin.%u", hwindex);
	clk = devm_clk_get(rcdu->dev, clk_name);
	if (!IS_ERR(clk)) {
		rcrtc->extclock = clk;
	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
		dev_info(rcdu->dev, "can't get external clock %u\n", hwindex);
		return -EPROBE_DEFER;
	}

	init_waitqueue_head(&rcrtc->flip_wait);
	init_waitqueue_head(&rcrtc->vblank_wait);
	spin_lock_init(&rcrtc->vblank_lock);

	rcrtc->group = rgrp;
	rcrtc->mmio_offset = mmio_offsets[hwindex];
	rcrtc->index = hwindex;

	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
		primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
	else
		primary = &rgrp->planes[swindex % 2].plane;

	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL,
					rcdu->info->gen <= 2 ?
					&crtc_funcs_gen2 : &crtc_funcs_gen3,
					NULL);
	if (ret < 0)
		return ret;

	drm_crtc_helper_add(crtc, &crtc_helper_funcs);

	/* Start with vertical blanking interrupt reporting disabled. */
	drm_crtc_vblank_off(crtc);

	/* Register the interrupt handler. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
		/* The IRQs are associated with the CRTC (sw)index. */
		irq = platform_get_irq(pdev, swindex);
		irqflags = 0;
	} else {
		irq = platform_get_irq(pdev, 0);
		irqflags = IRQF_SHARED;
	}

	if (irq < 0) {
		dev_err(rcdu->dev, "no IRQ for CRTC %u\n", swindex);
		return irq;
	}

	ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
			       dev_name(rcdu->dev), rcrtc);
	if (ret < 0) {
		dev_err(rcdu->dev,
			"failed to register IRQ for CRTC %u\n", swindex);