diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 071d4fbd3efc2a6f5f9392ce6589038ec6c782ac..335148f1cd39c6c720dd81f12b25a5e9a60d43e9 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -309,8 +309,8 @@ static int gt_fw_domain_init(struct xe_gt *gt)
 	/* XXX: Fake that we pull the engine mask from hwconfig blob */
 	gt->info.engine_mask = gt->info.__engine_mask;
 
-	/* Enables per hw engine IRQs */
-	xe_gt_irq_postinstall(gt_to_tile(gt));
+	/* Enable per hw engine IRQs */
+	xe_irq_enable_hwe(gt);
 
 	/* Rerun MCR init as we now have hw engine list */
 	xe_gt_mcr_init(gt);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index b12f65a2bab3e570a5f93df92f7df5e85363f75a..b42a0cb50159a7c51667ea13832c9ceb8126a668 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -17,6 +17,7 @@
 #include "xe_gt.h"
 #include "xe_gt_topology.h"
 #include "xe_hw_fence.h"
+#include "xe_irq.h"
 #include "xe_lrc.h"
 #include "xe_macros.h"
 #include "xe_mmio.h"
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 85bb9bd6b6be9b9026f50764a26e174a6e2d4dd0..b4ed1e4a3388c0bea11ce38f3545b3ecc90b26b2 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -122,13 +122,12 @@ static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
 	xe_mmio_read32(mmio, GFX_MSTR_IRQ);
 }
 
-void xe_gt_irq_postinstall(struct xe_tile *tile)
+/* Enable/unmask the HWE interrupts for a specific GT's engines. */
+void xe_irq_enable_hwe(struct xe_gt *gt)
 {
-	struct xe_device *xe = tile_to_xe(tile);
-	struct xe_gt *mmio = tile->primary_gt;
+	struct xe_device *xe = gt_to_xe(gt);
+	u32 ccs_mask, bcs_mask;
 	u32 irqs, dmask, smask;
-	u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt, XE_ENGINE_CLASS_COMPUTE);
-	u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt, XE_ENGINE_CLASS_COPY);
 
 	if (xe_device_guc_submission_enabled(xe)) {
 		irqs = GT_RENDER_USER_INTERRUPT |
@@ -140,45 +139,44 @@ void xe_gt_irq_postinstall(struct xe_tile *tile)
 		       GT_WAIT_SEMAPHORE_INTERRUPT;
 	}
 
+	ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
+	bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
+
 	dmask = irqs << 16 | irqs;
 	smask = irqs << 16;
 
-	/* Enable RCS, BCS, VCS and VECS class interrupts. */
-	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
-	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);
-	if (ccs_mask)
-		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);
-
-	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
-	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
-	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
-	if (bcs_mask & (BIT(1)|BIT(2)))
-		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
-	if (bcs_mask & (BIT(3)|BIT(4)))
-		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
-	if (bcs_mask & (BIT(5)|BIT(6)))
-		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
-	if (bcs_mask & (BIT(7)|BIT(8)))
-		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
-	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
-	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
-	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);
-	if (ccs_mask & (BIT(0)|BIT(1)))
-		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
-	if (ccs_mask & (BIT(2)|BIT(3)))
-		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
+	if (!xe_gt_is_media_type(gt)) {
+		/* Enable interrupts for each engine class */
+		xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask);
+		if (ccs_mask)
+			xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask);
+
+		/* Unmask interrupts for each engine instance */
+		xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask);
+		xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask);
+		if (bcs_mask & (BIT(1)|BIT(2)))
+			xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
+		if (bcs_mask & (BIT(3)|BIT(4)))
+			xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
+		if (bcs_mask & (BIT(5)|BIT(6)))
+			xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
+		if (bcs_mask & (BIT(7)|BIT(8)))
+			xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
+		if (ccs_mask & (BIT(0)|BIT(1)))
+			xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask);
+		if (ccs_mask & (BIT(2)|BIT(3)))
+			xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask);
+	}
 
-	/*
-	 * RPS interrupts will get enabled/disabled on demand when RPS itself
-	 * is enabled/disabled.
-	 */
-	/* TODO: gt->pm_ier, gt->pm_imr */
-	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
-	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK, ~0);
+	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
+		/* Enable interrupts for each engine class */
+		xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask);
 
-	/* Same thing for GuC interrupts */
-	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE, 0);
-	xe_mmio_write32(mmio, GUC_SG_INTR_MASK, ~0);
+		/* Unmask interrupts for each engine instance */
+		xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask);
+		xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
+		xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);
+	}
 }
 
 static u32
@@ -497,12 +495,6 @@ static void xe_irq_reset(struct xe_device *xe)
 
 static void xe_irq_postinstall(struct xe_device *xe)
 {
-	struct xe_tile *tile;
-	u8 id;
-
-	for_each_tile(tile, xe, id)
-		xe_gt_irq_postinstall(tile);
-
 	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
 
 	/*
@@ -591,9 +583,16 @@ void xe_irq_suspend(struct xe_device *xe)
 
 void xe_irq_resume(struct xe_device *xe)
 {
+	struct xe_gt *gt;
+	int id;
+
 	spin_lock_irq(&xe->irq.lock);
 	xe->irq.enabled = true;
 	xe_irq_reset(xe);
 	xe_irq_postinstall(xe);
+
+	for_each_gt(gt, xe, id)
+		xe_irq_enable_hwe(gt);
+
 	spin_unlock_irq(&xe->irq.lock);
 }
diff --git a/drivers/gpu/drm/xe/xe_irq.h b/drivers/gpu/drm/xe/xe_irq.h
index 69113c21e1cdaa2abd2d6df5409ffa6452697f6c..bc42bc90d96766cf042e11adc072facb7e982bc8 100644
--- a/drivers/gpu/drm/xe/xe_irq.h
+++ b/drivers/gpu/drm/xe/xe_irq.h
@@ -8,11 +8,12 @@
 
 struct xe_device;
 struct xe_tile;
+struct xe_gt;
 
 int xe_irq_install(struct xe_device *xe);
-void xe_gt_irq_postinstall(struct xe_tile *tile);
 void xe_irq_shutdown(struct xe_device *xe);
 void xe_irq_suspend(struct xe_device *xe);
 void xe_irq_resume(struct xe_device *xe);
+void xe_irq_enable_hwe(struct xe_gt *gt);
 
 #endif