Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (24)
Showing 191 additions and 182 deletions
@@ -455,10 +455,10 @@ static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
     return umin(rec_len, chunk);
 }

-void amdgpu_cper_ring_write(struct amdgpu_ring *ring,
-        void *src, int count)
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
 {
     u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+    int rec_cnt_dw = count >> 2;
     u32 chunk, ent_sz;
     u8 *s = (u8 *)src;
@@ -485,6 +485,9 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
         s += chunk;
     }

+    if (ring->count_dw < rec_cnt_dw)
+        ring->count_dw = 0;
+
     /* the buffer is overflow, adjust rptr */
     if (((wptr_old < rptr) && (rptr <= ring->wptr)) ||
         ((ring->wptr < wptr_old) && (wptr_old < rptr)) ||
@@ -501,12 +504,10 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
             pos = rptr;
         } while (!amdgpu_cper_is_hdr(ring, rptr));
     }

-    mutex_unlock(&ring->adev->cper.ring_lock);
-
-    if (ring->count_dw >= (count >> 2))
-        ring->count_dw -= (count >> 2);
-    else
-        ring->count_dw = 0;
+    if (ring->count_dw >= rec_cnt_dw)
+        ring->count_dw -= rec_cnt_dw;
+
+    mutex_unlock(&ring->adev->cper.ring_lock);
 }

 static u64 amdgpu_cper_ring_get_rptr(struct amdgpu_ring *ring)
......
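Note on the amdgpu_cper.c hunks above: the record size in dwords is now computed once (rec_cnt_dw), the free-space counter is clamped to zero instead of being allowed to underflow, and the bookkeeping happens before cper.ring_lock is released. A standalone model of just that counter arithmetic (illustrative only, not driver code; locking and rptr handling omitted):

#include <stdio.h>

static int count_dw = 4;    /* free dwords left in the ring */

static void ring_write(int count_bytes)
{
    int rec_cnt_dw = count_bytes >> 2;    /* bytes -> dwords, computed once */

    if (count_dw < rec_cnt_dw)    /* record overran the free space: clamp */
        count_dw = 0;
    if (count_dw >= rec_cnt_dw)   /* otherwise consume the space */
        count_dw -= rec_cnt_dw;
    printf("wrote %d dwords, %d dwords free\n", rec_cnt_dw, count_dw);
}

int main(void)
{
    ring_write(8);    /* 2 dwords fit: 4 -> 2 */
    ring_write(16);   /* 4 dwords do not: counter clamps to 0, never underflows */
    return 0;
}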
@@ -4395,10 +4395,17 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     if (r)
         return r;

-    /* Get rid of things like offb */
-    r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
-    if (r)
-        return r;
+    /*
+     * No need to remove conflicting FBs for non-display class devices.
+     * This prevents the sysfb from being freed accidently.
+     */
+    if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+        (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+        /* Get rid of things like offb */
+        r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
+        if (r)
+            return r;
+    }

     /* Enable TMZ based on IP_VERSION */
     amdgpu_gmc_tmz_set(adev);
@@ -4796,6 +4803,9 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
     kfree(adev->fru_info);
     adev->fru_info = NULL;

+    kfree(adev->xcp_mgr);
+    adev->xcp_mgr = NULL;
+
     px = amdgpu_device_supports_px(adev_to_drm(adev));
     if (px || (!dev_is_removable(&adev->pdev->dev) &&
......
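The sysfb change above keys off the PCI class code. A minimal, runnable illustration of the test (constants per the PCI spec; the helper name is made up for the example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_CLASS_DISPLAY_VGA   0x0300    /* base class 0x03, subclass 0x00 */
#define PCI_CLASS_DISPLAY_OTHER 0x0380    /* base class 0x03, subclass 0x80 */

/* pdev->class packs base class, subclass and prog-if into 24 bits;
 * shifting right by 8 drops prog-if, leaving base class | subclass. */
static bool is_display_class(uint32_t class24)
{
    uint16_t cls = class24 >> 8;

    return cls == PCI_CLASS_DISPLAY_VGA || cls == PCI_CLASS_DISPLAY_OTHER;
}

int main(void)
{
    printf("%d\n", is_display_class(0x030000));    /* VGA controller: 1 */
    printf("%d\n", is_display_class(0x120000));    /* processing accelerator: 0 */
    return 0;
}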
@@ -1290,6 +1290,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
     uint16_t die_offset;
     uint16_t ip_offset;
     uint16_t num_dies;
+    uint32_t wafl_ver;
     uint16_t num_ips;
     uint16_t hw_id;
     uint8_t inst;
@@ -1303,6 +1304,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
         return r;
     }

+    wafl_ver = 0;
     adev->gfx.xcc_mask = 0;
     adev->sdma.sdma_mask = 0;
     adev->vcn.inst_mask = 0;
@@ -1403,6 +1405,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                 adev->gfx.xcc_mask |=
                     (1U << ip->instance_number);

+            if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
+                wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
+                               ip->revision, 0, 0);
+
             for (k = 0; k < num_base_address; k++) {
                 /*
                  * convert the endianness of base addresses in place,
@@ -1468,6 +1474,9 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
         }
     }

+    if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
+        adev->ip_versions[XGMI_HWIP][0] = wafl_ver;
+
     return 0;
 }
@@ -2772,10 +2781,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
         break;
     }

-    if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
-        amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
-        adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
-
     /* set NBIO version */
     switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
     case IP_VERSION(6, 1, 0):
......
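The discovery change above caches the WAFL block's version while walking the IP discovery table and uses it as the XGMI fallback, instead of hardcoding IP_VERSION(6, 4, 0) for specific GC versions. For reference, a runnable sketch of the version packing these macros use (bit layout as defined in amdgpu at the time of writing; shown here standalone, so treat it as an assumption):

#include <stdint.h>
#include <stdio.h>

#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
    (((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
#define IP_VERSION(mj, mn, rv)  IP_VERSION_FULL(mj, mn, rv, 0, 0)
#define IP_VERSION_MAJ(ver)     ((ver) >> 24)

int main(void)
{
    uint32_t wafl_ver = IP_VERSION(6, 4, 0);

    /* major lives in the top byte, so versions compare numerically */
    printf("0x%08x major=%u\n", wafl_ver, IP_VERSION_MAJ(wafl_ver));  /* 0x06040000 major=6 */
    return 0;
}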
@@ -178,6 +178,7 @@ uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu;
 char *amdgpu_virtual_display;
 bool enforce_isolation;
+int amdgpu_modeset = -1;

 /* Specifies the default granularity for SVM, used in buffer
  * migration and restoration of backing memory when handling
@@ -1039,6 +1040,13 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
 module_param(enforce_isolation, bool, 0444);
 MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");

+/**
+ * DOC: modeset (int)
+ * Override nomodeset (1 = override, -1 = auto). The default is -1 (auto).
+ */
+MODULE_PARM_DESC(modeset, "Override nomodeset (1 = enable, -1 = auto)");
+module_param_named(modeset, amdgpu_modeset, int, 0444);
+
 /**
  * DOC: seamless (int)
  * Seamless boot will keep the image on the screen during the boot process.
@@ -2259,6 +2267,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
     int ret, retry = 0, i;
     bool supports_atomic = false;

+    if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+        (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+        if (drm_firmware_drivers_only() && amdgpu_modeset == -1)
+            return -EINVAL;
+    }
+
     /* skip devices which are owned by radeon */
     for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
         if (amdgpu_unsupported_pciidlist[i] == pdev->device)
@@ -3011,9 +3025,6 @@ static int __init amdgpu_init(void)
 {
     int r;

-    if (drm_firmware_drivers_only())
-        return -EINVAL;
-
     r = amdgpu_sync_init();
     if (r)
         goto error_sync;
......
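The amdgpu_drv.c hunks above move the drm_firmware_drivers_only() check from module init into amdgpu_pci_probe() and make it per-device: only display-class functions are refused under nomodeset, and the new amdgpu.modeset=1 parameter overrides it. A small model of the gate (names invented for the example):

#include <stdbool.h>
#include <stdio.h>

/* firmware_only stands in for drm_firmware_drivers_only(), i.e. the user
 * booted with nomodeset; modeset models amdgpu.modeset (-1 = auto). */
static bool probe_allowed(bool display_class, bool firmware_only, int modeset)
{
    if (display_class && firmware_only && modeset == -1)
        return false;    /* honour nomodeset */
    return true;         /* non-display (e.g. compute-only) functions always probe */
}

int main(void)
{
    printf("%d\n", probe_allowed(true, true, -1));   /* 0: refused */
    printf("%d\n", probe_allowed(true, true, 1));    /* 1: overridden */
    printf("%d\n", probe_allowed(false, true, -1));  /* 1: not display class */
    return 0;
}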
@@ -57,8 +57,8 @@ enum amdgpu_gfx_pipe_priority {
 #define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
 #define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15

-/* 1 second timeout */
-#define GFX_PROFILE_IDLE_TIMEOUT msecs_to_jiffies(1000)
+/* 10 millisecond timeout */
+#define GFX_PROFILE_IDLE_TIMEOUT msecs_to_jiffies(10)

 enum amdgpu_gfx_partition {
     AMDGPU_SPX_PARTITION_MODE = 0,
......
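For scale, msecs_to_jiffies() rounds the timeout up to whole scheduler ticks. A userspace approximation of that conversion (the real kernel helper handles more cases, and HZ is build-dependent; 250 is assumed here):

#include <stdio.h>

#define HZ 250    /* assumed; kernel configs commonly use 100, 250 or 1000 */

static unsigned long msecs_to_jiffies_model(unsigned int ms)
{
    return (ms * HZ + 999) / 1000;    /* round up to a whole tick */
}

int main(void)
{
    printf("%lu\n", msecs_to_jiffies_model(1000));  /* 250 ticks: old timeout */
    printf("%lu\n", msecs_to_jiffies_model(10));    /* 3 ticks: new timeout */
    return 0;
}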
@@ -3473,6 +3473,13 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
                     adev, control->bad_channel_bitmap);
             con->update_channel_flag = false;
         }
+
+        /* The format action is only applied to new ASICs */
+        if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
+            control->tbl_hdr.version < RAS_TABLE_VER_V3)
+            if (!amdgpu_ras_eeprom_reset_table(control))
+                if (amdgpu_ras_save_bad_pages(adev, NULL))
+                    dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
     }

     return ret;
......
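The format gate added above only fires for newer parts: UMC IP major version 12 or later whose on-EEPROM table predates V3. A runnable model of just that condition (version constants copied from the amdgpu_ras_eeprom.h hunk later in this diff):

#include <stdint.h>
#include <stdio.h>

#define RAS_TABLE_VER_V2_1 0x00021000
#define RAS_TABLE_VER_V3   0x00030000

static int needs_v3_format(unsigned int umc_major, uint32_t tbl_ver)
{
    return umc_major >= 12 && tbl_ver < RAS_TABLE_VER_V3;
}

int main(void)
{
    printf("%d\n", needs_v3_format(12, RAS_TABLE_VER_V2_1));  /* 1: reformat */
    printf("%d\n", needs_v3_format(8, RAS_TABLE_VER_V2_1));   /* 0: old ASIC */
    printf("%d\n", needs_v3_format(12, RAS_TABLE_VER_V3));    /* 0: already V3 */
    return 0;
}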
@@ -413,9 +413,11 @@ static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control
     switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
     case IP_VERSION(8, 10, 0):
-    case IP_VERSION(12, 0, 0):
         hdr->version = RAS_TABLE_VER_V2_1;
         return;
+    case IP_VERSION(12, 0, 0):
+        hdr->version = RAS_TABLE_VER_V3;
+        return;
     default:
         hdr->version = RAS_TABLE_VER_V1;
         return;
@@ -443,7 +445,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
     hdr->header = RAS_TABLE_HDR_VAL;
     amdgpu_ras_set_eeprom_table_version(control);

-    if (hdr->version == RAS_TABLE_VER_V2_1) {
+    if (hdr->version >= RAS_TABLE_VER_V2_1) {
         hdr->first_rec_offset = RAS_RECORD_START_V2_1;
         hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
                 RAS_TABLE_V2_1_INFO_SIZE;
@@ -461,7 +463,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
     }

     csum = __calc_hdr_byte_sum(control);
-    if (hdr->version == RAS_TABLE_VER_V2_1)
+    if (hdr->version >= RAS_TABLE_VER_V2_1)
         csum += __calc_ras_info_byte_sum(control);
     csum = -csum;
     hdr->checksum = csum;
@@ -757,7 +759,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
               "Saved bad pages %d reaches threshold value %d\n",
               control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
         control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
-        if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) {
+        if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
             control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
             control->tbl_rai.health_percent = 0;
         }
@@ -770,7 +772,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
             amdgpu_dpm_send_rma_reason(adev);
     }

-    if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+    if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
         control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
                     RAS_TABLE_V2_1_INFO_SIZE +
                     control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
@@ -810,7 +812,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
      * now calculate gpu health percent
      */
     if (amdgpu_bad_page_threshold != 0 &&
-        control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
+        control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 &&
         control->ras_num_bad_pages <= ras->bad_page_cnt_threshold)
         control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
                             control->ras_num_bad_pages) * 100) /
@@ -823,7 +825,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
         csum += *pp;

     csum += __calc_hdr_byte_sum(control);
-    if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+    if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
         csum += __calc_ras_info_byte_sum(control);
     /* avoid sign extension when assigning to "checksum" */
     csum = -csum;
@@ -1040,7 +1042,7 @@ uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *co
     /* get available eeprom table version first before eeprom table init */
     amdgpu_ras_set_eeprom_table_version(control);

-    if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+    if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
         return RAS_MAX_RECORD_COUNT_V2_1;
     else
         return RAS_MAX_RECORD_COUNT;
@@ -1285,7 +1287,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
     int buf_size, res;
     u8 csum, *buf, *pp;

-    if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+    if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
         buf_size = RAS_TABLE_HEADER_SIZE +
                RAS_TABLE_V2_1_INFO_SIZE +
                control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
@@ -1388,7 +1390,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)

     __decode_table_header_from_buf(hdr, buf);

-    if (hdr->version == RAS_TABLE_VER_V2_1) {
+    if (hdr->version >= RAS_TABLE_VER_V2_1) {
         control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
         control->ras_record_offset = RAS_RECORD_START_V2_1;
         control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
@@ -1428,7 +1430,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
         DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
                  control->ras_num_bad_pages);

-        if (hdr->version == RAS_TABLE_VER_V2_1) {
+        if (hdr->version >= RAS_TABLE_VER_V2_1) {
             res = __read_table_ras_info(control);
             if (res)
                 return res;
@@ -1448,7 +1450,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
                  ras->bad_page_cnt_threshold);
     } else if (hdr->header == RAS_TABLE_HDR_BAD &&
            amdgpu_bad_page_threshold != 0) {
-        if (hdr->version == RAS_TABLE_VER_V2_1) {
+        if (hdr->version >= RAS_TABLE_VER_V2_1) {
             res = __read_table_ras_info(control);
             if (res)
                 return res;
......
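The recurring edit in this file swaps '==' for '>=' so that everything gated on the V2_1 layout also applies to V3. The version words are ordered numerically, which is what makes the relational compare safe (constants from amdgpu_ras_eeprom.h, shown in the next hunk):

#include <assert.h>

#define RAS_TABLE_VER_V1   0x00010000
#define RAS_TABLE_VER_V2_1 0x00021000
#define RAS_TABLE_VER_V3   0x00030000

int main(void)
{
    unsigned int hdr_version = RAS_TABLE_VER_V3;

    assert(hdr_version != RAS_TABLE_VER_V2_1);  /* old '==' test skipped V3 tables */
    assert(hdr_version >= RAS_TABLE_VER_V2_1);  /* '>=' keeps V3 on the V2_1+ paths */
    return 0;
}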
@@ -28,6 +28,7 @@
 #define RAS_TABLE_VER_V1 0x00010000
 #define RAS_TABLE_VER_V2_1 0x00021000
+#define RAS_TABLE_VER_V3 0x00030000

 struct amdgpu_device;
......
@@ -609,7 +609,7 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, b
      * if they were stopped by this function. This allows new tasks
      * to be submitted to the queues after the reset is complete.
      */
-    if (ret) {
+    if (!ret) {
         if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
             drm_sched_wqueue_start(&gfx_ring->sched);
         }
......
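The one-character fix above corrects an inverted condition: with the kernel convention of 0 meaning success, the schedulers were being restarted only when the SDMA engine reset had failed. A minimal model of the fixed logic (illustrative only):

#include <stdio.h>

static void restart_scheds(int ret /* 0 on success */)
{
    if (!ret)    /* was: if (ret) - restarted only on failure */
        printf("reset ok: restarting stopped queues\n");
    else
        printf("reset failed: leave queues stopped\n");
}

int main(void)
{
    restart_scheds(0);      /* success path */
    restart_scheds(-22);    /* failure path (-EINVAL) */
    return 0;
}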
@@ -1749,7 +1749,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_
     }
     if (quirk_entries.support_edp0_on_dp1) {
         init_data->flags.support_edp0_on_dp1 = true;
-        drm_info(dev, "aux_hpd_discon_quirk attached\n");
+        drm_info(dev, "support_edp0_on_dp1 attached\n");
     }
 }
......
@@ -130,7 +130,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
     struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
     struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
     struct dc *dc = clk_mgr_base->ctx->dc;
-    int display_count;
+    int display_count = 0;
     bool update_dppclk = false;
     bool update_dispclk = false;
     bool dpp_clock_lowered = false;
@@ -202,15 +202,19 @@
         update_dppclk = true;
     }

-    if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-        /* No need to apply the w/a if we haven't taken over from bios yet */
-        if (clk_mgr_base->clks.dispclk_khz)
-            dcn315_disable_otg_wa(clk_mgr_base, context, true);
+    if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+        (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+        int requested_dispclk_khz = new_clocks->dispclk_khz;

+        dcn315_disable_otg_wa(clk_mgr_base, context, true);
+
+        /* Clamp the requested clock to PMFW based on their limit. */
+        if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+            requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+        dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
         clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
-        dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-        if (clk_mgr_base->clks.dispclk_khz)
-            dcn315_disable_otg_wa(clk_mgr_base, context, false);
+        dcn315_disable_otg_wa(clk_mgr_base, context, false);

         update_dispclk = true;
     }
......
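The same clamp is applied to dcn316 below: the value sent to PMFW is floored at dc->debug.min_disp_clk_khz, while the driver still records the unclamped request in its own clock state. Modeled on its own (illustrative helper; the driver code inlines this):

#include <stdio.h>

static int clamp_dispclk_khz(int requested_khz, int debug_min_khz)
{
    if (debug_min_khz > 0 && requested_khz < debug_min_khz)
        return debug_min_khz;
    return requested_khz;
}

int main(void)
{
    printf("%d\n", clamp_dispclk_khz(40000, 100000));  /* 100000: floored */
    printf("%d\n", clamp_dispclk_khz(600000, 100000)); /* 600000: untouched */
    printf("%d\n", clamp_dispclk_khz(40000, 0));       /* 40000: floor disabled */
    return 0;
}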
@@ -140,7 +140,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
     struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
     struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
     struct dc *dc = clk_mgr_base->ctx->dc;
-    int display_count;
+    int display_count = 0;
     bool update_dppclk = false;
     bool update_dispclk = false;
     bool dpp_clock_lowered = false;
@@ -209,11 +209,18 @@
         update_dppclk = true;
     }

-    if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+    if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+        (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+        int requested_dispclk_khz = new_clocks->dispclk_khz;
+
         dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);

+        /* Clamp the requested clock to PMFW based on their limit. */
+        if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+            requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+        dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
         clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
-        dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
         dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);

         update_dispclk = true;
......
@@ -204,6 +204,8 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
         struct link_encoder *new_pipe_link_enc = new_pipe->link_res.dio_link_enc;
         struct link_encoder *pipe_link_enc = pipe->link_res.dio_link_enc;
         bool stream_changed_otg_dig_on = false;
+        bool has_active_hpo = false;
+
         if (pipe->top_pipe || pipe->prev_odm_pipe)
             continue;
@@ -225,20 +227,15 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
             new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
             new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);

-        bool has_active_hpo = false;
-
         if (old_pipe->stream && new_pipe->stream && old_pipe->stream == new_pipe->stream) {
             has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) &&
                 dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe);
         }

-        if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) &&
-            (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
-            !pipe_link_enc) && !stream_changed_otg_dig_on)) {
-        }
-
+        if (!has_active_hpo && !stream_changed_otg_dig_on && pipe->stream &&
+            (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || !pipe_link_enc) &&
+            !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe)) {
             /* This w/a should not trigger when we have a dig active */
             if (disable) {
                 if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
......
@@ -370,15 +370,10 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
     return link->dc->link_srv->dp_should_enable_fec(link);
 }

-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
         struct dc_link *link, int peak_bw)
 {
-    return link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw);
-}
-
-void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
-{
-    link->dc->link_srv->dpia_handle_bw_alloc_response(link, bw, result);
+    link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw);
 }

 bool dc_link_check_link_loss_status(
......
@@ -53,7 +53,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;

-#define DC_VER "3.2.324"
+#define DC_VER "3.2.325"

 /**
  * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -2353,19 +2353,6 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
  */
 void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);

-/*
- * Handle function for when the status of the Request above is complete.
- * We will find out the result of allocating on CM and update structs.
- *
- * @link: pointer to the dc_link struct instance
- * @bw: Allocated or Estimated BW depending on the result
- * @result: Response type
- *
- * return: none
- */
-void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
-        uint8_t bw, uint8_t result);
-
 /*
  * Handle the USB4 BW Allocation related functionality here:
  * Plug => Try to allocate max bw from timing parameters supported by the sink
@@ -2374,9 +2361,8 @@ void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
  * @link: pointer to the dc_link struct instance
  * @peak_bw: Peak bw used by the link/sink
  *
- * return: allocated bw else return 0
  */
-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
         struct dc_link *link, int peak_bw);

 /*
......
@@ -70,28 +70,20 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
     }
 }

-bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
+void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
 {
-    struct dmub_srv *dmub;
-    struct dc_context *dc_ctx;
+    struct dmub_srv *dmub = dc_dmub_srv->dmub;
+    struct dc_context *dc_ctx = dc_dmub_srv->ctx;
     enum dmub_status status;

-    if (!dc_dmub_srv || !dc_dmub_srv->dmub)
-        return false;
-
-    dc_ctx = dc_dmub_srv->ctx;
-    dmub = dc_dmub_srv->dmub;
-
     do {
-        status = dmub_srv_wait_for_pending(dmub, 100000);
+        status = dmub_srv_wait_for_idle(dmub, 100000);
     } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

     if (status != DMUB_STATUS_OK) {
         DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
         dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
     }
-
-    return status == DMUB_STATUS_OK;
 }

 void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
@@ -134,49 +126,7 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
     }
 }

-static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
-        unsigned int count,
-        union dmub_rb_cmd *cmd_list)
-{
-    struct dc_context *dc_ctx;
-    struct dmub_srv *dmub;
-    enum dmub_status status = DMUB_STATUS_OK;
-    int i;
-
-    if (!dc_dmub_srv || !dc_dmub_srv->dmub)
-        return false;
-
-    dc_ctx = dc_dmub_srv->ctx;
-    dmub = dc_dmub_srv->dmub;
-
-    for (i = 0 ; i < count; i++) {
-        /* confirm no messages pending */
-        do {
-            status = dmub_srv_wait_for_idle(dmub, 100000);
-        } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
-
-        /* queue command */
-        if (status == DMUB_STATUS_OK)
-            status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);
-
-        /* check for errors */
-        if (status != DMUB_STATUS_OK) {
-            break;
-        }
-    }
-
-    if (status != DMUB_STATUS_OK) {
-        if (status != DMUB_STATUS_POWER_STATE_D3) {
-            DC_ERROR("Error starting DMUB execution: status=%d\n", status);
-            dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
-        }
-        return false;
-    }
-
-    return true;
-}
-
-static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
         unsigned int count,
         union dmub_rb_cmd *cmd_list)
 {
@@ -193,16 +143,11 @@ static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_sr
     for (i = 0 ; i < count; i++) {
         // Queue command
-        if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
-            dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
-            status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
-        } else {
-            status = DMUB_STATUS_QUEUE_FULL;
-        }
+        status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);

         if (status == DMUB_STATUS_QUEUE_FULL) {
             /* Execute and wait for queue to become empty again. */
-            status = dmub_srv_fb_cmd_execute(dmub);
+            status = dmub_srv_cmd_execute(dmub);
             if (status == DMUB_STATUS_POWER_STATE_D3)
                 return false;
@@ -211,7 +156,7 @@ static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_sr
             } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

             /* Requeue the command. */
-            status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
+            status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
         }

         if (status != DMUB_STATUS_OK) {
@@ -223,7 +168,7 @@ static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_sr
         }
     }

-    status = dmub_srv_fb_cmd_execute(dmub);
+    status = dmub_srv_cmd_execute(dmub);
     if (status != DMUB_STATUS_OK) {
         if (status != DMUB_STATUS_POWER_STATE_D3) {
             DC_ERROR("Error starting DMUB execution: status=%d\n", status);
@@ -235,23 +180,6 @@ static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_sr
     return true;
 }

-bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
-        unsigned int count,
-        union dmub_rb_cmd *cmd_list)
-{
-    bool res = false;
-
-    if (dc_dmub_srv && dc_dmub_srv->dmub) {
-        if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
-            res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
-        } else {
-            res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
-        }
-    }
-
-    return res;
-}
-
 bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
         enum dm_dmub_wait_type wait_type,
         union dmub_rb_cmd *cmd_list)
@@ -274,8 +202,7 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
             DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
             if (!dmub->debug.timeout_info.timeout_occured) {
                 dmub->debug.timeout_info.timeout_occured = true;
-                if (cmd_list)
-                    dmub->debug.timeout_info.timeout_cmd = *cmd_list;
+                dmub->debug.timeout_info.timeout_cmd = *cmd_list;
                 dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
             }
             dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
@@ -283,9 +210,8 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
         }

         // Copy data back from ring buffer into command
-        if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
-            dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
-        }
+        if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+            dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
     }

     return true;
@@ -298,10 +224,74 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd
 bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
 {
-    if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
+    struct dc_context *dc_ctx;
+    struct dmub_srv *dmub;
+    enum dmub_status status;
+    int i;
+
+    if (!dc_dmub_srv || !dc_dmub_srv->dmub)
         return false;

-    return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
+    dc_ctx = dc_dmub_srv->ctx;
+    dmub = dc_dmub_srv->dmub;
+
+    for (i = 0 ; i < count; i++) {
+        // Queue command
+        status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+
+        if (status == DMUB_STATUS_QUEUE_FULL) {
+            /* Execute and wait for queue to become empty again. */
+            status = dmub_srv_cmd_execute(dmub);
+            if (status == DMUB_STATUS_POWER_STATE_D3)
+                return false;
+
+            status = dmub_srv_wait_for_idle(dmub, 100000);
+            if (status != DMUB_STATUS_OK)
+                return false;
+
+            /* Requeue the command. */
+            status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+        }
+
+        if (status != DMUB_STATUS_OK) {
+            if (status != DMUB_STATUS_POWER_STATE_D3) {
+                DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+                dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+            }
+            return false;
+        }
+    }
+
+    status = dmub_srv_cmd_execute(dmub);
+    if (status != DMUB_STATUS_OK) {
+        if (status != DMUB_STATUS_POWER_STATE_D3) {
+            DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+            dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+        }
+        return false;
+    }
+
+    // Wait for DMUB to process command
+    if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
+        if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
+            do {
+                status = dmub_srv_wait_for_idle(dmub, 100000);
+            } while (status != DMUB_STATUS_OK);
+        } else
+            status = dmub_srv_wait_for_idle(dmub, 100000);

+        if (status != DMUB_STATUS_OK) {
+            DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+            dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+            return false;
+        }
+
+        // Copy data back from ring buffer into command
+        if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+            dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+    }
+
+    return true;
 }

 bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
@@ -1253,7 +1243,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
               ips_fw->signals.bits.ips1_commit,
               ips_fw->signals.bits.ips2_commit);

-        dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
+        dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);

         memset(&new_signals, 0, sizeof(new_signals));
@@ -1410,7 +1400,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
               ips_fw->signals.bits.ips1_commit,
               ips_fw->signals.bits.ips2_commit);

-        dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
+        dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
     }
 }
@@ -1664,8 +1654,7 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
     /* fill in generic command header */
     global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
     global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
-    global_cmd->header.payload_bytes =
-        sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+    global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);

     if (enable) {
         /* send global configuration parameters */
@@ -1684,13 +1673,11 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
         /* configure command header */
         stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
         stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
-        stream_base_cmd->header.payload_bytes =
-            sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+        stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
         stream_base_cmd->header.multi_cmd_pending = 1;
         stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
         stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
-        stream_sub_state_cmd->header.payload_bytes =
-            sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+        stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
         stream_sub_state_cmd->header.multi_cmd_pending = 1;

         /* copy stream static base state */
         memcpy(&stream_base_cmd->config,
@@ -1736,8 +1723,7 @@ void dc_dmub_srv_fams2_drr_update(struct dc *dc,
     cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
     cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;

-    cmd.fams2_drr_update.header.payload_bytes =
-        sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
+    cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);

     dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
@@ -1773,8 +1759,7 @@ void dc_dmub_srv_fams2_passthrough_flip(
         /* build command header */
         cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
         cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
-        cmds[num_cmds].fams2_flip.header.payload_bytes =
-            sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);
+        cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);

         /* for chaining multiple commands, all but last command should set to 1 */
         cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
......
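Note on the dc_dmub_srv.c hunks above: they restore the single-inbox (inbox1) command flow, dropping the register-based inbox variant and its dispatch wrapper. The distinctive part is the queue-full recovery in the restored path: execute, wait for idle, then requeue the command that did not fit. A standalone model of that loop (toy ring with one slot; not the DMUB API):

#include <stdio.h>

enum status { OK, QUEUE_FULL };

static int slots_free = 1;    /* toy ring buffer with a single slot */

static enum status cmd_queue(int cmd)
{
    if (slots_free == 0)
        return QUEUE_FULL;
    slots_free--;
    printf("queued cmd %d\n", cmd);
    return OK;
}

static void execute_and_wait_idle(void)
{
    printf("execute; wait for idle\n");
    slots_free = 1;    /* ring drained */
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        if (cmd_queue(i) == QUEUE_FULL) {
            execute_and_wait_idle();    /* flush the ring... */
            cmd_queue(i);               /* ...then requeue */
        }
    }
    execute_and_wait_idle();    /* final kick for whatever is still queued */
    return 0;
}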
@@ -58,7 +58,7 @@ struct dc_dmub_srv {
     bool needs_idle_wake;
 };

-bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
+void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);

 bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
......
@@ -682,7 +682,7 @@ void reg_sequence_wait_done(const struct dc_context *ctx)
     if (offload &&
         ctx->dc->debug.dmub_offload_enabled &&
         !ctx->dc->debug.dmcub_emulation) {
-        dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
+        dc_dmub_srv_wait_idle(ctx->dmub_srv);
     }
 }
......
@@ -1224,7 +1224,6 @@ struct dc_dpia_bw_alloc {
     int bw_granularity;         // BW Granularity
     int dp_overhead;            // DP overhead in dp tunneling
     bool bw_alloc_enabled;      // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
-    bool response_ready;        // Response ready from the CM side
     uint8_t nrd_max_lane_count; // Non-reduced max lane count
     uint8_t nrd_max_link_rate;  // Non-reduced max link rate
......
@@ -240,8 +240,7 @@ bool dmub_abm_save_restore(
     cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
     cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;

-    cmd.abm_save_restore.header.payload_bytes =
-        sizeof(struct dmub_rb_cmd_abm_save_restore) - sizeof(struct dmub_cmd_header);
+    cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);

     dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);