drm i915, amdgpu, arm display, atomic update fixes + nouveau firmware loading fix
Merge tag 'drm-fixes-2019-06-07-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "A small bit more lively this week but not majorly so. I'm away in
  Japan next week for family holiday, so I'll be pretty disconnected,
  I've asked Daniel to do fixes for the week while I'm out.

  The nouveau firmware changes are a bit large, but they address a big
  problem where a whole set of boards don't load with the driver, and
  the new firmware fixes that, so I think it's worth trying to land it
  now.

  core:
   - Allow fb changes in async commits (drivers as well)

  udmabuf:
   - Unmap scatterlist when unmapping udmabuf

  nouveau:
   - firmware loading fixes for secboot firmware on new GPU revision

  komeda:
   - oops, dma mapping and warning fixes

  arm-hdlcd:
   - clock fixes
   - mode validation fix

  i915:
   - Add a missing Icelake workaround
   - GVT - DMA map fault fix and enforcement fixes

  amdgpu:
   - DCE resume fix
   - New raven variation updates"

* tag 'drm-fixes-2019-06-07-1' of git://anongit.freedesktop.org/drm/drm: (33 commits)
  drm/nouveau/secboot/gp10[2467]: support newer FW to fix SEC2 failures on some boards
  drm/nouveau/secboot: enable loading of versioned LS PMU/SEC2 ACR msgqueue FW
  drm/nouveau/secboot: split out FW version-specific LS function pointers
  drm/nouveau/secboot: pass max supported FW version to LS load funcs
  drm/nouveau/core: support versioned firmware loading
  drm/nouveau/core: pass subdev into nvkm_firmware_get, rather than device
  drm/komeda: Potential error pointer dereference
  drm/komeda: remove set but not used variable 'kcrtc'
  drm/amd/amdgpu: add RLC firmware to support raven1 refresh
  drm/amd/powerplay: add set_power_profile_mode for raven1_refresh
  drm/amdgpu: fix ring test failure issue during s3 in vce 3.0 (V2)
  udmabuf: actually unmap the scatterlist
  drm/arm/hdlcd: Allow a bit of clock tolerance
  drm/arm/hdlcd: Actually validate CRTC modes
  drm/arm/mali-dp: Add a loop around the second set CVAL and try 5 times
  drm/komeda: fixing of DMA mapping sg segment warning
  drm: don't block fb changes for async plane updates
  drm/vc4: fix fb references in async update
  drm/msm: fix fb references in async update
  drm/amd: fix fb references in async update
  ...
This commit is contained in:
commit 79c3ba3206

@@ -77,6 +77,7 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
 			  struct sg_table *sg,
 			  enum dma_data_direction direction)
 {
+	dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
 	sg_free_table(sg);
 	kfree(sg);
 }
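For context, a sketch of the map side this one-liner pairs with (simplified and partly assumed, not the driver's verbatim code): the dma_map_sg() below establishes the streaming mapping that unmap_udmabuf() must now tear down with dma_unmap_sg() before freeing the table, otherwise an IOMMU mapping leaks on every unmap.

/* hedged sketch: assumed shape of udmabuf's map path */
static struct sg_table *map_udmabuf_sketch(struct dma_buf_attachment *at,
					   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = at->dmabuf->priv;	/* assumed priv layout */
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
		ret = -EINVAL;
		goto err;
	}
	return sg;			/* unmap must undo dma_map_sg() */

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}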

@@ -1589,6 +1589,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 {
 	int r = 0;
 	int i;
+	uint32_t smu_version;
 
 	if (adev->asic_type >= CHIP_VEGA10) {
 		for (i = 0; i < adev->num_ip_blocks; i++) {

@@ -1614,16 +1615,9 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 			}
 		}
 	}
+	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
 
-	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
-		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
-		if (r) {
-			pr_err("firmware loading failed\n");
-			return r;
-		}
-	}
-
-	return 0;
+	return r;
 }
 
 /**

@@ -2490,6 +2490,21 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 
 }
 
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+{
+	int r = -EINVAL;
+
+	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
+		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+		if (r) {
+			pr_err("smu firmware loading failed\n");
+			return r;
+		}
+		*smu_version = adev->pm.fw_version;
+	}
+	return r;
+}
+
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
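A usage sketch for the new helper (mirroring the gfx_v9 hunk further down, which is where the 0x41e2b threshold comes from): callers get the SMU firmware loaded, or, on APUs where the SBIOS already loaded the SMC, simply read back the running version.

	uint32_t smu_version;

	if (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0 &&
	    smu_version >= 0x41e2b) {
		/* raven1-refresh SMU: pick the kicker RLC image */
	}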

@@ -34,6 +34,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
 void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
 void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);

@@ -1072,7 +1072,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint32_t rptr;
 	unsigned i;
 	int r, timeout = adev->usec_timeout;
 

@@ -1084,6 +1084,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 	if (r)
 		return r;
 
+	rptr = amdgpu_ring_get_rptr(ring);
+
 	amdgpu_ring_write(ring, VCE_CMD_END);
 	amdgpu_ring_commit(ring);
 

@@ -28,6 +28,7 @@
 #include "soc15.h"
 #include "soc15d.h"
 #include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
 
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"

@@ -96,6 +97,7 @@ MODULE_FIRMWARE("amdgpu/raven2_me.bin");
 MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
 MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
 MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
+MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
 
 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
 {

@@ -588,7 +590,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 	case CHIP_RAVEN:
 		if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
 			break;
-		if ((adev->gfx.rlc_fw_version < 531) ||
+		if ((adev->gfx.rlc_fw_version != 106 &&
+		     adev->gfx.rlc_fw_version < 531) ||
 		    (adev->gfx.rlc_fw_version == 53815) ||
 		    (adev->gfx.rlc_feature_version < 1) ||
 		    !adev->gfx.rlc.is_rlc_v2_1)

@@ -612,6 +615,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	unsigned int i = 0;
 	uint16_t version_major;
 	uint16_t version_minor;
+	uint32_t smu_version;
 
 	DRM_DEBUG("\n");
 

@@ -682,6 +686,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	    (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
 	     ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
+		 (smu_version >= 0x41e2b))
+		/**
+		 *SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
+		 */
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
 	else
 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
 	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);

@@ -4232,8 +4232,7 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
 	struct drm_plane_state *old_state =
 		drm_atomic_get_old_plane_state(new_state->state, plane);
 
-	if (plane->state->fb != new_state->fb)
-		drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+	swap(plane->state->fb, new_state->fb);
 
 	plane->state->src_x = new_state->src_x;
 	plane->state->src_y = new_state->src_y;

@@ -92,6 +92,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 	hwmgr_set_user_specify_caps(hwmgr);
 	hwmgr->fan_ctrl_is_in_default_mode = true;
 	hwmgr_init_workload_prority(hwmgr);
+	hwmgr->gfxoff_state_changed_by_workload = false;
 
 	switch (hwmgr->chip_family) {
 	case AMDGPU_FAMILY_CI:

@@ -1258,21 +1258,46 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	return size;
 }
 
+static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+	if ((adev->asic_type == CHIP_RAVEN) &&
+	    (adev->rev_id != 0x15d8) &&
+	    (hwmgr->smu_version >= 0x41e2b))
+		return true;
+	else
+		return false;
+}
+
 static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
 {
 	int workload_type = 0;
+	int result = 0;
 
 	if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
 		pr_err("Invalid power profile mode %ld\n", input[size]);
 		return -EINVAL;
 	}
-	hwmgr->power_profile_mode = input[size];
+	if (hwmgr->power_profile_mode == input[size])
+		return 0;
 
 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
 	workload_type =
-		conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
-	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
+		conv_power_profile_to_pplib_workload(input[size]);
+	if (workload_type &&
+	    smu10_is_raven1_refresh(hwmgr) &&
+	    !hwmgr->gfxoff_state_changed_by_workload) {
+		smu10_gfx_off_control(hwmgr, false);
+		hwmgr->gfxoff_state_changed_by_workload = true;
+	}
+	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
 						1 << workload_type);
+	if (!result)
+		hwmgr->power_profile_mode = input[size];
+	if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
+		smu10_gfx_off_control(hwmgr, true);
+		hwmgr->gfxoff_state_changed_by_workload = false;
+	}
 
 	return 0;
 }

@@ -782,6 +782,7 @@ struct pp_hwmgr {
 	uint32_t workload_mask;
 	uint32_t workload_prority[Workload_Policy_Max];
 	uint32_t workload_setting[Workload_Policy_Max];
+	bool gfxoff_state_changed_by_workload;
 };
 
 int hwmgr_early_init(struct pp_hwmgr *hwmgr);

@@ -245,7 +245,7 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
 	seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
 }
 
-static struct komeda_component_funcs d71_layer_funcs = {
+static const struct komeda_component_funcs d71_layer_funcs = {
 	.update		= d71_layer_update,
 	.disable	= d71_layer_disable,
 	.dump_register	= d71_layer_dump,

@@ -391,7 +391,7 @@ static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
 	seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
 }
 
-static struct komeda_component_funcs d71_compiz_funcs = {
+static const struct komeda_component_funcs d71_compiz_funcs = {
 	.update		= d71_compiz_update,
 	.disable	= d71_component_disable,
 	.dump_register	= d71_compiz_dump,

@@ -467,7 +467,7 @@ static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
 	seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
 }
 
-static struct komeda_component_funcs d71_improc_funcs = {
+static const struct komeda_component_funcs d71_improc_funcs = {
 	.update		= d71_improc_update,
 	.disable	= d71_component_disable,
 	.dump_register	= d71_improc_dump,

@@ -580,7 +580,7 @@ static void d71_timing_ctrlr_dump(struct komeda_component *c,
 	seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
 }
 
-static struct komeda_component_funcs d71_timing_ctrlr_funcs = {
+static const struct komeda_component_funcs d71_timing_ctrlr_funcs = {
 	.update		= d71_timing_ctrlr_update,
 	.disable	= d71_timing_ctrlr_disable,
 	.dump_register	= d71_timing_ctrlr_dump,
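The same mechanical change repeats through the komeda hunks below: ops tables that are never written after initialization become const so they move into .rodata, and every pointer that stores them has to grow a const as well. A self-contained miniature of the pattern (hypothetical struct, not komeda's):

struct ops {
	void (*update)(void);
};

static void do_update(void) { }

/* before: writable, placed in .data, corruptible at runtime */
static struct ops ops_rw = { .update = do_update };

/* after: read-only, placed in .rodata; holders must now be
 * 'const struct ops *', which is why the series also touches
 * struct members, parameters and return types */
static const struct ops ops_ro = { .update = do_update };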

@@ -502,7 +502,7 @@ static void d71_init_fmt_tbl(struct komeda_dev *mdev)
 	table->n_formats = ARRAY_SIZE(d71_format_caps_table);
 }
 
-static struct komeda_dev_funcs d71_chip_funcs = {
+static const struct komeda_dev_funcs d71_chip_funcs = {
 	.init_format_table = d71_init_fmt_tbl,
 	.enum_resources	= d71_enum_resources,
 	.cleanup	= d71_cleanup,

@@ -514,7 +514,7 @@ static const struct komeda_dev_funcs d71_chip_funcs = {
 	.flush	= d71_flush,
 };
 
-struct komeda_dev_funcs *
+const struct komeda_dev_funcs *
 d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
 {
 	chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);

@@ -350,7 +350,7 @@ static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+static const struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
 	.atomic_check	= komeda_crtc_atomic_check,
 	.atomic_flush	= komeda_crtc_atomic_flush,
 	.atomic_enable	= komeda_crtc_atomic_enable,

@@ -8,6 +8,7 @@
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>

@@ -249,6 +250,9 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
 		goto err_cleanup;
 	}
 
+	dev->dma_parms = &mdev->dma_parms;
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
 	err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
 	if (err) {
 		DRM_ERROR("create sysfs group failed.\n");

@@ -269,7 +273,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
 void komeda_dev_destroy(struct komeda_dev *mdev)
 {
 	struct device *dev = mdev->dev;
-	struct komeda_dev_funcs *funcs = mdev->funcs;
+	const struct komeda_dev_funcs *funcs = mdev->funcs;
 	int i;
 
 	sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
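A subtlety behind the dma_parms hunk above: dma_set_max_seg_size() only records the limit when dev->dma_parms points at storage, and platform devices get none by default, hence the struct device_dma_parameters embedded in komeda_dev (see the header hunk below). Roughly what the core helper does (paraphrased, not verbatim):

static int dma_set_max_seg_size_sketch(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms)
		return -EIO;	/* driver provided no storage: limit is lost */
	dev->dma_parms->max_segment_size = size;
	return 0;
}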

@@ -60,7 +60,7 @@ struct komeda_chip_info {
 
 struct komeda_product_data {
 	u32 product_id;
-	struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
+	const struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
 					     struct komeda_chip_info *info);
 };
 

@@ -149,6 +149,8 @@ struct komeda_dev {
 	struct device *dev;
 	/** @reg_base: the base address of komeda io space */
 	u32 __iomem   *reg_base;
+	/** @dma_parms: the dma parameters of komeda */
+	struct device_dma_parameters dma_parms;
 
 	/** @chip: the basic chip information */
 	struct komeda_chip_info chip;

@@ -173,7 +175,7 @@ struct komeda_dev {
 	struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
 
 	/** @funcs: chip funcs to access to HW */
-	struct komeda_dev_funcs *funcs;
+	const struct komeda_dev_funcs *funcs;
 	/**
 	 * @chip_data:
 	 *

@@ -192,7 +194,7 @@ komeda_product_match(struct komeda_dev *mdev, u32 target)
 	return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
 }
 
-struct komeda_dev_funcs *
+const struct komeda_dev_funcs *
 d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
 
 struct komeda_dev *komeda_dev_create(struct device *dev);

@@ -12,7 +12,7 @@
 /** komeda_pipeline_add - Add a pipeline to &komeda_dev */
 struct komeda_pipeline *
 komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
-		    struct komeda_pipeline_funcs *funcs)
+		    const struct komeda_pipeline_funcs *funcs)
 {
 	struct komeda_pipeline *pipe;
 

@@ -130,7 +130,7 @@ komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id)
 struct komeda_component *
 komeda_component_add(struct komeda_pipeline *pipe,
 		     size_t comp_sz, u32 id, u32 hw_id,
-		     struct komeda_component_funcs *funcs,
+		     const struct komeda_component_funcs *funcs,
 		     u8 max_active_inputs, u32 supported_inputs,
 		     u8 max_active_outputs, u32 __iomem *reg,
 		     const char *name_fmt, ...)

@@ -124,7 +124,7 @@ struct komeda_component {
 	/**
	 * @funcs: chip functions to access HW
	 */
-	struct komeda_component_funcs *funcs;
+	const struct komeda_component_funcs *funcs;
 };
 
 /**

@@ -346,8 +346,8 @@ struct komeda_pipeline {
 	struct komeda_improc	*improc;
 	/** @ctrlr: timing controller */
 	struct komeda_timing_ctrlr *ctrlr;
-	/** @funcs: chip pipeline functions */
-	struct komeda_pipeline_funcs *funcs; /* private pipeline functions */
+	/** @funcs: chip private pipeline functions */
+	const struct komeda_pipeline_funcs *funcs;
 
 	/** @of_node: pipeline dt node */
 	struct device_node *of_node;

@@ -397,7 +397,7 @@ struct komeda_pipeline_state {
 /* pipeline APIs */
 struct komeda_pipeline *
 komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
-		    struct komeda_pipeline_funcs *funcs);
+		    const struct komeda_pipeline_funcs *funcs);
 void komeda_pipeline_destroy(struct komeda_dev *mdev,
 			     struct komeda_pipeline *pipe);
 int komeda_assemble_pipelines(struct komeda_dev *mdev);

@@ -411,7 +411,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
 struct komeda_component *
 komeda_component_add(struct komeda_pipeline *pipe,
 		     size_t comp_sz, u32 id, u32 hw_id,
-		     struct komeda_component_funcs *funcs,
+		     const struct komeda_component_funcs *funcs,
 		     u8 max_active_inputs, u32 supported_inputs,
 		     u8 max_active_outputs, u32 __iomem *reg,
 		     const char *name_fmt, ...);

@@ -55,7 +55,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
 	struct komeda_plane_state *kplane_st = to_kplane_st(state);
 	struct komeda_layer *layer = kplane->layer;
 	struct drm_crtc_state *crtc_st;
-	struct komeda_crtc *kcrtc;
 	struct komeda_crtc_state *kcrtc_st;
 	struct komeda_data_flow_cfg dflow;
 	int err;

@@ -64,7 +63,7 @@ komeda_plane_atomic_check(struct drm_plane *plane,
 		return 0;
 
 	crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
-	if (!crtc_st->enable) {
+	if (IS_ERR(crtc_st) || !crtc_st->enable) {
 		DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
 		return -EINVAL;
 	}

@@ -73,7 +72,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
 	if (!crtc_st->active)
 		return 0;
 
-	kcrtc = to_kcrtc(state->crtc);
 	kcrtc_st = to_kcrtc_st(crtc_st);
 
 	err = komeda_plane_init_data_flow(state, &dflow);

@@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
 	clk_disable_unprepare(hdlcd->clk);
 }
 
-static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
-				   struct drm_crtc_state *state)
+static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
+						  const struct drm_display_mode *mode)
 {
 	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
-	struct drm_display_mode *mode = &state->adjusted_mode;
 	long rate, clk_rate = mode->clock * 1000;
 
 	rate = clk_round_rate(hdlcd->clk, clk_rate);
-	if (rate != clk_rate) {
+	/* 0.1% seems a close enough tolerance for the TDA19988 on Juno */
+	if (abs(rate - clk_rate) * 1000 > clk_rate) {
 		/* clock required by mode not supported by hardware */
-		return -EINVAL;
+		return MODE_NOCLOCK;
 	}
 
-	return 0;
+	return MODE_OK;
 }
 
 static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,

@@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
-	.atomic_check = hdlcd_crtc_atomic_check,
+	.mode_valid = hdlcd_crtc_mode_valid,
 	.atomic_begin = hdlcd_crtc_atomic_begin,
 	.atomic_enable = hdlcd_crtc_atomic_enable,
 	.atomic_disable = hdlcd_crtc_atomic_disable,
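The new tolerance test is integer-only: abs(rate - clk_rate) * 1000 > clk_rate rejects any rounded rate more than 0.1% away from the requested clock. A worked example (numbers illustrative):

	long clk_rate = 148500 * 1000;	/* 1080p@60: 148,500,000 Hz */
	long rate = 148351648;		/* what the clock tree can deliver */

	/* |148500000 - 148351648| = 148352; 148352 * 1000 = 148,352,000,
	 * which is not greater than 148,500,000, so this ~0.0999% deviation
	 * is accepted; the old exact-match test would have rejected it. */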

@@ -192,6 +192,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
 {
 	struct drm_device *drm = state->dev;
 	struct malidp_drm *malidp = drm->dev_private;
+	int loop = 5;
 
 	malidp->event = malidp->crtc.state->event;
 	malidp->crtc.state->event = NULL;

@@ -206,8 +207,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
 			drm_crtc_vblank_get(&malidp->crtc);
 
 		/* only set config_valid if the CRTC is enabled */
-		if (malidp_set_and_wait_config_valid(drm) < 0)
+		if (malidp_set_and_wait_config_valid(drm) < 0) {
+			/*
+			 * make a loop around the second CVAL setting and
+			 * try 5 times before giving up.
+			 */
+			while (loop--) {
+				if (!malidp_set_and_wait_config_valid(drm))
+					break;
+			}
 			DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+		}
+
 	} else if (malidp->event) {
 		/* CRTC inactive means vblank IRQ is disabled, send event directly */
 		spin_lock_irq(&drm->event_lock);

@@ -1607,15 +1607,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
 	    old_plane_state->crtc != new_plane_state->crtc)
 		return -EINVAL;
 
-	/*
-	 * FIXME: Since prepare_fb and cleanup_fb are always called on
-	 * the new_plane_state for async updates we need to block framebuffer
-	 * changes. This prevents use of a fb that's been cleaned up and
-	 * double cleanups from occuring.
-	 */
-	if (old_plane_state->fb != new_plane_state->fb)
-		return -EINVAL;
-
 	funcs = plane->helper_private;
 	if (!funcs->atomic_async_update)
 		return -EINVAL;

@@ -1646,6 +1637,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check);
 * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
 * the states like normal sync commits, but just do in-place changes on the
 * current state.
+ *
+ * TODO: Implement full swap instead of doing in-place changes.
 */
 void drm_atomic_helper_async_commit(struct drm_device *dev,
 				    struct drm_atomic_state *state)

@@ -1656,6 +1649,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
 	int i;
 
 	for_each_new_plane_in_state(state, plane, plane_state, i) {
+		struct drm_framebuffer *new_fb = plane_state->fb;
+		struct drm_framebuffer *old_fb = plane->state->fb;
+
 		funcs = plane->helper_private;
 		funcs->atomic_async_update(plane, plane_state);
 

@@ -1664,11 +1660,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
 		 * plane->state in-place, make sure at least common
 		 * properties have been properly updated.
 		 */
-		WARN_ON_ONCE(plane->state->fb != plane_state->fb);
+		WARN_ON_ONCE(plane->state->fb != new_fb);
 		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
 		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
 		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
 		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
+
+		/*
+		 * Make sure the FBs have been swapped so that cleanups in the
+		 * new_state performs a cleanup in the old FB.
+		 */
+		WARN_ON_ONCE(plane_state->fb != old_fb);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
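With the helper now warning when plane_state->fb does not end up holding the old framebuffer, a driver's atomic_async_update must swap rather than merely copy the fb pointer, so that prepare_fb/cleanup_fb called on the new state releases the buffer being displaced. A sketch of a conforming implementation (the amdgpu, vc4 and msm hunks in this pull follow this shape):

static void example_plane_atomic_async_update(struct drm_plane *plane,
					      struct drm_plane_state *new_state)
{
	/* hand the old FB to new_state and take the new one; cleanup_fb()
	 * on new_state then cleans up the displaced buffer */
	swap(plane->state->fb, new_state->fb);

	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	/* remaining in-place updates elided */
}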

@@ -2530,7 +2530,7 @@ static const struct cmd_info cmd_info[] = {
 		0, 12, NULL},
 
 	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
-		0, 20, NULL},
+		0, 12, NULL},
 };
 
 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)

@@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192;
 */
 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 {
-	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
-			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
-			     addr, size);
-		return false;
-	}
-	return true;
+	if (size == 0)
+		return vgpu_gmadr_is_valid(vgpu, addr);
+
+	if (vgpu_gmadr_is_aperture(vgpu, addr) &&
+	    vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
+		return true;
+	else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
+		 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
+		return true;
+
+	gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
+		   addr, size);
+	return false;
 }
 
 /* translate a guest gmadr to host gmadr */

@@ -942,7 +948,16 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
 
 	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
 	    && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
-		cur_pt_type = get_next_pt_type(e->type) + 1;
+		cur_pt_type = get_next_pt_type(e->type);
+
+		if (!gtt_type_is_pt(cur_pt_type) ||
+		    !gtt_type_is_pt(cur_pt_type + 1)) {
+			WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+			return -EINVAL;
+		}
+
+		cur_pt_type += 1;
+
 		if (ops->get_pfn(e) ==
 		    vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
 			return 0;

@@ -1102,6 +1117,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 
 err_free_spt:
 	ppgtt_free_spt(spt);
+	spt = NULL;
 err:
 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
 		     spt, we->val64, we->type);

@@ -2183,7 +2199,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
 	unsigned long gma, gfn;
-	struct intel_gvt_gtt_entry e, m;
+	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
 	dma_addr_t dma_addr;
 	int ret;
 	struct intel_gvt_partial_pte *partial_pte, *pos, *n;

@@ -2250,7 +2267,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 
 	if (!partial_update && (ops->test_present(&e))) {
 		gfn = ops->get_pfn(&e);
-		m = e;
+		m.val64 = e.val64;
+		m.type = e.type;
 
 		/* one PTE update may be issued in multiple writes and the
 		 * first write may not construct a valid gfn
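Why the old range check was too weak: guest graphics memory has two windows (aperture and hidden), and each endpoint of a range can be individually valid while the range straddles the hole between them. Illustrative numbers under a hypothetical layout:

	/* hypothetical layout: aperture [0, 1MB), hidden [2MB, 3MB) */
	u64 addr = 0x0ff000;		/* last page of the aperture      */
	u32 size = 0x102000;		/* ends at 0x200fff, in hidden GM */

	/* old check: addr and addr + size - 1 are each "valid" (one per
	 * window), so the straddling range passed; the rewrite requires
	 * both ends to sit in the same window and rejects it. */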

@@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = {
 	_MMIO(0x2690),
 	_MMIO(0x2694),
 	_MMIO(0x2698),
+	_MMIO(0x2754),
+	_MMIO(0x28a0),
 	_MMIO(0x4de0),
 	_MMIO(0x4de4),
 	_MMIO(0x4dfc),

@@ -1690,8 +1692,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	bool enable_execlist;
 	int ret;
 
+	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+	if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
 	write_vreg(vgpu, offset, p_data, bytes);
 
+	if (data & _MASKED_BIT_ENABLE(1)) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
+
+	if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+	    data & _MASKED_BIT_ENABLE(2)) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
+
 	/* when PPGTT mode enabled, we will check if guest has called
 	 * pvinfo, if not, we will treat this guest as non-gvtg-aware
 	 * guest, and stop emulating its cfg space, mmio, gtt, etc.

@@ -1773,6 +1789,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
+				    unsigned int offset, void *p_data,
+				    unsigned int bytes)
+{
+	u32 data = *(u32 *)p_data;
+
+	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+	return 0;
+}
+
 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
 	ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
 			    f, s, am, rm, d, r, w); \

@@ -1893,7 +1924,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
+		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,

@@ -2997,7 +3029,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
 	MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
 
-	MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
+	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_D(SKL_DFSM, D_SKL_PLUS);
 	MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);

@@ -3010,8 +3042,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
 	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
-	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
-		 NULL, NULL);
+	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
+		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		 NULL, NULL);
 

@@ -3030,7 +3062,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
-	MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
+	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
 
 	MMIO_D(_MMIO(0x65900), D_SKL_PLUS);

@@ -3059,7 +3091,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
-	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		      NULL, csfe_chicken1_mmio_write);
+#undef CSFE_CHICKEN1_REG
 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		 NULL, NULL);
 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,

@@ -3239,7 +3274,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
 	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
 	MMIO_D(GEN6_GFXPAUSE, D_BXT);
-	MMIO_D(GEN8_L3SQCREG1, D_BXT);
+	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
 
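The ring_mode and csfe_chicken1 handlers above rely on the i915 masked-register convention: the upper 16 bits of a written value select which of the lower 16 bits take effect, so _MASKED_BIT_ENABLE(b) expands to roughly (b << 16) | b. The handlers first snapshot what the guest wrote, then clear the enable bits before write_vreg() so the unsupported feature never lands in the vreg, and finally trip failsafe if the guest had asked for it:

	u32 data = *(u32 *)p_data;			/* guest's original write */

	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);	/* never record the bit   */
	write_vreg(vgpu, offset, p_data, bytes);	/* store sanitized value  */

	if (data & _MASKED_BIT_ENABLE(1))		/* but if it was requested */
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);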

@@ -102,6 +102,8 @@
 #define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
 #define FORCEWAKE_ACK_HSW_REG 0x130044
 
+#define RB_HEAD_WRAP_CNT_MAX	((1 << 11) - 1)
+#define RB_HEAD_WRAP_CNT_OFF	21
 #define RB_HEAD_OFF_MASK	((1U << 21) - (1U << 2))
 #define RB_TAIL_OFF_MASK	((1U << 21) - (1U << 3))
 #define RB_TAIL_SIZE_MASK	((1U << 21) - (1U << 12))

@@ -812,10 +812,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	void *src;
 	unsigned long context_gpa, context_page_num;
 	int i;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	u32 ring_base;
+	u32 head, tail;
+	u16 wrap_count;
 
 	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
 		      workload->ctx_desc.lrca);
 
+	head = workload->rb_head;
+	tail = workload->rb_tail;
+	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
+
+	if (tail < head) {
+		if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
+			wrap_count = 0;
+		else
+			wrap_count += 1;
+	}
+
+	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
+
+	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
+	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
+
 	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 

@@ -1415,6 +1436,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
 	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+	u32 guest_head;
 	int ret;
 
 	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,

@@ -1430,6 +1452,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(ring_tail.val), &tail, 4);
 
+	guest_head = head;
+
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 

@@ -1462,6 +1486,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	workload->ctx_desc = *desc;
 	workload->ring_context_gpa = ring_context_gpa;
 	workload->rb_head = head;
+	workload->guest_rb_head = guest_head;
 	workload->rb_tail = tail;
 	workload->rb_start = start;
 	workload->rb_ctl = ctl;
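The head register layout behind this: per the reg.h hunk earlier, bits 2..20 hold the ring head offset and bits 21..31 an 11-bit wrap counter. Since the shadow ring consumed everything up to tail, the code reports head == tail to the guest but still advances the guest's wrap counter when a wrap happened. A worked illustration (values invented):

	u32 guest_rb_head = (5u << RB_HEAD_WRAP_CNT_OFF) | 0x1000;
	u32 head = 0x1f00, tail = 0x0100;	/* tail < head: we wrapped */
	u16 wrap_count = guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;	/* 5 */

	if (tail < head)
		wrap_count = (wrap_count == RB_HEAD_WRAP_CNT_MAX) ?
				0 : wrap_count + 1;		/* now 6 */

	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
	/* head == (6 << 21) | 0x0100: counter advanced, offset == tail */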

@@ -100,6 +100,7 @@ struct intel_vgpu_workload {
 	struct execlist_ctx_descriptor_format ctx_desc;
 	struct execlist_ring_context *ring_context;
 	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+	unsigned long guest_rb_head;
 	bool restore_inhibit;
 	struct intel_vgpu_elsp_dwords elsp_dwords;
 	bool emulate_schedule_in;

@@ -7620,6 +7620,9 @@ enum {
 #define   GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION	(1 << 8)
 #define   GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE		(1 << 0)
 
+#define GEN8_L3CNTLREG	_MMIO(0x7034)
+#define   GEN8_ERRDETBCTRL	(1 << 9)
+
 #define GEN11_COMMON_SLICE_CHICKEN3	_MMIO(0x7304)
 #define   GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC	(1 << 11)
 

@@ -518,6 +518,12 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
 	struct drm_i915_private *i915 = engine->i915;
 	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
+	/* WaDisableBankHangMode:icl */
+	wa_write(wal,
+		 GEN8_L3CNTLREG,
+		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
+		 GEN8_ERRDETBCTRL);
+
 	/* Wa_1604370585:icl (pre-prod)
 	 * Formerly known as WaPushConstantDereferenceHoldDisable
 	 */

@@ -502,6 +502,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
 static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
 					   struct drm_plane_state *new_state)
 {
+	struct drm_framebuffer *old_fb = plane->state->fb;
+
 	plane->state->src_x = new_state->src_x;
 	plane->state->src_y = new_state->src_y;
 	plane->state->crtc_x = new_state->crtc_x;

@@ -524,6 +526,8 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
 
 	*to_mdp5_plane_state(plane->state) =
 		*to_mdp5_plane_state(new_state);
+
+	new_state->fb = old_fb;
 }
 
 static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {

@@ -1,12 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __NVKM_FIRMWARE_H__
 #define __NVKM_FIRMWARE_H__
+#include <core/subdev.h>
 
-#include <core/device.h>
-
-int nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
-		      const struct firmware **fw);
-
-void nvkm_firmware_put(const struct firmware *fw);
-
+int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname,
+			      int min_version, int max_version,
+			      const struct firmware **);
+int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname,
+		      const struct firmware **);
+void nvkm_firmware_put(const struct firmware *);
 #endif

@@ -24,7 +24,7 @@
 
 /**
 * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
- * @device	device that will use that firmware
+ * @subdev	subdevice that will use that firmware
 * @fwname	name of firmware file to load
 * @fw		firmware structure to load to
 *

@@ -32,9 +32,11 @@
 * Firmware files released by NVIDIA will always follow this format.
 */
 int
-nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
-		  const struct firmware **fw)
+nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
+			  int min_version, int max_version,
+			  const struct firmware **fw)
 {
+	struct nvkm_device *device = subdev->device;
 	char f[64];
 	char cname[16];
 	int i;

@@ -48,8 +50,29 @@ nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
 		cname[i] = tolower(cname[i]);
 	}
 
-	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
-	return request_firmware(fw, f, device->dev);
+	for (i = max_version; i >= min_version; i--) {
+		if (i != 0)
+			snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i);
+		else
+			snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+
+		if (!firmware_request_nowarn(fw, f, device->dev)) {
+			nvkm_debug(subdev, "firmware \"%s\" loaded\n", f);
+			return i;
+		}
+
+		nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
+	}
+
+	nvkm_error(subdev, "failed to load firmware \"%s\"", fwname);
+	return -ENOENT;
+}
+
+int
+nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname,
+		  const struct firmware **fw)
+{
+	return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw);
+}
 
 /**
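A usage sketch for the versioned interface (firmware name illustrative): a subdev that understands, say, image layouts 0 and 1 asks for the newest available and branches on the returned index, while plain nvkm_firmware_get() keeps the old single-version behaviour via min = max = 0.

	const struct firmware *fw;
	int ver;

	/* tries nvidia/<chip>/acr/ucode_load-1.bin first, then
	 * falls back to nvidia/<chip>/acr/ucode_load.bin */
	ver = nvkm_firmware_get_version(subdev, "acr/ucode_load", 0, 1, &fw);
	if (ver < 0)
		return ver;		/* -ENOENT: nothing usable found */

	if (ver == 1) {
		/* parse the newer layout */
	} else {
		/* parse the legacy layout */
	}
	nvkm_firmware_put(fw);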

@@ -2115,12 +2115,10 @@ int
 gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
 		 struct gf100_gr_fuc *fuc)
 {
-	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
 	const struct firmware *fw;
 	int ret;
 
-	ret = nvkm_firmware_get(device, fwname, &fw);
+	ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw);
 	if (ret) {
 		ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
 		if (ret)

@@ -36,7 +36,7 @@ nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
 	void *blob;
 	int ret;
 
-	ret = nvkm_firmware_get(subdev->device, name, &fw);
+	ret = nvkm_firmware_get(subdev, name, &fw);
 	if (ret)
 		return ERR_PTR(ret);
 	if (fw->size < min_size) {

@@ -229,6 +229,8 @@ struct acr_r352_lsf_wpr_header {
 struct ls_ucode_img_r352 {
 	struct ls_ucode_img base;
 
+	const struct acr_r352_lsf_func *func;
+
 	struct acr_r352_lsf_wpr_header wpr_header;
 	struct acr_r352_lsf_lsb_header lsb_header;
 };

@@ -243,6 +245,7 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
 			   enum nvkm_secboot_falcon falcon_id)
 {
 	const struct nvkm_subdev *subdev = acr->base.subdev;
+	const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
 	struct ls_ucode_img_r352 *img;
 	int ret;
 

@@ -252,15 +255,16 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
 
 	img->base.falcon_id = falcon_id;
 
-	ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
-
-	if (ret) {
+	ret = func->load(sb, func->version_max, &img->base);
+	if (ret < 0) {
 		kfree(img->base.ucode_data);
 		kfree(img->base.sig);
 		kfree(img);
 		return ERR_PTR(ret);
 	}
 
+	img->func = func->version[ret];
+
 	/* Check that the signature size matches our expectations... */
 	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
 		nvkm_error(subdev, "invalid signature size for %s falcon!\n",

@@ -302,8 +306,7 @@ acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
 	struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
 	struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
 	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
-	const struct acr_r352_ls_func *func =
-		acr->func->ls_func[_img->falcon_id];
+	const struct acr_r352_lsf_func *func = img->func;
 
 	/* Fill WPR header */
 	whdr->falcon_id = _img->falcon_id;

@@ -419,8 +422,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 
 	/* Figure out how large we need gdesc to be. */
 	list_for_each_entry(_img, imgs, node) {
-		const struct acr_r352_ls_func *ls_func =
-			acr->func->ls_func[_img->falcon_id];
+		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
+		const struct acr_r352_lsf_func *ls_func = img->func;
 
 		max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
 	}

@@ -433,8 +436,7 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 
 	list_for_each_entry(_img, imgs, node) {
 		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
-		const struct acr_r352_ls_func *ls_func =
-			acr->func->ls_func[_img->falcon_id];
+		const struct acr_r352_lsf_func *ls_func = img->func;
 
 		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
 				      sizeof(img->wpr_header));

@@ -1063,22 +1065,38 @@ acr_r352_dtor(struct nvkm_acr *_acr)
 	kfree(acr);
 }
 
-const struct acr_r352_ls_func
-acr_r352_ls_fecs_func = {
-	.load = acr_ls_ucode_load_fecs,
+static const struct acr_r352_lsf_func
+acr_r352_ls_fecs_func_0 = {
 	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
 };
 
-const struct acr_r352_ls_func
-acr_r352_ls_gpccs_func = {
-	.load = acr_ls_ucode_load_gpccs,
+const struct acr_r352_ls_func
+acr_r352_ls_fecs_func = {
+	.load = acr_ls_ucode_load_fecs,
+	.version_max = 0,
+	.version = {
+		&acr_r352_ls_fecs_func_0,
+	}
+};
+
+static const struct acr_r352_lsf_func
+acr_r352_ls_gpccs_func_0 = {
 	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
 	/* GPCCS will be loaded using PRI */
 	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
 };
+
+const struct acr_r352_ls_func
+acr_r352_ls_gpccs_func = {
+	.load = acr_ls_ucode_load_gpccs,
+	.version_max = 0,
+	.version = {
+		&acr_r352_ls_gpccs_func_0,
+	}
+};
 
 
 /**

@@ -1150,12 +1168,20 @@ acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
 	desc->argv = addr_args;
 }
 
+static const struct acr_r352_lsf_func
+acr_r352_ls_pmu_func_0 = {
+	.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
+};
+
 static const struct acr_r352_ls_func
 acr_r352_ls_pmu_func = {
 	.load = acr_ls_ucode_load_pmu,
-	.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
 	.post_run = acr_ls_pmu_post_run,
+	.version_max = 0,
+	.version = {
+		&acr_r352_ls_pmu_func_0,
+	}
 };
 
 const struct acr_r352_func

@@ -47,24 +47,34 @@ hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
 }
 
 /**
- * struct acr_r352_ls_func - manages a single LS firmware
+ * struct acr_r352_lsf_func - manages a specific LS firmware version
 *
- * @load: load the external firmware into a ls_ucode_img
 * @generate_bl_desc: function called on a block of bl_desc_size to generate the
 *		      proper bootloader descriptor for this LS firmware
 * @bl_desc_size: size of the bootloader descriptor
- * @post_run: hook called right after the ACR is executed
 * @lhdr_flags: LS flags
 */
-struct acr_r352_ls_func {
-	int (*load)(const struct nvkm_secboot *, struct ls_ucode_img *);
+struct acr_r352_lsf_func {
 	void (*generate_bl_desc)(const struct nvkm_acr *,
				 const struct ls_ucode_img *, u64, void *);
 	u32 bl_desc_size;
-	int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
 	u32 lhdr_flags;
 };
 
+/**
+ * struct acr_r352_ls_func - manages a single LS falcon
+ *
+ * @load: load the external firmware into a ls_ucode_img
+ * @post_run: hook called right after the ACR is executed
+ */
+struct acr_r352_ls_func {
+	int (*load)(const struct nvkm_secboot *, int maxver,
+		    struct ls_ucode_img *);
+	int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
+	int version_max;
+	const struct acr_r352_lsf_func *version[];
+};
+
 struct acr_r352;
 
 /**
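The split above separates per-falcon hooks from per-firmware-version hooks: load() probes from version_max downward, returns the index of the version it actually found, and the caller picks the matching acr_r352_lsf_func out of version[]. A self-contained miniature of that dispatch (types hypothetical):

struct lsf_func {			/* per-version ops */
	unsigned int bl_desc_size;
};

struct ls_func {			/* per-falcon ops */
	int (*load)(int maxver);	/* returns the version index found */
	int version_max;
	const struct lsf_func *version[];
};

static const struct lsf_func *pick_version(const struct ls_func *f)
{
	int ret = f->load(f->version_max);

	if (ret < 0)
		return NULL;		/* no firmware available */
	return f->version[ret];		/* ops matching the found version */
}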

@@ -66,22 +66,38 @@ acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
 	bl_desc->data_size = hdr->data_size;
 }
 
-const struct acr_r352_ls_func
-acr_r361_ls_fecs_func = {
-	.load = acr_ls_ucode_load_fecs,
+static const struct acr_r352_lsf_func
+acr_r361_ls_fecs_func_0 = {
 	.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
 };
 
-const struct acr_r352_ls_func
-acr_r361_ls_gpccs_func = {
-	.load = acr_ls_ucode_load_gpccs,
+const struct acr_r352_ls_func
+acr_r361_ls_fecs_func = {
+	.load = acr_ls_ucode_load_fecs,
+	.version_max = 0,
+	.version = {
+		&acr_r361_ls_fecs_func_0,
+	}
+};
+
+static const struct acr_r352_lsf_func
+acr_r361_ls_gpccs_func_0 = {
 	.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
 	/* GPCCS will be loaded using PRI */
 	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
 };
+
+const struct acr_r352_ls_func
+acr_r361_ls_gpccs_func = {
+	.load = acr_ls_ucode_load_gpccs,
+	.version_max = 0,
+	.version = {
+		&acr_r361_ls_gpccs_func_0,
+	}
+};
 
 struct acr_r361_pmu_bl_desc {
 	u32 reserved;
 	u32 dma_idx;

@@ -125,12 +141,20 @@ acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
 	desc->argv = addr_args;
 }
 
+static const struct acr_r352_lsf_func
+acr_r361_ls_pmu_func_0 = {
+	.generate_bl_desc = acr_r361_generate_pmu_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
+};
+
 const struct acr_r352_ls_func
 acr_r361_ls_pmu_func = {
 	.load = acr_ls_ucode_load_pmu,
-	.generate_bl_desc = acr_r361_generate_pmu_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
 	.post_run = acr_ls_pmu_post_run,
+	.version_max = 0,
+	.version = {
+		&acr_r361_ls_pmu_func_0,
+	}
 };
 
 static void

@@ -164,12 +188,20 @@ acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
 	desc->argv = 0x01000000;
 }
 
-const struct acr_r352_ls_func
-acr_r361_ls_sec2_func = {
-	.load = acr_ls_ucode_load_sec2,
+const struct acr_r352_lsf_func
+acr_r361_ls_sec2_func_0 = {
 	.generate_bl_desc = acr_r361_generate_sec2_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
-	.post_run = acr_ls_sec2_post_run,
+};
+
+static const struct acr_r352_ls_func
+acr_r361_ls_sec2_func = {
+	.load = acr_ls_ucode_load_sec2,
+	.post_run = acr_ls_sec2_post_run,
+	.version_max = 0,
+	.version = {
+		&acr_r361_ls_sec2_func_0,
+	}
 };
 
 

@@ -67,6 +67,5 @@ void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
 extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
 extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
 extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
-extern const struct acr_r352_ls_func acr_r361_ls_sec2_func;
-
+extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
 #endif

@@ -22,6 +22,7 @@
 
 #include "acr_r367.h"
 #include "acr_r361.h"
+#include "acr_r370.h"
 
 #include <core/gpuobj.h>
 

@@ -100,6 +101,8 @@ struct acr_r367_lsf_wpr_header {
 struct ls_ucode_img_r367 {
 	struct ls_ucode_img base;
 
+	const struct acr_r352_lsf_func *func;
+
 	struct acr_r367_lsf_wpr_header wpr_header;
 	struct acr_r367_lsf_lsb_header lsb_header;
 };

@@ -111,6 +114,7 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
 			   enum nvkm_secboot_falcon falcon_id)
 {
 	const struct nvkm_subdev *subdev = acr->base.subdev;
+	const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
 	struct ls_ucode_img_r367 *img;
 	int ret;
 

@@ -120,14 +124,16 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
 
 	img->base.falcon_id = falcon_id;
 
-	ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
-	if (ret) {
+	ret = func->load(sb, func->version_max, &img->base);
+	if (ret < 0) {
 		kfree(img->base.ucode_data);
 		kfree(img->base.sig);
 		kfree(img);
 		return ERR_PTR(ret);
 	}
 
+	img->func = func->version[ret];
+
 	/* Check that the signature size matches our expectations... */
 	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
 		nvkm_error(subdev, "invalid signature size for %s falcon!\n",

@@ -158,8 +164,7 @@ acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
 	struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
 	struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
 	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
-	const struct acr_r352_ls_func *func =
-		acr->func->ls_func[_img->falcon_id];
+	const struct acr_r352_lsf_func *func = img->func;
 
 	/* Fill WPR header */
 	whdr->falcon_id = _img->falcon_id;

@@ -269,8 +274,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 	u8 *gdesc;
 
 	list_for_each_entry(_img, imgs, node) {
-		const struct acr_r352_ls_func *ls_func =
-			acr->func->ls_func[_img->falcon_id];
+		struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
+		const struct acr_r352_lsf_func *ls_func = img->func;
 
 		max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
 	}

@@ -283,8 +288,7 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 
 	list_for_each_entry(_img, imgs, node) {
 		struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
-		const struct acr_r352_ls_func *ls_func =
-			acr->func->ls_func[_img->falcon_id];
+		const struct acr_r352_lsf_func *ls_func = img->func;
 
 		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
 				      sizeof(img->wpr_header));

@@ -378,6 +382,17 @@ acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
 	}
 }
 
+static const struct acr_r352_ls_func
+acr_r367_ls_sec2_func = {
+	.load = acr_ls_ucode_load_sec2,
+	.post_run = acr_ls_sec2_post_run,
+	.version_max = 1,
+	.version = {
+		&acr_r361_ls_sec2_func_0,
+		&acr_r370_ls_sec2_func_0,
+	}
+};
+
 const struct acr_r352_func
 acr_r367_func = {
 	.fixup_hs_desc = acr_r367_fixup_hs_desc,

@@ -391,7 +406,7 @@ acr_r367_func = {
 		[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
 		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
 		[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
-		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
+		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
 	},
 };
 

@@ -49,22 +49,38 @@ acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
 	desc->data_size = pdesc->app_resident_data_size;
 }
 
-const struct acr_r352_ls_func
-acr_r370_ls_fecs_func = {
-	.load = acr_ls_ucode_load_fecs,
+static const struct acr_r352_lsf_func
+acr_r370_ls_fecs_func_0 = {
 	.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
 };
 
-const struct acr_r352_ls_func
-acr_r370_ls_gpccs_func = {
-	.load = acr_ls_ucode_load_gpccs,
+const struct acr_r352_ls_func
+acr_r370_ls_fecs_func = {
+	.load = acr_ls_ucode_load_fecs,
+	.version_max = 0,
+	.version = {
+		&acr_r370_ls_fecs_func_0,
+	}
+};
+
+static const struct acr_r352_lsf_func
+acr_r370_ls_gpccs_func_0 = {
 	.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
 	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
 	/* GPCCS will be loaded using PRI */
 	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
 };
+
+const struct acr_r352_ls_func
+acr_r370_ls_gpccs_func = {
+	.load = acr_ls_ucode_load_gpccs,
+	.version_max = 0,
+	.version = {
+		&acr_r370_ls_gpccs_func_0,
+	}
+};
 
 static void
 acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
 			       const struct ls_ucode_img *img, u64 wpr_addr,

@@ -95,12 +111,20 @@ acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
 	desc->argv = 0x01000000;
 }
 
+const struct acr_r352_lsf_func
+acr_r370_ls_sec2_func_0 = {
+	.generate_bl_desc = acr_r370_generate_sec2_bl_desc,
+	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
 const struct acr_r352_ls_func
 acr_r370_ls_sec2_func = {
 	.load = acr_ls_ucode_load_sec2,
-	.generate_bl_desc = acr_r370_generate_sec2_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
 	.post_run = acr_ls_sec2_post_run,
+	.version_max = 0,
+	.version = {
+		&acr_r370_ls_sec2_func_0,
+	}
 };
 
 void

@@ -46,4 +46,5 @@ struct acr_r370_flcn_bl_desc {
 void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
 extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
 extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
+extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
 #endif
|
|
|
@@ -54,12 +54,20 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
desc->argv = addr_args;
}

static const struct acr_r352_lsf_func
acr_r375_ls_pmu_func_0 = {
.generate_bl_desc = acr_r375_generate_pmu_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r375_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
.generate_bl_desc = acr_r375_generate_pmu_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.post_run = acr_ls_pmu_post_run,
.version_max = 0,
.version = {
&acr_r375_ls_pmu_func_0,
}
};

const struct acr_r352_func

@@ -147,11 +147,15 @@ struct fw_bl_desc {
u32 data_size;
};

int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *);
int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *);
int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *);
int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
struct ls_ucode_img *);
int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
struct ls_ucode_img *);
int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
struct ls_ucode_img *);
int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *);
int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
struct ls_ucode_img *);
int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);

#endif

@@ -90,30 +90,30 @@ ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
* blob. Also generate the corresponding ucode descriptor.
*/
static int
ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
const char *falcon_name)
ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
struct ls_ucode_img *img, const char *falcon_name)
{
const struct firmware *bl, *code, *data, *sig;
char f[64];
int ret;

snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
ret = nvkm_firmware_get(subdev->device, f, &bl);
ret = nvkm_firmware_get(subdev, f, &bl);
if (ret)
goto error;

snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
ret = nvkm_firmware_get(subdev->device, f, &code);
ret = nvkm_firmware_get(subdev, f, &code);
if (ret)
goto free_bl;

snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
ret = nvkm_firmware_get(subdev->device, f, &data);
ret = nvkm_firmware_get(subdev, f, &data);
if (ret)
goto free_inst;

snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
ret = nvkm_firmware_get(subdev->device, f, &sig);
ret = nvkm_firmware_get(subdev, f, &sig);
if (ret)
goto free_data;

@@ -146,13 +146,15 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
}

int
acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
struct ls_ucode_img *img)
{
return ls_ucode_img_load_gr(&sb->subdev, img, "fecs");
return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
}

int
acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
struct ls_ucode_img *img)
{
return ls_ucode_img_load_gr(&sb->subdev, img, "gpccs");
return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
}

@@ -39,32 +39,32 @@
*/
static int
acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
struct ls_ucode_img *img)
int maxver, struct ls_ucode_img *img)
{
const struct firmware *image, *desc, *sig;
char f[64];
int ret;
int ver, ret;

snprintf(f, sizeof(f), "%s/image", name);
ret = nvkm_firmware_get(subdev->device, f, &image);
if (ret)
return ret;
ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
if (ver < 0)
return ver;
img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
nvkm_firmware_put(image);
if (!img->ucode_data)
return -ENOMEM;

snprintf(f, sizeof(f), "%s/desc", name);
ret = nvkm_firmware_get(subdev->device, f, &desc);
if (ret)
ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
if (ret < 0)
return ret;
memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
nvkm_firmware_put(desc);

snprintf(f, sizeof(f), "%s/sig", name);
ret = nvkm_firmware_get(subdev->device, f, &sig);
if (ret)
ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
if (ret < 0)
return ret;
img->sig_size = sig->size;
img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);

@@ -72,7 +72,7 @@ acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
if (!img->sig)
return -ENOMEM;

return 0;
return ver;
}

static int

@@ -99,12 +99,13 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
}

int
acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
struct ls_ucode_img *img)
{
struct nvkm_pmu *pmu = sb->subdev.device->pmu;
int ret;

ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", img);
ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
if (ret)
return ret;

@@ -136,14 +137,15 @@ acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
}

int
acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
struct ls_ucode_img *img)
{
struct nvkm_sec2 *sec = sb->subdev.device->sec2;
int ret;
int ver, ret;

ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", img);
if (ret)
return ret;
ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
if (ver < 0)
return ver;

/* Allocate the PMU queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,

@@ -151,7 +153,7 @@ acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
if (ret)
return ret;

return 0;
return ver;
}

int

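Taken together, the hunks above define the versioned-loading contract: nvkm_firmware_get_version() looks for the newest firmware file within a [min, max] version window and returns the version it found (negative on error), the companion desc/sig files are then requested at exactly that version, and the load callbacks return the version rather than 0 so the ACR code can pick matching function pointers. A hedged sketch of a caller following that contract is below; it assumes the nvkm declarations are in scope, the helper name and unwind labels are illustrative, and only the nvkm_firmware_* calls are taken from this diff.

/* Illustrative caller of the versioned-load contract shown above. */
static int
load_versioned_fw_set(const struct nvkm_subdev *subdev, const char *base,
		      int maxver, const struct firmware **image,
		      const struct firmware **desc, const struct firmware **sig)
{
	char f[64];
	int ver, ret;

	/* Accept any version from 0 up to maxver; newest wins. */
	snprintf(f, sizeof(f), "%s/image", base);
	ver = nvkm_firmware_get_version(subdev, f, 0, maxver, image);
	if (ver < 0)
		return ver;

	/* The desc and sig must match the image version exactly. */
	snprintf(f, sizeof(f), "%s/desc", base);
	ret = nvkm_firmware_get_version(subdev, f, ver, ver, desc);
	if (ret < 0)
		goto put_image;

	snprintf(f, sizeof(f), "%s/sig", base);
	ret = nvkm_firmware_get_version(subdev, f, ver, ver, sig);
	if (ret < 0)
		goto put_desc;

	return ver;	/* propagate the version, not 0 */

put_desc:
	nvkm_firmware_put(*desc);
put_image:
	nvkm_firmware_put(*image);
	return ret;
}
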
@@ -924,29 +924,17 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct vop *vop = to_vop(plane->state->crtc);
struct drm_plane_state *plane_state;
struct drm_framebuffer *old_fb = plane->state->fb;

plane_state = plane->funcs->atomic_duplicate_state(plane);
plane_state->crtc_x = new_state->crtc_x;
plane_state->crtc_y = new_state->crtc_y;
plane_state->crtc_h = new_state->crtc_h;
plane_state->crtc_w = new_state->crtc_w;
plane_state->src_x = new_state->src_x;
plane_state->src_y = new_state->src_y;
plane_state->src_h = new_state->src_h;
plane_state->src_w = new_state->src_w;

if (plane_state->fb != new_state->fb)
drm_atomic_set_fb_for_plane(plane_state, new_state->fb);

swap(plane_state, plane->state);

if (plane->state->fb && plane->state->fb != new_state->fb) {
drm_framebuffer_get(plane->state->fb);
WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
drm_flip_work_queue(&vop->fb_unref_work, plane->state->fb);
set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
}
plane->state->crtc_x = new_state->crtc_x;
plane->state->crtc_y = new_state->crtc_y;
plane->state->crtc_h = new_state->crtc_h;
plane->state->crtc_w = new_state->crtc_w;
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
plane->state->src_h = new_state->src_h;
plane->state->src_w = new_state->src_w;
swap(plane->state->fb, new_state->fb);

if (vop->is_enabled) {
rockchip_drm_psr_inhibit_get_state(new_state->state);

@@ -955,9 +943,22 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
rockchip_drm_psr_inhibit_put_state(new_state->state);
}

plane->funcs->atomic_destroy_state(plane, plane_state);
/*
* A scanout can still be occurring, so we can't drop the
* reference to the old framebuffer. To solve this we get a
* reference to old_fb and set a worker to release it later.
* FIXME: if we perform 500 async_update calls before the
* vblank, then we can have 500 different framebuffers waiting
* to be released.
*/
if (old_fb && plane->state->fb != old_fb) {
drm_framebuffer_get(old_fb);
WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
drm_flip_work_queue(&vop->fb_unref_work, old_fb);
set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
}
}
}

static const struct drm_plane_helper_funcs plane_helper_funcs = {

@@ -1025,7 +1025,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
{
struct vc4_plane_state *vc4_state, *new_vc4_state;

drm_atomic_set_fb_for_plane(plane->state, state->fb);
swap(plane->state->fb, state->fb);
plane->state->crtc_x = state->crtc_x;
plane->state->crtc_y = state->crtc_y;
plane->state->crtc_w = state->crtc_w;

@@ -1185,6 +1185,14 @@ struct drm_plane_helper_funcs {
* current one with the new plane configurations in the new
* plane_state.
*
* Drivers should also swap the framebuffers between current plane
* state (&drm_plane.state) and new_state.
* This is required since cleanup for async commits is performed on
* the new state, rather than old state like for traditional commits.
* Since we want to give up the reference on the current (old) fb
* instead of our brand new one, swap them in the driver during the
* async commit.
*
* FIXME:
* - It only works for single plane updates
* - Async Pageflips are not supported yet
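In driver terms, the rule documented above reduces to the pattern the vc4 and rockchip hunks implement: copy the new scanout coordinates into the current plane state, then swap() the fb pointers so that async cleanup, which runs on the new state, releases the reference to the old fb instead of the freshly attached one. A minimal sketch of a hook honoring that contract follows; foo_plane_atomic_async_update and foo_hw_commit are placeholders, not a real driver.

#include <linux/kernel.h>	/* swap() */
#include <drm/drm_plane.h>

/* Placeholder: program the hardware from plane->state. */
static void foo_hw_commit(struct drm_plane *plane);

static void
foo_plane_atomic_async_update(struct drm_plane *plane,
			      struct drm_plane_state *new_state)
{
	/* Update the current state in place with the new coordinates. */
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;
	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;

	/*
	 * Swap fbs: async cleanup is performed on new_state, so after
	 * the swap it drops the old fb's reference, not the new one's.
	 */
	swap(plane->state->fb, new_state->fb);

	foo_hw_commit(plane);
}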