forked from luck/tmp_suning_uos_patched
drm/tegra: Changes for v5.5-rc1
The bulk of these changes is the addition of DisplayPort support for Tegra210, Tegra186 and Tegra194. I've been running versions of this for about three years now, so I'd consider these changes to be pretty mature. These changes also unify the existing eDP support with the DP support since the programming is very similar, except for a few steps that can be easily parameterized. The rest are a couple of fixes all over the place for minor issues, as well as some work to support the IOMMU-backed DMA API, which in the end turned out to also clean up a number of cases where the DMA API was not being used correctly. -----BEGIN PGP SIGNATURE----- iQJHBAABCAAxFiEEiOrDCAFJzPfAjcif3SOs138+s6EFAl29isQTHHRyZWRpbmdA bnZpZGlhLmNvbQAKCRDdI6zXfz6zoSXBEACTkVCNwi6xq2mqYl2OnxMEEGuFvMgH bZrXPm54Md2qM0IBOXCM8GDl6aO6iwj8q+RVADSnU0XlravfTGtVPhLLNbBBgIMy wjg5Ib6/aN1zuHTvPyvvx++LIeiG2L6PtirdCVPA9GgFCIwVt4xgw35Kekk9lHEt hia9g9rsfuNUOSKQsBXRbgk/yxlUdk/rM/2tdWQgbpRPNttgc9wrp581jjU2Y+lG dSkqri2Dv1loNyRAv/tRQgeA+td+lYR6zl5emsukhe4ECPvG4XaqgN+D33Eo+kDx 2MiBJAwefboJV/VjDm+dfR1d/1Qa4XCUEUMgR6P1LXgX0+5jNLOo3InPaKpwDxBw wvESiibJiLiL3KdJfjrVpDsYDbOt1GUK2fmkLwoH4ihAl674Fvo8M/31P0czxK8X v5JCm7POhB3vSf/chMoQEJcfklu9NzAUw8J5SJC2Oaj13uhOqAzN+uIrfe7qemtm W4E2bj50EI+IEVbf8DRfuDCcE/eB35XJMBdip4HwOFCQPeRGGN7EsM49w+TQ9fba 0+GbyY1glxJyUyGN0v/5Lu2IYfbvdOm51pWTlvUW4TS5Lv68rJpiE/lEDmFqHPE2 69cOLe843K4mGIh1wbmT2G+dQKf/PNd/mrQHcMuUK2au849lVQeUE9xz77NuJTeK 3lPim7/95m8eSQ== =jU8e -----END PGP SIGNATURE----- Merge tag 'drm/tegra/for-5.5-rc1' of git://anongit.freedesktop.org/tegra/linux into drm-next drm/tegra: Changes for v5.5-rc1 The bulk of these changes is the addition of DisplayPort support for Tegra210, Tegra186 and Tegra194. I've been running versions of this for about three years now, so I'd consider these changes to be pretty mature. These changes also unify the existing eDP support with the DP support since the programming is very similar, except for a few steps that can be easily parameterized. 
The rest are a couple of fixes all over the place for minor issues, as well as some work to support the IOMMU-backed DMA API, which in the end turned out to also clean up a number of cases where the DMA API was not being used correctly. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Thierry Reding <thierry.reding@gmail.com> Link: https://patchwork.freedesktop.org/patch/msgid/20191102140116.3860545-1-thierry.reding@gmail.com
This commit is contained in:
commit
904ce198dd
|
@ -9,7 +9,7 @@ config DRM_TEGRA
|
|||
select DRM_MIPI_DSI
|
||||
select DRM_PANEL
|
||||
select TEGRA_HOST1X
|
||||
select IOMMU_IOVA if IOMMU_SUPPORT
|
||||
select IOMMU_IOVA
|
||||
select CEC_CORE if CEC_NOTIFIER
|
||||
help
|
||||
Choose this option if you have an NVIDIA Tegra SoC.
|
||||
|
|
|
@ -715,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
|
|||
window.swap = state->swap;
|
||||
|
||||
for (i = 0; i < fb->format->num_planes; i++) {
|
||||
struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
|
||||
|
||||
window.base[i] = bo->paddr + fb->offsets[i];
|
||||
window.base[i] = state->iova[i] + fb->offsets[i];
|
||||
|
||||
/*
|
||||
* Tegra uses a shared stride for UV planes. Framebuffers are
|
||||
|
@ -732,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
|
|||
}
|
||||
|
||||
static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
|
||||
.prepare_fb = tegra_plane_prepare_fb,
|
||||
.cleanup_fb = tegra_plane_cleanup_fb,
|
||||
.atomic_check = tegra_plane_atomic_check,
|
||||
.atomic_disable = tegra_plane_atomic_disable,
|
||||
.atomic_update = tegra_plane_atomic_update,
|
||||
|
@ -869,11 +869,11 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
|
|||
return;
|
||||
}
|
||||
|
||||
value |= (bo->paddr >> 10) & 0x3fffff;
|
||||
value |= (bo->iova >> 10) & 0x3fffff;
|
||||
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
|
||||
|
||||
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
||||
value = (bo->paddr >> 32) & 0x3;
|
||||
value = (bo->iova >> 32) & 0x3;
|
||||
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
|
||||
#endif
|
||||
|
||||
|
@ -914,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
|
|||
}
|
||||
|
||||
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
|
||||
.prepare_fb = tegra_plane_prepare_fb,
|
||||
.cleanup_fb = tegra_plane_cleanup_fb,
|
||||
.atomic_check = tegra_cursor_atomic_check,
|
||||
.atomic_update = tegra_cursor_atomic_update,
|
||||
.atomic_disable = tegra_cursor_atomic_disable,
|
||||
|
@ -2014,9 +2016,8 @@ static int tegra_dc_init(struct host1x_client *client)
|
|||
if (!dc->syncpt)
|
||||
dev_warn(dc->dev, "failed to allocate syncpoint\n");
|
||||
|
||||
dc->group = host1x_client_iommu_attach(client, true);
|
||||
if (IS_ERR(dc->group)) {
|
||||
err = PTR_ERR(dc->group);
|
||||
err = host1x_client_iommu_attach(client);
|
||||
if (err < 0) {
|
||||
dev_err(client->dev, "failed to attach to domain: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
@ -2074,6 +2075,12 @@ static int tegra_dc_init(struct host1x_client *client)
|
|||
goto cleanup;
|
||||
}
|
||||
|
||||
/*
|
||||
* Inherit the DMA parameters (such as maximum segment size) from the
|
||||
* parent device.
|
||||
*/
|
||||
client->dev->dma_parms = client->parent->dma_parms;
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
|
@ -2083,7 +2090,7 @@ static int tegra_dc_init(struct host1x_client *client)
|
|||
if (!IS_ERR(primary))
|
||||
drm_plane_cleanup(primary);
|
||||
|
||||
host1x_client_iommu_detach(client, dc->group);
|
||||
host1x_client_iommu_detach(client);
|
||||
host1x_syncpt_free(dc->syncpt);
|
||||
|
||||
return err;
|
||||
|
@ -2097,6 +2104,9 @@ static int tegra_dc_exit(struct host1x_client *client)
|
|||
if (!tegra_dc_has_window_groups(dc))
|
||||
return 0;
|
||||
|
||||
/* avoid a dangling pointer just in case this disappears */
|
||||
client->dev->dma_parms = NULL;
|
||||
|
||||
devm_free_irq(dc->dev, dc->irq, dc);
|
||||
|
||||
err = tegra_dc_rgb_exit(dc);
|
||||
|
@ -2105,7 +2115,7 @@ static int tegra_dc_exit(struct host1x_client *client)
|
|||
return err;
|
||||
}
|
||||
|
||||
host1x_client_iommu_detach(client, dc->group);
|
||||
host1x_client_iommu_detach(client);
|
||||
host1x_syncpt_free(dc->syncpt);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -90,8 +90,6 @@ struct tegra_dc {
|
|||
struct drm_info_list *debugfs_files;
|
||||
|
||||
const struct tegra_dc_soc_info *soc;
|
||||
|
||||
struct iommu_group *group;
|
||||
};
|
||||
|
||||
static inline struct tegra_dc *
|
||||
|
|
|
@ -4,10 +4,158 @@
|
|||
* Copyright (C) 2015 Rob Clark
|
||||
*/
|
||||
|
||||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_dp_helper.h>
|
||||
#include <drm/drm_print.h>
|
||||
|
||||
#include "dp.h"
|
||||
|
||||
static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 };
|
||||
|
||||
static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps)
|
||||
{
|
||||
caps->enhanced_framing = false;
|
||||
caps->tps3_supported = false;
|
||||
caps->fast_training = false;
|
||||
caps->channel_coding = false;
|
||||
caps->alternate_scrambler_reset = false;
|
||||
}
|
||||
|
||||
void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
|
||||
const struct drm_dp_link_caps *src)
|
||||
{
|
||||
dest->enhanced_framing = src->enhanced_framing;
|
||||
dest->tps3_supported = src->tps3_supported;
|
||||
dest->fast_training = src->fast_training;
|
||||
dest->channel_coding = src->channel_coding;
|
||||
dest->alternate_scrambler_reset = src->alternate_scrambler_reset;
|
||||
}
|
||||
|
||||
static void drm_dp_link_reset(struct drm_dp_link *link)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
if (!link)
|
||||
return;
|
||||
|
||||
link->revision = 0;
|
||||
link->max_rate = 0;
|
||||
link->max_lanes = 0;
|
||||
|
||||
drm_dp_link_caps_reset(&link->caps);
|
||||
link->aux_rd_interval.cr = 0;
|
||||
link->aux_rd_interval.ce = 0;
|
||||
link->edp = 0;
|
||||
|
||||
link->rate = 0;
|
||||
link->lanes = 0;
|
||||
|
||||
for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++)
|
||||
link->rates[i] = 0;
|
||||
|
||||
link->num_rates = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_link_add_rate() - add a rate to the list of supported rates
|
||||
* @link: the link to add the rate to
|
||||
* @rate: the rate to add
|
||||
*
|
||||
* Add a link rate to the list of supported link rates.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success or one of the following negative error codes on failure:
|
||||
* - ENOSPC if the maximum number of supported rates has been reached
|
||||
* - EEXISTS if the link already supports this rate
|
||||
*
|
||||
* See also:
|
||||
* drm_dp_link_remove_rate()
|
||||
*/
|
||||
int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
|
||||
{
|
||||
unsigned int i, pivot;
|
||||
|
||||
if (link->num_rates == DP_MAX_SUPPORTED_RATES)
|
||||
return -ENOSPC;
|
||||
|
||||
for (pivot = 0; pivot < link->num_rates; pivot++)
|
||||
if (rate <= link->rates[pivot])
|
||||
break;
|
||||
|
||||
if (pivot != link->num_rates && rate == link->rates[pivot])
|
||||
return -EEXIST;
|
||||
|
||||
for (i = link->num_rates; i > pivot; i--)
|
||||
link->rates[i] = link->rates[i - 1];
|
||||
|
||||
link->rates[pivot] = rate;
|
||||
link->num_rates++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_link_remove_rate() - remove a rate from the list of supported rates
|
||||
* @link: the link from which to remove the rate
|
||||
* @rate: the rate to remove
|
||||
*
|
||||
* Removes a link rate from the list of supported link rates.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success or one of the following negative error codes on failure:
|
||||
* - EINVAL if the specified rate is not among the supported rates
|
||||
*
|
||||
* See also:
|
||||
* drm_dp_link_add_rate()
|
||||
*/
|
||||
int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < link->num_rates; i++)
|
||||
if (rate == link->rates[i])
|
||||
break;
|
||||
|
||||
if (i == link->num_rates)
|
||||
return -EINVAL;
|
||||
|
||||
link->num_rates--;
|
||||
|
||||
while (i < link->num_rates) {
|
||||
link->rates[i] = link->rates[i + 1];
|
||||
i++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_link_update_rates() - normalize the supported link rates array
|
||||
* @link: the link for which to normalize the supported link rates
|
||||
*
|
||||
* Users should call this function after they've manually modified the array
|
||||
* of supported link rates. This function removes any stale entries, compacts
|
||||
* the array and updates the supported link rate count. Note that calling the
|
||||
* drm_dp_link_remove_rate() function already does this janitorial work.
|
||||
*
|
||||
* See also:
|
||||
* drm_dp_link_add_rate(), drm_dp_link_remove_rate()
|
||||
*/
|
||||
void drm_dp_link_update_rates(struct drm_dp_link *link)
|
||||
{
|
||||
unsigned int i, count = 0;
|
||||
|
||||
for (i = 0; i < link->num_rates; i++) {
|
||||
if (link->rates[i] != 0)
|
||||
link->rates[count++] = link->rates[i];
|
||||
}
|
||||
|
||||
for (i = count; i < link->num_rates; i++)
|
||||
link->rates[i] = 0;
|
||||
|
||||
link->num_rates = count;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_link_probe() - probe a DisplayPort link for capabilities
|
||||
* @aux: DisplayPort AUX channel
|
||||
|
@ -21,21 +169,88 @@
|
|||
*/
|
||||
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
|
||||
{
|
||||
u8 values[3];
|
||||
u8 dpcd[DP_RECEIVER_CAP_SIZE], value;
|
||||
unsigned int rd_interval;
|
||||
int err;
|
||||
|
||||
memset(link, 0, sizeof(*link));
|
||||
drm_dp_link_reset(link);
|
||||
|
||||
err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
|
||||
err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
link->revision = values[0];
|
||||
link->rate = drm_dp_bw_code_to_link_rate(values[1]);
|
||||
link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
|
||||
link->revision = dpcd[DP_DPCD_REV];
|
||||
link->max_rate = drm_dp_max_link_rate(dpcd);
|
||||
link->max_lanes = drm_dp_max_lane_count(dpcd);
|
||||
|
||||
if (values[2] & DP_ENHANCED_FRAME_CAP)
|
||||
link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
|
||||
link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
|
||||
link->caps.tps3_supported = drm_dp_tps3_supported(dpcd);
|
||||
link->caps.fast_training = drm_dp_fast_training_cap(dpcd);
|
||||
link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd);
|
||||
|
||||
if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
|
||||
link->caps.alternate_scrambler_reset = true;
|
||||
|
||||
err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (value >= ARRAY_SIZE(drm_dp_edp_revisions))
|
||||
DRM_ERROR("unsupported eDP version: %02x\n", value);
|
||||
else
|
||||
link->edp = drm_dp_edp_revisions[value];
|
||||
}
|
||||
|
||||
/*
|
||||
* The DPCD stores the AUX read interval in units of 4 ms. There are
|
||||
* two special cases:
|
||||
*
|
||||
* 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery
|
||||
* and channel equalization should use 100 us or 400 us AUX read
|
||||
* intervals, respectively
|
||||
*
|
||||
* 2) for DP v1.4 and above, clock recovery should always use 100 us
|
||||
* AUX read intervals
|
||||
*/
|
||||
rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
|
||||
DP_TRAINING_AUX_RD_MASK;
|
||||
|
||||
if (rd_interval > 4) {
|
||||
DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n",
|
||||
rd_interval);
|
||||
rd_interval = 4;
|
||||
}
|
||||
|
||||
rd_interval *= 4 * USEC_PER_MSEC;
|
||||
|
||||
if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14)
|
||||
link->aux_rd_interval.cr = 100;
|
||||
|
||||
if (rd_interval == 0)
|
||||
link->aux_rd_interval.ce = 400;
|
||||
|
||||
link->rate = link->max_rate;
|
||||
link->lanes = link->max_lanes;
|
||||
|
||||
/* Parse SUPPORTED_LINK_RATES from eDP 1.4 */
|
||||
if (link->edp >= 0x14) {
|
||||
u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2];
|
||||
unsigned int i;
|
||||
u16 rate;
|
||||
|
||||
err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
|
||||
supported_rates,
|
||||
sizeof(supported_rates));
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
|
||||
rate = supported_rates[i * 2 + 1] << 8 |
|
||||
supported_rates[i * 2 + 0];
|
||||
|
||||
drm_dp_link_add_rate(link, rate * 200);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -116,18 +331,546 @@ int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
|
|||
*/
|
||||
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
|
||||
{
|
||||
u8 values[2];
|
||||
u8 values[2], value;
|
||||
int err;
|
||||
|
||||
values[0] = drm_dp_link_rate_to_bw_code(link->rate);
|
||||
values[1] = link->num_lanes;
|
||||
if (link->ops && link->ops->configure) {
|
||||
err = link->ops->configure(link);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to configure DP link: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
|
||||
values[0] = drm_dp_link_rate_to_bw_code(link->rate);
|
||||
values[1] = link->lanes;
|
||||
|
||||
if (link->caps.enhanced_framing)
|
||||
values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
|
||||
|
||||
err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (link->caps.channel_coding)
|
||||
value = DP_SET_ANSI_8B10B;
|
||||
else
|
||||
value = 0;
|
||||
|
||||
err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (link->caps.alternate_scrambler_reset) {
|
||||
err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET,
|
||||
DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_link_choose() - choose the lowest possible configuration for a mode
|
||||
* @link: DRM DP link object
|
||||
* @mode: DRM display mode
|
||||
* @info: DRM display information
|
||||
*
|
||||
* According to the eDP specification, a source should select a configuration
|
||||
* with the lowest number of lanes and the lowest possible link rate that can
|
||||
* match the bitrate requirements of a video mode. However it must ensure not
|
||||
* to exceed the capabilities of the sink.
|
||||
*
|
||||
* Returns: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int drm_dp_link_choose(struct drm_dp_link *link,
|
||||
const struct drm_display_mode *mode,
|
||||
const struct drm_display_info *info)
|
||||
{
|
||||
/* available link symbol clock rates */
|
||||
static const unsigned int rates[3] = { 162000, 270000, 540000 };
|
||||
/* available number of lanes */
|
||||
static const unsigned int lanes[3] = { 1, 2, 4 };
|
||||
unsigned long requirement, capacity;
|
||||
unsigned int rate = link->max_rate;
|
||||
unsigned int i, j;
|
||||
|
||||
/* bandwidth requirement */
|
||||
requirement = mode->clock * info->bpc * 3;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) {
|
||||
for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) {
|
||||
/*
|
||||
* Capacity for this combination of lanes and rate,
|
||||
* factoring in the ANSI 8B/10B encoding.
|
||||
*
|
||||
* Link rates in the DRM DP helpers are really link
|
||||
* symbol frequencies, so a tenth of the actual rate
|
||||
* of the link.
|
||||
*/
|
||||
capacity = lanes[i] * (rates[j] * 10) * 8 / 10;
|
||||
|
||||
if (capacity >= requirement) {
|
||||
DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n",
|
||||
lanes[i], rates[j], requirement,
|
||||
capacity);
|
||||
link->lanes = lanes[i];
|
||||
link->rate = rates[j];
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
/**
|
||||
* DOC: Link training
|
||||
*
|
||||
* These functions contain common logic and helpers to implement DisplayPort
|
||||
* link training.
|
||||
*/
|
||||
|
||||
/**
|
||||
* drm_dp_link_train_init() - initialize DisplayPort link training state
|
||||
* @train: DisplayPort link training state
|
||||
*/
|
||||
void drm_dp_link_train_init(struct drm_dp_link_train *train)
|
||||
{
|
||||
struct drm_dp_link_train_set *request = &train->request;
|
||||
struct drm_dp_link_train_set *adjust = &train->adjust;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
request->voltage_swing[i] = 0;
|
||||
adjust->voltage_swing[i] = 0;
|
||||
|
||||
request->pre_emphasis[i] = 0;
|
||||
adjust->pre_emphasis[i] = 0;
|
||||
|
||||
request->post_cursor[i] = 0;
|
||||
adjust->post_cursor[i] = 0;
|
||||
}
|
||||
|
||||
train->pattern = DP_TRAINING_PATTERN_DISABLE;
|
||||
train->clock_recovered = false;
|
||||
train->channel_equalized = false;
|
||||
}
|
||||
|
||||
static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train)
|
||||
{
|
||||
return train->clock_recovered && train->channel_equalized;
|
||||
}
|
||||
|
||||
static int drm_dp_link_apply_training(struct drm_dp_link *link)
|
||||
{
|
||||
struct drm_dp_link_train_set *request = &link->train.request;
|
||||
unsigned int lanes = link->lanes, *vs, *pe, *pc, i;
|
||||
struct drm_dp_aux *aux = link->aux;
|
||||
u8 values[4], pattern = 0;
|
||||
int err;
|
||||
|
||||
err = link->ops->apply_training(link);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to apply link training: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
vs = request->voltage_swing;
|
||||
pe = request->pre_emphasis;
|
||||
pc = request->post_cursor;
|
||||
|
||||
/* write currently selected voltage-swing and pre-emphasis levels */
|
||||
for (i = 0; i < lanes; i++)
|
||||
values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
|
||||
DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);
|
||||
|
||||
err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to set training parameters: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* write currently selected post-cursor level (if supported) */
|
||||
if (link->revision >= 0x12 && link->rate == 540000) {
|
||||
values[0] = values[1] = 0;
|
||||
|
||||
for (i = 0; i < lanes; i++)
|
||||
values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
|
||||
|
||||
err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
|
||||
DIV_ROUND_UP(lanes, 2));
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to set post-cursor: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
/* write link pattern */
|
||||
if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE)
|
||||
pattern |= DP_LINK_SCRAMBLING_DISABLE;
|
||||
|
||||
pattern |= link->train.pattern;
|
||||
|
||||
err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to set training pattern: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void drm_dp_link_train_wait(struct drm_dp_link *link)
|
||||
{
|
||||
unsigned long min = 0;
|
||||
|
||||
switch (link->train.pattern) {
|
||||
case DP_TRAINING_PATTERN_1:
|
||||
min = link->aux_rd_interval.cr;
|
||||
break;
|
||||
|
||||
case DP_TRAINING_PATTERN_2:
|
||||
case DP_TRAINING_PATTERN_3:
|
||||
min = link->aux_rd_interval.ce;
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (min > 0)
|
||||
usleep_range(min, 2 * min);
|
||||
}
|
||||
|
||||
static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
|
||||
u8 status[DP_LINK_STATUS_SIZE])
|
||||
{
|
||||
struct drm_dp_link_train_set *adjust = &link->train.adjust;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < link->lanes; i++) {
|
||||
adjust->voltage_swing[i] =
|
||||
drm_dp_get_adjust_request_voltage(status, i) >>
|
||||
DP_TRAIN_VOLTAGE_SWING_SHIFT;
|
||||
|
||||
adjust->pre_emphasis[i] =
|
||||
drm_dp_get_adjust_request_pre_emphasis(status, i) >>
|
||||
DP_TRAIN_PRE_EMPHASIS_SHIFT;
|
||||
|
||||
adjust->post_cursor[i] =
|
||||
drm_dp_get_adjust_request_post_cursor(status, i);
|
||||
}
|
||||
}
|
||||
|
||||
static void drm_dp_link_train_adjust(struct drm_dp_link_train *train)
|
||||
{
|
||||
struct drm_dp_link_train_set *request = &train->request;
|
||||
struct drm_dp_link_train_set *adjust = &train->adjust;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
if (request->voltage_swing[i] != adjust->voltage_swing[i])
|
||||
request->voltage_swing[i] = adjust->voltage_swing[i];
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
if (request->pre_emphasis[i] != adjust->pre_emphasis[i])
|
||||
request->pre_emphasis[i] = adjust->pre_emphasis[i];
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
if (request->post_cursor[i] != adjust->post_cursor[i])
|
||||
request->post_cursor[i] = adjust->post_cursor[i];
|
||||
}
|
||||
|
||||
static int drm_dp_link_recover_clock(struct drm_dp_link *link)
|
||||
{
|
||||
u8 status[DP_LINK_STATUS_SIZE];
|
||||
int err;
|
||||
|
||||
err = drm_dp_link_apply_training(link);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
drm_dp_link_train_wait(link);
|
||||
|
||||
err = drm_dp_dpcd_read_link_status(link->aux, status);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to read link status: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (!drm_dp_clock_recovery_ok(status, link->lanes))
|
||||
drm_dp_link_get_adjustments(link, status);
|
||||
else
|
||||
link->train.clock_recovered = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int drm_dp_link_clock_recovery(struct drm_dp_link *link)
|
||||
{
|
||||
unsigned int repeat;
|
||||
int err;
|
||||
|
||||
/* start clock recovery using training pattern 1 */
|
||||
link->train.pattern = DP_TRAINING_PATTERN_1;
|
||||
|
||||
for (repeat = 1; repeat < 5; repeat++) {
|
||||
err = drm_dp_link_recover_clock(link);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to recover clock: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (link->train.clock_recovered)
|
||||
break;
|
||||
|
||||
drm_dp_link_train_adjust(&link->train);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int drm_dp_link_equalize_channel(struct drm_dp_link *link)
|
||||
{
|
||||
struct drm_dp_aux *aux = link->aux;
|
||||
u8 status[DP_LINK_STATUS_SIZE];
|
||||
int err;
|
||||
|
||||
err = drm_dp_link_apply_training(link);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
drm_dp_link_train_wait(link);
|
||||
|
||||
err = drm_dp_dpcd_read_link_status(aux, status);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to read link status: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
|
||||
DRM_ERROR("clock recovery lost while equalizing channel\n");
|
||||
link->train.clock_recovered = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!drm_dp_channel_eq_ok(status, link->lanes))
|
||||
drm_dp_link_get_adjustments(link, status);
|
||||
else
|
||||
link->train.channel_equalized = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int drm_dp_link_channel_equalization(struct drm_dp_link *link)
|
||||
{
|
||||
unsigned int repeat;
|
||||
int err;
|
||||
|
||||
/* start channel equalization using pattern 2 or 3 */
|
||||
if (link->caps.tps3_supported)
|
||||
link->train.pattern = DP_TRAINING_PATTERN_3;
|
||||
else
|
||||
link->train.pattern = DP_TRAINING_PATTERN_2;
|
||||
|
||||
for (repeat = 1; repeat < 5; repeat++) {
|
||||
err = drm_dp_link_equalize_channel(link);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to equalize channel: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (link->train.channel_equalized)
|
||||
break;
|
||||
|
||||
drm_dp_link_train_adjust(&link->train);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int drm_dp_link_downgrade(struct drm_dp_link *link)
|
||||
{
|
||||
switch (link->rate) {
|
||||
case 162000:
|
||||
return -EINVAL;
|
||||
|
||||
case 270000:
|
||||
link->rate = 162000;
|
||||
break;
|
||||
|
||||
case 540000:
|
||||
link->rate = 270000;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void drm_dp_link_train_disable(struct drm_dp_link *link)
|
||||
{
|
||||
int err;
|
||||
|
||||
link->train.pattern = DP_TRAINING_PATTERN_DISABLE;
|
||||
|
||||
err = drm_dp_link_apply_training(link);
|
||||
if (err < 0)
|
||||
DRM_ERROR("failed to disable link training: %d\n", err);
|
||||
}
|
||||
|
||||
static int drm_dp_link_train_full(struct drm_dp_link *link)
|
||||
{
|
||||
int err;
|
||||
|
||||
retry:
|
||||
DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n",
|
||||
link->lanes, (link->lanes > 1) ? "s" : "",
|
||||
link->rate / 100);
|
||||
|
||||
err = drm_dp_link_configure(link->aux, link);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to configure DP link: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = drm_dp_link_clock_recovery(link);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("clock recovery failed: %d\n", err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!link->train.clock_recovered) {
|
||||
DRM_ERROR("clock recovery failed, downgrading link\n");
|
||||
|
||||
err = drm_dp_link_downgrade(link);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
goto retry;
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("clock recovery succeeded\n");
|
||||
|
||||
err = drm_dp_link_channel_equalization(link);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("channel equalization failed: %d\n", err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!link->train.channel_equalized) {
|
||||
DRM_ERROR("channel equalization failed, downgrading link\n");
|
||||
|
||||
err = drm_dp_link_downgrade(link);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
goto retry;
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("channel equalization succeeded\n");
|
||||
|
||||
out:
|
||||
drm_dp_link_train_disable(link);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int drm_dp_link_train_fast(struct drm_dp_link *link)
|
||||
{
|
||||
u8 status[DP_LINK_STATUS_SIZE];
|
||||
int err;
|
||||
|
||||
DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n",
|
||||
link->lanes, (link->lanes > 1) ? "s" : "",
|
||||
link->rate / 100);
|
||||
|
||||
err = drm_dp_link_configure(link->aux, link);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to configure DP link: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* transmit training pattern 1 for 500 microseconds */
|
||||
link->train.pattern = DP_TRAINING_PATTERN_1;
|
||||
|
||||
err = drm_dp_link_apply_training(link);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
usleep_range(500, 1000);
|
||||
|
||||
/* transmit training pattern 2 or 3 for 500 microseconds */
|
||||
if (link->caps.tps3_supported)
|
||||
link->train.pattern = DP_TRAINING_PATTERN_3;
|
||||
else
|
||||
link->train.pattern = DP_TRAINING_PATTERN_2;
|
||||
|
||||
err = drm_dp_link_apply_training(link);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
usleep_range(500, 1000);
|
||||
|
||||
err = drm_dp_dpcd_read_link_status(link->aux, status);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("failed to read link status: %d\n", err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
|
||||
DRM_ERROR("clock recovery failed\n");
|
||||
err = -EIO;
|
||||
}
|
||||
|
||||
if (!drm_dp_channel_eq_ok(status, link->lanes)) {
|
||||
DRM_ERROR("channel equalization failed\n");
|
||||
err = -EIO;
|
||||
}
|
||||
|
||||
out:
|
||||
drm_dp_link_train_disable(link);
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_link_train() - perform DisplayPort link training
|
||||
* @link: a DP link object
|
||||
*
|
||||
* Uses the context stored in the DP link object to perform link training. It
|
||||
* is expected that drivers will call drm_dp_link_probe() to obtain the link
|
||||
* capabilities before performing link training.
|
||||
*
|
||||
* If the sink supports fast link training (no AUX CH handshake) and valid
|
||||
* training settings are available, this function will try to perform fast
|
||||
* link training and fall back to full link training on failure.
|
||||
*
|
||||
* Returns: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int drm_dp_link_train(struct drm_dp_link *link)
|
||||
{
|
||||
int err;
|
||||
|
||||
drm_dp_link_train_init(&link->train);
|
||||
|
||||
if (link->caps.fast_training) {
|
||||
if (drm_dp_link_train_valid(&link->train)) {
|
||||
err = drm_dp_link_train_fast(link);
|
||||
if (err < 0)
|
||||
DRM_ERROR("fast link training failed: %d\n",
|
||||
err);
|
||||
else
|
||||
return 0;
|
||||
} else {
|
||||
DRM_DEBUG_KMS("training parameters not available\n");
|
||||
}
|
||||
} else {
|
||||
DRM_DEBUG_KMS("fast link training not supported\n");
|
||||
}
|
||||
|
||||
err = drm_dp_link_train_full(link);
|
||||
if (err < 0)
|
||||
DRM_ERROR("full link training failed: %d\n", err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -7,20 +7,171 @@
|
|||
#ifndef DRM_TEGRA_DP_H
|
||||
#define DRM_TEGRA_DP_H 1
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct drm_display_info;
|
||||
struct drm_display_mode;
|
||||
struct drm_dp_aux;
|
||||
struct drm_dp_link;
|
||||
|
||||
#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
|
||||
/**
 * struct drm_dp_link_caps - DP link capabilities
 */
struct drm_dp_link_caps {
	/**
	 * @enhanced_framing:
	 *
	 * enhanced framing capability (mandatory as of DP 1.2)
	 */
	bool enhanced_framing;

	/**
	 * @tps3_supported:
	 *
	 * training pattern sequence 3 supported for equalization
	 */
	bool tps3_supported;

	/**
	 * @fast_training:
	 *
	 * AUX CH handshake not required for link training
	 */
	bool fast_training;

	/**
	 * @channel_coding:
	 *
	 * ANSI 8B/10B channel coding capability
	 */
	bool channel_coding;

	/**
	 * @alternate_scrambler_reset:
	 *
	 * eDP alternate scrambler reset capability
	 */
	bool alternate_scrambler_reset;
};
|
||||
|
||||
void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
|
||||
const struct drm_dp_link_caps *src);
|
||||
|
||||
/**
 * struct drm_dp_link_ops - DP link operations
 *
 * Driver-provided hooks invoked by the DP link helpers. Both callbacks
 * take the &drm_dp_link being trained/configured and, by the usual
 * convention for int-returning kernel hooks, presumably return 0 on
 * success or a negative error code on failure — confirm against the
 * implementations wired up by the driver.
 */
struct drm_dp_link_ops {
	/**
	 * @apply_training:
	 *
	 * Hook invoked during link training; implementations are expected
	 * to program the currently requested training settings into the
	 * hardware (semantics not visible here — verify against users).
	 */
	int (*apply_training)(struct drm_dp_link *link);

	/**
	 * @configure:
	 *
	 * Hook invoked to configure the hardware for the current link
	 * parameters (semantics not visible here — verify against users).
	 */
	int (*configure)(struct drm_dp_link *link);
};
|
||||
|
||||
#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0)
|
||||
#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3)
|
||||
#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2))
|
||||
|
||||
/**
 * struct drm_dp_link_train_set - link training settings
 * @voltage_swing: per-lane voltage swing
 * @pre_emphasis: per-lane pre-emphasis
 * @post_cursor: per-lane post-cursor
 *
 * Each array holds one entry per lane; the size of 4 matches the
 * maximum number of lanes a DP main link can have.
 */
struct drm_dp_link_train_set {
	unsigned int voltage_swing[4];
	unsigned int pre_emphasis[4];
	unsigned int post_cursor[4];
};
|
||||
|
||||
/**
 * struct drm_dp_link_train - link training state information
 * @request: currently requested settings
 * @adjust: adjustments requested by sink
 * @pattern: currently requested training pattern
 * @clock_recovered: flag to track if clock recovery has completed
 * @channel_equalized: flag to track if channel equalization has completed
 */
struct drm_dp_link_train {
	/* settings the source intends to drive next */
	struct drm_dp_link_train_set request;
	/* settings the sink asked for via its adjust-request registers */
	struct drm_dp_link_train_set adjust;

	unsigned int pattern;

	/* progress flags for the two training phases */
	bool clock_recovered;
	bool channel_equalized;
};
|
||||
|
||||
/**
|
||||
* struct drm_dp_link - DP link capabilities and configuration
|
||||
* @revision: DP specification revision supported on the link
|
||||
* @max_rate: maximum clock rate supported on the link
|
||||
* @max_lanes: maximum number of lanes supported on the link
|
||||
* @caps: capabilities supported on the link (see &drm_dp_link_caps)
|
||||
* @aux_rd_interval: AUX read interval to use for training (in microseconds)
|
||||
* @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...)
|
||||
* @rate: currently configured link rate
|
||||
* @lanes: currently configured number of lanes
|
||||
* @rates: additional supported link rates in kHz (eDP 1.4)
|
||||
* @num_rates: number of additional supported link rates (eDP 1.4)
|
||||
*/
|
||||
struct drm_dp_link {
|
||||
unsigned char revision;
|
||||
unsigned int max_rate;
|
||||
unsigned int max_lanes;
|
||||
|
||||
struct drm_dp_link_caps caps;
|
||||
|
||||
/**
|
||||
* @cr: clock recovery read interval
|
||||
* @ce: channel equalization read interval
|
||||
*/
|
||||
struct {
|
||||
unsigned int cr;
|
||||
unsigned int ce;
|
||||
} aux_rd_interval;
|
||||
|
||||
unsigned char edp;
|
||||
|
||||
unsigned int rate;
|
||||
unsigned int num_lanes;
|
||||
unsigned long capabilities;
|
||||
unsigned int lanes;
|
||||
|
||||
unsigned long rates[DP_MAX_SUPPORTED_RATES];
|
||||
unsigned int num_rates;
|
||||
|
||||
/**
|
||||
* @ops: DP link operations
|
||||
*/
|
||||
const struct drm_dp_link_ops *ops;
|
||||
|
||||
/**
|
||||
* @aux: DP AUX channel
|
||||
*/
|
||||
struct drm_dp_aux *aux;
|
||||
|
||||
/**
|
||||
* @train: DP link training state
|
||||
*/
|
||||
struct drm_dp_link_train train;
|
||||
};
|
||||
|
||||
int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate);
|
||||
int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
|
||||
void drm_dp_link_update_rates(struct drm_dp_link *link);
|
||||
|
||||
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
|
||||
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
|
||||
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
|
||||
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
|
||||
int drm_dp_link_choose(struct drm_dp_link *link,
|
||||
const struct drm_display_mode *mode,
|
||||
const struct drm_display_info *info);
|
||||
|
||||
void drm_dp_link_train_init(struct drm_dp_link_train *train);
|
||||
int drm_dp_link_train(struct drm_dp_link *link);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/pinctrl/pinconf-generic.h>
|
||||
#include <linux/pinctrl/pinctrl.h>
|
||||
|
@ -30,10 +31,18 @@
|
|||
static DEFINE_MUTEX(dpaux_lock);
|
||||
static LIST_HEAD(dpaux_list);
|
||||
|
||||
struct tegra_dpaux_soc {
|
||||
unsigned int cmh;
|
||||
unsigned int drvz;
|
||||
unsigned int drvi;
|
||||
};
|
||||
|
||||
struct tegra_dpaux {
|
||||
struct drm_dp_aux aux;
|
||||
struct device *dev;
|
||||
|
||||
const struct tegra_dpaux_soc *soc;
|
||||
|
||||
void __iomem *regs;
|
||||
int irq;
|
||||
|
||||
|
@ -121,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
|
|||
struct tegra_dpaux *dpaux = to_dpaux(aux);
|
||||
unsigned long status;
|
||||
ssize_t ret = 0;
|
||||
u8 reply = 0;
|
||||
u32 value;
|
||||
|
||||
/* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
|
||||
|
@ -215,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
|
|||
|
||||
switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
|
||||
case 0x00:
|
||||
msg->reply = DP_AUX_NATIVE_REPLY_ACK;
|
||||
reply = DP_AUX_NATIVE_REPLY_ACK;
|
||||
break;
|
||||
|
||||
case 0x01:
|
||||
msg->reply = DP_AUX_NATIVE_REPLY_NACK;
|
||||
reply = DP_AUX_NATIVE_REPLY_NACK;
|
||||
break;
|
||||
|
||||
case 0x02:
|
||||
msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
|
||||
reply = DP_AUX_NATIVE_REPLY_DEFER;
|
||||
break;
|
||||
|
||||
case 0x04:
|
||||
msg->reply = DP_AUX_I2C_REPLY_NACK;
|
||||
reply = DP_AUX_I2C_REPLY_NACK;
|
||||
break;
|
||||
|
||||
case 0x08:
|
||||
msg->reply = DP_AUX_I2C_REPLY_DEFER;
|
||||
reply = DP_AUX_I2C_REPLY_DEFER;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -239,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
|
|||
if (msg->request & DP_AUX_I2C_READ) {
|
||||
size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
|
||||
|
||||
if (WARN_ON(count != msg->size))
|
||||
count = min_t(size_t, count, msg->size);
|
||||
/*
|
||||
* There might be a smarter way to do this, but since
|
||||
* the DP helpers will already retry transactions for
|
||||
* an -EBUSY return value, simply reuse that instead.
|
||||
*/
|
||||
if (count != msg->size) {
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
|
||||
ret = count;
|
||||
}
|
||||
}
|
||||
|
||||
msg->reply = reply;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -311,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
|
|||
|
||||
switch (function) {
|
||||
case DPAUX_PADCTL_FUNC_AUX:
|
||||
value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
|
||||
value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
|
||||
DPAUX_HYBRID_PADCTL_MODE_AUX;
|
||||
break;
|
||||
|
@ -321,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
|
|||
case DPAUX_PADCTL_FUNC_I2C:
|
||||
value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
|
||||
DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
|
||||
DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
|
||||
DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
|
||||
DPAUX_HYBRID_PADCTL_MODE_I2C;
|
||||
break;
|
||||
|
||||
|
@ -437,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
|
|||
if (!dpaux)
|
||||
return -ENOMEM;
|
||||
|
||||
dpaux->soc = of_device_get_match_data(&pdev->dev);
|
||||
INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
|
||||
init_completion(&dpaux->complete);
|
||||
INIT_LIST_HEAD(&dpaux->list);
|
||||
|
@ -494,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
|
|||
|
||||
return PTR_ERR(dpaux->vdd);
|
||||
}
|
||||
|
||||
dpaux->vdd = NULL;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, dpaux);
|
||||
|
@ -642,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = {
|
|||
SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
|
||||
};
|
||||
|
||||
/*
 * Per-SoC DPAUX hybrid pad parameters: common-mode voltage (cmh), drive
 * impedance (drvz) and drive current (drvi). These values are programmed
 * into the DPAUX_HYBRID_PADCTL register fields by the pad configuration
 * code instead of the previously hard-coded constants.
 */
static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
	.cmh = 0x02,
	.drvz = 0x04,
	.drvi = 0x18,
};

static const struct tegra_dpaux_soc tegra210_dpaux_soc = {
	.cmh = 0x02,
	.drvz = 0x04,
	.drvi = 0x30,
};

static const struct tegra_dpaux_soc tegra194_dpaux_soc = {
	.cmh = 0x02,
	.drvz = 0x04,
	.drvi = 0x2c,
};
|
||||
|
||||
static const struct of_device_id tegra_dpaux_of_match[] = {
|
||||
{ .compatible = "nvidia,tegra194-dpaux", },
|
||||
{ .compatible = "nvidia,tegra186-dpaux", },
|
||||
{ .compatible = "nvidia,tegra210-dpaux", },
|
||||
{ .compatible = "nvidia,tegra124-dpaux", },
|
||||
{ .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc },
|
||||
{ .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc },
|
||||
{ .compatible = "nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc },
|
||||
{ .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc },
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
|
||||
|
@ -687,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
|
|||
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
|
||||
dpaux->output = output;
|
||||
|
||||
err = regulator_enable(dpaux->vdd);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(250);
|
||||
|
||||
while (time_before(jiffies, timeout)) {
|
||||
if (output->panel) {
|
||||
enum drm_connector_status status;
|
||||
|
||||
status = drm_dp_aux_detect(aux);
|
||||
if (status == connector_status_connected) {
|
||||
enable_irq(dpaux->irq);
|
||||
return 0;
|
||||
if (dpaux->vdd) {
|
||||
err = regulator_enable(dpaux->vdd);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
|
||||
usleep_range(1000, 2000);
|
||||
timeout = jiffies + msecs_to_jiffies(250);
|
||||
|
||||
while (time_before(jiffies, timeout)) {
|
||||
status = drm_dp_aux_detect(aux);
|
||||
|
||||
if (status == connector_status_connected)
|
||||
break;
|
||||
|
||||
usleep_range(1000, 2000);
|
||||
}
|
||||
|
||||
if (status != connector_status_connected)
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return -ETIMEDOUT;
|
||||
enable_irq(dpaux->irq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int drm_dp_aux_detach(struct drm_dp_aux *aux)
|
||||
|
@ -716,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux)
|
|||
|
||||
disable_irq(dpaux->irq);
|
||||
|
||||
err = regulator_disable(dpaux->vdd);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(250);
|
||||
|
||||
while (time_before(jiffies, timeout)) {
|
||||
if (dpaux->output->panel) {
|
||||
enum drm_connector_status status;
|
||||
|
||||
status = drm_dp_aux_detect(aux);
|
||||
if (status == connector_status_disconnected) {
|
||||
dpaux->output = NULL;
|
||||
return 0;
|
||||
if (dpaux->vdd) {
|
||||
err = regulator_disable(dpaux->vdd);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
|
||||
usleep_range(1000, 2000);
|
||||
timeout = jiffies + msecs_to_jiffies(250);
|
||||
|
||||
while (time_before(jiffies, timeout)) {
|
||||
status = drm_dp_aux_detect(aux);
|
||||
|
||||
if (status == connector_status_disconnected)
|
||||
break;
|
||||
|
||||
usleep_range(1000, 2000);
|
||||
}
|
||||
|
||||
if (status != connector_status_disconnected)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
dpaux->output = NULL;
|
||||
}
|
||||
|
||||
return -ETIMEDOUT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
|
||||
|
@ -765,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Select the main-link channel coding on the sink by writing the given
 * encoding to DP_MAIN_LINK_CHANNEL_CODING_SET over the AUX channel.
 */
int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
{
	int err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
				     encoding);

	return (err < 0) ? err : 0;
}
|
||||
|
||||
/*
 * Program a link training pattern on the sink and verify the result.
 *
 * Writes @pattern to DP_TRAINING_PATTERN_SET, then (unless training is
 * being disabled) programs minimal drive settings on all active lanes,
 * waits, and reads back the link status to check whether the phase
 * corresponding to the pattern succeeded.
 *
 * Returns 0 on success, -EAGAIN if the sink has not locked yet (caller
 * is expected to retry, possibly with adjusted settings), -EINVAL for an
 * unsupported pattern, or a negative error code on AUX failure.
 */
int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
		     u8 pattern)
{
	/* only the pattern bits; @pattern may also carry scrambling flags */
	u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
	u8 status[DP_LINK_STATUS_SIZE], values[4];
	unsigned int i;
	int err;

	err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
	if (err < 0)
		return err;

	/* nothing to verify when training is being switched off */
	if (tp == DP_TRAINING_PATTERN_DISABLE)
		return 0;

	/*
	 * Program level-0 swing/pre-emphasis with the "max reached" bits
	 * set on every active lane (num_lanes <= 4, matching values[]).
	 */
	for (i = 0; i < link->num_lanes; i++)
		values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
			    DP_TRAIN_PRE_EMPH_LEVEL_0 |
			    DP_TRAIN_MAX_SWING_REACHED |
			    DP_TRAIN_VOLTAGE_SWING_LEVEL_0;

	err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
				link->num_lanes);
	if (err < 0)
		return err;

	/* give the sink time to lock before sampling the status */
	usleep_range(500, 1000);

	err = drm_dp_dpcd_read_link_status(aux, status);
	if (err < 0)
		return err;

	switch (tp) {
	case DP_TRAINING_PATTERN_1:
		/* pattern 1 trains clock recovery */
		if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
			return -EAGAIN;

		break;

	case DP_TRAINING_PATTERN_2:
		/* pattern 2 trains channel equalization */
		if (!drm_dp_channel_eq_ok(status, link->num_lanes))
			return -EAGAIN;

		break;

	default:
		dev_err(aux->dev, "unsupported training pattern %u\n", tp);
		return -EINVAL;
	}

	/*
	 * NOTE(review): clears DP_EDP_CONFIGURATION_SET after each
	 * successful phase — presumably to disable the eDP alternate
	 * scrambler reset; confirm against callers.
	 */
	err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
	if (err < 0)
		return err;

	return 0;
}
|
||||
|
|
|
@ -20,10 +20,6 @@
|
|||
#include <drm/drm_prime.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
|
||||
#include <asm/dma-iommu.h>
|
||||
#endif
|
||||
|
||||
#include "drm.h"
|
||||
#include "gem.h"
|
||||
|
||||
|
@ -86,168 +82,6 @@ tegra_drm_mode_config_helpers = {
|
|||
.atomic_commit_tail = tegra_atomic_commit_tail,
|
||||
};
|
||||
|
||||
/*
 * DRM driver .load callback: allocate and initialize the per-device
 * tegra_drm state, optionally set up an explicit IOMMU domain with a
 * GEM aperture plus a fixed-size carveout, initialize mode setting,
 * fbdev and vblank support, and bring up the host1x sub-devices.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * initialized so far is unwound via the goto cleanup chain below.
 */
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	/* use an explicit IOMMU domain when the platform bus has one */
	if (iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.normalize_zpos = true;

	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	if (tegra->domain) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		u64 dma_mask = dma_get_mask(&device->dev);
		dma_addr_t start, end;
		unsigned long order;

		/* clamp the IOMMU aperture to the device's DMA mask */
		start = tegra->domain->geometry.aperture_start & dma_mask;
		end = tegra->domain->geometry.aperture_end & dma_mask;

		/* GEM gets the low part, the carveout the top CARVEOUT_SZ */
		gem_start = start;
		gem_end = end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = end;

		/* IOVA granule = smallest page size the domain supports */
		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	return 0;

	/* error unwind: reverse order of the initialization above */
hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
	}
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
	return err;
}
|
||||
|
||||
/*
 * DRM driver .unload callback: tear down everything tegra_drm_load()
 * set up — fbdev, KMS state, host1x sub-devices and, if present, the
 * explicit IOMMU domain with its GEM/carveout allocators.
 */
static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	/*
	 * NOTE(review): bailing out here skips the IOMMU teardown and the
	 * kfree(tegra) below, leaking them when host1x cleanup fails —
	 * presumably tolerated because unload cannot propagate errors.
	 */
	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
}
|
||||
|
||||
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
|
||||
{
|
||||
struct tegra_drm_file *fpriv;
|
||||
|
@ -311,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
|
||||
|
||||
dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
|
||||
if (!dest->cmdbuf.bo)
|
||||
return -ENOENT;
|
||||
|
@ -1014,8 +850,6 @@ static int tegra_debugfs_init(struct drm_minor *minor)
|
|||
static struct drm_driver tegra_drm_driver = {
|
||||
.driver_features = DRIVER_MODESET | DRIVER_GEM |
|
||||
DRIVER_ATOMIC | DRIVER_RENDER,
|
||||
.load = tegra_drm_load,
|
||||
.unload = tegra_drm_unload,
|
||||
.open = tegra_drm_open,
|
||||
.postclose = tegra_drm_postclose,
|
||||
.lastclose = drm_fb_helper_lastclose,
|
||||
|
@ -1068,57 +902,63 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
|
||||
bool shared)
|
||||
int host1x_client_iommu_attach(struct host1x_client *client)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
|
||||
struct drm_device *drm = dev_get_drvdata(client->parent);
|
||||
struct tegra_drm *tegra = drm->dev_private;
|
||||
struct iommu_group *group = NULL;
|
||||
int err;
|
||||
|
||||
/*
|
||||
* If the host1x client is already attached to an IOMMU domain that is
|
||||
* not the shared IOMMU domain, don't try to attach it to a different
|
||||
* domain. This allows using the IOMMU-backed DMA API.
|
||||
*/
|
||||
if (domain && domain != tegra->domain)
|
||||
return 0;
|
||||
|
||||
if (tegra->domain) {
|
||||
group = iommu_group_get(client->dev);
|
||||
if (!group) {
|
||||
dev_err(client->dev, "failed to get IOMMU group\n");
|
||||
return ERR_PTR(-ENODEV);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!shared || (shared && (group != tegra->group))) {
|
||||
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
|
||||
if (client->dev->archdata.mapping) {
|
||||
struct dma_iommu_mapping *mapping =
|
||||
to_dma_iommu_mapping(client->dev);
|
||||
arm_iommu_detach_device(client->dev);
|
||||
arm_iommu_release_mapping(mapping);
|
||||
}
|
||||
#endif
|
||||
if (domain != tegra->domain) {
|
||||
err = iommu_attach_group(tegra->domain, group);
|
||||
if (err < 0) {
|
||||
iommu_group_put(group);
|
||||
return ERR_PTR(err);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (shared && !tegra->group)
|
||||
tegra->group = group;
|
||||
}
|
||||
|
||||
tegra->use_explicit_iommu = true;
|
||||
}
|
||||
|
||||
return group;
|
||||
client->group = group;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void host1x_client_iommu_detach(struct host1x_client *client,
|
||||
struct iommu_group *group)
|
||||
void host1x_client_iommu_detach(struct host1x_client *client)
|
||||
{
|
||||
struct drm_device *drm = dev_get_drvdata(client->parent);
|
||||
struct tegra_drm *tegra = drm->dev_private;
|
||||
struct iommu_domain *domain;
|
||||
|
||||
if (group) {
|
||||
if (group == tegra->group) {
|
||||
iommu_detach_group(tegra->domain, group);
|
||||
tegra->group = NULL;
|
||||
}
|
||||
if (client->group) {
|
||||
/*
|
||||
* Devices that are part of the same group may no longer be
|
||||
* attached to a domain at this point because their group may
|
||||
* have been detached by an earlier client.
|
||||
*/
|
||||
domain = iommu_get_domain_for_dev(client->dev);
|
||||
if (domain)
|
||||
iommu_detach_group(tegra->domain, client->group);
|
||||
|
||||
iommu_group_put(group);
|
||||
iommu_group_put(client->group);
|
||||
client->group = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1202,6 +1042,8 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
|
|||
static int host1x_drm_probe(struct host1x_device *dev)
|
||||
{
|
||||
struct drm_driver *driver = &tegra_drm_driver;
|
||||
struct iommu_domain *domain;
|
||||
struct tegra_drm *tegra;
|
||||
struct drm_device *drm;
|
||||
int err;
|
||||
|
||||
|
@ -1209,18 +1051,180 @@ static int host1x_drm_probe(struct host1x_device *dev)
|
|||
if (IS_ERR(drm))
|
||||
return PTR_ERR(drm);
|
||||
|
||||
dev_set_drvdata(&dev->dev, drm);
|
||||
|
||||
err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
|
||||
if (err < 0)
|
||||
tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
|
||||
if (!tegra) {
|
||||
err = -ENOMEM;
|
||||
goto put;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the Tegra DRM clients are backed by an IOMMU, push buffers are
|
||||
* likely to be allocated beyond the 32-bit boundary if sufficient
|
||||
* system memory is available. This is problematic on earlier Tegra
|
||||
* generations where host1x supports a maximum of 32 address bits in
|
||||
* the GATHER opcode. In this case, unless host1x is behind an IOMMU
|
||||
* as well it won't be able to process buffers allocated beyond the
|
||||
* 32-bit boundary.
|
||||
*
|
||||
* The DMA API will use bounce buffers in this case, so that could
|
||||
* perhaps still be made to work, even if less efficient, but there
|
||||
* is another catch: in order to perform cache maintenance on pages
|
||||
* allocated for discontiguous buffers we need to map and unmap the
|
||||
* SG table representing these buffers. This is fine for something
|
||||
* small like a push buffer, but it exhausts the bounce buffer pool
|
||||
* (typically on the order of a few MiB) for framebuffers (many MiB
|
||||
* for any modern resolution).
|
||||
*
|
||||
* Work around this by making sure that Tegra DRM clients only use
|
||||
* an IOMMU if the parent host1x also uses an IOMMU.
|
||||
*
|
||||
* Note that there's still a small gap here that we don't cover: if
|
||||
* the DMA API is backed by an IOMMU there's no way to control which
|
||||
* device is attached to an IOMMU and which isn't, except via wiring
|
||||
* up the device tree appropriately. This is considered an problem
|
||||
* of integration, so care must be taken for the DT to be consistent.
|
||||
*/
|
||||
domain = iommu_get_domain_for_dev(drm->dev->parent);
|
||||
|
||||
if (domain && iommu_present(&platform_bus_type)) {
|
||||
tegra->domain = iommu_domain_alloc(&platform_bus_type);
|
||||
if (!tegra->domain) {
|
||||
err = -ENOMEM;
|
||||
goto free;
|
||||
}
|
||||
|
||||
err = iova_cache_get();
|
||||
if (err < 0)
|
||||
goto domain;
|
||||
}
|
||||
|
||||
mutex_init(&tegra->clients_lock);
|
||||
INIT_LIST_HEAD(&tegra->clients);
|
||||
|
||||
dev_set_drvdata(&dev->dev, drm);
|
||||
drm->dev_private = tegra;
|
||||
tegra->drm = drm;
|
||||
|
||||
drm_mode_config_init(drm);
|
||||
|
||||
drm->mode_config.min_width = 0;
|
||||
drm->mode_config.min_height = 0;
|
||||
|
||||
drm->mode_config.max_width = 4096;
|
||||
drm->mode_config.max_height = 4096;
|
||||
|
||||
drm->mode_config.allow_fb_modifiers = true;
|
||||
|
||||
drm->mode_config.normalize_zpos = true;
|
||||
|
||||
drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
|
||||
drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
|
||||
|
||||
err = tegra_drm_fb_prepare(drm);
|
||||
if (err < 0)
|
||||
goto config;
|
||||
|
||||
drm_kms_helper_poll_init(drm);
|
||||
|
||||
err = host1x_device_init(dev);
|
||||
if (err < 0)
|
||||
goto fbdev;
|
||||
|
||||
if (tegra->use_explicit_iommu) {
|
||||
u64 carveout_start, carveout_end, gem_start, gem_end;
|
||||
u64 dma_mask = dma_get_mask(&dev->dev);
|
||||
dma_addr_t start, end;
|
||||
unsigned long order;
|
||||
|
||||
start = tegra->domain->geometry.aperture_start & dma_mask;
|
||||
end = tegra->domain->geometry.aperture_end & dma_mask;
|
||||
|
||||
gem_start = start;
|
||||
gem_end = end - CARVEOUT_SZ;
|
||||
carveout_start = gem_end + 1;
|
||||
carveout_end = end;
|
||||
|
||||
order = __ffs(tegra->domain->pgsize_bitmap);
|
||||
init_iova_domain(&tegra->carveout.domain, 1UL << order,
|
||||
carveout_start >> order);
|
||||
|
||||
tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
|
||||
tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
|
||||
|
||||
drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
|
||||
mutex_init(&tegra->mm_lock);
|
||||
|
||||
DRM_DEBUG_DRIVER("IOMMU apertures:\n");
|
||||
DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
|
||||
DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
|
||||
carveout_end);
|
||||
} else if (tegra->domain) {
|
||||
iommu_domain_free(tegra->domain);
|
||||
tegra->domain = NULL;
|
||||
iova_cache_put();
|
||||
}
|
||||
|
||||
if (tegra->hub) {
|
||||
err = tegra_display_hub_prepare(tegra->hub);
|
||||
if (err < 0)
|
||||
goto device;
|
||||
}
|
||||
|
||||
/*
|
||||
* We don't use the drm_irq_install() helpers provided by the DRM
|
||||
* core, so we need to set this manually in order to allow the
|
||||
* DRM_IOCTL_WAIT_VBLANK to operate correctly.
|
||||
*/
|
||||
drm->irq_enabled = true;
|
||||
|
||||
/* syncpoints are used for full 32-bit hardware VBLANK counters */
|
||||
drm->max_vblank_count = 0xffffffff;
|
||||
|
||||
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
|
||||
if (err < 0)
|
||||
goto hub;
|
||||
|
||||
drm_mode_config_reset(drm);
|
||||
|
||||
err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
|
||||
false);
|
||||
if (err < 0)
|
||||
goto hub;
|
||||
|
||||
err = tegra_drm_fb_init(drm);
|
||||
if (err < 0)
|
||||
goto hub;
|
||||
|
||||
err = drm_dev_register(drm, 0);
|
||||
if (err < 0)
|
||||
goto put;
|
||||
goto fb;
|
||||
|
||||
return 0;
|
||||
|
||||
fb:
|
||||
tegra_drm_fb_exit(drm);
|
||||
hub:
|
||||
if (tegra->hub)
|
||||
tegra_display_hub_cleanup(tegra->hub);
|
||||
device:
|
||||
if (tegra->domain) {
|
||||
mutex_destroy(&tegra->mm_lock);
|
||||
drm_mm_takedown(&tegra->mm);
|
||||
put_iova_domain(&tegra->carveout.domain);
|
||||
iova_cache_put();
|
||||
}
|
||||
|
||||
host1x_device_exit(dev);
|
||||
fbdev:
|
||||
drm_kms_helper_poll_fini(drm);
|
||||
tegra_drm_fb_free(drm);
|
||||
config:
|
||||
drm_mode_config_cleanup(drm);
|
||||
domain:
|
||||
if (tegra->domain)
|
||||
iommu_domain_free(tegra->domain);
|
||||
free:
|
||||
kfree(tegra);
|
||||
put:
|
||||
drm_dev_put(drm);
|
||||
return err;
|
||||
|
@ -1229,8 +1233,29 @@ static int host1x_drm_probe(struct host1x_device *dev)
|
|||
static int host1x_drm_remove(struct host1x_device *dev)
|
||||
{
|
||||
struct drm_device *drm = dev_get_drvdata(&dev->dev);
|
||||
struct tegra_drm *tegra = drm->dev_private;
|
||||
int err;
|
||||
|
||||
drm_dev_unregister(drm);
|
||||
|
||||
drm_kms_helper_poll_fini(drm);
|
||||
tegra_drm_fb_exit(drm);
|
||||
drm_atomic_helper_shutdown(drm);
|
||||
drm_mode_config_cleanup(drm);
|
||||
|
||||
err = host1x_device_exit(dev);
|
||||
if (err < 0)
|
||||
dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
|
||||
|
||||
if (tegra->domain) {
|
||||
mutex_destroy(&tegra->mm_lock);
|
||||
drm_mm_takedown(&tegra->mm);
|
||||
put_iova_domain(&tegra->carveout.domain);
|
||||
iova_cache_put();
|
||||
iommu_domain_free(tegra->domain);
|
||||
}
|
||||
|
||||
kfree(tegra);
|
||||
drm_dev_put(drm);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -36,7 +36,7 @@ struct tegra_drm {
|
|||
struct drm_device *drm;
|
||||
|
||||
struct iommu_domain *domain;
|
||||
struct iommu_group *group;
|
||||
bool use_explicit_iommu;
|
||||
struct mutex mm_lock;
|
||||
struct drm_mm mm;
|
||||
|
||||
|
@ -100,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
|
|||
struct tegra_drm_client *client);
|
||||
int tegra_drm_unregister_client(struct tegra_drm *tegra,
|
||||
struct tegra_drm_client *client);
|
||||
struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
|
||||
bool shared);
|
||||
void host1x_client_iommu_detach(struct host1x_client *client,
|
||||
struct iommu_group *group);
|
||||
int host1x_client_iommu_attach(struct host1x_client *client);
|
||||
void host1x_client_iommu_detach(struct host1x_client *client);
|
||||
|
||||
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
|
||||
int tegra_drm_exit(struct tegra_drm *tegra);
|
||||
|
@ -155,17 +153,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector);
|
|||
void tegra_output_encoder_destroy(struct drm_encoder *encoder);
|
||||
|
||||
/* from dpaux.c */
|
||||
struct drm_dp_link;
|
||||
|
||||
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
|
||||
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
|
||||
int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
|
||||
int drm_dp_aux_detach(struct drm_dp_aux *aux);
|
||||
int drm_dp_aux_enable(struct drm_dp_aux *aux);
|
||||
int drm_dp_aux_disable(struct drm_dp_aux *aux);
|
||||
int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
|
||||
int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
|
||||
u8 pattern);
|
||||
|
||||
/* from fb.c */
|
||||
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
|
||||
|
|
|
@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon,
|
|||
static void falcon_copy_firmware_image(struct falcon *falcon,
|
||||
const struct firmware *firmware)
|
||||
{
|
||||
u32 *firmware_vaddr = falcon->firmware.vaddr;
|
||||
dma_addr_t daddr;
|
||||
u32 *virt = falcon->firmware.virt;
|
||||
size_t i;
|
||||
int err;
|
||||
|
||||
/* copy the whole thing taking into account endianness */
|
||||
for (i = 0; i < firmware->size / sizeof(u32); i++)
|
||||
firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
|
||||
|
||||
/* ensure that caches are flushed and falcon can see the firmware */
|
||||
daddr = dma_map_single(falcon->dev, firmware_vaddr,
|
||||
falcon->firmware.size, DMA_TO_DEVICE);
|
||||
err = dma_mapping_error(falcon->dev, daddr);
|
||||
if (err) {
|
||||
dev_err(falcon->dev, "failed to map firmware: %d\n", err);
|
||||
return;
|
||||
}
|
||||
dma_sync_single_for_device(falcon->dev, daddr,
|
||||
falcon->firmware.size, DMA_TO_DEVICE);
|
||||
dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
|
||||
DMA_TO_DEVICE);
|
||||
virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
|
||||
}
|
||||
|
||||
static int falcon_parse_firmware_image(struct falcon *falcon)
|
||||
{
|
||||
struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
|
||||
struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
|
||||
struct falcon_fw_os_header_v1 *os;
|
||||
|
||||
/* endian problems would show up right here */
|
||||
|
@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
os = falcon->firmware.vaddr + bin->os_header_offset;
|
||||
os = falcon->firmware.virt + bin->os_header_offset;
|
||||
|
||||
falcon->firmware.bin_data.size = bin->os_size;
|
||||
falcon->firmware.bin_data.offset = bin->os_data_offset;
|
||||
|
@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name)
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
falcon->firmware.size = falcon->firmware.firmware->size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon)
|
|||
const struct firmware *firmware = falcon->firmware.firmware;
|
||||
int err;
|
||||
|
||||
falcon->firmware.size = firmware->size;
|
||||
|
||||
/* allocate iova space for the firmware */
|
||||
falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
|
||||
&falcon->firmware.paddr);
|
||||
if (IS_ERR(falcon->firmware.vaddr)) {
|
||||
dev_err(falcon->dev, "DMA memory mapping failed\n");
|
||||
return PTR_ERR(falcon->firmware.vaddr);
|
||||
}
|
||||
|
||||
/* copy firmware image into local area. this also ensures endianness */
|
||||
falcon_copy_firmware_image(falcon, firmware);
|
||||
|
||||
|
@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon)
|
|||
err = falcon_parse_firmware_image(falcon);
|
||||
if (err < 0) {
|
||||
dev_err(falcon->dev, "failed to parse firmware image\n");
|
||||
goto err_setup_firmware_image;
|
||||
return err;
|
||||
}
|
||||
|
||||
release_firmware(firmware);
|
||||
falcon->firmware.firmware = NULL;
|
||||
|
||||
return 0;
|
||||
|
||||
err_setup_firmware_image:
|
||||
falcon->ops->free(falcon, falcon->firmware.size,
|
||||
falcon->firmware.paddr, falcon->firmware.vaddr);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int falcon_init(struct falcon *falcon)
|
||||
{
|
||||
/* check mandatory ops */
|
||||
if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
|
||||
return -EINVAL;
|
||||
|
||||
falcon->firmware.vaddr = NULL;
|
||||
falcon->firmware.virt = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void falcon_exit(struct falcon *falcon)
|
||||
{
|
||||
if (falcon->firmware.firmware) {
|
||||
if (falcon->firmware.firmware)
|
||||
release_firmware(falcon->firmware.firmware);
|
||||
falcon->firmware.firmware = NULL;
|
||||
}
|
||||
|
||||
if (falcon->firmware.vaddr) {
|
||||
falcon->ops->free(falcon, falcon->firmware.size,
|
||||
falcon->firmware.paddr,
|
||||
falcon->firmware.vaddr);
|
||||
falcon->firmware.vaddr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int falcon_boot(struct falcon *falcon)
|
||||
|
@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon)
|
|||
u32 value;
|
||||
int err;
|
||||
|
||||
if (!falcon->firmware.vaddr)
|
||||
if (!falcon->firmware.virt)
|
||||
return -EINVAL;
|
||||
|
||||
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
|
||||
|
@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon)
|
|||
falcon_writel(falcon, 0, FALCON_DMACTL);
|
||||
|
||||
/* setup the address of the binary data so Falcon can access it later */
|
||||
falcon_writel(falcon, (falcon->firmware.paddr +
|
||||
falcon_writel(falcon, (falcon->firmware.iova +
|
||||
falcon->firmware.bin_data.offset) >> 8,
|
||||
FALCON_DMATRFBASE);
|
||||
|
||||
|
|
|
@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 {
|
|||
u32 data_size;
|
||||
};
|
||||
|
||||
struct falcon;
|
||||
|
||||
struct falcon_ops {
|
||||
void *(*alloc)(struct falcon *falcon, size_t size,
|
||||
dma_addr_t *paddr);
|
||||
void (*free)(struct falcon *falcon, size_t size,
|
||||
dma_addr_t paddr, void *vaddr);
|
||||
};
|
||||
|
||||
struct falcon_firmware_section {
|
||||
unsigned long offset;
|
||||
size_t size;
|
||||
|
@ -93,8 +84,9 @@ struct falcon_firmware {
|
|||
const struct firmware *firmware;
|
||||
|
||||
/* Raw firmware data */
|
||||
dma_addr_t paddr;
|
||||
void *vaddr;
|
||||
dma_addr_t iova;
|
||||
dma_addr_t phys;
|
||||
void *virt;
|
||||
size_t size;
|
||||
|
||||
/* Parsed firmware information */
|
||||
|
@ -107,8 +99,6 @@ struct falcon {
|
|||
/* Set by falcon client */
|
||||
struct device *dev;
|
||||
void __iomem *regs;
|
||||
const struct falcon_ops *ops;
|
||||
void *data;
|
||||
|
||||
struct falcon_firmware firmware;
|
||||
};
|
||||
|
|
|
@ -269,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
|
|||
}
|
||||
}
|
||||
|
||||
drm->mode_config.fb_base = (resource_size_t)bo->paddr;
|
||||
drm->mode_config.fb_base = (resource_size_t)bo->iova;
|
||||
info->screen_base = (void __iomem *)bo->vaddr + offset;
|
||||
info->screen_size = size;
|
||||
info->fix.smem_start = (unsigned long)(bo->paddr + offset);
|
||||
info->fix.smem_start = (unsigned long)(bo->iova + offset);
|
||||
info->fix.smem_len = size;
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -27,17 +27,55 @@ static void tegra_bo_put(struct host1x_bo *bo)
|
|||
drm_gem_object_put_unlocked(&obj->gem);
|
||||
}
|
||||
|
||||
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
|
||||
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
|
||||
dma_addr_t *phys)
|
||||
{
|
||||
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
|
||||
struct sg_table *sgt;
|
||||
int err;
|
||||
|
||||
*sgt = obj->sgt;
|
||||
/*
|
||||
* If we've manually mapped the buffer object through the IOMMU, make
|
||||
* sure to return the IOVA address of our mapping.
|
||||
*/
|
||||
if (phys && obj->mm) {
|
||||
*phys = obj->iova;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return obj->paddr;
|
||||
/*
|
||||
* If we don't have a mapping for this buffer yet, return an SG table
|
||||
* so that host1x can do the mapping for us via the DMA API.
|
||||
*/
|
||||
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
|
||||
if (!sgt)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
if (obj->pages) {
|
||||
err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
|
||||
0, obj->gem.size, GFP_KERNEL);
|
||||
if (err < 0)
|
||||
goto free;
|
||||
} else {
|
||||
err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
|
||||
obj->gem.size);
|
||||
if (err < 0)
|
||||
goto free;
|
||||
}
|
||||
|
||||
return sgt;
|
||||
|
||||
free:
|
||||
kfree(sgt);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
|
||||
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
|
||||
{
|
||||
if (sgt) {
|
||||
sg_free_table(sgt);
|
||||
kfree(sgt);
|
||||
}
|
||||
}
|
||||
|
||||
static void *tegra_bo_mmap(struct host1x_bo *bo)
|
||||
|
@ -133,9 +171,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
|
|||
goto unlock;
|
||||
}
|
||||
|
||||
bo->paddr = bo->mm->start;
|
||||
bo->iova = bo->mm->start;
|
||||
|
||||
bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
|
||||
bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
|
||||
bo->sgt->nents, prot);
|
||||
if (!bo->size) {
|
||||
dev_err(tegra->drm->dev, "failed to map buffer\n");
|
||||
|
@ -161,7 +199,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
|
|||
return 0;
|
||||
|
||||
mutex_lock(&tegra->mm_lock);
|
||||
iommu_unmap(tegra->domain, bo->paddr, bo->size);
|
||||
iommu_unmap(tegra->domain, bo->iova, bo->size);
|
||||
drm_mm_remove_node(bo->mm);
|
||||
mutex_unlock(&tegra->mm_lock);
|
||||
|
||||
|
@ -209,7 +247,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
|
|||
sg_free_table(bo->sgt);
|
||||
kfree(bo->sgt);
|
||||
} else if (bo->vaddr) {
|
||||
dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
|
||||
dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -264,7 +302,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
|
|||
} else {
|
||||
size_t size = bo->gem.size;
|
||||
|
||||
bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
|
||||
bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
|
||||
GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!bo->vaddr) {
|
||||
dev_err(drm->dev,
|
||||
|
@ -365,7 +403,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
|
|||
goto detach;
|
||||
}
|
||||
|
||||
bo->paddr = sg_dma_address(bo->sgt->sgl);
|
||||
bo->iova = sg_dma_address(bo->sgt->sgl);
|
||||
}
|
||||
|
||||
bo->gem.import_attach = attach;
|
||||
|
@ -461,7 +499,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
|
|||
vma->vm_flags &= ~VM_PFNMAP;
|
||||
vma->vm_pgoff = 0;
|
||||
|
||||
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
|
||||
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
|
||||
gem->size);
|
||||
if (err < 0) {
|
||||
drm_gem_vm_close(vma);
|
||||
|
@ -508,25 +546,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
|
|||
return NULL;
|
||||
|
||||
if (bo->pages) {
|
||||
struct scatterlist *sg;
|
||||
unsigned int i;
|
||||
|
||||
if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
|
||||
goto free;
|
||||
|
||||
for_each_sg(sgt->sgl, sg, bo->num_pages, i)
|
||||
sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
|
||||
|
||||
if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
|
||||
if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
|
||||
0, gem->size, GFP_KERNEL) < 0)
|
||||
goto free;
|
||||
} else {
|
||||
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
|
||||
if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
|
||||
gem->size) < 0)
|
||||
goto free;
|
||||
|
||||
sg_dma_address(sgt->sgl) = bo->paddr;
|
||||
sg_dma_len(sgt->sgl) = gem->size;
|
||||
}
|
||||
|
||||
if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
|
||||
goto free;
|
||||
|
||||
return sgt;
|
||||
|
||||
free:
|
||||
|
|
|
@ -31,7 +31,7 @@ struct tegra_bo {
|
|||
struct host1x_bo base;
|
||||
unsigned long flags;
|
||||
struct sg_table *sgt;
|
||||
dma_addr_t paddr;
|
||||
dma_addr_t iova;
|
||||
void *vaddr;
|
||||
|
||||
struct drm_mm_node *mm;
|
||||
|
|
|
@ -17,7 +17,6 @@ struct gr2d_soc {
|
|||
};
|
||||
|
||||
struct gr2d {
|
||||
struct iommu_group *group;
|
||||
struct tegra_drm_client client;
|
||||
struct host1x_channel *channel;
|
||||
struct clk *clk;
|
||||
|
@ -40,7 +39,7 @@ static int gr2d_init(struct host1x_client *client)
|
|||
struct gr2d *gr2d = to_gr2d(drm);
|
||||
int err;
|
||||
|
||||
gr2d->channel = host1x_channel_request(client->dev);
|
||||
gr2d->channel = host1x_channel_request(client);
|
||||
if (!gr2d->channel)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -51,9 +50,8 @@ static int gr2d_init(struct host1x_client *client)
|
|||
goto put;
|
||||
}
|
||||
|
||||
gr2d->group = host1x_client_iommu_attach(client, false);
|
||||
if (IS_ERR(gr2d->group)) {
|
||||
err = PTR_ERR(gr2d->group);
|
||||
err = host1x_client_iommu_attach(client);
|
||||
if (err < 0) {
|
||||
dev_err(client->dev, "failed to attach to domain: %d\n", err);
|
||||
goto free;
|
||||
}
|
||||
|
@ -67,7 +65,7 @@ static int gr2d_init(struct host1x_client *client)
|
|||
return 0;
|
||||
|
||||
detach:
|
||||
host1x_client_iommu_detach(client, gr2d->group);
|
||||
host1x_client_iommu_detach(client);
|
||||
free:
|
||||
host1x_syncpt_free(client->syncpts[0]);
|
||||
put:
|
||||
|
@ -87,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client)
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
host1x_client_iommu_detach(client, gr2d->group);
|
||||
host1x_client_iommu_detach(client);
|
||||
host1x_syncpt_free(client->syncpts[0]);
|
||||
host1x_channel_put(gr2d->channel);
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@ struct gr3d_soc {
|
|||
};
|
||||
|
||||
struct gr3d {
|
||||
struct iommu_group *group;
|
||||
struct tegra_drm_client client;
|
||||
struct host1x_channel *channel;
|
||||
struct clk *clk_secondary;
|
||||
|
@ -49,7 +48,7 @@ static int gr3d_init(struct host1x_client *client)
|
|||
struct gr3d *gr3d = to_gr3d(drm);
|
||||
int err;
|
||||
|
||||
gr3d->channel = host1x_channel_request(client->dev);
|
||||
gr3d->channel = host1x_channel_request(client);
|
||||
if (!gr3d->channel)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client)
|
|||
goto put;
|
||||
}
|
||||
|
||||
gr3d->group = host1x_client_iommu_attach(client, false);
|
||||
if (IS_ERR(gr3d->group)) {
|
||||
err = PTR_ERR(gr3d->group);
|
||||
err = host1x_client_iommu_attach(client);
|
||||
if (err < 0) {
|
||||
dev_err(client->dev, "failed to attach to domain: %d\n", err);
|
||||
goto free;
|
||||
}
|
||||
|
@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client)
|
|||
return 0;
|
||||
|
||||
detach:
|
||||
host1x_client_iommu_detach(client, gr3d->group);
|
||||
host1x_client_iommu_detach(client);
|
||||
free:
|
||||
host1x_syncpt_free(client->syncpts[0]);
|
||||
put:
|
||||
|
@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client)
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
host1x_client_iommu_detach(client, gr3d->group);
|
||||
host1x_client_iommu_detach(client);
|
||||
host1x_syncpt_free(client->syncpts[0]);
|
||||
host1x_channel_put(gr3d->channel);
|
||||
|
||||
|
|
|
@ -413,7 +413,6 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
|
|||
unsigned int zpos = plane->state->normalized_zpos;
|
||||
struct drm_framebuffer *fb = plane->state->fb;
|
||||
struct tegra_plane *p = to_tegra_plane(plane);
|
||||
struct tegra_bo *bo;
|
||||
dma_addr_t base;
|
||||
u32 value;
|
||||
|
||||
|
@ -456,8 +455,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
|
|||
/* disable compression */
|
||||
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
|
||||
|
||||
bo = tegra_fb_get_plane(fb, 0);
|
||||
base = bo->paddr;
|
||||
base = state->iova[0] + fb->offsets[0];
|
||||
|
||||
tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
|
||||
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
|
||||
|
@ -521,6 +519,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
|
|||
}
|
||||
|
||||
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
|
||||
.prepare_fb = tegra_plane_prepare_fb,
|
||||
.cleanup_fb = tegra_plane_cleanup_fb,
|
||||
.atomic_check = tegra_shared_plane_atomic_check,
|
||||
.atomic_update = tegra_shared_plane_atomic_update,
|
||||
.atomic_disable = tegra_shared_plane_atomic_disable,
|
||||
|
|
|
@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
|
|||
|
||||
void tegra_output_connector_destroy(struct drm_connector *connector)
|
||||
{
|
||||
struct tegra_output *output = connector_to_output(connector);
|
||||
|
||||
if (output->cec)
|
||||
cec_notifier_conn_unregister(output->cec);
|
||||
|
||||
drm_connector_unregister(connector);
|
||||
drm_connector_cleanup(connector);
|
||||
}
|
||||
|
@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output)
|
|||
disable_irq(output->hpd_irq);
|
||||
}
|
||||
|
||||
output->cec = cec_notifier_get(output->dev);
|
||||
if (!output->cec)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tegra_output_remove(struct tegra_output *output)
|
||||
{
|
||||
if (output->cec)
|
||||
cec_notifier_put(output->cec);
|
||||
|
||||
if (output->hpd_gpio)
|
||||
free_irq(output->hpd_irq, output);
|
||||
|
||||
|
@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output)
|
|||
|
||||
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
|
||||
{
|
||||
int connector_type;
|
||||
int err;
|
||||
|
||||
if (output->panel) {
|
||||
|
@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
|
|||
if (output->hpd_gpio)
|
||||
enable_irq(output->hpd_irq);
|
||||
|
||||
connector_type = output->connector.connector_type;
|
||||
/*
|
||||
* Create a CEC notifier for HDMI connector.
|
||||
*/
|
||||
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
|
||||
connector_type == DRM_MODE_CONNECTOR_HDMIB) {
|
||||
struct cec_connector_info conn_info;
|
||||
|
||||
cec_fill_conn_info_from_drm(&conn_info, &output->connector);
|
||||
output->cec = cec_notifier_conn_register(output->dev, NULL,
|
||||
&conn_info);
|
||||
if (!output->cec)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_gem_framebuffer_helper.h>
|
||||
#include <drm/drm_plane_helper.h>
|
||||
|
||||
#include "dc.h"
|
||||
|
@ -23,6 +24,7 @@ static void tegra_plane_reset(struct drm_plane *plane)
|
|||
{
|
||||
struct tegra_plane *p = to_tegra_plane(plane);
|
||||
struct tegra_plane_state *state;
|
||||
unsigned int i;
|
||||
|
||||
if (plane->state)
|
||||
__drm_atomic_helper_plane_destroy_state(plane->state);
|
||||
|
@ -36,6 +38,9 @@ static void tegra_plane_reset(struct drm_plane *plane)
|
|||
plane->state->plane = plane;
|
||||
plane->state->zpos = p->index;
|
||||
plane->state->normalized_zpos = p->index;
|
||||
|
||||
for (i = 0; i < 3; i++)
|
||||
state->iova[i] = DMA_MAPPING_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -60,6 +65,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
|
|||
for (i = 0; i < 2; i++)
|
||||
copy->blending[i] = state->blending[i];
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
copy->iova[i] = DMA_MAPPING_ERROR;
|
||||
copy->sgt[i] = NULL;
|
||||
}
|
||||
|
||||
return ©->base;
|
||||
}
|
||||
|
||||
|
@ -95,6 +105,100 @@ const struct drm_plane_funcs tegra_plane_funcs = {
|
|||
.format_mod_supported = tegra_plane_format_mod_supported,
|
||||
};
|
||||
|
||||
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
|
||||
{
|
||||
unsigned int i;
|
||||
int err;
|
||||
|
||||
for (i = 0; i < state->base.fb->format->num_planes; i++) {
|
||||
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
|
||||
|
||||
if (!dc->client.group) {
|
||||
struct sg_table *sgt;
|
||||
|
||||
sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
|
||||
if (IS_ERR(sgt)) {
|
||||
err = PTR_ERR(sgt);
|
||||
goto unpin;
|
||||
}
|
||||
|
||||
err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
|
||||
DMA_TO_DEVICE);
|
||||
if (err == 0) {
|
||||
err = -ENOMEM;
|
||||
goto unpin;
|
||||
}
|
||||
|
||||
state->iova[i] = sg_dma_address(sgt->sgl);
|
||||
state->sgt[i] = sgt;
|
||||
} else {
|
||||
state->iova[i] = bo->iova;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
unpin:
|
||||
dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
|
||||
|
||||
while (i--) {
|
||||
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
|
||||
struct sg_table *sgt = state->sgt[i];
|
||||
|
||||
dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
|
||||
host1x_bo_unpin(dc->dev, &bo->base, sgt);
|
||||
|
||||
state->iova[i] = DMA_MAPPING_ERROR;
|
||||
state->sgt[i] = NULL;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < state->base.fb->format->num_planes; i++) {
|
||||
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
|
||||
|
||||
if (!dc->client.group) {
|
||||
struct sg_table *sgt = state->sgt[i];
|
||||
|
||||
if (sgt) {
|
||||
dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
|
||||
DMA_TO_DEVICE);
|
||||
host1x_bo_unpin(dc->dev, &bo->base, sgt);
|
||||
}
|
||||
}
|
||||
|
||||
state->iova[i] = DMA_MAPPING_ERROR;
|
||||
state->sgt[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int tegra_plane_prepare_fb(struct drm_plane *plane,
|
||||
struct drm_plane_state *state)
|
||||
{
|
||||
struct tegra_dc *dc = to_tegra_dc(state->crtc);
|
||||
|
||||
if (!state->fb)
|
||||
return 0;
|
||||
|
||||
drm_gem_fb_prepare_fb(plane, state);
|
||||
|
||||
return tegra_dc_pin(dc, to_tegra_plane_state(state));
|
||||
}
|
||||
|
||||
void tegra_plane_cleanup_fb(struct drm_plane *plane,
|
||||
struct drm_plane_state *state)
|
||||
{
|
||||
struct tegra_dc *dc = to_tegra_dc(state->crtc);
|
||||
|
||||
if (dc)
|
||||
tegra_dc_unpin(dc, to_tegra_plane_state(state));
|
||||
}
|
||||
|
||||
int tegra_plane_state_add(struct tegra_plane *plane,
|
||||
struct drm_plane_state *state)
|
||||
{
|
||||
|
|
|
@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state {
|
|||
struct tegra_plane_state {
|
||||
struct drm_plane_state base;
|
||||
|
||||
struct sg_table *sgt[3];
|
||||
dma_addr_t iova[3];
|
||||
|
||||
struct tegra_bo_tiling tiling;
|
||||
u32 format;
|
||||
u32 swap;
|
||||
|
@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state)
|
|||
|
||||
extern const struct drm_plane_funcs tegra_plane_funcs;
|
||||
|
||||
int tegra_plane_prepare_fb(struct drm_plane *plane,
|
||||
struct drm_plane_state *state);
|
||||
void tegra_plane_cleanup_fb(struct drm_plane *plane,
|
||||
struct drm_plane_state *state);
|
||||
|
||||
int tegra_plane_state_add(struct tegra_plane *plane,
|
||||
struct drm_plane_state *state);
|
||||
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -39,6 +39,7 @@
|
|||
#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
|
||||
#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
|
||||
#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
|
||||
#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4)
|
||||
#define SOR_STATE_ASY_OWNER_MASK 0xf
|
||||
#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
|
||||
|
||||
|
@ -283,10 +284,12 @@
|
|||
#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
|
||||
#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
|
||||
#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
|
||||
#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x)))
|
||||
#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
|
||||
#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
|
||||
#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
|
||||
#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
|
||||
#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x)))
|
||||
|
||||
#define SOR_DP_PADCTL1 0x5d
|
||||
|
||||
|
|
|
@ -34,7 +34,6 @@ struct vic {
|
|||
void __iomem *regs;
|
||||
struct tegra_drm_client client;
|
||||
struct host1x_channel *channel;
|
||||
struct iommu_domain *domain;
|
||||
struct device *dev;
|
||||
struct clk *clk;
|
||||
struct reset_control *rst;
|
||||
|
@ -97,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev)
|
|||
|
||||
static int vic_boot(struct vic *vic)
|
||||
{
|
||||
#ifdef CONFIG_IOMMU_API
|
||||
struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
|
||||
#endif
|
||||
u32 fce_ucode_size, fce_bin_data_offset;
|
||||
void *hdr;
|
||||
int err = 0;
|
||||
|
@ -105,15 +107,14 @@ static int vic_boot(struct vic *vic)
|
|||
return 0;
|
||||
|
||||
#ifdef CONFIG_IOMMU_API
|
||||
if (vic->config->supports_sid) {
|
||||
struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
|
||||
if (vic->config->supports_sid && spec) {
|
||||
u32 value;
|
||||
|
||||
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
|
||||
TRANSCFG_ATT(0, TRANSCFG_SID_HW);
|
||||
vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
|
||||
|
||||
if (spec && spec->num_ids > 0) {
|
||||
if (spec->num_ids > 0) {
|
||||
value = spec->ids[0] & 0xffff;
|
||||
|
||||
vic_writel(vic, value, VIC_THI_STREAMID0);
|
||||
|
@ -132,9 +133,9 @@ static int vic_boot(struct vic *vic)
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
hdr = vic->falcon.firmware.vaddr;
|
||||
hdr = vic->falcon.firmware.virt;
|
||||
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
|
||||
hdr = vic->falcon.firmware.vaddr +
|
||||
hdr = vic->falcon.firmware.virt +
|
||||
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
|
||||
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
|
||||
|
||||
|
@ -142,7 +143,7 @@ static int vic_boot(struct vic *vic)
|
|||
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
|
||||
fce_ucode_size);
|
||||
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
|
||||
(vic->falcon.firmware.paddr + fce_bin_data_offset)
|
||||
(vic->falcon.firmware.iova + fce_bin_data_offset)
|
||||
>> 8);
|
||||
|
||||
err = falcon_wait_idle(&vic->falcon);
|
||||
|
@ -157,48 +158,21 @@ static int vic_boot(struct vic *vic)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
|
||||
dma_addr_t *iova)
|
||||
{
|
||||
struct tegra_drm *tegra = falcon->data;
|
||||
|
||||
return tegra_drm_alloc(tegra, size, iova);
|
||||
}
|
||||
|
||||
static void vic_falcon_free(struct falcon *falcon, size_t size,
|
||||
dma_addr_t iova, void *va)
|
||||
{
|
||||
struct tegra_drm *tegra = falcon->data;
|
||||
|
||||
return tegra_drm_free(tegra, size, va, iova);
|
||||
}
|
||||
|
||||
static const struct falcon_ops vic_falcon_ops = {
|
||||
.alloc = vic_falcon_alloc,
|
||||
.free = vic_falcon_free
|
||||
};
|
||||
|
||||
static int vic_init(struct host1x_client *client)
|
||||
{
|
||||
struct tegra_drm_client *drm = host1x_to_drm_client(client);
|
||||
struct iommu_group *group = iommu_group_get(client->dev);
|
||||
struct drm_device *dev = dev_get_drvdata(client->parent);
|
||||
struct tegra_drm *tegra = dev->dev_private;
|
||||
struct vic *vic = to_vic(drm);
|
||||
int err;
|
||||
|
||||
if (group && tegra->domain) {
|
||||
err = iommu_attach_group(tegra->domain, group);
|
||||
if (err < 0) {
|
||||
dev_err(vic->dev, "failed to attach to domain: %d\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
|
||||
vic->domain = tegra->domain;
|
||||
err = host1x_client_iommu_attach(client);
|
||||
if (err < 0) {
|
||||
dev_err(vic->dev, "failed to attach to domain: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
vic->channel = host1x_channel_request(client->dev);
|
||||
vic->channel = host1x_channel_request(client);
|
||||
if (!vic->channel) {
|
||||
err = -ENOMEM;
|
||||
goto detach;
|
||||
|
@ -214,6 +188,12 @@ static int vic_init(struct host1x_client *client)
|
|||
if (err < 0)
|
||||
goto free_syncpt;
|
||||
|
||||
/*
|
||||
* Inherit the DMA parameters (such as maximum segment size) from the
|
||||
* parent device.
|
||||
*/
|
||||
client->dev->dma_parms = client->parent->dma_parms;
|
||||
|
||||
return 0;
|
||||
|
||||
free_syncpt:
|
||||
|
@ -221,8 +201,7 @@ static int vic_init(struct host1x_client *client)
|
|||
free_channel:
|
||||
host1x_channel_put(vic->channel);
|
||||
detach:
|
||||
if (group && tegra->domain)
|
||||
iommu_detach_group(tegra->domain, group);
|
||||
host1x_client_iommu_detach(client);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -230,22 +209,32 @@ static int vic_init(struct host1x_client *client)
|
|||
static int vic_exit(struct host1x_client *client)
|
||||
{
|
||||
struct tegra_drm_client *drm = host1x_to_drm_client(client);
|
||||
struct iommu_group *group = iommu_group_get(client->dev);
|
||||
struct drm_device *dev = dev_get_drvdata(client->parent);
|
||||
struct tegra_drm *tegra = dev->dev_private;
|
||||
struct vic *vic = to_vic(drm);
|
||||
int err;
|
||||
|
||||
/* avoid a dangling pointer just in case this disappears */
|
||||
client->dev->dma_parms = NULL;
|
||||
|
||||
err = tegra_drm_unregister_client(tegra, drm);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
host1x_syncpt_free(client->syncpts[0]);
|
||||
host1x_channel_put(vic->channel);
|
||||
host1x_client_iommu_detach(client);
|
||||
|
||||
if (vic->domain) {
|
||||
iommu_detach_group(vic->domain, group);
|
||||
vic->domain = NULL;
|
||||
if (client->group) {
|
||||
dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
|
||||
vic->falcon.firmware.size, DMA_TO_DEVICE);
|
||||
tegra_drm_free(tegra, vic->falcon.firmware.size,
|
||||
vic->falcon.firmware.virt,
|
||||
vic->falcon.firmware.iova);
|
||||
} else {
|
||||
dma_free_coherent(vic->dev, vic->falcon.firmware.size,
|
||||
vic->falcon.firmware.virt,
|
||||
vic->falcon.firmware.iova);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -258,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = {
|
|||
|
||||
static int vic_load_firmware(struct vic *vic)
|
||||
{
|
||||
struct host1x_client *client = &vic->client.base;
|
||||
struct tegra_drm *tegra = vic->client.drm;
|
||||
dma_addr_t iova;
|
||||
size_t size;
|
||||
void *virt;
|
||||
int err;
|
||||
|
||||
if (vic->falcon.data)
|
||||
if (vic->falcon.firmware.virt)
|
||||
return 0;
|
||||
|
||||
vic->falcon.data = vic->client.drm;
|
||||
|
||||
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
|
||||
if (err < 0)
|
||||
goto cleanup;
|
||||
return err;
|
||||
|
||||
size = vic->falcon.firmware.size;
|
||||
|
||||
if (!client->group) {
|
||||
virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
|
||||
|
||||
err = dma_mapping_error(vic->dev, iova);
|
||||
if (err < 0)
|
||||
return err;
|
||||
} else {
|
||||
virt = tegra_drm_alloc(tegra, size, &iova);
|
||||
}
|
||||
|
||||
vic->falcon.firmware.virt = virt;
|
||||
vic->falcon.firmware.iova = iova;
|
||||
|
||||
err = falcon_load_firmware(&vic->falcon);
|
||||
if (err < 0)
|
||||
goto cleanup;
|
||||
|
||||
/*
|
||||
* In this case we have received an IOVA from the shared domain, so we
|
||||
* need to make sure to get the physical address so that the DMA API
|
||||
* knows what memory pages to flush the cache for.
|
||||
*/
|
||||
if (client->group) {
|
||||
dma_addr_t phys;
|
||||
|
||||
phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
|
||||
|
||||
err = dma_mapping_error(vic->dev, phys);
|
||||
if (err < 0)
|
||||
goto cleanup;
|
||||
|
||||
vic->falcon.firmware.phys = phys;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
vic->falcon.data = NULL;
|
||||
if (!client->group)
|
||||
dma_free_coherent(vic->dev, size, virt, iova);
|
||||
else
|
||||
tegra_drm_free(tegra, size, virt, iova);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -374,6 +402,13 @@ static int vic_probe(struct platform_device *pdev)
|
|||
struct vic *vic;
|
||||
int err;
|
||||
|
||||
/* inherit DMA mask from host1x parent */
|
||||
err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
|
||||
if (err < 0) {
|
||||
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
|
||||
if (!vic)
|
||||
return -ENOMEM;
|
||||
|
@ -410,7 +445,6 @@ static int vic_probe(struct platform_device *pdev)
|
|||
|
||||
vic->falcon.dev = dev;
|
||||
vic->falcon.regs = vic->regs;
|
||||
vic->falcon.ops = &vic_falcon_ops;
|
||||
|
||||
err = falcon_init(&vic->falcon);
|
||||
if (err < 0)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
config TEGRA_HOST1X
|
||||
tristate "NVIDIA Tegra host1x driver"
|
||||
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
|
||||
select IOMMU_IOVA if IOMMU_SUPPORT
|
||||
select IOMMU_IOVA
|
||||
help
|
||||
Driver for the NVIDIA Tegra host1x hardware.
|
||||
|
||||
|
|
|
@ -445,7 +445,7 @@ static int host1x_device_add(struct host1x *host1x,
|
|||
of_dma_configure(&device->dev, host1x->dev->of_node, true);
|
||||
|
||||
device->dev.dma_parms = &device->dma_parms;
|
||||
dma_set_max_seg_size(&device->dev, SZ_4M);
|
||||
dma_set_max_seg_size(&device->dev, UINT_MAX);
|
||||
|
||||
err = host1x_device_parse_dt(device, driver);
|
||||
if (err < 0) {
|
||||
|
|
|
@ -232,9 +232,9 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
|
|||
*
|
||||
* Must be called with the cdma lock held.
|
||||
*/
|
||||
int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
|
||||
struct host1x_cdma *cdma,
|
||||
unsigned int needed)
|
||||
static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
|
||||
struct host1x_cdma *cdma,
|
||||
unsigned int needed)
|
||||
{
|
||||
while (true) {
|
||||
struct push_buffer *pb = &cdma->push_buffer;
|
||||
|
|
|
@ -115,14 +115,14 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host)
|
|||
|
||||
/**
|
||||
* host1x_channel_request() - Allocate a channel
|
||||
* @device: Host1x unit this channel will be used to send commands to
|
||||
* @client: Host1x client this channel will be used to send commands to
|
||||
*
|
||||
* Allocates a new host1x channel for @device. May return NULL if CDMA
|
||||
* Allocates a new host1x channel for @client. May return NULL if CDMA
|
||||
* initialization fails.
|
||||
*/
|
||||
struct host1x_channel *host1x_channel_request(struct device *dev)
|
||||
struct host1x_channel *host1x_channel_request(struct host1x_client *client)
|
||||
{
|
||||
struct host1x *host = dev_get_drvdata(dev->parent);
|
||||
struct host1x *host = dev_get_drvdata(client->dev->parent);
|
||||
struct host1x_channel_list *chlist = &host->channel_list;
|
||||
struct host1x_channel *channel;
|
||||
int err;
|
||||
|
@ -133,7 +133,8 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
|
|||
|
||||
kref_init(&channel->refcount);
|
||||
mutex_init(&channel->submitlock);
|
||||
channel->dev = dev;
|
||||
channel->client = client;
|
||||
channel->dev = client->dev;
|
||||
|
||||
err = host1x_hw_channel_init(host, channel, channel->id);
|
||||
if (err < 0)
|
||||
|
@ -148,7 +149,7 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
|
|||
fail:
|
||||
clear_bit(channel->id, chlist->allocated_channels);
|
||||
|
||||
dev_err(dev, "failed to initialize channel\n");
|
||||
dev_err(client->dev, "failed to initialize channel\n");
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@ struct host1x_channel {
|
|||
unsigned int id;
|
||||
struct mutex submitlock;
|
||||
void __iomem *regs;
|
||||
struct host1x_client *client;
|
||||
struct device *dev;
|
||||
struct host1x_cdma cdma;
|
||||
};
|
||||
|
|
|
@ -18,10 +18,6 @@
|
|||
#include <trace/events/host1x.h>
|
||||
#undef CREATE_TRACE_POINTS
|
||||
|
||||
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
|
||||
#include <asm/dma-iommu.h>
|
||||
#endif
|
||||
|
||||
#include "bus.h"
|
||||
#include "channel.h"
|
||||
#include "debug.h"
|
||||
|
@ -77,6 +73,10 @@ static const struct host1x_info host1x01_info = {
|
|||
.init = host1x01_init,
|
||||
.sync_offset = 0x3000,
|
||||
.dma_mask = DMA_BIT_MASK(32),
|
||||
.has_wide_gather = false,
|
||||
.has_hypervisor = false,
|
||||
.num_sid_entries = 0,
|
||||
.sid_table = NULL,
|
||||
};
|
||||
|
||||
static const struct host1x_info host1x02_info = {
|
||||
|
@ -87,6 +87,10 @@ static const struct host1x_info host1x02_info = {
|
|||
.init = host1x02_init,
|
||||
.sync_offset = 0x3000,
|
||||
.dma_mask = DMA_BIT_MASK(32),
|
||||
.has_wide_gather = false,
|
||||
.has_hypervisor = false,
|
||||
.num_sid_entries = 0,
|
||||
.sid_table = NULL,
|
||||
};
|
||||
|
||||
static const struct host1x_info host1x04_info = {
|
||||
|
@ -97,6 +101,10 @@ static const struct host1x_info host1x04_info = {
|
|||
.init = host1x04_init,
|
||||
.sync_offset = 0x2100,
|
||||
.dma_mask = DMA_BIT_MASK(34),
|
||||
.has_wide_gather = false,
|
||||
.has_hypervisor = false,
|
||||
.num_sid_entries = 0,
|
||||
.sid_table = NULL,
|
||||
};
|
||||
|
||||
static const struct host1x_info host1x05_info = {
|
||||
|
@ -107,6 +115,10 @@ static const struct host1x_info host1x05_info = {
|
|||
.init = host1x05_init,
|
||||
.sync_offset = 0x2100,
|
||||
.dma_mask = DMA_BIT_MASK(34),
|
||||
.has_wide_gather = false,
|
||||
.has_hypervisor = false,
|
||||
.num_sid_entries = 0,
|
||||
.sid_table = NULL,
|
||||
};
|
||||
|
||||
static const struct host1x_sid_entry tegra186_sid_table[] = {
|
||||
|
@ -126,6 +138,7 @@ static const struct host1x_info host1x06_info = {
|
|||
.init = host1x06_init,
|
||||
.sync_offset = 0x0,
|
||||
.dma_mask = DMA_BIT_MASK(40),
|
||||
.has_wide_gather = true,
|
||||
.has_hypervisor = true,
|
||||
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
|
||||
.sid_table = tegra186_sid_table,
|
||||
|
@ -148,6 +161,7 @@ static const struct host1x_info host1x07_info = {
|
|||
.init = host1x07_init,
|
||||
.sync_offset = 0x0,
|
||||
.dma_mask = DMA_BIT_MASK(40),
|
||||
.has_wide_gather = true,
|
||||
.has_hypervisor = true,
|
||||
.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
|
||||
.sid_table = tegra194_sid_table,
|
||||
|
@ -178,6 +192,117 @@ static void host1x_setup_sid_table(struct host1x *host)
|
|||
}
|
||||
}
|
||||
|
||||
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
|
||||
int err;
|
||||
|
||||
/*
|
||||
* If the host1x firewall is enabled, there's no need to enable IOMMU
|
||||
* support. Similarly, if host1x is already attached to an IOMMU (via
|
||||
* the DMA API), don't try to attach again.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
|
||||
return domain;
|
||||
|
||||
host->group = iommu_group_get(host->dev);
|
||||
if (host->group) {
|
||||
struct iommu_domain_geometry *geometry;
|
||||
dma_addr_t start, end;
|
||||
unsigned long order;
|
||||
|
||||
err = iova_cache_get();
|
||||
if (err < 0)
|
||||
goto put_group;
|
||||
|
||||
host->domain = iommu_domain_alloc(&platform_bus_type);
|
||||
if (!host->domain) {
|
||||
err = -ENOMEM;
|
||||
goto put_cache;
|
||||
}
|
||||
|
||||
err = iommu_attach_group(host->domain, host->group);
|
||||
if (err) {
|
||||
if (err == -ENODEV)
|
||||
err = 0;
|
||||
|
||||
goto free_domain;
|
||||
}
|
||||
|
||||
geometry = &host->domain->geometry;
|
||||
start = geometry->aperture_start & host->info->dma_mask;
|
||||
end = geometry->aperture_end & host->info->dma_mask;
|
||||
|
||||
order = __ffs(host->domain->pgsize_bitmap);
|
||||
init_iova_domain(&host->iova, 1UL << order, start >> order);
|
||||
host->iova_end = end;
|
||||
|
||||
domain = host->domain;
|
||||
}
|
||||
|
||||
return domain;
|
||||
|
||||
free_domain:
|
||||
iommu_domain_free(host->domain);
|
||||
host->domain = NULL;
|
||||
put_cache:
|
||||
iova_cache_put();
|
||||
put_group:
|
||||
iommu_group_put(host->group);
|
||||
host->group = NULL;
|
||||
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static int host1x_iommu_init(struct host1x *host)
|
||||
{
|
||||
u64 mask = host->info->dma_mask;
|
||||
struct iommu_domain *domain;
|
||||
int err;
|
||||
|
||||
domain = host1x_iommu_attach(host);
|
||||
if (IS_ERR(domain)) {
|
||||
err = PTR_ERR(domain);
|
||||
dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we're not behind an IOMMU make sure we don't get push buffers
|
||||
* that are allocated outside of the range addressable by the GATHER
|
||||
* opcode.
|
||||
*
|
||||
* Newer generations of Tegra (Tegra186 and later) support a wide
|
||||
* variant of the GATHER opcode that allows addressing more bits.
|
||||
*/
|
||||
if (!domain && !host->info->has_wide_gather)
|
||||
mask = DMA_BIT_MASK(32);
|
||||
|
||||
err = dma_coerce_mask_and_coherent(host->dev, mask);
|
||||
if (err < 0) {
|
||||
dev_err(host->dev, "failed to set DMA mask: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void host1x_iommu_exit(struct host1x *host)
|
||||
{
|
||||
if (host->domain) {
|
||||
put_iova_domain(&host->iova);
|
||||
iommu_detach_group(host->domain, host->group);
|
||||
|
||||
iommu_domain_free(host->domain);
|
||||
host->domain = NULL;
|
||||
|
||||
iova_cache_put();
|
||||
|
||||
iommu_group_put(host->group);
|
||||
host->group = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int host1x_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct host1x *host;
|
||||
|
@ -237,7 +362,8 @@ static int host1x_probe(struct platform_device *pdev)
|
|||
return PTR_ERR(host->hv_regs);
|
||||
}
|
||||
|
||||
dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
|
||||
host->dev->dma_parms = &host->dma_parms;
|
||||
dma_set_max_seg_size(host->dev, UINT_MAX);
|
||||
|
||||
if (host->info->init) {
|
||||
err = host->info->init(host);
|
||||
|
@ -261,87 +387,42 @@ static int host1x_probe(struct platform_device *pdev)
|
|||
dev_err(&pdev->dev, "failed to get reset: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
|
||||
if (host->dev->archdata.mapping) {
|
||||
struct dma_iommu_mapping *mapping =
|
||||
to_dma_iommu_mapping(host->dev);
|
||||
arm_iommu_detach_device(host->dev);
|
||||
arm_iommu_release_mapping(mapping);
|
||||
}
|
||||
#endif
|
||||
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
|
||||
goto skip_iommu;
|
||||
|
||||
host->group = iommu_group_get(&pdev->dev);
|
||||
if (host->group) {
|
||||
struct iommu_domain_geometry *geometry;
|
||||
u64 mask = dma_get_mask(host->dev);
|
||||
dma_addr_t start, end;
|
||||
unsigned long order;
|
||||
|
||||
err = iova_cache_get();
|
||||
if (err < 0)
|
||||
goto put_group;
|
||||
|
||||
host->domain = iommu_domain_alloc(&platform_bus_type);
|
||||
if (!host->domain) {
|
||||
err = -ENOMEM;
|
||||
goto put_cache;
|
||||
}
|
||||
|
||||
err = iommu_attach_group(host->domain, host->group);
|
||||
if (err) {
|
||||
if (err == -ENODEV) {
|
||||
iommu_domain_free(host->domain);
|
||||
host->domain = NULL;
|
||||
iova_cache_put();
|
||||
iommu_group_put(host->group);
|
||||
host->group = NULL;
|
||||
goto skip_iommu;
|
||||
}
|
||||
|
||||
goto fail_free_domain;
|
||||
}
|
||||
|
||||
geometry = &host->domain->geometry;
|
||||
start = geometry->aperture_start & mask;
|
||||
end = geometry->aperture_end & mask;
|
||||
|
||||
order = __ffs(host->domain->pgsize_bitmap);
|
||||
init_iova_domain(&host->iova, 1UL << order, start >> order);
|
||||
host->iova_end = end;
|
||||
err = host1x_iommu_init(host);
|
||||
if (err < 0) {
|
||||
dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
skip_iommu:
|
||||
err = host1x_channel_list_init(&host->channel_list,
|
||||
host->info->nb_channels);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "failed to initialize channel list\n");
|
||||
goto fail_detach_device;
|
||||
goto iommu_exit;
|
||||
}
|
||||
|
||||
err = clk_prepare_enable(host->clk);
|
||||
if (err < 0) {
|
||||
dev_err(&pdev->dev, "failed to enable clock\n");
|
||||
goto fail_free_channels;
|
||||
goto free_channels;
|
||||
}
|
||||
|
||||
err = reset_control_deassert(host->rst);
|
||||
if (err < 0) {
|
||||
dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
|
||||
goto fail_unprepare_disable;
|
||||
goto unprepare_disable;
|
||||
}
|
||||
|
||||
err = host1x_syncpt_init(host);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "failed to initialize syncpts\n");
|
||||
goto fail_reset_assert;
|
||||
goto reset_assert;
|
||||
}
|
||||
|
||||
err = host1x_intr_init(host, syncpt_irq);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "failed to initialize interrupts\n");
|
||||
goto fail_deinit_syncpt;
|
||||
goto deinit_syncpt;
|
||||
}
|
||||
|
||||
host1x_debug_init(host);
|
||||
|
@ -351,33 +432,22 @@ static int host1x_probe(struct platform_device *pdev)
|
|||
|
||||
err = host1x_register(host);
|
||||
if (err < 0)
|
||||
goto fail_deinit_intr;
|
||||
goto deinit_intr;
|
||||
|
||||
return 0;
|
||||
|
||||
fail_deinit_intr:
|
||||
deinit_intr:
|
||||
host1x_intr_deinit(host);
|
||||
fail_deinit_syncpt:
|
||||
deinit_syncpt:
|
||||
host1x_syncpt_deinit(host);
|
||||
fail_reset_assert:
|
||||
reset_assert:
|
||||
reset_control_assert(host->rst);
|
||||
fail_unprepare_disable:
|
||||
unprepare_disable:
|
||||
clk_disable_unprepare(host->clk);
|
||||
fail_free_channels:
|
||||
free_channels:
|
||||
host1x_channel_list_free(&host->channel_list);
|
||||
fail_detach_device:
|
||||
if (host->group && host->domain) {
|
||||
put_iova_domain(&host->iova);
|
||||
iommu_detach_group(host->domain, host->group);
|
||||
}
|
||||
fail_free_domain:
|
||||
if (host->domain)
|
||||
iommu_domain_free(host->domain);
|
||||
put_cache:
|
||||
if (host->group)
|
||||
iova_cache_put();
|
||||
put_group:
|
||||
iommu_group_put(host->group);
|
||||
iommu_exit:
|
||||
host1x_iommu_exit(host);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -387,18 +457,12 @@ static int host1x_remove(struct platform_device *pdev)
|
|||
struct host1x *host = platform_get_drvdata(pdev);
|
||||
|
||||
host1x_unregister(host);
|
||||
host1x_debug_deinit(host);
|
||||
host1x_intr_deinit(host);
|
||||
host1x_syncpt_deinit(host);
|
||||
reset_control_assert(host->rst);
|
||||
clk_disable_unprepare(host->clk);
|
||||
|
||||
if (host->domain) {
|
||||
put_iova_domain(&host->iova);
|
||||
iommu_detach_group(host->domain, host->group);
|
||||
iommu_domain_free(host->domain);
|
||||
iova_cache_put();
|
||||
iommu_group_put(host->group);
|
||||
}
|
||||
host1x_iommu_exit(host);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -97,6 +97,7 @@ struct host1x_info {
|
|||
int (*init)(struct host1x *host1x); /* initialize per SoC ops */
|
||||
unsigned int sync_offset; /* offset of syncpoint registers */
|
||||
u64 dma_mask; /* mask of addressable memory */
|
||||
bool has_wide_gather; /* supports GATHER_W opcode */
|
||||
bool has_hypervisor; /* has hypervisor registers */
|
||||
unsigned int num_sid_entries;
|
||||
const struct host1x_sid_entry *sid_table;
|
||||
|
@ -140,6 +141,8 @@ struct host1x {
|
|||
struct list_head devices;
|
||||
|
||||
struct list_head list;
|
||||
|
||||
struct device_dma_parameters dma_parms;
|
||||
};
|
||||
|
||||
void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
|
||||
|
|
|
@ -105,7 +105,6 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
|
|||
/* Add nr_completed to trace */
|
||||
trace_host1x_channel_submit_complete(dev_name(channel->dev),
|
||||
waiter->count, waiter->thresh);
|
||||
|
||||
}
|
||||
|
||||
static void action_wakeup(struct host1x_waitlist *waiter)
|
||||
|
|
|
@ -99,6 +99,8 @@ EXPORT_SYMBOL(host1x_job_add_gather);
|
|||
|
||||
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
||||
{
|
||||
struct host1x_client *client = job->client;
|
||||
struct device *dev = client->dev;
|
||||
unsigned int i;
|
||||
int err;
|
||||
|
||||
|
@ -106,8 +108,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
|||
|
||||
for (i = 0; i < job->num_relocs; i++) {
|
||||
struct host1x_reloc *reloc = &job->relocs[i];
|
||||
dma_addr_t phys_addr, *phys;
|
||||
struct sg_table *sgt;
|
||||
dma_addr_t phys_addr;
|
||||
|
||||
reloc->target.bo = host1x_bo_get(reloc->target.bo);
|
||||
if (!reloc->target.bo) {
|
||||
|
@ -115,7 +117,50 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
|||
goto unpin;
|
||||
}
|
||||
|
||||
phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
|
||||
if (client->group)
|
||||
phys = &phys_addr;
|
||||
else
|
||||
phys = NULL;
|
||||
|
||||
sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
|
||||
if (IS_ERR(sgt)) {
|
||||
err = PTR_ERR(sgt);
|
||||
goto unpin;
|
||||
}
|
||||
|
||||
if (sgt) {
|
||||
unsigned long mask = HOST1X_RELOC_READ |
|
||||
HOST1X_RELOC_WRITE;
|
||||
enum dma_data_direction dir;
|
||||
|
||||
switch (reloc->flags & mask) {
|
||||
case HOST1X_RELOC_READ:
|
||||
dir = DMA_TO_DEVICE;
|
||||
break;
|
||||
|
||||
case HOST1X_RELOC_WRITE:
|
||||
dir = DMA_FROM_DEVICE;
|
||||
break;
|
||||
|
||||
case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
|
||||
dir = DMA_BIDIRECTIONAL;
|
||||
break;
|
||||
|
||||
default:
|
||||
err = -EINVAL;
|
||||
goto unpin;
|
||||
}
|
||||
|
||||
err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
|
||||
if (!err) {
|
||||
err = -ENOMEM;
|
||||
goto unpin;
|
||||
}
|
||||
|
||||
job->unpins[job->num_unpins].dev = dev;
|
||||
job->unpins[job->num_unpins].dir = dir;
|
||||
phys_addr = sg_dma_address(sgt->sgl);
|
||||
}
|
||||
|
||||
job->addr_phys[job->num_unpins] = phys_addr;
|
||||
job->unpins[job->num_unpins].bo = reloc->target.bo;
|
||||
|
@ -139,7 +184,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
|||
goto unpin;
|
||||
}
|
||||
|
||||
phys_addr = host1x_bo_pin(g->bo, &sgt);
|
||||
sgt = host1x_bo_pin(host->dev, g->bo, NULL);
|
||||
if (IS_ERR(sgt)) {
|
||||
err = PTR_ERR(sgt);
|
||||
goto unpin;
|
||||
}
|
||||
|
||||
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
|
||||
for_each_sg(sgt->sgl, sg, sgt->nents, j)
|
||||
|
@ -163,15 +212,24 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
|||
goto unpin;
|
||||
}
|
||||
|
||||
job->addr_phys[job->num_unpins] =
|
||||
iova_dma_addr(&host->iova, alloc);
|
||||
job->unpins[job->num_unpins].size = gather_size;
|
||||
phys_addr = iova_dma_addr(&host->iova, alloc);
|
||||
} else {
|
||||
job->addr_phys[job->num_unpins] = phys_addr;
|
||||
err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
|
||||
DMA_TO_DEVICE);
|
||||
if (!err) {
|
||||
err = -ENOMEM;
|
||||
goto unpin;
|
||||
}
|
||||
|
||||
job->unpins[job->num_unpins].dev = host->dev;
|
||||
phys_addr = sg_dma_address(sgt->sgl);
|
||||
}
|
||||
|
||||
job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];
|
||||
job->addr_phys[job->num_unpins] = phys_addr;
|
||||
job->gather_addr_phys[i] = phys_addr;
|
||||
|
||||
job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
|
||||
job->unpins[job->num_unpins].bo = g->bo;
|
||||
job->unpins[job->num_unpins].sgt = sgt;
|
||||
job->num_unpins++;
|
||||
|
@ -436,7 +494,8 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
|
|||
return err;
|
||||
}
|
||||
|
||||
static inline int copy_gathers(struct host1x_job *job, struct device *dev)
|
||||
static inline int copy_gathers(struct device *host, struct host1x_job *job,
|
||||
struct device *dev)
|
||||
{
|
||||
struct host1x_firewall fw;
|
||||
size_t size = 0;
|
||||
|
@ -459,12 +518,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
|
|||
* Try a non-blocking allocation from a higher priority pools first,
|
||||
* as awaiting for the allocation here is a major performance hit.
|
||||
*/
|
||||
job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
|
||||
job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
|
||||
GFP_NOWAIT);
|
||||
|
||||
/* the higher priority allocation failed, try the generic-blocking */
|
||||
if (!job->gather_copy_mapped)
|
||||
job->gather_copy_mapped = dma_alloc_wc(dev, size,
|
||||
job->gather_copy_mapped = dma_alloc_wc(host, size,
|
||||
&job->gather_copy,
|
||||
GFP_KERNEL);
|
||||
if (!job->gather_copy_mapped)
|
||||
|
@ -512,7 +571,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
|
|||
goto out;
|
||||
|
||||
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
|
||||
err = copy_gathers(job, dev);
|
||||
err = copy_gathers(host->dev, job, dev);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
@ -557,6 +616,8 @@ void host1x_job_unpin(struct host1x_job *job)
|
|||
|
||||
for (i = 0; i < job->num_unpins; i++) {
|
||||
struct host1x_job_unpin_data *unpin = &job->unpins[i];
|
||||
struct device *dev = unpin->dev ?: host->dev;
|
||||
struct sg_table *sgt = unpin->sgt;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
|
||||
unpin->size && host->domain) {
|
||||
|
@ -566,14 +627,18 @@ void host1x_job_unpin(struct host1x_job *job)
|
|||
iova_pfn(&host->iova, job->addr_phys[i]));
|
||||
}
|
||||
|
||||
host1x_bo_unpin(unpin->bo, unpin->sgt);
|
||||
if (unpin->dev && sgt)
|
||||
dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
|
||||
unpin->dir);
|
||||
|
||||
host1x_bo_unpin(dev, unpin->bo, sgt);
|
||||
host1x_bo_put(unpin->bo);
|
||||
}
|
||||
|
||||
job->num_unpins = 0;
|
||||
|
||||
if (job->gather_copy_size)
|
||||
dma_free_wc(job->channel->dev, job->gather_copy_size,
|
||||
dma_free_wc(host->dev, job->gather_copy_size,
|
||||
job->gather_copy_mapped, job->gather_copy);
|
||||
}
|
||||
EXPORT_SYMBOL(host1x_job_unpin);
|
||||
|
|
|
@ -8,6 +8,8 @@
|
|||
#ifndef __HOST1X_JOB_H
|
||||
#define __HOST1X_JOB_H
|
||||
|
||||
#include <linux/dma-direction.h>
|
||||
|
||||
struct host1x_job_gather {
|
||||
unsigned int words;
|
||||
dma_addr_t base;
|
||||
|
@ -19,7 +21,9 @@ struct host1x_job_gather {
|
|||
struct host1x_job_unpin_data {
|
||||
struct host1x_bo *bo;
|
||||
struct sg_table *sgt;
|
||||
struct device *dev;
|
||||
size_t size;
|
||||
enum dma_data_direction dir;
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -18,6 +18,7 @@ enum host1x_class {
|
|||
};
|
||||
|
||||
struct host1x_client;
|
||||
struct iommu_group;
|
||||
|
||||
/**
|
||||
* struct host1x_client_ops - host1x client operations
|
||||
|
@ -34,6 +35,7 @@ struct host1x_client_ops {
|
|||
* @list: list node for the host1x client
|
||||
* @parent: pointer to struct device representing the host1x controller
|
||||
* @dev: pointer to struct device backing this host1x client
|
||||
* @group: IOMMU group that this client is a member of
|
||||
* @ops: host1x client operations
|
||||
* @class: host1x class represented by this client
|
||||
* @channel: host1x channel associated with this client
|
||||
|
@ -44,6 +46,7 @@ struct host1x_client {
|
|||
struct list_head list;
|
||||
struct device *parent;
|
||||
struct device *dev;
|
||||
struct iommu_group *group;
|
||||
|
||||
const struct host1x_client_ops *ops;
|
||||
|
||||
|
@ -64,8 +67,9 @@ struct sg_table;
|
|||
struct host1x_bo_ops {
|
||||
struct host1x_bo *(*get)(struct host1x_bo *bo);
|
||||
void (*put)(struct host1x_bo *bo);
|
||||
dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
|
||||
void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
|
||||
struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
|
||||
dma_addr_t *phys);
|
||||
void (*unpin)(struct device *dev, struct sg_table *sgt);
|
||||
void *(*mmap)(struct host1x_bo *bo);
|
||||
void (*munmap)(struct host1x_bo *bo, void *addr);
|
||||
void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
|
||||
|
@ -92,15 +96,17 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
|
|||
bo->ops->put(bo);
|
||||
}
|
||||
|
||||
static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
|
||||
struct sg_table **sgt)
|
||||
static inline struct sg_table *host1x_bo_pin(struct device *dev,
|
||||
struct host1x_bo *bo,
|
||||
dma_addr_t *phys)
|
||||
{
|
||||
return bo->ops->pin(bo, sgt);
|
||||
return bo->ops->pin(dev, bo, phys);
|
||||
}
|
||||
|
||||
static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
|
||||
static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
|
||||
struct sg_table *sgt)
|
||||
{
|
||||
bo->ops->unpin(bo, sgt);
|
||||
bo->ops->unpin(dev, sgt);
|
||||
}
|
||||
|
||||
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
|
||||
|
@ -158,7 +164,7 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
|
|||
struct host1x_channel;
|
||||
struct host1x_job;
|
||||
|
||||
struct host1x_channel *host1x_channel_request(struct device *dev);
|
||||
struct host1x_channel *host1x_channel_request(struct host1x_client *client);
|
||||
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
|
||||
void host1x_channel_put(struct host1x_channel *channel);
|
||||
int host1x_job_submit(struct host1x_job *job);
|
||||
|
@ -167,6 +173,9 @@ int host1x_job_submit(struct host1x_job *job);
|
|||
* host1x job
|
||||
*/
|
||||
|
||||
#define HOST1X_RELOC_READ (1 << 0)
|
||||
#define HOST1X_RELOC_WRITE (1 << 1)
|
||||
|
||||
struct host1x_reloc {
|
||||
struct {
|
||||
struct host1x_bo *bo;
|
||||
|
@ -177,6 +186,7 @@ struct host1x_reloc {
|
|||
unsigned long offset;
|
||||
} target;
|
||||
unsigned long shift;
|
||||
unsigned long flags;
|
||||
};
|
||||
|
||||
struct host1x_job {
|
||||
|
|
Loading…
Reference in New Issue
Block a user