Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next
A couple of fixes from the previous pull request as well as gl3 support.
There is one drm core change: an export of a previously private function.
Take 2 implementing screen targets, this time with the fbdev code adjusted
accordingly. Also there is an implementation of register-driven command
buffers, which overrides the FIFO ring for command processing. It's needed
for our upcoming hardware revision.

* 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux: (35 commits)
  drm/vmwgfx: Fix copyright headers
  drm/vmwgfx: Add DX query support. Various fixes.
  drm/vmwgfx: Add command parser support for a couple of DX commands
  drm/vmwgfx: Command parser fixes for DX
  drm/vmwgfx: Initial DX support
  drm/vmwgfx: Update device includes for DX device functionality
  drm: export the DRM permission check code
  drm/vmwgfx: Fix crash when unloading vmwgfx v2
  drm/vmwgfx: Fix framebuffer creation on older hardware
  drm/vmwgfx: Fixed topology boundary checking for Screen Targets
  drm/vmwgfx: Fix an uninitialized value
  drm/vmwgfx: Fix compiler warning with 32-bit dma_addr_t
  drm/vmwgfx: Kill a bunch of sparse warnings
  drm/vmwgfx: Fix kms preferred mode sorting
  drm/vmwgfx: Reinstate the legacy display system dirty callback
  drm/vmwgfx: Implement fbdev on kms v2
  drm/vmwgfx: Add a kernel interface to create a framebuffer v2
  drm/vmwgfx: Avoid cmdbuf alloc sleeping if !TASK_RUNNING
  drm/vmwgfx: Convert screen targets to new helpers v3
  drm/vmwgfx: Convert screen objects to the new helpers
  ...
commit 294947a5c7
@@ -480,7 +480,7 @@ static int drm_version(struct drm_device *dev, void *data,
  * indicated permissions. If so, returns zero. Otherwise returns an
  * error code suitable for ioctl return.
  */
-static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 {
 	/* ROOT_ONLY is only for CAP_SYS_ADMIN */
 	if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -508,6 +508,7 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_ioctl_permit);
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags)	\
 	[DRM_IOCTL_NR(ioctl)] = {		\
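The hunk above un-statics drm_ioctl_permit() and exports it, so a driver can apply the core permission checks on an ioctl path it dispatches itself. A minimal sketch of such a use, with a hypothetical vmw_wrapped_ioctl() standing in for a driver entry point; only drm_ioctl_permit(), drm_ioctl() and the DRM_* flags are real DRM interfaces here:

	#include <drm/drmP.h>

	/* Hypothetical driver ioctl path that reuses the exported check. */
	static long vmw_wrapped_ioctl(struct file *filp, unsigned int cmd,
				      unsigned long arg)
	{
		struct drm_file *file_priv = filp->private_data;
		int ret;

		/* Same gatekeeping drm_ioctl() applies from its ioctl table:
		 * authenticated clients only, render nodes also allowed. */
		ret = drm_ioctl_permit(DRM_AUTH | DRM_RENDER_ALLOW, file_priv);
		if (ret)
			return ret;

		return drm_ioctl(filp, cmd, arg);
	}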
@@ -7,6 +7,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
-	    vmwgfx_cmdbuf_res.o \
+	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
drivers/gpu/drm/vmwgfx/device_include/includeCheck.h (new file, 3 lines)

@@ -0,0 +1,3 @@
+/*
+ * Intentionally empty file.
+ */
drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h (new file, 110 lines)

@@ -0,0 +1,110 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_caps.h --
+ *
+ *    Definitions for SVGA3D hardware capabilities. Capabilities
+ *    are used to query for optional rendering features during
+ *    driver initialization. The capability data is stored as very
+ *    basic key/value dictionary within the "FIFO register" memory
+ *    area at the beginning of BAR2.
+ *
+ *    Note that these definitions are only for 3D capabilities.
+ *    The SVGA device also has "device capabilities" and "FIFO
+ *    capabilities", which are non-3D-specific and are stored as
+ *    bitfields rather than key/value pairs.
+ */
+
+#ifndef _SVGA3D_CAPS_H_
+#define _SVGA3D_CAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#define SVGA_FIFO_3D_CAPS_SIZE   (SVGA_FIFO_3D_CAPS_LAST - \
+                                  SVGA_FIFO_3D_CAPS + 1)
+
+
+/*
+ * SVGA3dCapsRecordType
+ *
+ *    Record types that can be found in the caps block.
+ *    Related record types are grouped together numerically so that
+ *    SVGA3dCaps_FindRecord() can be applied on a range of record
+ *    types.
+ */
+
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN     = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS     = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+
+/*
+ * SVGA3dCapsRecordHeader
+ *
+ *    Header field leading each caps block record. Contains the offset (in
+ *    register words, NOT bytes) to the next caps block record (or the end
+ *    of caps block records which will be a zero word) and the record type
+ *    as defined above.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecordHeader;
+
+
+/*
+ * SVGA3dCapsRecord
+ *
+ *    Caps block record; "data" is a placeholder for the actual data structure
+ *    contained within the record;
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
+#endif
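As the comment block above describes, the caps area is a sequence of records, each led by an SVGA3dCapsRecordHeader whose length field counts 32-bit register words to the next record, with a zero word terminating the list. A minimal lookup sketch in that style (the name mirrors the SVGA3dCaps_FindRecord() the comment mentions; 'caps' is assumed to point at the SVGA_FIFO_3D_CAPS words):

	static const SVGA3dCapsRecord *
	svga3d_find_caps_record(const uint32 *caps, SVGA3dCapsRecordType type)
	{
		const uint32 *cur = caps;

		while (*cur != 0) {	/* a zero word ends the record list */
			const SVGA3dCapsRecord *record =
				(const SVGA3dCapsRecord *)cur;

			if (record->header.type == type)
				return record;
			cur += record->header.length;	/* length is in words */
		}
		return NULL;
	}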
drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h (new file, 2071 lines; diff suppressed because it is too large)
drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h (new file, 457 lines)

@@ -0,0 +1,457 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_devcaps.h --
+ *
+ *    SVGA 3d caps definitions
+ */
+
+#ifndef _SVGA3D_DEVCAPS_H_
+#define _SVGA3D_DEVCAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * 3D Hardware Version
+ *
+ *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ *   register.   Is set by the host and read by the guest.  This lets
+ *   us make new guest drivers which are backwards-compatible with old
+ *   SVGA hardware revisions.  It does not let us support old guest
+ *   drivers.  Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor)  (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version)      ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version)      ((version) & 0xFF)
+
+typedef enum {
+   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
+   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
+   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
+   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
+   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
+   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
+   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
+} SVGA3dHardwareVersion;
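A quick worked example of the packing above: SVGA3D_MAKE_HWVERSION(2, 1) evaluates to (2 << 16) | (1 & 0xFF) = 0x20001, which is SVGA3D_HWVERSION_WS8_B1, and the extractor macros invert it:

	uint32 v     = SVGA3D_MAKE_HWVERSION(2, 1);  /* 0x00020001 */
	uint32 major = SVGA3D_MAJOR_HWVERSION(v);    /* 0x20001 >> 16  == 2 */
	uint32 minor = SVGA3D_MINOR_HWVERSION(v);    /* 0x20001 & 0xFF == 1 */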
+
+/*
+ * DevCap indexes.
+ */
+
+typedef enum {
+   SVGA3D_DEVCAP_INVALID = ((uint32)-1),
+   SVGA3D_DEVCAP_3D = 0,
+   SVGA3D_DEVCAP_MAX_LIGHTS = 1,
+
+   /*
+    * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+    * fixed-function texture units available. Each of these units
+    * work in both FFP and Shader modes, and they support texture
+    * transforms and texture coordinates. The host may have additional
+    * texture image units that are only usable with shaders.
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURES = 2,
+   SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
+   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
+   SVGA3D_DEVCAP_VERTEX_SHADER = 5,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
+   SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
+   SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
+   SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
+   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
+   SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12,
+   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13,
+   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14,
+   SVGA3D_DEVCAP_QUERY_TYPES = 15,
+   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
+   SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
+   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
+   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
+   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
+   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
+   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
+   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
+   SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
+   SVGA3D_DEVCAP_TEXTURE_OPS = 31,
+   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
+   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
+   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
+   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
+   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
+   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
+   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
+   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
+   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
+   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
+   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
+
+   /*
+    * There is a hole in our devcap definitions for
+    * historical reasons.
+    *
+    * Define a constant just for completeness.
+    */
+   SVGA3D_DEVCAP_MISSING62 = 62,
+
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
+
+   /*
+    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+    * render targets. This does not include the depth or stencil targets.
+    */
+   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
+
+   SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
+   SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
+   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
+   SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
+   SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
+   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
+   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
+   SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
+   SVGA3D_DEVCAP_SUPERSAMPLE = 73,
+   SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
+   SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
+   SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
+
+   /*
+    * This is the maximum number of SVGA context IDs that the guest
+    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+    */
+   SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
+
+   /*
+    * This is the maximum number of SVGA surface IDs that the guest
+    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+    */
+   SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
+
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
+
+   SVGA3D_DEVCAP_SURFACEFMT_ATI1 = 82,
+   SVGA3D_DEVCAP_SURFACEFMT_ATI2 = 83,
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD1 = 84,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+    * ored together, one for every type of video decoding supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_DECODE = 85,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+    * ored together, one for every type of video processing supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
+
+   SVGA3D_DEVCAP_LINE_AA = 87,            /* boolean */
+   SVGA3D_DEVCAP_LINE_STIPPLE = 88,       /* boolean */
+   SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89,     /* float */
+   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90,  /* float */
+
+   SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
+
+   /*
+    * Does the host support the SVGA logic ops commands?
+    */
+   SVGA3D_DEVCAP_LOGICOPS = 92,
+
+   /*
+    * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+    */
+   SVGA3D_DEVCAP_TS_COLOR_KEY = 93,       /* boolean */
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD2 = 94,
+
+   /*
+    * Does the device support the DX commands?
+    */
+   SVGA3D_DEVCAP_DX = 95,
+
+   /*
+    * What is the maximum size of a texture array?
+    *
+    * (Even if this cap is zero, cubemaps are still allowed.)
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
+
+   /*
+    * What is the maximum number of vertex buffers that can
+    * be used in the DXContext inputAssembly?
+    */
+   SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
+
+   /*
+    * What is the maximum number of constant buffers
+    * that can be expected to work correctly with a
+    * DX context?
+    */
+   SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
+
+   /*
+    * Does the device support provoking vertex control?
+    * If zero, the first vertex will always be the provoking vertex.
+    */
+   SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
+
+   SVGA3D_DEVCAP_DXFMT_X8R8G8B8 = 100,
+   SVGA3D_DEVCAP_DXFMT_A8R8G8B8 = 101,
+   SVGA3D_DEVCAP_DXFMT_R5G6B5 = 102,
+   SVGA3D_DEVCAP_DXFMT_X1R5G5B5 = 103,
+   SVGA3D_DEVCAP_DXFMT_A1R5G5B5 = 104,
+   SVGA3D_DEVCAP_DXFMT_A4R4G4B4 = 105,
+   SVGA3D_DEVCAP_DXFMT_Z_D32 = 106,
+   SVGA3D_DEVCAP_DXFMT_Z_D16 = 107,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8 = 108,
+   SVGA3D_DEVCAP_DXFMT_Z_D15S1 = 109,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8 = 110,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 = 111,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE16 = 112,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 = 113,
+   SVGA3D_DEVCAP_DXFMT_DXT1 = 114,
+   SVGA3D_DEVCAP_DXFMT_DXT2 = 115,
+   SVGA3D_DEVCAP_DXFMT_DXT3 = 116,
+   SVGA3D_DEVCAP_DXFMT_DXT4 = 117,
+   SVGA3D_DEVCAP_DXFMT_DXT5 = 118,
+   SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
+   SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
+   SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
+   SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
+   SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
+   SVGA3D_DEVCAP_DXFMT_V8U8 = 126,
+   SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 = 127,
+   SVGA3D_DEVCAP_DXFMT_CxV8U8 = 128,
+   SVGA3D_DEVCAP_DXFMT_X8L8V8U8 = 129,
+   SVGA3D_DEVCAP_DXFMT_A2W10V10U10 = 130,
+   SVGA3D_DEVCAP_DXFMT_ALPHA8 = 131,
+   SVGA3D_DEVCAP_DXFMT_R_S10E5 = 132,
+   SVGA3D_DEVCAP_DXFMT_R_S23E8 = 133,
+   SVGA3D_DEVCAP_DXFMT_RG_S10E5 = 134,
+   SVGA3D_DEVCAP_DXFMT_RG_S23E8 = 135,
+   SVGA3D_DEVCAP_DXFMT_BUFFER = 136,
+   SVGA3D_DEVCAP_DXFMT_Z_D24X8 = 137,
+   SVGA3D_DEVCAP_DXFMT_V16U16 = 138,
+   SVGA3D_DEVCAP_DXFMT_G16R16 = 139,
+   SVGA3D_DEVCAP_DXFMT_A16B16G16R16 = 140,
+   SVGA3D_DEVCAP_DXFMT_UYVY = 141,
+   SVGA3D_DEVCAP_DXFMT_YUY2 = 142,
+   SVGA3D_DEVCAP_DXFMT_NV12 = 143,
+   SVGA3D_DEVCAP_DXFMT_AYUV = 144,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS = 145,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT = 146,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT = 147,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS = 148,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT = 149,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT = 150,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT = 151,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS = 152,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT = 153,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM = 154,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT = 155,
+   SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS = 156,
+   SVGA3D_DEVCAP_DXFMT_R32G32_UINT = 157,
+   SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
+   SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
+   SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
+   SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS = 166,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM = 167,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB = 168,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT = 169,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT = 170,
+   SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS = 171,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UINT = 172,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SINT = 173,
+   SVGA3D_DEVCAP_DXFMT_R32_TYPELESS = 174,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT = 175,
+   SVGA3D_DEVCAP_DXFMT_R32_UINT = 176,
+   SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
+   SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
+   SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
+   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
+   SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
+   SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SINT = 185,
+   SVGA3D_DEVCAP_DXFMT_R16_TYPELESS = 186,
+   SVGA3D_DEVCAP_DXFMT_R16_UNORM = 187,
+   SVGA3D_DEVCAP_DXFMT_R16_UINT = 188,
+   SVGA3D_DEVCAP_DXFMT_R16_SNORM = 189,
+   SVGA3D_DEVCAP_DXFMT_R16_SINT = 190,
+   SVGA3D_DEVCAP_DXFMT_R8_TYPELESS = 191,
+   SVGA3D_DEVCAP_DXFMT_R8_UNORM = 192,
+   SVGA3D_DEVCAP_DXFMT_R8_UINT = 193,
+   SVGA3D_DEVCAP_DXFMT_R8_SNORM = 194,
+   SVGA3D_DEVCAP_DXFMT_R8_SINT = 195,
+   SVGA3D_DEVCAP_DXFMT_P8 = 196,
+   SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP = 197,
+   SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM = 198,
+   SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM = 199,
+   SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS = 200,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB = 201,
+   SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS = 202,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB = 203,
+   SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS = 204,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB = 205,
+   SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS = 206,
+   SVGA3D_DEVCAP_DXFMT_ATI1 = 207,
+   SVGA3D_DEVCAP_DXFMT_BC4_SNORM = 208,
+   SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS = 209,
+   SVGA3D_DEVCAP_DXFMT_ATI2 = 210,
+   SVGA3D_DEVCAP_DXFMT_BC5_SNORM = 211,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM = 212,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS = 213,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB = 214,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS = 215,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB = 216,
+   SVGA3D_DEVCAP_DXFMT_Z_DF16 = 217,
+   SVGA3D_DEVCAP_DXFMT_Z_DF24 = 218,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT = 219,
+   SVGA3D_DEVCAP_DXFMT_YV12 = 220,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT = 221,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT = 222,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM = 223,
+   SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT = 224,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM = 225,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM = 226,
+   SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT = 227,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UNORM = 228,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SNORM = 229,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT = 230,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SNORM = 231,
+   SVGA3D_DEVCAP_DXFMT_R16_FLOAT = 232,
+   SVGA3D_DEVCAP_DXFMT_D16_UNORM = 233,
+   SVGA3D_DEVCAP_DXFMT_A8_UNORM = 234,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM = 235,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM = 236,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM = 237,
+   SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM = 238,
+   SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM = 239,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM = 240,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM = 241,
+   SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
+   SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
+
+   SVGA3D_DEVCAP_MAX       /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+/*
+ * Bit definitions for DXFMT devcaps
+ *
+ *
+ * SUPPORTED: Can the format be defined?
+ * SHADER_SAMPLE: Can the format be sampled from a shader?
+ * COLOR_RENDERTARGET: Can the format be a color render target?
+ * DEPTH_RENDERTARGET: Can the format be a depth render target?
+ * BLENDABLE: Is the format blendable?
+ * MIPS: Does the format support mip levels?
+ * ARRAY: Does the format support texture arrays?
+ * VOLUME: Does the format support having volume?
+ * MULTISAMPLE_2: Does the format support 2x multisample?
+ * MULTISAMPLE_4: Does the format support 4x multisample?
+ * MULTISAMPLE_8: Does the format support 8x multisample?
+ */
+#define SVGA3D_DXFMT_SUPPORTED             (1 << 0)
+#define SVGA3D_DXFMT_SHADER_SAMPLE         (1 << 1)
+#define SVGA3D_DXFMT_COLOR_RENDERTARGET    (1 << 2)
+#define SVGA3D_DXFMT_DEPTH_RENDERTARGET    (1 << 3)
+#define SVGA3D_DXFMT_BLENDABLE             (1 << 4)
+#define SVGA3D_DXFMT_MIPS                  (1 << 5)
+#define SVGA3D_DXFMT_ARRAY                 (1 << 6)
+#define SVGA3D_DXFMT_VOLUME                (1 << 7)
+#define SVGA3D_DXFMT_DX_VERTEX_BUFFER      (1 << 8)
+#define SVGADX_DXFMT_MULTISAMPLE_2         (1 << 9)
+#define SVGADX_DXFMT_MULTISAMPLE_4         (1 << 10)
+#define SVGADX_DXFMT_MULTISAMPLE_8         (1 << 11)
+#define SVGADX_DXFMT_MAX                   (1 << 12)
+
+/*
+ * Convenience mask for any multisample capability.
+ *
+ * The multisample bits imply both load and render capability.
+ */
+#define SVGA3D_DXFMT_MULTISAMPLE ( \
+           SVGADX_DXFMT_MULTISAMPLE_2 | \
+           SVGADX_DXFMT_MULTISAMPLE_4 | \
+           SVGADX_DXFMT_MULTISAMPLE_8 )
+
+typedef union {
+   Bool   b;
+   uint32 u;
+   int32  i;
+   float  f;
+} SVGA3dDevCapResult;
+
+#endif /* _SVGA3D_DEVCAPS_H_ */
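A short sketch of how a guest driver might consume one of these indexes together with the DXFMT bits above; vmw_read_devcap() is a hypothetical accessor standing in for whatever register pair the device exposes, and only the index, bit, and union names come from the header:

	extern uint32 vmw_read_devcap(SVGA3dDevCapIndex index);  /* hypothetical */

	static bool vmw_rgba8_renderable(void)
	{
		SVGA3dDevCapResult result;

		result.u = vmw_read_devcap(SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM);

		/* A format must be definable at all before any other bit applies. */
		return (result.u & SVGA3D_DXFMT_SUPPORTED) &&
		       (result.u & SVGA3D_DXFMT_COLOR_RENDERTARGET);
	}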
drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h (new file, 1487 lines; diff suppressed because it is too large)
drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h (new file, 99 lines)

@@ -0,0 +1,99 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_limits.h --
+ *
+ *    SVGA 3d hardware limits
+ */
+
+#ifndef _SVGA3D_LIMITS_H_
+#define _SVGA3D_LIMITS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#define SVGA3D_NUM_CLIPPLANES                   6
+#define SVGA3D_MAX_RENDER_TARGETS               8
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  (SVGA3D_MAX_RENDER_TARGETS)
+#define SVGA3D_MAX_UAVIEWS                      8
+#define SVGA3D_MAX_CONTEXT_IDS                  256
+#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
+
+/*
+ * Maximum ID a shader can be assigned on a given context.
+ */
+#define SVGA3D_MAX_SHADERIDS                    5000
+/*
+ * Maximum number of shaders of a given type that can be defined
+ * (including all contexts).
+ */
+#define SVGA3D_MAX_SIMULTANEOUS_SHADERS         20000
+
+#define SVGA3D_NUM_TEXTURE_UNITS                32
+#define SVGA3D_NUM_LIGHTS                       8
+
+/*
+ * Maximum size in dwords of shader text the SVGA device will allow.
+ * Currently 8 MB.
+ */
+#define SVGA3D_MAX_SHADER_MEMORY  (8 * 1024 * 1024 / sizeof(uint32))
+
+#define SVGA3D_MAX_CLIP_PLANES    6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Number of faces in a cubemap.
+ */
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+/*
+ * Maximum number of array indexes in a GB surface (with DX enabled).
+ */
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS   32
+
+/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+#endif /* _SVGA3D_LIMITS_H_ */
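Note that SVGA3D_MAX_SHADER_MEMORY counts dwords rather than bytes, so a byte-sized validation has to convert first. A hedged sketch (vmw_shader_size_ok is illustrative, not from this diff):

	static bool vmw_shader_size_ok(size_t size_bytes)
	{
		/* Shader text is dword granular; 8 MB is the documented cap. */
		if (size_bytes % sizeof(uint32) != 0)
			return false;
		return size_bytes / sizeof(uint32) <= SVGA3D_MAX_SHADER_MEMORY;
	}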
drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h (new file, 50 lines)

@@ -0,0 +1,50 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ *    SVGA 3d hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#include "svga3d_types.h"
+#include "svga3d_limits.h"
+#include "svga3d_cmd.h"
+#include "svga3d_dx.h"
+#include "svga3d_devcaps.h"
+
+
+#endif /* _SVGA3D_REG_H_ */
drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h (new file, 1204 lines; diff suppressed because it is too large)

drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h (new file, 1633 lines; diff suppressed because it is too large)
@@ -1,5 +1,5 @@
 /**********************************************************
- * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation

@@ -1,5 +1,5 @@
 /**********************************************************
- * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -152,19 +152,17 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
 	switch (format) {
 	case VMWARE_FOURCC_YV12:
 		*height = (*height + 1) & ~1;
-		*size = (*width + 3) & ~3;
+		*size = (*width) * (*height);
 
 		if (pitches) {
-			pitches[0] = *size;
+			pitches[0] = *width;
 		}
 
-		*size *= *height;
-
 		if (offsets) {
 			offsets[1] = *size;
 		}
 
-		tmp = ((*width >> 1) + 3) & ~3;
+		tmp = *width >> 1;
 
 		if (pitches) {
 			pitches[1] = pitches[2] = tmp;
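The change drops the old 4-byte row rounding and computes the YV12 layout from the raw width. Worked numbers for a 64x48 frame, following the new lines above:

	/*
	 * height     = (48 + 1) & ~1 = 48    rounded up to even
	 * size       = 64 * 48       = 3072  Y plane bytes
	 * pitches[0] = 64                    Y plane stride
	 * offsets[1] = 3072                  start of the first chroma plane
	 * tmp        = 64 >> 1       = 32    chroma stride (pitches[1], [2])
	 */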
(one further file's diff suppressed because it is too large)
drivers/gpu/drm/vmwgfx/device_include/svga_types.h (new file, 46 lines)

@@ -0,0 +1,46 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+#define MAX_UINT16 U16_MAX
+
+#endif
drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h (new file, 21 lines)

@@ -0,0 +1,21 @@
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+
+#endif
drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h (new file, 25 lines)

@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#include <linux/compiler.h>
drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h (new file, 25 lines)

@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+__packed
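Taken together, vmware_pack_begin.h and vmware_pack_end.h let the shared device headers declare packed structs portably; in the kernel build the pair reduces to __packed from <linux/compiler.h>. An illustration of the include-pair pattern (the struct name here is made up; the real uses appear in svga3d_caps.h above):

	typedef
	#include "vmware_pack_begin.h"
	struct SVGAExampleRecord {	/* hypothetical */
		uint32 length;
		uint32 type;
	}
	#include "vmware_pack_end.h"
	SVGAExampleRecord;	/* expands to a __packed struct in-kernel */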
(one further file's diff suppressed because it is too large)
@@ -1,912 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
-
-#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
-#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
-#define min_t(type, x, y)  ((x) < (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
-#define u64 uint64_t
-#define U32_MAX ((u32)~0U)
-
-#endif /* __KERNEL__ */
-
-#include "svga3d_reg.h"
-
-/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at-most four active channels in a block:
- *    1. Red, bump W, luminance and depth are stored in the first channel.
- *    2. Green, bump V and stencil are stored in the second channel.
- *    3. Blue and bump U are stored in the third channel.
- *    4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- *    1. For compressed formats, only the data channel is used and its size
- *       is equal to that of a singular block in the compression scheme.
- *    2. For buffer formats, only the data channel is used and its size is
- *       exactly one byte in length.
- *    3. In each case the bit depth represent the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
- */
-
-enum svga3d_block_desc {
-	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
-	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with red channel data */
-	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel data */
-	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video U and V */
-	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel data */
-	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel data */
-	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil channel */
-	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with blue channel data */
-	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel data */
-	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel data */
-	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance data */
-	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
-	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha channel */
-	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel data */
-	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of data */
-	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of data
-	                                            depending on the compression
-	                                            method used */
-	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
-	                                            floating point representation
-	                                            in all channels */
-	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store data. */
-	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
-	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
-	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
-	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
-	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV, e.g., NV12. */
-	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate Y, U, V,
-	                                            e.g., YV12. */
-
-	SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
-	                             SVGA3DBLOCKDESC_GREEN,
-	SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
-	                             SVGA3DBLOCKDESC_BLUE,
-	SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
-	                             SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
-	                             SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
-	                             SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
-	                             SVGA3DBLOCKDESC_V,
-	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
-	                             SVGA3DBLOCKDESC_LUMINANCE,
-	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
-	                             SVGA3DBLOCKDESC_W,
-	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
-	                             SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
-	                             SVGA3DBLOCKDESC_V |
-	                             SVGA3DBLOCKDESC_W |
-	                             SVGA3DBLOCKDESC_Q,
-	SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
-	                             SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
-	                             SVGA3DBLOCKDESC_IEEE_FP,
-	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
-	                             SVGA3DBLOCKDESC_GREEN,
-	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
-	                             SVGA3DBLOCKDESC_BLUE,
-	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
-	                             SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
-	                             SVGA3DBLOCKDESC_STENCIL,
-	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
-	                             SVGA3DBLOCKDESC_Y,
-	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
-	                             SVGA3DBLOCKDESC_Y |
-	                             SVGA3DBLOCKDESC_U_VIDEO |
-	                             SVGA3DBLOCKDESC_V_VIDEO,
-	SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
-	                             SVGA3DBLOCKDESC_EXP,
-	SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
-	                                  SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-	                             SVGA3DBLOCKDESC_2PLANAR_YUV,
-	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-	                             SVGA3DBLOCKDESC_3PLANAR_YUV,
-};
-
-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- *    1. Block description.
- *    2. Dimensions of a block in the surface.
- *    3. Size of block in bytes.
- *    4. Bit depth of the pixel data.
- *    5. Channel bit depths and masks (if applicable).
- */
-#define SVGA3D_CHANNEL_DEF(type)		\
-	struct {				\
-		union {				\
-			type blue;		\
-			type u;			\
-			type uv_video;		\
-			type u_video;		\
-		};				\
-		union {				\
-			type green;		\
-			type v;			\
-			type stencil;		\
-			type v_video;		\
-		};				\
-		union {				\
-			type red;		\
-			type w;			\
-			type luminance;		\
-			type y;			\
-			type depth;		\
-			type data;		\
-		};				\
-		union {				\
-			type alpha;		\
-			type q;			\
-			type exp;		\
-		};				\
-	}
-
-struct svga3d_surface_desc {
-	enum svga3d_block_desc block_desc;
-	surf_size_struct block_size;
-	u32 bytes_per_block;
-	u32 pitch_bytes_per_block;
-
-	struct {
-		u32 total;
-		SVGA3D_CHANNEL_DEF(uint8);
-	} bit_depth;
-
-	struct {
-		SVGA3D_CHANNEL_DEF(uint8);
-	} bit_offset;
-};
-
-static const struct svga3d_surface_desc svga3d_surface_descs[] = {
-	{SVGA3DBLOCKDESC_NONE,
-	 {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_FORMAT_INVALID */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_X8R8G8B8 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_A8R8G8B8 */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
-	 {{{0}, {5}, {11}, {0} } } },   /* SVGA3D_R5G6B5 */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
-	 {{{0}, {5}, {10}, {0} } } },   /* SVGA3D_X1R5G5B5 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
-	 {{{0}, {5}, {10}, {15} } } },   /* SVGA3D_A1R5G5B5 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
-	 {{{0}, {4}, {8}, {12} } } },   /* SVGA3D_A4R4G4B4 */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_Z_D32 */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_Z_D16 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },   /* SVGA3D_Z_D24S8 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
-	 {{{0}, {15}, {0}, {0} } } },   /* SVGA3D_Z_D15S1 */
-
-	{SVGA3DBLOCKDESC_LUMINANCE,
-	 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_LUMINANCE8 */
-
-	{SVGA3DBLOCKDESC_LA,
-	 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
-	 {{{0}, {0}, {0}, {4} } } },   /* SVGA3D_LUMINANCE4_ALPHA4 */
-
-	{SVGA3DBLOCKDESC_LUMINANCE,
-	 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_LUMINANCE16 */
-
-	{SVGA3DBLOCKDESC_LA,
-	 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
-	 {{{0}, {0}, {0}, {8} } } },   /* SVGA3D_LUMINANCE8_ALPHA8 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT1 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT2 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT3 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT4 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_DXT5 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
-	 {{{0}, {0}, {0}, {8} } } },   /* SVGA3D_BUMPU8V8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
-	 {{{11}, {6}, {0}, {0} } } },   /* SVGA3D_BUMPL6V5U5 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },   /* SVGA3D_BUMPX8L8V8U8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },   /* SVGA3D_BUMPL8V8U8 */
-
-	{SVGA3DBLOCKDESC_RGBA_FP,
-	 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_ARGB_S10E5 */
-
-	{SVGA3DBLOCKDESC_RGBA_FP,
-	 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },   /* SVGA3D_ARGB_S23E8 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },   /* SVGA3D_A2R10G10B10 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },   /* SVGA3D_V8U8 */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{24}, {16}, {8}, {0} } } },   /* SVGA3D_Q8W8V8U8 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },   /* SVGA3D_CxV8U8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },   /* SVGA3D_X8L8V8U8 */
-
-	{SVGA3DBLOCKDESC_UVWA,
-	 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },   /* SVGA3D_A2W10V10U10 */
-
-	{SVGA3DBLOCKDESC_ALPHA,
-	 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_ALPHA8 */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R_S10E5 */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R_S23E8 */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },   /* SVGA3D_RG_S10E5 */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_RG_S23E8 */
-
-	{SVGA3DBLOCKDESC_BUFFER,
-	 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_BUFFER */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },   /* SVGA3D_Z_D24X8 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
-	 {{{16}, {0}, {0}, {0} } } },   /* SVGA3D_V16U16 */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {0}, {16}, {0} } } },   /* SVGA3D_G16R16 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_A16B16G16R16 */
-
-	{SVGA3DBLOCKDESC_YUV,
-	 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {8}, {0} } } },   /* SVGA3D_UYVY */
-
-	{SVGA3DBLOCKDESC_YUV,
-	 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },   /* SVGA3D_YUY2 */
-
-	{SVGA3DBLOCKDESC_NV12,
-	 {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_NV12 */
-
-	{SVGA3DBLOCKDESC_AYUV,
-	 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_AYUV */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },   /* SVGA3D_R32G32B32A32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },   /* SVGA3D_R32G32B32A32_UINT */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },   /* SVGA3D_R32G32B32A32_SINT */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGB_FP,
-	 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_FLOAT */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_UINT */
-
-	{SVGA3DBLOCKDESC_UVW,
-	 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_SINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_R16G16B16A16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_R16G16B16A16_UINT */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_R16G16B16A16_SNORM */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },   /* SVGA3D_R16G16B16A16_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_R32G8X24_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_D32_FLOAT_S8X24_UINT */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
-
-	{SVGA3DBLOCKDESC_GREEN,
-	 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },   /* SVGA3D_X32_TYPELESS_G8X24_UINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },   /* SVGA3D_R10G10B10A2_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },   /* SVGA3D_R10G10B10A2_UINT */
-
-	{SVGA3DBLOCKDESC_RGB_FP,
-	 {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
-	 {{{0}, {10}, {21}, {0} } } },   /* SVGA3D_R11G11B10_FLOAT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBA_SRGB,
-	 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },   /* SVGA3D_R16G16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },   /* SVGA3D_R16G16_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },   /* SVGA3D_R16G16_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },   /* SVGA3D_R32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_DS,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_GREEN,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_UV,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_U,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_U,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_U,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_U,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBE,
|
||||
{1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
|
||||
{{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
|
||||
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA_SRGB,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB,
|
||||
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB_SRGB,
|
||||
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_DEPTH,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
|
||||
|
||||
{SVGA3DBLOCKDESC_DS,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
|
||||
|
||||
{SVGA3DBLOCKDESC_DS,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
|
||||
};
|
||||
|
||||
static inline u32 clamped_umul32(u32 a, u32 b)
{
	u64 tmp = (u64) a*b;
	return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
}
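The multiply above saturates rather than wraps. A minimal standalone sketch of the same behaviour (illustrative only, not part of the driver; userspace spells the kernel's U32_MAX as UINT32_MAX):

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_clamped_umul32(uint32_t a, uint32_t b)
{
	/* Widen to 64 bits, then clamp instead of truncating. */
	uint64_t tmp = (uint64_t)a * b;
	return (tmp > (uint64_t)UINT32_MAX) ? UINT32_MAX : (uint32_t)tmp;
}

int main(void)
{
	/* 65536 * 65536 == 2^32 overflows u32, so the result clamps. */
	printf("%u\n", demo_clamped_umul32(65536, 65536)); /* 4294967295 */
	printf("%u\n", demo_clamped_umul32(1000, 1000));   /* 1000000 */
	return 0;
}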
static inline const struct svga3d_surface_desc *
svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
{
	if (format < ARRAY_SIZE(svga3d_surface_descs))
		return &svga3d_surface_descs[format];

	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
}
/*
 *----------------------------------------------------------------------
 *
 * svga3dsurface_get_mip_size --
 *
 *      Given a base level size and the mip level, compute the size of
 *      the mip level.
 *
 * Results:
 *      See above.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static inline surf_size_struct
svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
{
	surf_size_struct size;

	size.width = max_t(u32, base_level.width >> mip_level, 1);
	size.height = max_t(u32, base_level.height >> mip_level, 1);
	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
	return size;
}
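A standalone sketch of the mip-chain arithmetic above (illustrative only; the demo_* names are not part of the driver): each level shifts the base dimensions right and clamps at 1, so a 256x128x1 texture yields nine levels ending at 1x1x1.

#include <stdint.h>
#include <stdio.h>

struct demo_size { uint32_t width, height, depth; };

static uint32_t demo_max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

static struct demo_size demo_mip_size(struct demo_size base, uint32_t mip)
{
	struct demo_size s;
	s.width  = demo_max_u32(base.width  >> mip, 1);
	s.height = demo_max_u32(base.height >> mip, 1);
	s.depth  = demo_max_u32(base.depth  >> mip, 1);
	return s;
}

int main(void)
{
	struct demo_size base = { 256, 128, 1 };
	uint32_t mip;

	/* Prints 256x128x1, 128x64x1, ..., 2x1x1, 1x1x1. */
	for (mip = 0; mip < 9; mip++) {
		struct demo_size s = demo_mip_size(base, mip);
		printf("mip %u: %ux%ux%u\n", mip, s.width, s.height, s.depth);
	}
	return 0;
}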
static inline void
svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
				 const surf_size_struct *pixel_size,
				 surf_size_struct *block_size)
{
	block_size->width = DIV_ROUND_UP(pixel_size->width,
					 desc->block_size.width);
	block_size->height = DIV_ROUND_UP(pixel_size->height,
					  desc->block_size.height);
	block_size->depth = DIV_ROUND_UP(pixel_size->depth,
					 desc->block_size.depth);
}

static inline bool
svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
{
	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
}
static inline u32
svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
			      const surf_size_struct *size)
{
	u32 pitch;
	surf_size_struct blocks;

	svga3dsurface_get_size_in_blocks(desc, size, &blocks);

	pitch = blocks.width * desc->pitch_bytes_per_block;

	return pitch;
}
/*
 *-----------------------------------------------------------------------------
 *
 * svga3dsurface_get_image_buffer_size --
 *
 *      Return the number of bytes of buffer space required to store
 *      one image of a surface, optionally using the specified pitch.
 *
 *      If pitch is zero, it is assumed that rows are tightly packed.
 *
 *      This function is overflow-safe. If the result would have
 *      overflowed, instead we return U32_MAX.
 *
 * Results:
 *      Byte count.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

static inline u32
svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
				    const surf_size_struct *size,
				    u32 pitch)
{
	surf_size_struct image_blocks;
	u32 slice_size, total_size;

	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);

	if (svga3dsurface_is_planar_surface(desc)) {
		total_size = clamped_umul32(image_blocks.width,
					    image_blocks.height);
		total_size = clamped_umul32(total_size, image_blocks.depth);
		total_size = clamped_umul32(total_size, desc->bytes_per_block);
		return total_size;
	}

	if (pitch == 0)
		pitch = svga3dsurface_calculate_pitch(desc, size);

	slice_size = clamped_umul32(image_blocks.height, pitch);
	total_size = clamped_umul32(slice_size, image_blocks.depth);

	return total_size;
}
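Worked numbers for the block math above (illustrative only, standalone): a 256x256 SVGA3D_BC1 image uses 4x4 blocks of 8 bytes, giving 64 blocks per row, a 512-byte pitch and a 32768-byte slice.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t w = 256, h = 256;
	uint32_t bw = 4, bh = 4, bytes_per_block = 8;   /* BC1 block geometry */
	uint32_t blocks_w = (w + bw - 1) / bw;          /* DIV_ROUND_UP -> 64 */
	uint32_t blocks_h = (h + bh - 1) / bh;          /* 64 */
	uint32_t pitch = blocks_w * bytes_per_block;    /* 512 bytes */
	uint32_t slice = blocks_h * pitch;              /* 32768 bytes */

	printf("pitch=%u slice=%u\n", pitch, slice);
	return 0;
}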
static inline u32
svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
				  surf_size_struct base_level_size,
				  u32 num_mip_levels,
				  bool cubemap)
{
	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
	u64 total_size = 0;
	u32 mip;

	for (mip = 0; mip < num_mip_levels; mip++) {
		surf_size_struct size =
			svga3dsurface_get_mip_size(base_level_size, mip);
		total_size += svga3dsurface_get_image_buffer_size(desc,
								  &size, 0);
	}

	if (cubemap)
		total_size *= SVGA3D_MAX_SURFACE_FACES;

	return (u32) min_t(u64, total_size, (u64) U32_MAX);
}
/**
 * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
 * in an image (or volume).
 *
 * @format: The surface format.
 * @width: The image width in pixels.
 * @height: The image height in pixels.
 * @x: Pixel x coordinate.
 * @y: Pixel y coordinate.
 * @z: Pixel z coordinate (for volumes).
 */
static inline u32
svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
			       u32 width, u32 height,
			       u32 x, u32 y, u32 z)
{
	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
	const u32 bd = desc->block_size.depth;
	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
	const u32 offset = (z / bd * imgstride +
			    y / bh * rowstride +
			    x / bw * desc->bytes_per_block);
	return offset;
}
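A standalone sketch of the offset computation above (illustrative only) for an uncompressed 32-bit format, where the block size is 1x1x1 and bytes_per_block is 4:

#include <stdint.h>
#include <stdio.h>

/* DIV_ROUND_UP equivalent for the sketch. */
static uint32_t div_round_up(uint32_t n, uint32_t d) { return (n + d - 1) / d; }

int main(void)
{
	uint32_t width = 640, height = 480;
	uint32_t bw = 1, bh = 1, bd = 1, bytes_per_block = 4;
	uint32_t x = 10, y = 7, z = 0;
	uint32_t rowstride = div_round_up(width, bw) * bytes_per_block; /* 2560 */
	uint32_t imgstride = div_round_up(height, bh) * rowstride;
	uint32_t offset = z / bd * imgstride + y / bh * rowstride +
			  x / bw * bytes_per_block;

	printf("offset=%u\n", offset); /* 7*2560 + 10*4 = 17960 */
	return 0;
}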
static inline u32
svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
			       surf_size_struct baseLevelSize,
			       u32 numMipLevels,
			       u32 face,
			       u32 mip)
{
	u32 offset;
	u32 mipChainBytes;
	u32 mipChainBytesToLevel;
	u32 i;
	const struct svga3d_surface_desc *desc;
	surf_size_struct mipSize;
	u32 bytes;

	desc = svga3dsurface_get_desc(format);

	mipChainBytes = 0;
	mipChainBytesToLevel = 0;
	for (i = 0; i < numMipLevels; i++) {
		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
		mipChainBytes += bytes;
		if (i < mip)
			mipChainBytesToLevel += bytes;
	}

	offset = mipChainBytes * face + mipChainBytesToLevel;

	return offset;
}
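The layout implied above (illustrative only): images are stored face-major, each face holding its full mip chain, so an image's offset is the whole chain size times the face index plus the sizes of the mips preceding it within the chain. A standalone sketch with hypothetical per-mip byte sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical per-mip byte sizes for a 3-level chain. */
	uint32_t bytes[3] = { 65536, 16384, 4096 };
	uint32_t face = 2, mip = 1, i;
	uint32_t chain = 0, to_level = 0;

	for (i = 0; i < 3; i++) {
		if (i < mip)
			to_level += bytes[i];
		chain += bytes[i];
	}
	/* face 2, mip 1: 2 * 86016 + 65536 = 237568 */
	printf("offset=%u\n", chain * face + to_level);
	return 0;
}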
@@ -1,45 +0,0 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * Silly typedefs for the svga headers. Currently the headers are shared
 * between all components that talk to svga. And as such the headers are
 * in a completely different style and use weird defines.
 *
 * This file lets all the ugly be prefixed with svga*.
 */

#ifndef _SVGA_TYPES_H_
#define _SVGA_TYPES_H_

typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint8_t uint8;
typedef int32_t int32;
typedef bool Bool;

#endif
 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 1294 lines (new file; diff suppressed because it is too large)
 drivers/gpu/drm/vmwgfx/vmwgfx_binding.h |  209 lines (new file)
@@ -0,0 +1,209 @@
/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#ifndef _VMWGFX_BINDING_H_
#define _VMWGFX_BINDING_H_

#include "device_include/svga3d_reg.h"
#include <linux/list.h>

#define VMW_MAX_VIEW_BINDINGS 128

struct vmw_private;
struct vmw_ctx_binding_state;

/*
 * enum vmw_ctx_binding_type - abstract resource to context binding types
 */
enum vmw_ctx_binding_type {
	vmw_ctx_binding_shader,
	vmw_ctx_binding_rt,
	vmw_ctx_binding_tex,
	vmw_ctx_binding_cb,
	vmw_ctx_binding_dx_shader,
	vmw_ctx_binding_dx_rt,
	vmw_ctx_binding_sr,
	vmw_ctx_binding_ds,
	vmw_ctx_binding_so,
	vmw_ctx_binding_vb,
	vmw_ctx_binding_ib,
	vmw_ctx_binding_max
};

/**
 * struct vmw_ctx_bindinfo - single binding metadata
 *
 * @ctx_list: List head for the context's list of bindings.
 * @res_list: List head for a resource's list of bindings.
 * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
 * indicates no binding present.
 * @res: Non-refcounted pointer to the resource the binding points to. This
 * is typically a surface or a view.
 * @bt: Binding type.
 * @scrubbed: Whether the binding has been scrubbed from the context.
 */
struct vmw_ctx_bindinfo {
	struct list_head ctx_list;
	struct list_head res_list;
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	enum vmw_ctx_binding_type bt;
	bool scrubbed;
};

/**
 * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
 *
 * @bi: struct vmw_ctx_bindinfo we derive from.
 * @texture_stage: Device data used to reconstruct binding command.
 */
struct vmw_ctx_bindinfo_tex {
	struct vmw_ctx_bindinfo bi;
	uint32 texture_stage;
};

/**
 * struct vmw_ctx_bindinfo_shader - Shader binding metadata
 *
 * @bi: struct vmw_ctx_bindinfo we derive from.
 * @shader_slot: Device data used to reconstruct binding command.
 */
struct vmw_ctx_bindinfo_shader {
	struct vmw_ctx_bindinfo bi;
	SVGA3dShaderType shader_slot;
};

/**
 * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
 *
 * @bi: struct vmw_ctx_bindinfo we derive from.
 * @shader_slot: Device data used to reconstruct binding command.
 * @offset: Device data used to reconstruct binding command.
 * @size: Device data used to reconstruct binding command.
 * @slot: Device data used to reconstruct binding command.
 */
struct vmw_ctx_bindinfo_cb {
	struct vmw_ctx_bindinfo bi;
	SVGA3dShaderType shader_slot;
	uint32 offset;
	uint32 size;
	uint32 slot;
};

/**
 * struct vmw_ctx_bindinfo_view - View binding metadata
 *
 * @bi: struct vmw_ctx_bindinfo we derive from.
 * @shader_slot: Device data used to reconstruct binding command.
 * @slot: Device data used to reconstruct binding command.
 */
struct vmw_ctx_bindinfo_view {
	struct vmw_ctx_bindinfo bi;
	SVGA3dShaderType shader_slot;
	uint32 slot;
};

/**
 * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
 *
 * @bi: struct vmw_ctx_bindinfo we derive from.
 * @offset: Device data used to reconstruct binding command.
 * @size: Device data used to reconstruct binding command.
 * @slot: Device data used to reconstruct binding command.
 */
struct vmw_ctx_bindinfo_so {
	struct vmw_ctx_bindinfo bi;
	uint32 offset;
	uint32 size;
	uint32 slot;
};

/**
 * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
 *
 * @bi: struct vmw_ctx_bindinfo we derive from.
 * @offset: Device data used to reconstruct binding command.
 * @stride: Device data used to reconstruct binding command.
 * @slot: Device data used to reconstruct binding command.
 */
struct vmw_ctx_bindinfo_vb {
	struct vmw_ctx_bindinfo bi;
	uint32 offset;
	uint32 stride;
	uint32 slot;
};

/**
 * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
 *
 * @bi: struct vmw_ctx_bindinfo we derive from.
 * @offset: Device data used to reconstruct binding command.
 * @format: Device data used to reconstruct binding command.
 */
struct vmw_ctx_bindinfo_ib {
	struct vmw_ctx_bindinfo bi;
	uint32 offset;
	uint32 format;
};

/**
 * struct vmw_dx_shader_bindings - per shader type context binding state
 *
 * @shader: The shader binding for this shader type.
 * @const_buffers: Const buffer bindings for this shader type.
 * @shader_res: Shader resource view bindings for this shader type.
 * @dirty_sr: Bitmap tracking individual shader resource bindings changes
 * that have not yet been emitted to the device.
 * @dirty: Bitmap tracking per-binding type binding changes that have not
 * yet been emitted to the device.
 */
struct vmw_dx_shader_bindings {
	struct vmw_ctx_bindinfo_shader shader;
	struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
	struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
	DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
	unsigned long dirty;
};

extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *ci,
			    u32 shader_slot, u32 slot);
extern void
vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
			 struct vmw_ctx_binding_state *from);
extern void vmw_binding_res_list_kill(struct list_head *head);
extern void vmw_binding_res_list_scrub(struct list_head *head);
extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
extern struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv);
extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
extern struct list_head *
vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);

#endif
@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +72,12 @@ static struct ttm_place mob_placement_flags = {
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
@@ -200,6 +206,13 @@ struct ttm_placement vmw_mob_placement = {
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
@@ -804,9 +817,9 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The truct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
@@ -815,13 +828,14 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *mem)
{
	vmw_resource_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}

/**
 * vmw_swap_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 1303 lines (new file; diff suppressed because it is too large)
@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,15 +26,10 @@
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

#define VMW_CMDBUF_RES_MAN_HT_ORDER 12

enum vmw_cmdbuf_res_state {
	VMW_CMDBUF_RES_COMMITED,
	VMW_CMDBUF_RES_ADD,
	VMW_CMDBUF_RES_DEL
};

/**
 * struct vmw_cmdbuf_res - Command buffer managed resource entry.
 *
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list)

	list_for_each_entry_safe(entry, next, list, head) {
		list_del(&entry->head);
		if (entry->res->func->commit_notify)
			entry->res->func->commit_notify(entry->res,
							entry->state);
		switch (entry->state) {
		case VMW_CMDBUF_RES_ADD:
			entry->state = VMW_CMDBUF_RES_COMMITED;
			entry->state = VMW_CMDBUF_RES_COMMITTED;
			list_add_tail(&entry->head, &entry->man->list);
			break;
		case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
						 &entry->hash);
		list_del(&entry->head);
		list_add_tail(&entry->head, &entry->man->list);
		entry->state = VMW_CMDBUF_RES_COMMITED;
		entry->state = VMW_CMDBUF_RES_COMMITTED;
		break;
	default:
		BUG();
@@ -231,6 +229,9 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
 * @res_type: The resource type.
 * @user_key: The user-space id of the resource.
 * @list: The staging list.
 * @res_p: If the resource is in an already committed state, points to the
 * struct vmw_resource on successful return. The pointer will be
 * non ref-counted.
 *
 * This function looks up the struct vmw_cmdbuf_res entry from the manager
 * hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
			  enum vmw_cmdbuf_res_type res_type,
			  u32 user_key,
			  struct list_head *list)
			  struct list_head *list,
			  struct vmw_resource **res_p)
{
	struct vmw_cmdbuf_res *entry;
	struct drm_hash_item *hash;
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
	switch (entry->state) {
	case VMW_CMDBUF_RES_ADD:
		vmw_cmdbuf_res_free(man, entry);
		*res_p = NULL;
		break;
	case VMW_CMDBUF_RES_COMMITED:
	case VMW_CMDBUF_RES_COMMITTED:
		(void) drm_ht_remove_item(&man->resources, &entry->hash);
		list_del(&entry->head);
		entry->state = VMW_CMDBUF_RES_DEL;
		list_add_tail(&entry->head, list);
		*res_p = entry->res;
		break;
	default:
		BUG();
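A standalone sketch (illustrative only; the state the diff above renames) of the staging state machine this file implements: entries staged as ADD become COMMITTED on commit, entries staged as DEL return to COMMITTED on revert, and the opposite operation frees the entry.

#include <stdio.h>

enum demo_state { DEMO_COMMITTED, DEMO_ADD, DEMO_DEL };

/* Returns the post-commit state, or -1 if the entry is freed. */
static int demo_commit(enum demo_state s)
{
	return (s == DEMO_ADD) ? DEMO_COMMITTED : -1; /* DEL entries go away */
}

/* Returns the post-revert state, or -1 if the entry is freed. */
static int demo_revert(enum demo_state s)
{
	return (s == DEMO_DEL) ? DEMO_COMMITTED : -1; /* ADD entries go away */
}

int main(void)
{
	printf("ADD -> commit -> %d\n", demo_commit(DEMO_ADD)); /* 0: COMMITTED */
	printf("DEL -> commit -> %d\n", demo_commit(DEMO_DEL)); /* -1: freed */
	printf("DEL -> revert -> %d\n", demo_revert(DEMO_DEL)); /* 0: COMMITTED */
	printf("ADD -> revert -> %d\n", demo_revert(DEMO_ADD)); /* -1: freed */
	return 0;
}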
 (diff for the preceding file suppressed because it is too large)
 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 662 lines (new file)
@@ -0,0 +1,662 @@
/**************************************************************************
 *
 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, that is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "vmwgfx_so.h"

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Seen entries in command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	size_t size_read_back;
	int seen_entries;
	u32 type;
	bool scrubbed;
	struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Min number of initial entries at cotable allocation
 * for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Function called with the cotable's resource list when the
 * cotable is scrubbed.
 */
struct vmw_cotable_info {
	u32 min_initial_entries;
	u32 size;
	void (*unbind_func)(struct vmw_private *, struct list_head *,
			    bool);
};
static const struct vmw_cotable_info co_info[] = {
	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
};

/*
 * Cotables with bindings that we remove must be scrubbed first,
 * otherwise, the device will swap in an invalid context when we remove
 * bindings before scrubbing a cotable...
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
	SVGA_COTABLE_RTVIEW,
	SVGA_COTABLE_DSVIEW,
	SVGA_COTABLE_SRVIEW,
	SVGA_COTABLE_DXSHADER,
	SVGA_COTABLE_ELEMENTLAYOUT,
	SVGA_COTABLE_BLENDSTATE,
	SVGA_COTABLE_DEPTHSTENCIL,
	SVGA_COTABLE_RASTERIZERSTATE,
	SVGA_COTABLE_SAMPLER,
	SVGA_COTABLE_STREAMOUTPUT,
	SVGA_COTABLE_DXQUERY,
};

static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "context guest backed object tables",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_cotable_create,
	.destroy = vmw_cotable_destroy,
	.bind = vmw_cotable_bind,
	.unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
	return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
	res->id = -1;
	return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = &res->backup->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
	lockdep_assert_held(&bo->resv->lock.base);

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
	if (!cmd) {
		DRM_ERROR("Failed reserving FIFO space for cotable "
			  "binding.\n");
		return -ENOMEM;
	}

	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validSizeInBytes = vcotbl->size_read_back;

	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
	vcotbl->scrubbed = false;

	return 0;
}
/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf)
{
	/*
	 * The create() callback may have changed @res->backup without
	 * the caller noticing, and with val_buf->bo still pointing to
	 * the old backup buffer. Although hackish, and not used currently,
	 * take the opportunity to correct the value here so that it's not
	 * misused in the future.
	 */
	val_buf->bo = &res->backup->base;

	return vmw_cotable_unscrub(res);
}
/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the
 * backup buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with the
 * following restrictions:
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	size_t submit_size;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd0;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd1;

	if (vcotbl->scrubbed)
		return 0;

	if (co_info[vcotbl->type].unbind_func)
		co_info[vcotbl->type].unbind_func(dev_priv,
						  &vcotbl->resource_list,
						  readback);
	submit_size = sizeof(*cmd1);
	if (readback)
		submit_size += sizeof(*cmd0);

	cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
	if (!cmd1) {
		DRM_ERROR("Failed reserving FIFO space for cotable "
			  "unbinding.\n");
		return -ENOMEM;
	}

	vcotbl->size_read_back = 0;
	if (readback) {
		cmd0 = (void *) cmd1;
		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd0->header.size = sizeof(cmd0->body);
		cmd0->body.cid = vcotbl->ctx->id;
		cmd0->body.type = vcotbl->type;
		cmd1 = (void *) &cmd0[1];
		vcotbl->size_read_back = res->backup_size;
	}
	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.cid = vcotbl->ctx->id;
	cmd1->body.type = vcotbl->type;
	cmd1->body.mobid = SVGA3D_INVALID_ID;
	cmd1->body.validSizeInBytes = 0;
	vmw_fifo_commit_flush(dev_priv, submit_size);
	vcotbl->scrubbed = true;

	/* Trigger a create() on next validate. */
	res->id = -1;

	return 0;
}
/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	if (list_empty(&res->mob_head))
		return 0;

	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
	lockdep_assert_held(&bo->resv->lock.base);

	mutex_lock(&dev_priv->binding_mutex);
	if (!vcotbl->scrubbed)
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd;
	struct vmw_fence_obj *fence;

	if (!vcotbl->scrubbed) {
		cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
					  SVGA3D_INVALID_ID);
		if (!cmd) {
			DRM_ERROR("Failed reserving FIFO space for cotable "
				  "readback.\n");
			return -ENOMEM;
		}
		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.cid = vcotbl->ctx->id;
		cmd->body.type = vcotbl->type;
		vcotbl->size_read_back = res->backup_size;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(&res->backup->base, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
}
/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_dma_buffer *buf, *old_buf = res->backup;
	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
	size_t old_size = res->backup_size;
	size_t old_size_read_back = vcotbl->size_read_back;
	size_t cur_size_read_back;
	struct ttm_bo_kmap_obj old_map, new_map;
	int ret;
	size_t i;

	ret = vmw_cotable_readback(res);
	if (ret)
		return ret;

	cur_size_read_back = vcotbl->size_read_back;
	vcotbl->size_read_back = old_size_read_back;

	/*
	 * While the device is processing, allocate and reserve a buffer
	 * object for the new COTable. Initially pin the buffer object to
	 * make sure we can use tryreserve without failure.
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
			      true, vmw_dmabuf_bo_free);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		return ret;
	}

	bo = &buf->base;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));

	ret = ttm_bo_wait(old_bo, false, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
	}

	/*
	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
	 * This should really be a TTM utility.
	 */
	for (i = 0; i < old_bo->num_pages; ++i) {
		bool dummy;

		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping old COTable on resize.\n");
			goto out_wait;
		}
		ret = ttm_bo_kmap(bo, i, 1, &new_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping new COTable on resize.\n");
			goto out_map_new;
		}
		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
		       ttm_kmap_obj_virtual(&old_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&new_map);
		ttm_bo_kunmap(&old_map);
	}

	/* Unpin new buffer, and switch backup buffers. */
	ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed validating new COTable backup buffer.\n");
		goto out_wait;
	}

	res->backup = buf;
	res->backup_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;

	/*
	 * Now tell the device to switch. If this fails, then we need to
	 * revert the full resize.
	 */
	ret = vmw_cotable_unscrub(res);
	if (ret) {
		DRM_ERROR("Failed switching COTable backup buffer.\n");
		res->backup = old_buf;
		res->backup_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		goto out_wait;
	}

	/* Let go of the old mob. */
	list_del(&res->mob_head);
	list_add_tail(&res->mob_head, &buf->res_list);
	vmw_dmabuf_unreference(&old_buf);
	res->id = vcotbl->type;

	return 0;

out_map_new:
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unreserve(bo);
	vmw_dmabuf_unreference(&buf);

	return ret;
}
/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for
 * two things.
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer, that is, if @res->mob_head is non-empty.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	size_t new_size = res->backup_size;
	size_t needed_size;
	int ret;

	/* Check whether we need to resize the cotable */
	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
	while (needed_size > new_size)
		new_size *= 2;

	if (likely(new_size <= res->backup_size)) {
		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
		}
		res->id = vcotbl->type;
		return 0;
	}

	return vmw_cotable_resize(res, new_size);
}
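A standalone sketch (illustrative only) of the growth policy in vmw_cotable_create() above: the backing size doubles until it covers (seen_entries + 1) entries.

#include <stdint.h>
#include <stdio.h>

static uint64_t demo_cotable_grow(uint64_t cur_size, uint64_t needed_size)
{
	uint64_t new_size = cur_size;

	/* Double until the table covers the highest entry seen. */
	while (needed_size > new_size)
		new_size *= 2;
	return new_size;
}

int main(void)
{
	/* 4096-byte table, 64-byte entries, entry id 100 seen so far. */
	uint64_t needed = (100 + 1) * 64;	/* 6464 bytes */

	printf("%llu\n",
	       (unsigned long long)demo_cotable_grow(4096, needed)); /* 8192 */
	return 0;
}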
/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
	(void) vmw_cotable_destroy(res);
}

static size_t cotable_acc_size;

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(res);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource.
 * The cotable resource will not add a refcount.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
				       struct vmw_resource *ctx,
				       u32 type)
{
	struct vmw_cotable *vcotbl;
	int ret;
	u32 num_entries;

	if (unlikely(cotable_acc_size == 0))
		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   cotable_acc_size, false, true);
	if (unlikely(ret))
		return ERR_PTR(ret);

	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
	if (unlikely(vcotbl == NULL)) {
		ret = -ENOMEM;
		goto out_no_alloc;
	}

	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
				vmw_cotable_free, &vmw_cotable_func);
	if (unlikely(ret != 0))
		goto out_no_init;

	INIT_LIST_HEAD(&vcotbl->resource_list);
	vcotbl->res.id = type;
	vcotbl->res.backup_size = PAGE_SIZE;
	num_entries = PAGE_SIZE / co_info[type].size;
	if (num_entries < co_info[type].min_initial_entries) {
		vcotbl->res.backup_size = co_info[type].min_initial_entries *
			co_info[type].size;
		vcotbl->res.backup_size =
			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	vcotbl->scrubbed = true;
	vcotbl->seen_entries = -1;
	vcotbl->type = type;
	vcotbl->ctx = ctx;

	vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);

	return &vcotbl->res;

out_no_init:
	kfree(vcotbl);
out_no_alloc:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
	return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);

	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
			  (unsigned) vcotbl->type, id);
		return -EINVAL;
	}

	if (vcotbl->seen_entries < id) {
		/* Trigger a call to create() on next validate */
		res->id = -1;
		vcotbl->seen_entries = id;
	}

	return 0;
}

/**
 * vmw_cotable_add_resource - Add a resource to the cotable's list of
 * active resources.
 *
 * @res: Pointer to a struct vmw_resource representing the cotable.
 * @head: Pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
	struct vmw_cotable *vcotbl =
		container_of(res, struct vmw_cotable, res);

	list_add_tail(head, &vcotbl->resource_list);
}
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c

@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -32,25 +32,20 @@

/**
 * vmw_dmabuf_to_placement - Validate a buffer to placement.
 * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer if true.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
			    struct vmw_dma_buffer *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct ttm_placement *placement,
				bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

@@ -66,6 +61,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
		goto err;

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
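After the rename the pin helpers also take the pin reference themselves (via vmw_bo_pin_reserved() above), so pinning and unpinning form a symmetric pair. A usage sketch, assuming the global placements declared in vmwgfx_drv.h:

/* Sketch: pin a buffer into VRAM, use it, then drop the pin again.
 * Both helpers take the reservation_sem internally. */
static int example_pin_for_scanout(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *buf)
{
	int ret;

	ret = vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
					  true /* interruptible */);
	if (ret)
		return ret;

	/* ... the buffer stays put while pinned ... */

	return vmw_dmabuf_unpin(dev_priv, buf, true);
}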
@@ -75,12 +72,10 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
}

/**
 * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
 * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.

@@ -90,55 +85,34 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible)
int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *placement;
	int ret;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	if (pin)
		placement = &vmw_vram_gmr_ne_placement;
	else
		placement = &vmw_vram_gmr_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto err_unreserve;
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);

	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	if (pin)
		placement = &vmw_vram_ne_placement;
	else
		placement = &vmw_vram_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);

err_unreserve:
	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
@@ -146,67 +120,50 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
}

/**
 * vmw_dmabuf_to_vram - Move a buffer to vram.
 * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
		       struct vmw_dma_buffer *buf,
		       bool pin, bool interruptible)
int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *buf,
			   bool interruptible)
{
	struct ttm_placement *placement;

	if (pin)
		placement = &vmw_vram_ne_placement;
	else
		placement = &vmw_vram_placement;

	return vmw_dmabuf_to_placement(dev_priv, buf,
				       placement,
				       interruptible);
	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
					   interruptible);
}

/**
 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
 * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer in vram if true.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				bool pin, bool interruptible)
int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *buf,
				    bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;

	if (pin)
		place = vmw_vram_ne_placement.placement[0];
	else
		place = vmw_vram_placement.placement[0];
	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;

	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
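The function above builds a throwaway, single-entry placement on the stack to force the buffer to the first pages of VRAM; the same pattern works for any one-off placement constraint. A reduced sketch (the busy_placement assignments, which the hunk above cuts off, are assumed to mirror the primary ones as is usual in TTM):

/* Sketch: an on-stack, single-entry TTM placement limiting a buffer to
 * its first npages page frames of VRAM (fpfn/lpfn are frame bounds). */
static void example_start_of_vram_placement(struct ttm_placement *placement,
					    struct ttm_place *place,
					    unsigned long npages)
{
	*place = vmw_vram_placement.placement[0];
	place->fpfn = 0;
	place->lpfn = npages;

	placement->num_placement = 1;
	placement->placement = place;
	placement->num_busy_placement = 1;
	placement->busy_placement = place;
}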
@@ -216,13 +173,16 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
	if (unlikely(ret != 0))
		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/* Is this buffer already in vram but not at the start of it? */
	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)

@@ -230,8 +190,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,

	ret = ttm_bo_validate(bo, &placement, interruptible, false);

	/* For some reason we didn't up at the start of vram */
	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
@@ -240,13 +202,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
	return ret;
}


/**
 * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
 * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.

@@ -259,16 +218,25 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
		     struct vmw_dma_buffer *buf,
		     bool interruptible)
{
	/*
	 * We could in theory early out if the buffer is
	 * unpinned but we need to lock and reserve the buffer
	 * anyways so we don't gain much by that.
	 */
	return vmw_dmabuf_to_placement(dev_priv, buf,
				       &vmw_evictable_placement,
				       interruptible);
}
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,


/**
 * vmw_bo_pin - Pin or unpin a buffer object without moving it.
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @bo: The buffer object. Must be reserved.
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	lockdep_assert_held(&bo->resv->lock.base);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
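With the new vbo->pin_count, pin and unpin calls nest: only the 0 -> 1 and 1 -> 0 transitions actually revalidate the buffer, and the lockdep assertion above requires the caller to hold the reservation. A hedged sketch of the calling pattern (error handling trimmed):

/* Sketch: nested pinning under a single reservation. Intermediate
 * calls only adjust the counter; the placement is touched twice. */
static int example_nested_pin(struct vmw_dma_buffer *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	vmw_bo_pin_reserved(vbo, true);   /* 0 -> 1: validates as pinned */
	vmw_bo_pin_reserved(vbo, true);   /* 1 -> 2: counter only */
	vmw_bo_pin_reserved(vbo, false);  /* 2 -> 1: counter only */
	vmw_bo_pin_reserved(vbo, false);  /* 1 -> 0: validates as evictable */

	ttm_bo_unreserve(bo);
	return 0;
}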
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -28,6 +28,7 @@

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>

@@ -127,6 +128,9 @@
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for

@@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,

@@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};

static struct pci_device_id vmw_pci_id_list[] = {

@@ -278,6 +285,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
@@ -296,30 +305,31 @@ static void vmw_print_capabilities(uint32_t capabilities)
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct ttm_buffer_object *bo;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the bo as pinned, so that a tryreserve will
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	ret = ttm_bo_create(&dev_priv->bdev,
			    PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &bo);
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);

@@ -327,18 +337,55 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin(bo, false);
	ttm_bo_unreserve(bo);
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		ttm_bo_unref(&bo);
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = bo;
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
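The early/late split exists so the hibernation path can tear down command submission and still recover if the freeze has to be refused. A condensed sketch of that pairing, mirroring the vmw_pm_freeze changes further down in this patch (the two halves are actually split across the PM notifier and the freeze callback in the real driver):

/* Sketch: a freeze attempt using the split takedown; recovery on
 * refusal is what vmw_pm_freeze() does below. */
static int example_freeze_attempt(struct vmw_private *dev_priv)
{
	vmw_release_device_early(dev_priv);	/* stop command submission */

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		/* Still in use: revert the early takedown and bail out. */
		WARN_ON(vmw_request_device_late(dev_priv));
		return -EBUSY;
	}

	vmw_release_device_late(dev_priv);	/* may wait on fences */
	return 0;
}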
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

@@ -349,14 +396,16 @@ static int vmw_request_device(struct vmw_private *dev_priv)
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			goto out_no_mob;
		}
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

@@ -364,15 +413,29 @@ static int vmw_request_device(struct vmw_private *dev_priv)
	return 0;

out_no_query_bo:
	if (dev_priv->has_mob)
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released

@@ -381,65 +444,31 @@ static void vmw_release_device(struct vmw_private *dev_priv)

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	if (dev_priv->has_mob)
	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
		vmw_fence_fifo_down(dev_priv->fman);
		vmw_fifo_release(dev_priv, &dev_priv->fifo);
	}


/**
 * Increase the 3d resource refcount.
 * If the count was prevously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	int32_t n3d;
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga)
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
@@ -603,6 +632,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);

@@ -673,22 +703,31 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
			 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	} else
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	/*
	 * Limit back buffer size to VRAM size.  Remove this once
	 * screen targets are implemented.
	 */
	if (dev_priv->prim_bb_mem > dev_priv->vram_size)
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);

@@ -714,17 +753,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	dev_priv->active_master = &dev_priv->fbdev_master;


	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
					       dev_priv->mmio_size);


@@ -787,13 +815,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||

@@ -814,18 +857,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	}
	}

	vmw_kms_save_vga(dev_priv);
	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			goto out_no_fifo;
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}


@@ -838,13 +891,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	vmw_kms_restore_vga(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)

@@ -860,13 +914,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	iounmap(dev_priv->mmio_virt);
out_err3:
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

@@ -882,19 +936,24 @@ static int vmw_driver_unload(struct drm_device *dev)
	drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);

@@ -907,6 +966,8 @@ static int vmw_driver_unload(struct drm_device *dev)
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)

@@ -1044,11 +1105,21 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

@@ -1068,6 +1139,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
	ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
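The execbuf entry now has a NULL handler in the table because it is dispatched by hand: the permission check is done explicitly through the newly exported drm_ioctl_permit(), and the size userspace encoded into the ioctl number is forwarded as _IOC_SIZE(cmd), so the argument struct can grow across driver versions. A hedged sketch of how a size-versioned argument might then be copied on the driver side (the helper and its policy are illustrative, not this patch's code):

/* Sketch: accept an older, shorter struct drm_vmw_execbuf_arg by
 * zeroing the kernel copy first and copying only what userspace sent. */
static int example_copy_execbuf_arg(void __user *uptr, size_t size,
				    struct drm_vmw_execbuf_arg *arg)
{
	if (size > sizeof(*arg))
		return -EINVAL;		/* userspace newer than the driver */

	memset(arg, 0, sizeof(*arg));	/* fields the old ABI lacked read 0 */
	if (copy_from_user(arg, uptr, size))
		return -EFAULT;

	return 0;
}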
@@ -1086,30 +1163,11 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,

static void vmw_lastclose(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = drm_mode_set_config_internal(&set);
		WARN_ON(ret != 0);
	}

}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,

@@ -1137,7 +1195,6 @@ static void vmw_master_destroy(struct drm_device *dev,
	kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)

@@ -1148,27 +1205,13 @@ static int vmw_master_set(struct drm_device *dev,
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

@@ -1182,14 +1225,6 @@ static int vmw_master_set(struct drm_device *dev,
	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
	}
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,

@@ -1214,16 +1249,9 @@ static void vmw_master_drop(struct drm_device *dev,
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	vmw_execbuf_release_pinned_bo(dev_priv);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
	}
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);

@@ -1233,6 +1261,76 @@ static void vmw_master_drop(struct drm_device *dev,
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}
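Note the locking contract these four functions set up: the double-underscore variants assume the caller already holds reservation_sem (shared for enable, exclusive for disable), while the plain variants take it themselves; vmw_svga_disable() also drops svga_lock before evicting, since ttm_bo_evict_mm() can sleep. A path that already owns the semaphore, such as vmw_pm_freeze() below, must therefore use the __ forms; a minimal sketch:

/* Sketch: a suspend-style path that already holds reservation_sem in
 * write mode may only use the lock-free variant; emptying VRAM is then
 * the caller's responsibility, per the __vmw_svga_disable comment. */
static void example_disable_under_lock(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_disable(dev_priv);
	/* ... evict VRAM here if needed ... */
	ttm_write_unlock(&dev_priv->reservation_sem);
}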
static void vmw_remove(struct pci_dev *pdev)
{

@@ -1250,23 +1348,26 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/**
		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents is moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);

		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);

		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;

@@ -1276,20 +1377,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
	if (dev_priv->refuse_hibernation)
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);

@@ -1321,56 +1415,62 @@ static int vmw_pm_resume(struct device *kdev)
	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	/**
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);
	vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};
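For context, a dev_pm_ops table like this takes effect once it is referenced from the PCI driver's driver.pm field; a hedged sketch of that wiring (field values other than .pm, .remove and the id table are assumptions, and the probe callback does not appear in this diff):

/* Sketch: attaching vmw_pm_ops to the PCI driver. vmw_probe is the
 * assumed probe callback; it is not shown in this diff. */
static struct pci_driver example_vmw_pci_driver = {
	.name = "vmwgfx",
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops,
	},
};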
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -40,17 +40,17 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20140704"
#define VMWGFX_DRIVER_DATE "20150810"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 6
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_DRIVER_MINOR 9
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.

@@ -59,6 +59,8 @@
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\

@@ -85,6 +87,9 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
};

/**

@@ -113,6 +118,7 @@ struct vmw_resource {
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */

@@ -130,6 +136,9 @@ enum vmw_res_type {
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};


@@ -137,7 +146,8 @@ enum vmw_res_type {
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_compat_shader
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;
@@ -160,11 +170,13 @@ struct vmw_surface {
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO so far just a extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {

@@ -176,14 +188,15 @@ struct vmw_marker_queue {

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {

@@ -264,70 +277,15 @@ struct vmw_piter {
};

/*
 * enum vmw_ctx_binding_type - abstract resource to context binding types
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_ctx_binding_type {
	vmw_ctx_binding_shader,
	vmw_ctx_binding_rt,
	vmw_ctx_binding_tex,
	vmw_ctx_binding_max
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};

/**
 * struct vmw_ctx_bindinfo - structure representing a single context binding
 *
 * @ctx: Pointer to the context structure. NULL means the binding is not
 * active.
 * @res: Non ref-counted pointer to the bound resource.
 * @bt: The binding type.
 * @i1: Union of information needed to unbind.
 */
struct vmw_ctx_bindinfo {
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	enum vmw_ctx_binding_type bt;
	bool scrubbed;
	union {
		SVGA3dShaderType shader_type;
		SVGA3dRenderTargetType rt_type;
		uint32 texture_stage;
	} i1;
};

/**
 * struct vmw_ctx_binding - structure representing a single context binding
 * - suitable for tracking in a context
 *
 * @ctx_list: List head for context.
 * @res_list: List head for bound resource.
 * @bi: Binding info
 */
struct vmw_ctx_binding {
	struct list_head ctx_list;
	struct list_head res_list;
	struct vmw_ctx_bindinfo bi;
};


/**
 * struct vmw_ctx_binding_state - context binding state
 *
 * @list: linked list of individual bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units/samplers bindings.
 * @shaders: Shader bindings.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 *
 */
struct vmw_ctx_binding_state {
	struct list_head list;
	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
};

struct vmw_sw_context{
	struct drm_open_hash res_ht;
@@ -342,15 +300,21 @@ struct vmw_sw_context{
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct ttm_buffer_object *cur_query_bo;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state staged_bindings;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;

@@ -358,8 +322,6 @@ struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};

struct vmw_vga_topology_state {

@@ -370,6 +332,26 @@ struct vmw_vga_topology_state {
	uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;

@@ -387,9 +369,13 @@ struct vmw_private {
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	u32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_ids;

@@ -401,6 +387,7 @@ struct vmw_private {
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;

	/*
	 * VGA registers.

@@ -420,6 +407,7 @@ struct vmw_private {
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

@@ -453,6 +441,8 @@ struct vmw_private {
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by irq_lock */
	int error_waiters; /* Protected by irq_lock */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;

@@ -484,6 +474,7 @@ struct vmw_private {

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.

@@ -493,9 +484,10 @@ struct vmw_private {
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;
	bool refuse_hibernation;

	struct mutex release_mutex;
	uint32_t num_3d_resources;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()

@@ -507,8 +499,8 @@ struct vmw_private {
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

@@ -531,8 +523,9 @@ struct vmw_private {
	/*
	 * Guest Backed stuff
	 */
	struct ttm_buffer_object *otable_bo;
	struct vmw_otable *otables;
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
|
|||
return val;
|
||||
}
|
||||
|
||||
int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
|
||||
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
|
||||
extern void vmw_svga_enable(struct vmw_private *dev_priv);
|
||||
extern void vmw_svga_disable(struct vmw_private *dev_priv);
|
||||
|
||||
|
||||
/**
|
||||
* GMR utilities - vmwgfx_gmr.c
|
||||
|
@ -610,7 +604,8 @@ extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
|
|||
extern struct vmw_resource *
|
||||
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
|
||||
extern int vmw_resource_validate(struct vmw_resource *res);
|
||||
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
|
||||
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
|
||||
bool no_backup);
|
||||
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
|
||||
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
|
@ -660,10 +655,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
|
|||
uint32_t *inout_id,
|
||||
struct vmw_resource **out);
|
||||
extern void vmw_resource_unreserve(struct vmw_resource *res,
|
||||
bool switch_backup,
|
||||
struct vmw_dma_buffer *new_backup,
|
||||
unsigned long new_backup_offset);
|
||||
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *mem);
|
||||
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *mem);
|
||||
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
|
||||
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
|
||||
struct vmw_fence_obj *fence);
|
||||
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
|
||||
|
@ -671,25 +670,25 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
|
|||
/**
|
||||
* DMA buffer helper routines - vmwgfx_dmabuf.c
|
||||
*/
|
||||
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
|
||||
struct vmw_dma_buffer *bo,
|
||||
struct ttm_placement *placement,
|
||||
bool interruptible);
|
||||
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
bool pin, bool interruptible);
|
||||
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
bool pin, bool interruptible);
|
||||
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
|
||||
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
|
||||
struct vmw_dma_buffer *bo,
|
||||
bool pin, bool interruptible);
|
||||
struct ttm_placement *placement,
|
||||
bool interruptible);
|
||||
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
bool interruptible);
|
||||
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
bool interruptible);
|
||||
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
|
||||
struct vmw_dma_buffer *bo,
|
||||
bool interruptible);
|
||||
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
|
||||
struct vmw_dma_buffer *bo,
|
||||
bool interruptible);
|
||||
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
|
||||
SVGAGuestPtr *ptr);
|
||||
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
|
||||
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
|
||||
|
||||
/**
|
||||
* Misc Ioctl functionality - vmwgfx_ioctl.c
|
||||
|
@ -717,7 +716,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
|
|||
extern void vmw_fifo_release(struct vmw_private *dev_priv,
|
||||
struct vmw_fifo_state *fifo);
|
||||
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
|
||||
extern void *
|
||||
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
|
||||
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
|
||||
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
|
||||
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
|
||||
uint32_t *seqno);
|
||||
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
|
||||
|
@ -726,6 +728,8 @@ extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
|
|||
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
|
||||
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
|
||||
uint32_t cid);
|
||||
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
|
||||
bool interruptible);
|
||||
|
||||
/**
|
||||
* TTM glue - vmwgfx_ttm_glue.c
|
||||
|
@ -750,6 +754,7 @@ extern struct ttm_placement vmw_sys_ne_placement;
|
|||
extern struct ttm_placement vmw_evictable_placement;
|
||||
extern struct ttm_placement vmw_srf_placement;
|
||||
extern struct ttm_placement vmw_mob_placement;
|
||||
extern struct ttm_placement vmw_mob_ne_placement;
|
||||
extern struct ttm_bo_driver vmw_bo_driver;
|
||||
extern int vmw_dma_quiescent(struct drm_device *dev);
|
||||
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
|
||||
|
@ -800,14 +805,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
|
|||
* Command submission - vmwgfx_execbuf.c
|
||||
*/
|
||||
|
||||
extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
|
||||
struct drm_file *file_priv, size_t size);
|
||||
extern int vmw_execbuf_process(struct drm_file *file_priv,
|
||||
struct vmw_private *dev_priv,
|
||||
void __user *user_commands,
|
||||
void *kernel_commands,
|
||||
uint32_t command_size,
|
||||
uint64_t throttle_us,
|
||||
uint32_t dx_context_handle,
|
||||
struct drm_vmw_fence_rep __user
|
||||
*user_fence_rep,
|
||||
struct vmw_fence_obj **out_fence);
|
||||
|
@ -826,6 +832,11 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
|
|||
*user_fence_rep,
|
||||
struct vmw_fence_obj *fence,
|
||||
uint32_t fence_handle);
|
||||
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
|
||||
struct ttm_buffer_object *bo,
|
||||
bool interruptible,
|
||||
bool validate_as_mob);
|
||||
|
||||
|
||||
/**
|
||||
* IRQs and wating - vmwgfx_irq.c
|
||||
|
@ -833,8 +844,8 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
|
|||
|
||||
extern irqreturn_t vmw_irq_handler(int irq, void *arg);
|
||||
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
|
||||
uint32_t seqno, bool interruptible,
|
||||
unsigned long timeout);
|
||||
uint32_t seqno, bool interruptible,
|
||||
unsigned long timeout);
|
||||
extern void vmw_irq_preinstall(struct drm_device *dev);
|
||||
extern int vmw_irq_postinstall(struct drm_device *dev);
|
||||
extern void vmw_irq_uninstall(struct drm_device *dev);
|
||||
|
@ -852,6 +863,10 @@ extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
|
|||
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
|
||||
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
|
||||
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
|
||||
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
|
||||
int *waiter_count);
|
||||
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
|
||||
u32 flag, int *waiter_count);
|
||||
|
||||
/**
|
||||
* Rudimentary fence-like objects currently used only for throttling -
|
||||
|
@ -861,9 +876,9 @@ extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
|
|||
extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
|
||||
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
|
||||
extern int vmw_marker_push(struct vmw_marker_queue *queue,
|
||||
uint32_t seqno);
|
||||
uint32_t seqno);
|
||||
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
|
||||
uint32_t signaled_seqno);
|
||||
uint32_t signaled_seqno);
|
||||
extern int vmw_wait_lag(struct vmw_private *dev_priv,
|
||||
struct vmw_marker_queue *queue, uint32_t us);
|
||||
|
||||
|
@ -908,12 +923,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
|
|||
uint32_t sid, int32_t destX, int32_t destY,
|
||||
struct drm_vmw_rect *clips,
|
||||
uint32_t num_clips);
|
||||
int vmw_kms_readback(struct vmw_private *dev_priv,
|
||||
struct drm_file *file_priv,
|
||||
struct vmw_framebuffer *vfb,
|
||||
struct drm_vmw_fence_rep __user *user_fence_rep,
|
||||
struct drm_vmw_rect *clips,
|
||||
uint32_t num_clips);
|
||||
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
|
@ -927,6 +936,10 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
|
|||
int vmw_dumb_destroy(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
uint32_t handle);
|
||||
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
|
||||
extern void vmw_resource_unpin(struct vmw_resource *res);
|
||||
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
|
||||
|
||||
/**
|
||||
* Overlay control - vmwgfx_overlay.c
|
||||
*/
|
||||
|
@ -982,27 +995,33 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
|
|||
|
||||
extern const struct vmw_user_resource_conv *user_context_converter;
|
||||
|
||||
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
|
||||
|
||||
extern int vmw_context_check(struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
int id,
|
||||
struct vmw_resource **p_res);
|
||||
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
|
||||
const struct vmw_ctx_bindinfo *ci);
|
||||
extern void
|
||||
vmw_context_binding_state_transfer(struct vmw_resource *res,
|
||||
struct vmw_ctx_binding_state *cbs);
|
||||
extern void vmw_context_binding_res_list_kill(struct list_head *head);
|
||||
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
|
||||
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
|
||||
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
|
||||
extern struct vmw_cmdbuf_res_manager *
|
||||
vmw_context_res_man(struct vmw_resource *ctx);
|
||||
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
|
||||
SVGACOTableType cotable_type);
|
||||
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
|
||||
struct vmw_ctx_binding_state;
|
||||
extern struct vmw_ctx_binding_state *
|
||||
vmw_context_binding_state(struct vmw_resource *ctx);
|
||||
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
|
||||
bool readback);
|
||||
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
|
||||
struct vmw_dma_buffer *mob);
|
||||
extern struct vmw_dma_buffer *
|
||||
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
|
||||
|
||||
|
||||
/*
|
||||
* Surface management - vmwgfx_surface.c
|
||||
*/
|
||||
|
@ -1025,6 +1044,16 @@ extern int vmw_surface_check(struct vmw_private *dev_priv,
|
|||
uint32_t handle, int *id);
|
||||
extern int vmw_surface_validate(struct vmw_private *dev_priv,
|
||||
struct vmw_surface *srf);
|
||||
int vmw_surface_gb_priv_define(struct drm_device *dev,
|
||||
uint32_t user_accounting_size,
|
||||
uint32_t svga3d_flags,
|
||||
SVGA3dSurfaceFormat format,
|
||||
bool for_scanout,
|
||||
uint32_t num_mip_levels,
|
||||
uint32_t multisample_count,
|
||||
uint32_t array_size,
|
||||
struct drm_vmw_size size,
|
||||
struct vmw_surface **srf_out);
|
||||
|
||||
/*
|
||||
* Shader management - vmwgfx_shader.c
|
||||
|
@ -1042,12 +1071,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
|
|||
SVGA3dShaderType shader_type,
|
||||
size_t size,
|
||||
struct list_head *list);
|
||||
extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
|
||||
u32 user_key, SVGA3dShaderType shader_type,
|
||||
struct list_head *list);
|
||||
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
|
||||
u32 user_key, SVGA3dShaderType shader_type,
|
||||
struct list_head *list);
|
||||
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
|
||||
struct vmw_resource *ctx,
|
||||
u32 user_key,
|
||||
SVGA3dShaderType shader_type,
|
||||
struct list_head *list);
|
||||
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
|
||||
struct list_head *list,
|
||||
bool readback);
|
||||
|
||||
extern struct vmw_resource *
|
||||
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
|
||||
u32 user_key, SVGA3dShaderType shader_type);
|
||||
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
|
||||
u32 user_key, SVGA3dShaderType shader_type);
|
||||
|
||||
/*
|
||||
* Command buffer managed resources - vmwgfx_cmdbuf_res.c
|
||||
|
@ -1071,7 +1109,48 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
|
|||
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
|
||||
enum vmw_cmdbuf_res_type res_type,
|
||||
u32 user_key,
|
||||
struct list_head *list);
|
||||
struct list_head *list,
|
||||
struct vmw_resource **res);
|
||||
|
||||
/*
|
||||
* COTable management - vmwgfx_cotable.c
|
||||
*/
|
||||
extern const SVGACOTableType vmw_cotable_scrub_order[];
|
||||
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
|
||||
struct vmw_resource *ctx,
|
||||
u32 type);
|
||||
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
|
||||
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
|
||||
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
|
||||
struct list_head *head);
|
||||
|
||||
/*
|
||||
* Command buffer managerment vmwgfx_cmdbuf.c
|
||||
*/
|
||||
struct vmw_cmdbuf_man;
|
||||
struct vmw_cmdbuf_header;
|
||||
|
||||
extern struct vmw_cmdbuf_man *
|
||||
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
|
||||
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
|
||||
size_t size, size_t default_size);
|
||||
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
|
||||
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
|
||||
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
|
||||
unsigned long timeout);
|
||||
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
|
||||
int ctx_id, bool interruptible,
|
||||
struct vmw_cmdbuf_header *header);
|
||||
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
|
||||
struct vmw_cmdbuf_header *header,
|
||||
bool flush);
|
||||
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
|
||||
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
|
||||
size_t size, bool interruptible,
|
||||
struct vmw_cmdbuf_header **p_header);
|
||||
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
|
||||
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
|
||||
bool interruptible);
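
The command buffer manager declared here follows a create / set-pool / reserve / commit / idle / destroy life cycle. A minimal sketch of that round trip, assuming a valid dev_priv and that vmw_cmdbuf_man_create() reports failure through an ERR_PTR-style return; the pool size, payload and error paths below are illustrative, not driver code:

/*
 * Illustrative round trip through the command buffer manager API.
 * The 4 KiB pool size and the zeroed 64-byte payload are made up.
 */
static int example_cmdbuf_roundtrip(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	void *cmd;
	int ret;

	man = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(man))
		return PTR_ERR(man);

	ret = vmw_cmdbuf_set_pool_size(man, 4096, 4096);
	if (ret)
		goto out_destroy;

	/* Reserve space, fill in a command, then commit it. */
	cmd = vmw_cmdbuf_reserve(man, 64, SVGA3D_INVALID_ID, true, NULL);
	if (IS_ERR_OR_NULL(cmd)) {
		ret = cmd ? PTR_ERR(cmd) : -ENOMEM;
		goto out_pool;
	}
	memset(cmd, 0, 64);
	vmw_cmdbuf_commit(man, 64, NULL, true);

	/* Wait for the device to consume everything submitted so far. */
	ret = vmw_cmdbuf_idle(man, true, 10 * HZ);
out_pool:
	vmw_cmdbuf_remove_pool(man);
out_destroy:
	vmw_cmdbuf_man_destroy(man);
	return ret;
}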


/**

@@ -1116,4 +1195,14 @@ static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
atomic_dec(&dev_priv->num_fifo_resources);
}
#endif
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
/**************************************************************************
*
* Copyright © 2007 David Airlie
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a

@@ -30,6 +30,7 @@

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include <drm/ttm/ttm_placement.h>


@@ -40,21 +41,22 @@ struct vmw_fb_par {

void *vmalloc;

struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo;
struct ttm_bo_kmap_obj map;
void *bo_ptr;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
u32 fb_x;
u32 fb_y;
bool bo_iowrite;

u32 pseudo_palette[17];

unsigned depth;
unsigned bpp;

unsigned max_width;
unsigned max_height;

void *bo_ptr;
unsigned bo_size;
bool bo_iowrite;

struct {
spinlock_t lock;
bool active;

@@ -63,6 +65,11 @@ struct vmw_fb_par {
unsigned x2;
unsigned y2;
} dirty;

struct drm_crtc *crtc;
struct drm_connector *con;

bool local_mode;
};

static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,

@@ -77,7 +84,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
}

switch (par->depth) {
switch (par->set_fb->depth) {
case 24:
case 32:
pal[regno] = ((red & 0xff00) << 8) |

@@ -85,7 +92,8 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
((blue & 0xff00) >> 8);
break;
default:
DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
par->set_fb->bits_per_pixel);
return 1;
}


@@ -134,12 +142,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}

if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
(var->xoffset != 0 || var->yoffset != 0)) {
DRM_ERROR("Can not handle panning without display topology\n");
return -EINVAL;
}

if ((var->xoffset + var->xres) > par->max_width ||
(var->yoffset + var->yres) > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");

@@ -156,46 +158,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return 0;
}

static int vmw_fb_set_par(struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
int ret;

info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;

ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
info->fix.line_length,
par->bpp, par->depth);
if (ret)
return ret;

if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
/* TODO check if pitch and offset changes */
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}

/* This is really helpful since if this fails the user
* can probably not see anything on the screen.
*/
WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

return 0;
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
return 0;

@@ -209,54 +171,77 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
int stride = (info->fix.line_length / 4);
int *src = (int *)info->screen_base;
__le32 __iomem *vram_mem = par->bo_ptr;
unsigned long flags;
unsigned x, y, w, h;
int i, k;
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
unsigned long irq_flags;
s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
u32 cpp, max_x, max_y;
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;

if (vmw_priv->suspended)
return;

spin_lock_irqsave(&par->dirty.lock, flags);
mutex_lock(&par->bo_mutex);
cur_fb = par->set_fb;
if (!cur_fb)
goto out_unlock;

spin_lock_irqsave(&par->dirty.lock, irq_flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, flags);
return;
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
goto out_unlock;
}
x = par->dirty.x1;
y = par->dirty.y1;
w = min(par->dirty.x2, info->var.xres) - x;
h = min(par->dirty.y2, info->var.yres) - y;

/*
* Handle panning when copying from vmalloc to framebuffer.
* Clip dirty area to framebuffer.
*/
cpp = (cur_fb->bits_per_pixel + 7) / 8;
max_x = par->fb_x + cur_fb->width;
max_y = par->fb_y + cur_fb->height;

dst_x1 = par->dirty.x1 - par->fb_x;
dst_y1 = par->dirty.y1 - par->fb_y;
dst_x1 = max_t(s32, dst_x1, 0);
dst_y1 = max_t(s32, dst_y1, 0);

dst_x2 = par->dirty.x2 - par->fb_x;
dst_y2 = par->dirty.y2 - par->fb_y;
dst_x2 = min_t(s32, dst_x2, max_x);
dst_y2 = min_t(s32, dst_y2, max_y);
w = dst_x2 - dst_x1;
h = dst_y2 - dst_y1;
w = max_t(s32, 0, w);
h = max_t(s32, 0, h);

par->dirty.x1 = par->dirty.x2 = 0;
par->dirty.y1 = par->dirty.y2 = 0;
spin_unlock_irqrestore(&par->dirty.lock, flags);
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
iowrite32(src[k], vram_mem + k);
if (w && h) {
dst_ptr = (u8 *)par->bo_ptr +
(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
src_ptr = (u8 *)par->vmalloc +
((dst_y1 + par->fb_y) * info->fix.line_length +
(dst_x1 + par->fb_x) * cpp);

while (h-- > 0) {
memcpy(dst_ptr, src_ptr, w*cpp);
dst_ptr += par->set_fb->pitches[0];
src_ptr += info->fix.line_length;
}

clip.x1 = dst_x1;
clip.x2 = dst_x2;
clip.y1 = dst_y1;
clip.y2 = dst_y2;

WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
vmw_fifo_flush(vmw_priv, false);
}

#if 0
DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
return;
}

cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd->body.x = cpu_to_le32(x);
cmd->body.y = cpu_to_le32(y);
cmd->body.width = cpu_to_le32(w);
cmd->body.height = cpu_to_le32(h);
vmw_fifo_commit(vmw_priv, sizeof(*cmd));
out_unlock:
mutex_unlock(&par->bo_mutex);
}
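
To make the clipping arithmetic in the new vmw_fb_dirty_flush() concrete, here is a small stand-alone model of it. The numbers are invented, and a symmetric clamp stands in for the max_t/min_t pair above; with a panning offset of (100, 0), a 640x480 framebuffer and a dirty rect (90, 10)-(200, 50) it yields the 100x40 region that actually gets copied:

/* Stand-alone model of the dirty-rect clipping; values are illustrative. */
#include <stdio.h>

static int clamp_s32(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int fb_x = 100, fb_y = 0, fb_w = 640, fb_h = 480;
	int x1 = 90, y1 = 10, x2 = 200, y2 = 50;	/* dirty rect */

	int dst_x1 = clamp_s32(x1 - fb_x, 0, fb_w);
	int dst_y1 = clamp_s32(y1 - fb_y, 0, fb_h);
	int dst_x2 = clamp_s32(x2 - fb_x, 0, fb_w);
	int dst_y2 = clamp_s32(y2 - fb_y, 0, fb_h);

	/* Prints w=100 h=40: the part of the dirty rect inside the fb. */
	printf("w=%d h=%d\n", dst_x2 - dst_x1, dst_y2 - dst_y1);
	return 0;
}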

static void vmw_fb_dirty_mark(struct vmw_fb_par *par,

@@ -291,6 +276,28 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
spin_unlock_irqrestore(&par->dirty.lock, flags);
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct vmw_fb_par *par = info->par;

if ((var->xoffset + var->xres) > var->xres_virtual ||
(var->yoffset + var->yres) > var->yres_virtual) {
DRM_ERROR("Requested panning can not fit in framebuffer\n");
return -EINVAL;
}

mutex_lock(&par->bo_mutex);
par->fb_x = var->xoffset;
par->fb_y = var->yoffset;
if (par->set_fb)
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
par->set_fb->height);
mutex_unlock(&par->bo_mutex);

return 0;
}

static void vmw_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{

@@ -324,7 +331,7 @@ static void vmw_deferred_io(struct fb_info *info,
vmw_fb_dirty_flush(par);
};

struct fb_deferred_io vmw_defio = {
static struct fb_deferred_io vmw_defio = {
.delay = VMW_DIRTY_DELAY,
.deferred_io = vmw_deferred_io,
};

@@ -358,6 +365,260 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
* Bring up code
*/

static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
size_t size, struct vmw_dma_buffer **out)
{
struct vmw_dma_buffer *vmw_bo;
int ret;

(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
if (!vmw_bo) {
ret = -ENOMEM;
goto err_unlock;
}

ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
&vmw_sys_placement,
false,
&vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */

*out = vmw_bo;
ttm_write_unlock(&vmw_priv->reservation_sem);

return 0;

err_unlock:
ttm_write_unlock(&vmw_priv->reservation_sem);
return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
int *depth)
{
switch (var->bits_per_pixel) {
case 32:
*depth = (var->transp.length > 0) ? 32 : 24;
break;
default:
DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
return -EINVAL;
}

return 0;
}
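
vmw_fb_compute_depth() distinguishes a 24-bit XRGB layout from a 32-bit ARGB one purely by whether the mode reports alpha bits. A stripped-down model of that rule; the struct below is a stand-in for fb_var_screeninfo, used here only for illustration:

/* Illustrative only: how transp.length maps a 32 bpp mode to a depth. */
struct fake_var {
	unsigned bits_per_pixel;
	struct { unsigned length; } transp;
};

static int example_depth(const struct fake_var *var)
{
	/* 32 bpp with no alpha bits is treated as depth-24 (XRGB8888). */
	if (var->bits_per_pixel == 32)
		return var->transp.length > 0 ? 32 : 24;
	return -1;	/* unsupported, mirrors the -EINVAL path above */
}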

static int vmw_fb_kms_detach(struct vmw_fb_par *par,
bool detach_bo,
bool unref_bo)
{
struct drm_framebuffer *cur_fb = par->set_fb;
int ret;

/* Detach the KMS framebuffer from crtcs */
if (par->set_mode) {
struct drm_mode_set set;

set.crtc = par->crtc;
set.x = 0;
set.y = 0;
set.mode = NULL;
set.fb = NULL;
set.num_connectors = 1;
set.connectors = &par->con;
ret = drm_mode_set_config_internal(&set);
if (ret) {
DRM_ERROR("Could not unset a mode.\n");
return ret;
}
drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
par->set_mode = NULL;
}

if (cur_fb) {
drm_framebuffer_unreference(cur_fb);
par->set_fb = NULL;
}

if (par->vmw_bo && detach_bo) {
if (par->bo_ptr) {
ttm_bo_kunmap(&par->map);
par->bo_ptr = NULL;
}
if (unref_bo)
vmw_dmabuf_unreference(&par->vmw_bo);
else
vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
}

return 0;
}

static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
struct drm_mode_fb_cmd mode_cmd;
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
struct vmw_framebuffer *vfb;
int ret = 0;
size_t new_bo_size;

ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
if (ret)
return ret;

mode_cmd.width = var->xres;
mode_cmd.height = var->yres;
mode_cmd.bpp = var->bits_per_pixel;
mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;

cur_fb = par->set_fb;
if (cur_fb && cur_fb->width == mode_cmd.width &&
cur_fb->height == mode_cmd.height &&
cur_fb->bits_per_pixel == mode_cmd.bpp &&
cur_fb->depth == mode_cmd.depth &&
cur_fb->pitches[0] == mode_cmd.pitch)
return 0;

/* Need new buffer object ? */
new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
ret = vmw_fb_kms_detach(par,
par->bo_size < new_bo_size ||
par->bo_size > 2*new_bo_size,
true);
if (ret)
return ret;

if (!par->vmw_bo) {
ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
&par->vmw_bo);
if (ret) {
DRM_ERROR("Failed creating a buffer object for "
"fbdev.\n");
return ret;
}
par->bo_size = new_bo_size;
}

vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
true, &mode_cmd);
if (IS_ERR(vfb))
return PTR_ERR(vfb);

par->set_fb = &vfb->base;

if (!par->bo_ptr) {
/*
* Pin before mapping. Since we don't know in what placement
* to pin, call into KMS to do it for us.
*/
ret = vfb->pin(vfb);
if (ret) {
DRM_ERROR("Could not pin the fbdev framebuffer.\n");
return ret;
}

ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
par->vmw_bo->base.num_pages, &par->map);
if (ret) {
vfb->unpin(vfb);
DRM_ERROR("Could not map the fbdev framebuffer.\n");
return ret;
}

par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
}

return 0;
}
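
Note the reuse window implied by the detach call above: the old buffer object survives only when it is at least as large as the new mode needs and no more than twice that size. A stand-alone sketch of the same check, with made-up sizes in the comment:

/*
 * Illustrative reuse check mirroring the bo_size < new || bo_size > 2*new
 * heuristic above. A 1280x1024x4 buffer (5 MiB) would be reused for a
 * 1024x768x4 mode (3 MiB), but dropped for a 640x480x4 mode (1.2 MiB).
 */
#include <stdbool.h>
#include <stddef.h>

static bool example_reuse_bo(size_t bo_size, size_t new_bo_size)
{
	return bo_size >= new_bo_size && bo_size <= 2 * new_bo_size;
}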

static int vmw_fb_set_par(struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
struct drm_mode_set set;
struct fb_var_screeninfo *var = &info->var;
struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
DRM_MODE_TYPE_DRIVER,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
struct drm_display_mode *old_mode;
struct drm_display_mode *mode;
int ret;

old_mode = par->set_mode;
mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
if (!mode) {
DRM_ERROR("Could not create new fb mode.\n");
return -ENOMEM;
}

mode->hdisplay = var->xres;
mode->vdisplay = var->yres;
vmw_guess_mode_timing(mode);

if (old_mode && drm_mode_equal(old_mode, mode)) {
drm_mode_destroy(vmw_priv->dev, mode);
mode = old_mode;
old_mode = NULL;
} else if (!vmw_kms_validate_mode_vram(vmw_priv,
mode->hdisplay *
(var->bits_per_pixel + 7) / 8,
mode->vdisplay)) {
drm_mode_destroy(vmw_priv->dev, mode);
return -EINVAL;
}

mutex_lock(&par->bo_mutex);
drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_fb_kms_framebuffer(info);
if (ret)
goto out_unlock;

par->fb_x = var->xoffset;
par->fb_y = var->yoffset;

set.crtc = par->crtc;
set.x = 0;
set.y = 0;
set.mode = mode;
set.fb = par->set_fb;
set.num_connectors = 1;
set.connectors = &par->con;

ret = drm_mode_set_config_internal(&set);
if (ret)
goto out_unlock;

vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);

/* If there already was stuff dirty we wont
* schedule a new work, so lets do it now */

#if (defined(VMWGFX_STANDALONE) && defined(VMWGFX_FB_DEFERRED))
schedule_delayed_work(&par->def_par.deferred_work, 0);
#else
schedule_delayed_work(&info->deferred_work, 0);
#endif

out_unlock:
if (old_mode)
drm_mode_destroy(vmw_priv->dev, old_mode);
par->set_mode = mode;

drm_modeset_unlock_all(vmw_priv->dev);
mutex_unlock(&par->bo_mutex);

return ret;
}


static struct fb_ops vmw_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = vmw_fb_check_var,

@@ -370,55 +631,14 @@ static struct fb_ops vmw_fb_ops = {
.fb_blank = vmw_fb_blank,
};

static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
size_t size, struct vmw_dma_buffer **out)
{
struct vmw_dma_buffer *vmw_bo;
struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
struct ttm_placement ne_placement;
int ret;

ne_placement.num_placement = 1;
ne_placement.placement = &ne_place;
ne_placement.num_busy_placement = 1;
ne_placement.busy_placement = &ne_place;

ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
if (!vmw_bo) {
ret = -ENOMEM;
goto err_unlock;
}

ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
&ne_placement,
false,
&vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */

*out = vmw_bo;

ttm_write_unlock(&vmw_priv->fbdev_master.lock);

return 0;

err_unlock:
ttm_write_unlock(&vmw_priv->fbdev_master.lock);
return ret;
}

int vmw_fb_init(struct vmw_private *vmw_priv)
{
struct device *device = &vmw_priv->dev->pdev->dev;
struct vmw_fb_par *par;
struct fb_info *info;
unsigned initial_width, initial_height;
unsigned fb_width, fb_height;
unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
struct drm_display_mode *init_mode;
int ret;

fb_bpp = 32;

@@ -428,9 +648,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

initial_width = min(vmw_priv->initial_width, fb_width);
initial_height = min(vmw_priv->initial_height, fb_height);

fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

@@ -444,35 +661,34 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
*/
vmw_priv->fb_info = info;
par = info->par;
memset(par, 0, sizeof(*par));
par->vmw_priv = vmw_priv;
par->depth = fb_depth;
par->bpp = fb_bpp;
par->vmalloc = NULL;
par->max_width = fb_width;
par->max_height = fb_height;

drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
par->max_height, &par->con,
&par->crtc, &init_mode);
if (ret) {
drm_modeset_unlock_all(vmw_priv->dev);
goto err_kms;
}

info->var.xres = init_mode->hdisplay;
info->var.yres = init_mode->vdisplay;
drm_modeset_unlock_all(vmw_priv->dev);

/*
* Create buffers and alloc memory
*/
par->vmalloc = vmalloc(fb_size);
par->vmalloc = vzalloc(fb_size);
if (unlikely(par->vmalloc == NULL)) {
ret = -ENOMEM;
goto err_free;
}

ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
if (unlikely(ret != 0))
goto err_free;

ret = ttm_bo_kmap(&par->vmw_bo->base,
0,
par->vmw_bo->base.num_pages,
&par->map);
if (unlikely(ret != 0))
goto err_unref;
par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
par->bo_size = fb_size;

/*
* Fixed and var
*/

@@ -490,7 +706,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->fix.smem_len = fb_size;

info->pseudo_palette = par->pseudo_palette;
info->screen_base = par->vmalloc;
info->screen_base = (char __iomem *)par->vmalloc;
info->screen_size = fb_size;

info->flags = FBINFO_DEFAULT;

@@ -508,18 +724,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)

info->var.xres_virtual = fb_width;
info->var.yres_virtual = fb_height;
info->var.bits_per_pixel = par->bpp;
info->var.bits_per_pixel = fb_bpp;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;

info->var.xres = initial_width;
info->var.yres = initial_height;

/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;

@@ -535,6 +747,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
par->dirty.y1 = par->dirty.y2 = 0;
par->dirty.active = true;
spin_lock_init(&par->dirty.lock);
mutex_init(&par->bo_mutex);
info->fbdefio = &vmw_defio;
fb_deferred_io_init(info);


@@ -542,16 +755,16 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
if (unlikely(ret != 0))
goto err_defio;

vmw_fb_set_par(info);

return 0;

err_defio:
fb_deferred_io_cleanup(info);
err_aper:
ttm_bo_kunmap(&par->map);
err_unref:
ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
vfree(par->vmalloc);
err_kms:
framebuffer_release(info);
vmw_priv->fb_info = NULL;


@@ -562,22 +775,18 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
struct ttm_buffer_object *bo;

if (!vmw_priv->fb_info)
return 0;

info = vmw_priv->fb_info;
par = info->par;
bo = &par->vmw_bo->base;
par->vmw_bo = NULL;

/* ??? order */
fb_deferred_io_cleanup(info);
unregister_framebuffer(info);

ttm_bo_kunmap(&par->map);
ttm_bo_unref(&bo);
(void) vmw_fb_kms_detach(par, true, true);

vfree(par->vmalloc);
framebuffer_release(info);

@@ -603,10 +812,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv)

flush_delayed_work(&info->deferred_work);

par->bo_ptr = NULL;
ttm_bo_kunmap(&par->map);

vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
mutex_lock(&par->bo_mutex);
(void) vmw_fb_kms_detach(par, true, false);
mutex_unlock(&par->bo_mutex);

return 0;
}

@@ -616,8 +824,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
struct fb_info *info;
struct vmw_fb_par *par;
unsigned long flags;
bool dummy;
int ret;

if (!vmw_priv->fb_info)
return -EINVAL;

@@ -625,38 +831,10 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
info = vmw_priv->fb_info;
par = info->par;

/* we are already active */
if (par->bo_ptr != NULL)
return 0;

/* Make sure that all overlays are stopped when we take over */
vmw_overlay_stop_all(vmw_priv);

ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
if (unlikely(ret != 0)) {
DRM_ERROR("could not move buffer to start of VRAM\n");
goto err_no_buffer;
}

ret = ttm_bo_kmap(&par->vmw_bo->base,
0,
par->vmw_bo->base.num_pages,
&par->map);
BUG_ON(ret != 0);
par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

vmw_fb_set_par(info);
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
vmw_fb_set_par(info);

vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

/* If there already was stuff dirty we wont
* schedule a new work, so lets do it now */
schedule_delayed_work(&info->deferred_work, 0);


return 0;
}
@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a

@@ -142,7 +142,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;

__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;

@@ -386,7 +386,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
__le32 __iomem *fifo_mem;
u32 __iomem *fifo_mem;
struct vmw_fence_obj *fence;

if (likely(!fman->seqno_valid))

@@ -430,7 +430,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
__le32 __iomem *fifo_mem;
u32 __iomem *fifo_mem;

if (fence_is_signaled_locked(&fence->base))
return false;

@@ -453,7 +453,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;

seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:


@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a

@@ -29,9 +29,14 @@
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

struct vmw_temp_set_context {
SVGA3dCmdHeader header;
SVGA3dCmdDXTempSetContext body;
};

bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;


@@ -71,8 +76,8 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
return false;

/* Non-Screen Object path does not support surfaces */
if (!dev_priv->sou_priv)
/* Legacy Display Unit does not support surfaces */
if (dev_priv->active_display_unit == vmw_du_legacy)
return false;

return true;

@@ -80,7 +85,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)

bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;

if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))

@@ -95,11 +100,11 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)

int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t dummy;

fifo->dx = false;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))

@@ -112,10 +117,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);

/*
* Allow mapping the first page read-only to user-space.
*/

DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

@@ -123,7 +124,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
SVGA_REG_ENABLE_HIDE);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);

min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)

@@ -155,12 +159,13 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_marker_queue_init(&fifo->marker_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);

return 0;
}

void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
static DEFINE_SPINLOCK(ping_lock);
unsigned long irq_flags;


@@ -178,7 +183,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)

void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;

vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)

@@ -208,7 +213,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)

static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);

@@ -312,10 +317,11 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
* Returns:
* Pointer to the fifo, or null on error (possible hardware hang).
*/
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t next_cmd;

@@ -372,7 +378,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
if (reserveable)
iowrite32(bytes, fifo_mem +
SVGA_FIFO_RESERVED);
return fifo_mem + (next_cmd >> 2);
return (void __force *) (fifo_mem +
(next_cmd >> 2));
} else {
need_bounce = true;
}

@@ -391,11 +398,36 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
out_err:
fifo_state->reserved_size = 0;
mutex_unlock(&fifo_state->fifo_mutex);

return NULL;
}
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
int ctx_id)
{
void *ret;

if (dev_priv->cman)
ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
ctx_id, false, NULL);
else if (ctx_id == SVGA3D_INVALID_ID)
ret = vmw_local_fifo_reserve(dev_priv, bytes);
else {
WARN_ON("Command buffer has not been allocated.\n");
ret = NULL;
}
if (IS_ERR_OR_NULL(ret)) {
DRM_ERROR("Fifo reserve failure of %u bytes.\n",
(unsigned) bytes);
dump_stack();
return NULL;
}

return ret;
}
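
vmw_fifo_reserve_dx() is now the single entry point that hides whether commands are routed through the command buffer manager or the legacy FIFO ring. A rough sketch of how a caller pairs it with vmw_fifo_commit(); the two-word command layout below is a placeholder, not a real SVGA command:

/*
 * Illustrative reserve/commit pairing. Real callers reserve space for
 * a specific SVGA command; the header/body words here are made up.
 */
static int example_emit(struct vmw_private *dev_priv, u32 value)
{
	struct {
		uint32_t header;
		uint32_t body;
	} *cmd;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header = 0;	/* would be a real SVGA command id */
	cmd->body = value;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}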

static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
u32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{

@@ -417,7 +449,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
}

static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
u32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{

@@ -436,15 +468,19 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
}
}

void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

if (fifo_state->dx)
bytes += sizeof(struct vmw_temp_set_context);

fifo_state->dx = false;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);


@@ -482,13 +518,53 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
mutex_unlock(&fifo_state->fifo_mutex);
}

void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
else
vmw_local_fifo_commit(dev_priv, bytes);
}


/**
* vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
*
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
*/
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
else
vmw_local_fifo_commit(dev_priv, bytes);
}

/**
* vmw_fifo_flush - Flush any buffered commands and make sure command processing
* starts.
*
* @dev_priv: Pointer to device private structure.
* @interruptible: Whether to wait interruptible if function needs to sleep.
*/
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
might_sleep();

if (dev_priv->cman)
return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
else
return 0;
}
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
void *fm;
u32 *fm;
int ret = 0;
uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {

@@ -514,12 +590,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
return 0;
}

*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
cmd_fence = (struct svga_fifo_cmd_fence *)
((unsigned long)fm + sizeof(__le32));

iowrite32(*seqno, &cmd_fence->fence);
vmw_fifo_commit(dev_priv, bytes);
*fm++ = SVGA_CMD_FENCE;
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_fifo_commit_flush(dev_priv, bytes);
(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
vmw_update_seqno(dev_priv, fifo_state);


@@ -545,7 +619,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/

struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;

@@ -594,7 +668,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/

struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;

@@ -647,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,

return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}

void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
}
@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a

@@ -28,6 +28,7 @@
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
#include "device_include/svga3d_caps.h"

struct svga_3d_compat_cap {
SVGA3dCapsRecordHeader header;

@@ -63,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;

if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {

@@ -105,6 +106,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_MAX_MOB_SIZE:
param->value = dev_priv->max_mob_size;
break;
case DRM_VMW_PARAM_SCREEN_TARGET:
param->value =
(dev_priv->active_display_unit == vmw_du_screen_target);
break;
case DRM_VMW_PARAM_DX:
param->value = dev_priv->has_dx;
break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);

@@ -154,7 +162,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_get_3d_cap_arg *) data;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t size;
__le32 __iomem *fifo_mem;
u32 __iomem *fifo_mem;
void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
void *bounce;
int ret;

@@ -235,7 +243,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
int ret;

num_clips = arg->num_clips;
clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

if (unlikely(num_clips == 0))
return 0;

@@ -318,7 +326,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
int ret;

num_clips = arg->num_clips;
clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

if (unlikely(num_clips == 0))
return 0;


@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a

@@ -56,6 +56,9 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);

if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
SVGA_IRQFLAG_ERROR))
vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

return IRQ_HANDLED;
}

@@ -69,7 +72,7 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

if (dev_priv->last_read_seqno != seqno) {

@@ -131,8 +134,16 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
* Block command submission while waiting for idle.
*/

if (fifo_idle)
if (fifo_idle) {
down_read(&fifo_state->rwsem);
if (dev_priv->cman) {
ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
10*HZ);
if (ret)
goto out_err;
}
}

signal_seq = atomic_read(&dev_priv->marker_seq);
ret = 0;


@@ -167,10 +178,11 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
finish_wait(&dev_priv->fence_queue, &__wait);
if (ret == 0 && fifo_idle) {
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 __iomem *fifo_mem = dev_priv->mmio_virt;
iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
}
wake_up_all(&dev_priv->fence_queue);
out_err:
if (fifo_idle)
up_read(&fifo_state->rwsem);


@@ -315,3 +327,30 @@ void vmw_irq_uninstall(struct drm_device *dev)
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

void vmw_generic_waiter_add(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
unsigned long irq_flags;

spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
if ((*waiter_count)++ == 0) {
outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}

void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
unsigned long irq_flags;

spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
if (--(*waiter_count) == 0) {
dev_priv->irq_mask &= ~flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
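
The generic waiter helpers keep a per-flag waiter count so that the corresponding IRQ is unmasked on the 0 -> 1 transition and masked again on 1 -> 0. A stand-alone model of that counting discipline (no locking or device register here, purely illustrative):

/* Userspace-style model of the waiter-count discipline above. */
#include <stdio.h>

static unsigned irq_mask;

static void waiter_add(unsigned flag, int *count)
{
	if ((*count)++ == 0)
		irq_mask |= flag;	/* first waiter unmasks the IRQ */
}

static void waiter_remove(unsigned flag, int *count)
{
	if (--(*count) == 0)
		irq_mask &= ~flag;	/* last waiter masks it again */
}

int main(void)
{
	int fence_waiters = 0;

	waiter_add(0x1, &fence_waiters);
	waiter_add(0x1, &fence_waiters);	/* mask unchanged */
	waiter_remove(0x1, &fence_waiters);	/* still one waiter */
	waiter_remove(0x1, &fence_waiters);	/* mask bit cleared */
	printf("mask=%#x waiters=%d\n", irq_mask, fence_waiters);
	return 0;
}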

File diff suppressed because it is too large
@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a

@@ -32,11 +32,60 @@
#include <drm/drm_crtc_helper.h>
#include "vmwgfx_drv.h"

/**
* struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
* function.
*
* @fifo_commit: Callback that is called once for each display unit after
* all clip rects. This function must commit the fifo space reserved by the
* helper. Set up by the caller.
* @clip: Callback that is called for each cliprect on each display unit.
* Set up by the caller.
* @fifo_reserve_size: Fifo size that the helper should try to allocate for
* each display unit. Set up by the caller.
* @dev_priv: Pointer to the device private. Set up by the helper.
* @unit: The current display unit. Set up by the helper before a call to @clip.
* @cmd: The allocated fifo space. Set up by the helper before the first @clip
* call.
* @num_hits: Number of clip rect commands for this display unit.
* Cleared by the helper before the first @clip call. Updated by the @clip
* callback.
* @fb_x: Clip rect left side in framebuffer coordinates.
* @fb_y: Clip rect top side in framebuffer coordinates.
* @unit_x1: Clip rect left side in crtc coordinates.
* @unit_y1: Clip rect top side in crtc coordinates.
* @unit_x2: Clip rect right side in crtc coordinates.
* @unit_y2: Clip rect bottom side in crtc coordinates.
*
* The clip rect coordinates are updated by the helper for each @clip call.
* Note that this struct may be derived from if more info needs to be passed
* between helper caller and helper callbacks.
*/
struct vmw_kms_dirty {
void (*fifo_commit)(struct vmw_kms_dirty *);
void (*clip)(struct vmw_kms_dirty *);
size_t fifo_reserve_size;
struct vmw_private *dev_priv;
struct vmw_display_unit *unit;
void *cmd;
u32 num_hits;
s32 fb_x;
s32 fb_y;
s32 unit_x1;
s32 unit_y1;
s32 unit_x2;
s32 unit_y2;
};
|
||||
|
||||
#define VMWGFX_NUM_DISPLAY_UNITS 8
|
||||
|
||||
|
||||
#define vmw_framebuffer_to_vfb(x) \
|
||||
container_of(x, struct vmw_framebuffer, base)
|
||||
#define vmw_framebuffer_to_vfbs(x) \
|
||||
container_of(x, struct vmw_framebuffer_surface, base.base)
|
||||
#define vmw_framebuffer_to_vfbd(x) \
|
||||
container_of(x, struct vmw_framebuffer_dmabuf, base.base)
|
||||
|
||||
/**
|
||||
* Base class for framebuffers
|
||||
|
@ -53,9 +102,27 @@ struct vmw_framebuffer {
|
|||
uint32_t user_handle;
|
||||
};
|
||||
|
||||
/*
|
||||
* Clip rectangle
|
||||
*/
|
||||
struct vmw_clip_rect {
|
||||
int x1, x2, y1, y2;
|
||||
};
|
||||
|
||||
struct vmw_framebuffer_surface {
|
||||
struct vmw_framebuffer base;
|
||||
struct vmw_surface *surface;
|
||||
struct vmw_dma_buffer *buffer;
|
||||
struct list_head head;
|
||||
bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */
|
||||
};
|
||||
|
||||
|
||||
struct vmw_framebuffer_dmabuf {
|
||||
struct vmw_framebuffer base;
|
||||
struct vmw_dma_buffer *buffer;
|
||||
};
|
||||
|
||||
#define vmw_crtc_to_du(x) \
|
||||
container_of(x, struct vmw_display_unit, crtc)
|
||||
|
||||
/*
|
||||
* Basic cursor manipulation
|
||||
|
@ -120,11 +187,7 @@ struct vmw_display_unit {
|
|||
/*
|
||||
* Shared display unit functions - vmwgfx_kms.c
|
||||
*/
|
||||
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
|
||||
int vmw_du_page_flip(struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
struct drm_pending_vblank_event *event,
|
||||
uint32_t page_flip_flags);
|
||||
void vmw_du_cleanup(struct vmw_display_unit *du);
|
||||
void vmw_du_crtc_save(struct drm_crtc *crtc);
|
||||
void vmw_du_crtc_restore(struct drm_crtc *crtc);
|
||||
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
|
||||
|
@ -143,25 +206,118 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
|
|||
int vmw_du_connector_set_property(struct drm_connector *connector,
|
||||
struct drm_property *property,
|
||||
uint64_t val);
|
||||
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
|
||||
struct vmw_framebuffer *framebuffer,
|
||||
const struct drm_clip_rect *clips,
|
||||
const struct drm_vmw_rect *vclips,
|
||||
s32 dest_x, s32 dest_y,
|
||||
int num_clips,
|
||||
int increment,
|
||||
struct vmw_kms_dirty *dirty);
|
||||
|
||||
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
bool interruptible,
|
||||
bool validate_as_mob);
|
||||
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
|
||||
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
|
||||
struct drm_file *file_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
struct vmw_fence_obj **out_fence,
|
||||
struct drm_vmw_fence_rep __user *
|
||||
user_fence_rep);
|
||||
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
|
||||
bool interruptible);
|
||||
void vmw_kms_helper_resource_revert(struct vmw_resource *res);
|
||||
void vmw_kms_helper_resource_finish(struct vmw_resource *res,
|
||||
struct vmw_fence_obj **out_fence);
|
||||
int vmw_kms_readback(struct vmw_private *dev_priv,
|
||||
struct drm_file *file_priv,
|
||||
struct vmw_framebuffer *vfb,
|
||||
struct drm_vmw_fence_rep __user *user_fence_rep,
|
||||
struct drm_vmw_rect *vclips,
|
||||
uint32_t num_clips);
|
||||
struct vmw_framebuffer *
|
||||
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *dmabuf,
|
||||
struct vmw_surface *surface,
|
||||
bool only_2d,
|
||||
const struct drm_mode_fb_cmd *mode_cmd);
|
||||
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
|
||||
unsigned unit,
|
||||
u32 max_width,
|
||||
u32 max_height,
|
||||
struct drm_connector **p_con,
|
||||
struct drm_crtc **p_crtc,
|
||||
struct drm_display_mode **p_mode);
|
||||
void vmw_guess_mode_timing(struct drm_display_mode *mode);
|
||||
|
||||
/*
|
||||
* Legacy display unit functions - vmwgfx_ldu.c
|
||||
*/
|
||||
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
|
||||
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
|
||||
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
|
||||
struct vmw_framebuffer *framebuffer,
|
||||
unsigned flags, unsigned color,
|
||||
struct drm_clip_rect *clips,
|
||||
unsigned num_clips, int increment);
|
||||
int vmw_kms_update_proxy(struct vmw_resource *res,
|
||||
const struct drm_clip_rect *clips,
|
||||
unsigned num_clips,
|
||||
int increment);
|
||||
|
||||
/*
|
||||
* Screen Objects display functions - vmwgfx_scrn.c
|
||||
*/
|
||||
int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
|
||||
struct drm_vmw_rect *rects);
|
||||
bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
|
||||
struct drm_crtc *crtc);
|
||||
void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
|
||||
struct drm_crtc *crtc);
|
||||
int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
|
||||
struct vmw_framebuffer *framebuffer,
|
||||
struct drm_clip_rect *clips,
|
||||
struct drm_vmw_rect *vclips,
|
||||
struct vmw_resource *srf,
|
||||
s32 dest_x,
|
||||
s32 dest_y,
|
||||
unsigned num_clips, int inc,
|
||||
struct vmw_fence_obj **out_fence);
|
||||
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
|
||||
struct vmw_framebuffer *framebuffer,
|
||||
struct drm_clip_rect *clips,
|
||||
unsigned num_clips, int increment,
|
||||
bool interruptible,
|
||||
struct vmw_fence_obj **out_fence);
|
||||
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
|
||||
struct drm_file *file_priv,
|
||||
struct vmw_framebuffer *vfb,
|
||||
struct drm_vmw_fence_rep __user *user_fence_rep,
|
||||
struct drm_vmw_rect *vclips,
|
||||
uint32_t num_clips);
|
||||
|
||||
/*
|
||||
* Screen Target Display Unit functions - vmwgfx_stdu.c
|
||||
*/
|
||||
int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
|
||||
struct vmw_framebuffer *framebuffer,
|
||||
struct drm_clip_rect *clips,
|
||||
struct drm_vmw_rect *vclips,
|
||||
struct vmw_resource *srf,
|
||||
s32 dest_x,
|
||||
s32 dest_y,
|
||||
unsigned num_clips, int inc,
|
||||
struct vmw_fence_obj **out_fence);
|
||||
int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
|
||||
struct drm_file *file_priv,
|
||||
struct vmw_framebuffer *vfb,
|
||||
struct drm_vmw_fence_rep __user *user_fence_rep,
|
||||
struct drm_clip_rect *clips,
|
||||
struct drm_vmw_rect *vclips,
|
||||
uint32_t num_clips,
|
||||
int increment,
|
||||
bool to_surface,
|
||||
bool interruptible);
|
||||
|
||||
|
||||
#endif
|
||||
|
|
|
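struct vmw_kms_dirty above implements a small closure pattern: the caller embeds the base struct in a derived one, fills in @clip and @fifo_commit, and vmw_kms_helper_dirty() drives the iteration over display units and clip rects. A self-contained sketch of that control flow, with invented types standing in for the kernel ones (this shows the shape of the mechanism, not the driver's actual code):

/*
 * Derived-closure sketch: the helper walks clip rects, calls @clip per
 * rect, and @commit once at the end, just like vmw_kms_helper_dirty().
 */
#include <stdio.h>
#include <stddef.h>

struct dirty_base {
	void (*clip)(struct dirty_base *);
	void (*commit)(struct dirty_base *);
	int unit_x1, unit_y1, unit_x2, unit_y2;
	unsigned num_hits;
};

struct bbox_dirty {		/* derived closure, cf. vmw_kms_sou_surface_dirty */
	struct dirty_base base;
	int left, top, right, bottom;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void bbox_clip(struct dirty_base *d)
{
	struct bbox_dirty *bd = container_of(d, struct bbox_dirty, base);

	/* Grow the bounding box to cover this clip rect. */
	if (d->unit_x1 < bd->left)   bd->left = d->unit_x1;
	if (d->unit_y1 < bd->top)    bd->top = d->unit_y1;
	if (d->unit_x2 > bd->right)  bd->right = d->unit_x2;
	if (d->unit_y2 > bd->bottom) bd->bottom = d->unit_y2;
	d->num_hits++;
}

static void bbox_commit(struct dirty_base *d)
{
	struct bbox_dirty *bd = container_of(d, struct bbox_dirty, base);

	printf("%u rects, bbox %d,%d -> %d,%d\n", d->num_hits,
	       bd->left, bd->top, bd->right, bd->bottom);
}

static void helper_dirty(struct dirty_base *d, const int (*rects)[4], int n)
{
	int i;

	d->num_hits = 0;
	for (i = 0; i < n; i++) {
		d->unit_x1 = rects[i][0]; d->unit_y1 = rects[i][1];
		d->unit_x2 = rects[i][2]; d->unit_y2 = rects[i][3];
		d->clip(d);
	}
	if (d->num_hits)
		d->commit(d);
}

int main(void)
{
	struct bbox_dirty bd = {
		.base = { .clip = bbox_clip, .commit = bbox_commit },
		.left = 1 << 30, .top = 1 << 30,
		.right = -(1 << 30), .bottom = -(1 << 30),
	};
	const int rects[][4] = { {0, 0, 64, 64}, {100, 20, 200, 80} };

	helper_dirty(&bd.base, rects, 2);
	return 0;
}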
@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -57,7 +57,7 @@ struct vmw_legacy_display_unit {
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
{
	list_del_init(&ldu->active);
	vmw_display_unit_cleanup(&ldu->base);
	vmw_du_cleanup(&ldu->base);
	kfree(ldu);
}

@@ -279,7 +279,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
		return -EINVAL;
	}

	vmw_fb_off(dev_priv);
	vmw_svga_enable(dev_priv);

	crtc->primary->fb = fb;
	encoder->crtc = crtc;

@@ -385,7 +385,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
	return 0;
}

int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

@@ -422,6 +422,10 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
	else
		vmw_ldu_init(dev_priv, 0);

	dev_priv->active_display_unit = vmw_du_legacy;

	DRM_INFO("Legacy Display Unit initialized\n");

	return 0;

err_vblank_cleanup:

@@ -432,7 +436,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
	return ret;
}

int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

@@ -447,3 +451,38 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)

	return 0;
}


int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				unsigned flags, unsigned color,
				struct drm_clip_rect *clips,
				unsigned num_clips, int increment)
{
	size_t fifo_size;
	int i;

	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	fifo_size = sizeof(*cmd) * num_clips;
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = SVGA_CMD_UPDATE;
		cmd[i].body.x = clips->x1;
		cmd[i].body.y = clips->y1;
		cmd[i].body.width = clips->x2 - clips->x1;
		cmd[i].body.height = clips->y2 - clips->y1;
	}

	vmw_fifo_commit(dev_priv, fifo_size);
	return 0;
}
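vmw_kms_ldu_do_dmabuf_dirty() above packs one SVGA_CMD_UPDATE record per clip rect into a single reserve/commit, stepping the clip pointer by @increment so a caller can submit every other rect. A compilable stand-alone sketch of just that build loop, with the FIFO replaced by a plain allocation (the constants and struct layout here are made up, not the real SVGA definitions):

/*
 * Build loop sketch: num_clips records are emitted, and the source clip
 * pointer advances by `increment` per record, mirroring the kernel loop.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct clip_rect { int x1, y1, x2, y2; };

struct update_cmd {
	uint32_t header;		/* stand-in for SVGA_CMD_UPDATE */
	uint32_t x, y, width, height;	/* stand-in for SVGAFifoCmdUpdate */
};

#define CMD_UPDATE 3u

int main(void)
{
	struct clip_rect clips[] = {
		{0, 0, 10, 10}, {5, 5, 20, 20}, {30, 0, 40, 8}, {0, 30, 16, 46}
	};
	int num_clips = 2, increment = 2;	/* caller wants every other rect */
	struct update_cmd *cmd = calloc(num_clips, sizeof(*cmd));
	struct clip_rect *c = clips;
	int i;

	if (!cmd)
		return 1;
	for (i = 0; i < num_clips; i++, c += increment) {
		cmd[i].header = CMD_UPDATE;
		cmd[i].x = c->x1;
		cmd[i].y = c->y1;
		cmd[i].width = c->x2 - c->x1;
		cmd[i].height = c->y2 - c->y1;
		printf("update %ux%u at %u,%u\n", cmd[i].width,
		       cmd[i].height, cmd[i].x, cmd[i].y);
	}
	free(cmd);
	return 0;
}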
@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -31,7 +31,7 @@
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8

@@ -67,9 +67,23 @@ struct vmw_mob {
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,

@@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{

@@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
	vsgt = vmw_bo_sg_table(otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

@@ -142,7 +157,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

@@ -191,18 +206,19 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable "
			  "takedown.\n");
	} else {
		memset(cmd, 0, sizeof(*cmd));
		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.type = type;
		cmd->body.baseAddress = 0;
		cmd->body.sizeInBytes = 0;
		cmd->body.validSizeInBytes = 0;
		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
		return;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	if (bo) {
		int ret;

@@ -217,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
	otable->page_table = NULL;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization, by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A succesful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)

static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
				  struct vmw_otable_batch *batch)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables;
	struct vmw_otable *otables = batch->otables;
	SVGAOTableType i;
	int ret;

	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
			  GFP_KERNEL);
	if (unlikely(otables == NULL)) {
		DRM_ERROR("Failed to allocate space for otable "
			  "metadata.\n");
		return -ENOMEM;
	}

	otables[SVGA_OTABLE_MOB].size =
		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
	otables[SVGA_OTABLE_SURFACE].size =
		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
	otables[SVGA_OTABLE_CONTEXT].size =
		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
	otables[SVGA_OTABLE_SHADER].size =
		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
	otables[SVGA_OTABLE_SCREEN_TARGET].size =
		VMWGFX_NUM_GB_SCREEN_TARGET *
		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;

	bo_size = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
	for (i = 0; i < batch->num_otables; ++i) {
		if (!otables[i].enabled)
			continue;

		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;

@@ -267,46 +257,114 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
				  ttm_bo_type_device,
				  &vmw_sys_ne_placement,
				  0, false, NULL,
				  &dev_priv->otable_bo);
				  &batch->otable_bo);

	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(dev_priv->otable_bo);
	ret = vmw_bo_map_dma(batch->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(dev_priv->otable_bo);
	ttm_bo_unreserve(batch->otable_bo);

	offset = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
		ret = vmw_setup_otable_base(dev_priv, i, offset,
	for (i = 0; i < batch->num_otables; ++i) {
		if (!batch->otables[i].enabled)
			continue;

		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
					    offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	dev_priv->otables = otables;
	return 0;

out_unreserve:
	ttm_bo_unreserve(dev_priv->otable_bo);
	ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i, &otables[i]);
	for (i = 0; i < batch->num_otables; ++i) {
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);
	}

	ttm_bo_unref(&dev_priv->otable_bo);
	ttm_bo_unref(&batch->otable_bo);
out_no_bo:
	kfree(otables);
	return ret;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization, by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
	int ret;

	if (dev_priv->has_dx) {
		*otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
		if (*otables == NULL)
			return -ENOMEM;

		memcpy(*otables, dx_tables, sizeof(dx_tables));
		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
	} else {
		*otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
		if (*otables == NULL)
			return -ENOMEM;

		memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
	}

	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
	if (unlikely(ret != 0))
		goto out_setup;

	return 0;

out_setup:
	kfree(*otables);
	return ret;
}

static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
				      struct vmw_otable_batch *batch)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = batch->otable_bo;
	int ret;

	for (i = 0; i < batch->num_otables; ++i)
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_unref(&batch->otable_bo);
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables

@@ -317,26 +375,10 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = dev_priv->otable_bo;
	int ret;

	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i,
					 &dev_priv->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_unref(&dev_priv->otable_bo);
	kfree(dev_priv->otables);
	dev_priv->otables = NULL;
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}


/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.

@@ -409,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:

@@ -429,15 +471,15 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif

@@ -459,7 +501,7 @@ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	__le32 *addr, *save_addr;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

@@ -574,7 +616,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_3d_resource_dec(dev_priv, false);
	vmw_fifo_resource_dec(dev_priv);
}

/*

@@ -627,7 +669,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_fifo_resource_inc(dev_priv);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {

@@ -640,7 +682,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

@@ -648,7 +690,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
	return 0;

out_no_cmd_space:
	vmw_3d_resource_dec(dev_priv, false);
	vmw_fifo_resource_dec(dev_priv);
	if (pt_set_up)
		ttm_bo_unref(&mob->pt_bo);
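The new pre_dx_tables[]/dx_tables[] arrays replace the old per-table size assignments with a table-driven setup: pick the descriptor array that matches the device, then size and place only the enabled entries. A small sketch of that two-pass layout under invented sizes (not the real SVGA3D entry constants):

/*
 * Table-driven object-table layout sketch: one descriptor array per
 * device generation, page-aligned sizes summed into one backing buffer.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct otable {
	unsigned long size;
	void *page_table;
	bool enabled;
};

static const struct otable pre_dx_tables[] = {
	{ 4096, NULL, true },		/* MOB table (sizes invented) */
	{ 8192, NULL, true },		/* surface table */
	{ 2048, NULL, true },		/* context table */
};

static const struct otable dx_tables[] = {
	{ 4096, NULL, true },
	{ 8192, NULL, true },
	{ 2048, NULL, true },
	{ 1024, NULL, true },		/* extra DX context table */
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define PAGE_SIZE 4096ul
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	bool has_dx = true;
	const struct otable *tables = has_dx ? dx_tables : pre_dx_tables;
	size_t n = has_dx ? ARRAY_SIZE(dx_tables) : ARRAY_SIZE(pre_dx_tables);
	unsigned long bo_size = 0, offset = 0;
	size_t i;

	for (i = 0; i < n; i++) {	/* pass 1: total backing size */
		if (!tables[i].enabled)
			continue;
		bo_size += PAGE_ALIGN(tables[i].size);
	}
	printf("backing buffer: %lu bytes\n", bo_size);

	for (i = 0; i < n; i++) {	/* pass 2: per-table offsets */
		if (!tables[i].enabled)
			continue;
		printf("table %zu at offset %lu\n", i, offset);
		offset += PAGE_ALIGN(tables[i].size);
	}
	return 0;
}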
@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -31,8 +31,8 @@

#include <drm/ttm/ttm_placement.h>

#include "svga_overlay.h"
#include "svga_escape.h"
#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

@@ -100,7 +100,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
{
	struct vmw_escape_video_flush *flush;
	size_t fifo_size;
	bool have_so = dev_priv->sou_priv ? true : false;
	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
	int i, num_items;
	SVGAGuestPtr ptr;

@@ -231,10 +231,10 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
	if (!pin)
		return vmw_dmabuf_unpin(dev_priv, buf, inter);

	if (!dev_priv->sou_priv)
		return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);

	return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
	return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
}

/**

@@ -453,7 +453,7 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)

static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
	return (dev_priv->overlay_priv != NULL &&
	return (dev_priv->overlay_priv != NULL &&
		((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
		 VMW_OVERLAY_CAP_MASK));
}
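The overlay code now keys its buffer-pinning decision off dev_priv->active_display_unit instead of testing sou_priv for NULL. A toy model of that dispatch; the enum values and the VRAM-only rule for the legacy unit mirror the diff, everything else is invented:

/*
 * Display-unit dispatch sketch: the legacy unit must scan out of VRAM,
 * any newer unit may also use GMR memory.
 */
#include <stdio.h>

enum display_unit { DU_INVALID, DU_LEGACY, DU_SCREEN_OBJECT, DU_SCREEN_TARGET };

static const char *pin_strategy(enum display_unit du)
{
	return du == DU_LEGACY ? "pin in vram" : "pin in vram or gmr";
}

int main(void)
{
	printf("%s\n", pin_strategy(DU_LEGACY));
	printf("%s\n", pin_strategy(DU_SCREEN_OBJECT));
	return 0;
}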
@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -39,19 +39,17 @@
#define VMWGFX_IRQSTATUS_PORT 0x8

struct svga_guest_mem_descriptor {
	__le32 ppn;
	__le32 num_pages;
	u32 ppn;
	u32 num_pages;
};

struct svga_fifo_cmd_fence {
	__le32 fence;
	u32 fence;
};

#define SVGA_SYNC_GENERIC 1
#define SVGA_SYNC_FIFOFULL 2

#include "svga_types.h"

#include "svga3d_reg.h"
#include "device_include/svga3d_reg.h"

#endif
@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -31,6 +31,7 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

@@ -121,6 +122,7 @@ static void vmw_resource_release(struct kref *kref)
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

@@ -143,10 +145,10 @@ static void vmw_resource_release(struct kref *kref)
	}

	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;

@@ -156,20 +158,17 @@ static void vmw_resource_release(struct kref *kref)
	kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}


@@ -260,17 +259,16 @@ void vmw_resource_activate(struct vmw_resource *res,
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
						struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
		res = NULL;

	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))

@@ -900,20 +898,21 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

		goto out_ret;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
		goto out_ret;
	}

	res = &stream->stream.res;

@@ -926,7 +925,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;
		goto out_ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,

@@ -940,8 +939,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
out_ret:
	return ret;
}
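The lookup rework above matters for teardown races: kref_get_unless_zero() refuses to resurrect an object whose count already hit zero, where a bare kref_get() after the avail check could revive a dying resource. A compact user-space model of get-unless-zero built on C11 atomics (illustrative only, not the kernel's kref implementation):

/*
 * get-unless-zero sketch: only take a reference while the count is
 * still positive; a lookup racing with the final put must fail.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

static bool get_unless_zero(struct obj *o)
{
	int c = atomic_load(&o->refcount);

	while (c > 0) {
		/* CAS retries reload c on failure, so the check stays valid. */
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;
	}
	return false;
}

static void put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		printf("last reference dropped: free object\n");
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	if (get_unless_zero(&o))	/* succeeds: count 1 -> 2 */
		put(&o);
	put(&o);			/* count reaches zero */
	if (!get_unless_zero(&o))	/* now fails: object is dying */
		printf("lookup after final put correctly fails\n");
	return 0;
}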
@@ -1152,14 +1150,16 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 * switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{

@@ -1168,22 +1168,25 @@ void vmw_resource_unreserve(struct vmw_resource *res,
	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
		if (new_backup) {
			res->backup = vmw_dmabuf_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (new_backup)
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);

@@ -1259,7 +1262,8 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

@@ -1270,9 +1274,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu. bytes\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;

@@ -1305,7 +1313,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;

@@ -1356,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res)
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;

@@ -1443,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The truct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
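With the new @switch_backup argument, vmw_resource_unreserve() can distinguish "no backup change" from "switch to NULL, i.e. drop the backup", which the old new_backup != NULL test could not express. A stub model of just that rule (stand-in types, no locking or list handling):

/*
 * switch_backup semantics sketch: the boolean controls the swap, and a
 * NULL new_backup with switch_backup set means "drop the backup".
 */
#include <stdio.h>

struct buffer { const char *name; };

struct resource {
	struct buffer *backup;
	unsigned long backup_offset;
};

static void unreserve(struct resource *res, int switch_backup,
		      struct buffer *new_backup, unsigned long new_offset)
{
	if (switch_backup && new_backup != res->backup)
		res->backup = new_backup;	/* may become NULL */
	if (switch_backup)
		res->backup_offset = new_offset;
	printf("backup=%s offset=%lu\n",
	       res->backup ? res->backup->name : "(none)",
	       res->backup_offset);
}

int main(void)
{
	struct buffer a = { "mob-a" }, b = { "mob-b" };
	struct resource res = { &a, 0 };

	unreserve(&res, 1, &b, 4096);	/* switch to a new backup */
	unreserve(&res, 0, NULL, 0);	/* no switch: keep mob-b */
	unreserve(&res, 1, NULL, 0);	/* switch to none: drop backup */
	return 0;
}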
@@ -1495,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
	}
}



/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumings binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;


	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}



/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;


	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_fence_single_bo(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);

}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
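vmw_query_move_notify() above acts on one specific transition only: the query MOB leaving MOB memory for system memory, which is when cached query state must be read back and the move fenced. A trivial model of that decision (the memory-type values are invented stand-ins):

/*
 * Move-notify decision sketch: only a MOB -> system move triggers the
 * readback-and-fence path.
 */
#include <stdio.h>

enum mem_type { PL_SYSTEM, PL_VRAM, PL_MOB };

static void move_notify(enum mem_type old_type, enum mem_type new_type)
{
	if (new_type == PL_SYSTEM && old_type == PL_MOB)
		printf("MOB -> system: read back queries, then fence the move\n");
	else
		printf("no readback needed\n");
}

int main(void)
{
	move_notify(PL_MOB, PL_SYSTEM);	/* swap-out: flush cached state */
	move_notify(PL_VRAM, PL_SYSTEM);
	return 0;
}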
@@ -1573,3 +1676,107 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_dma_buffer *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, false,
				       NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 interruptible, false);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_dma_buffer *vbo = res->backup;

		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}
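vmw_resource_pin()/vmw_resource_unpin() above add a pin count with work only on the edge transitions: the first pin validates and pins backing storage, the last unpin releases it, and vmw_resource_unreserve() now refuses to put a pinned resource back on the eviction LRU. A stubbed sketch of the counting rule (illustrative, no reservation or locking):

/*
 * Pin-count edge-transition sketch: backing work happens only on
 * 0 -> 1 (pin) and 1 -> 0 (unpin).
 */
#include <stdio.h>

struct resource {
	int pin_count;
	int backed;	/* 1 once backing storage is pinned */
};

static int resource_pin(struct resource *res)
{
	if (res->pin_count == 0) {
		/* First pin: make the resource resident. */
		res->backed = 1;
		printf("validated and pinned backing storage\n");
	}
	res->pin_count++;
	return 0;
}

static void resource_unpin(struct resource *res)
{
	if (--res->pin_count == 0 && res->backed) {
		res->backed = 0;
		printf("backing storage unpinned\n");
	}
}

int main(void)
{
	struct resource res = { 0, 0 };

	resource_pin(&res);
	resource_pin(&res);	/* nested pin: no extra work */
	resource_unpin(&res);
	resource_unpin(&res);	/* last unpin releases the backing */
	return 0;
}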
@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -30,6 +30,12 @@

#include "vmwgfx_drv.h"

enum vmw_cmdbuf_res_state {
	VMW_CMDBUF_RES_COMMITTED,
	VMW_CMDBUF_RES_ADD,
	VMW_CMDBUF_RES_DEL
};

/**
 * struct vmw_user_resource_conv - Identify a derived user-exported resource
 * type and provide a function to convert its ttm_base_object pointer to

@@ -55,8 +61,10 @@ struct vmw_user_resource_conv {
 * @bind: Bind a hardware resource to persistent buffer storage.
 * @unbind: Unbind a hardware resource from persistent
 * buffer storage.
 * @commit_notify: If the resource is a command buffer managed resource,
 * callback to notify that a define or remove command
 * has been committed to the device.
 */

struct vmw_res_func {
	enum vmw_res_type res_type;
	bool needs_backup;

@@ -71,6 +79,8 @@ struct vmw_res_func {
	int (*unbind) (struct vmw_resource *res,
		       bool readback,
		       struct ttm_validate_buffer *val_buf);
	void (*commit_notify)(struct vmw_resource *res,
			      enum vmw_cmdbuf_res_state state);
};

int vmw_resource_alloc_id(struct vmw_resource *res);
@@ -1,6 +1,6 @@
/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@@ -36,10 +36,55 @@
#define vmw_connector_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates.
 * @dst_y: Difference between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
	struct vmw_kms_dirty base;
	s32 left, right, top, bottom;
	s32 dst_x, dst_y;
	u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
	uint32 header;
	SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
	uint32 header;
	SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdBlitSurfaceToScreen body;
};


/*
 * Other structs.
 */

struct vmw_screen_object_display {
	unsigned num_implicit;

	struct vmw_framebuffer *implicit_fb;
	SVGAFifoCmdDefineGMRFB cur;
	struct vmw_dma_buffer *pinned_gmrfb;
};

/**

@@ -57,7 +102,7 @@ struct vmw_screen_object_unit {

static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
	vmw_display_unit_cleanup(&sou->base);
	vmw_du_cleanup(&sou->base);
	kfree(sou);
}


@@ -72,7 +117,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
}

static void vmw_sou_del_active(struct vmw_private *vmw_priv,
			  struct vmw_screen_object_unit *sou)
			       struct vmw_screen_object_unit *sou)
{
	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;


@@ -84,8 +129,8 @@ static void vmw_sou_del_active(struct vmw_private *vmw_priv,
}

static void vmw_sou_add_active(struct vmw_private *vmw_priv,
			  struct vmw_screen_object_unit *sou,
			  struct vmw_framebuffer *vfb)
			       struct vmw_screen_object_unit *sou,
			       struct vmw_framebuffer *vfb)
{
	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;


@@ -202,14 +247,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
				 struct vmw_screen_object_unit *sou)
{
	struct ttm_buffer_object *bo;

	if (unlikely(sou->buffer == NULL))
		return;

	bo = &sou->buffer->base;
	ttm_bo_unref(&bo);
	sou->buffer = NULL;
	vmw_dmabuf_unreference(&sou->buffer);
	sou->buffer_size = 0;
}


@@ -274,13 +312,13 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
	dev_priv = vmw_priv(crtc->dev);

	if (set->num_connectors > 1) {
		DRM_ERROR("to many connectors\n");
		DRM_ERROR("Too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &sou->base.connector) {
		DRM_ERROR("connector doesn't match %p %p\n",
		DRM_ERROR("Connector doesn't match %p %p\n",
			  set->connectors[0], &sou->base.connector);
		return -EINVAL;
	}

@@ -331,7 +369,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
		return -EINVAL;
	}

	vmw_fb_off(dev_priv);
	vmw_svga_enable(dev_priv);

	if (mode->hdisplay != crtc->mode.hdisplay ||
	    mode->vdisplay != crtc->mode.vdisplay) {
@@ -390,6 +428,108 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
	return 0;
}

/**
 * Returns if this unit can be page flipped.
 * Must be called with the mode_config mutex held.
 */
static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
					    struct drm_crtc *crtc)
{
	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);

	if (!sou->base.is_implicit)
		return true;

	if (dev_priv->sou_priv->num_implicit != 1)
		return false;

	return true;
}

/**
 * Update the implicit fb to the current fb of this crtc.
 * Must be called with the mode_config mutex held.
 */
static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
				       struct drm_crtc *crtc)
{
	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);

	BUG_ON(!sou->base.is_implicit);

	dev_priv->sou_priv->implicit_fb =
		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
}

static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  struct drm_pending_vblank_event *event,
				  uint32_t flags)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
	struct vmw_fence_obj *fence = NULL;
	struct drm_clip_rect clips;
	int ret;

	/* require ScreenObject support for page flipping */
	if (!dev_priv->sou_priv)
		return -ENOSYS;

	if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
		return -EINVAL;

	crtc->primary->fb = fb;

	/* do a full screen dirty update */
	clips.x1 = clips.y1 = 0;
	clips.x2 = fb->width;
	clips.y2 = fb->height;

	if (vfb->dmabuf)
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
						  &clips, 1, 1,
						  true, &fence);
	else
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
						   &clips, NULL, NULL,
						   0, 0, 1, 1, &fence);


	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	if (event) {
		struct drm_file *file_priv = event->base.file_priv;

		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   true);
	}

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_sou_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	crtc->primary->fb = old_fb;
	return ret;
}

static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.save = vmw_du_crtc_save,
	.restore = vmw_du_crtc_restore,
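The new vmw_sou_crtc_page_flip() above implements a flip as a full-framebuffer dirty, and queues the flip-completion event on the fence that the dirty returns, so completion tracks the actual blit. A schematic stand-alone version of that flow (all of it invented scaffolding, not driver code):

/*
 * Flip-as-full-dirty sketch: one clip covering the framebuffer, a fence
 * from the blit, and the userspace event queued on that fence.
 */
#include <stdio.h>

struct clip { int x1, y1, x2, y2; };

static int page_flip(int fb_width, int fb_height, int have_event)
{
	struct clip full = { 0, 0, fb_width, fb_height };
	int fence;

	/* One dirty covering the entire new framebuffer... */
	printf("dirty %d,%d -> %d,%d\n", full.x1, full.y1, full.x2, full.y2);
	fence = 1;	/* ...which yields a fence for the blit. */

	/* The pageflip event fires when that fence signals. */
	if (have_event)
		printf("queue flip event on fence %d\n", fence);
	return 0;
}

int main(void)
{
	return page_flip(1024, 768, 1);
}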
@@ -398,7 +538,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.set_config = vmw_sou_crtc_set_config,
	.page_flip = vmw_du_page_flip,
	.page_flip = vmw_sou_crtc_page_flip,
};

/*

@@ -423,7 +563,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
	vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static struct drm_connector_funcs vmw_legacy_connector_funcs = {
static struct drm_connector_funcs vmw_sou_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.save = vmw_du_connector_save,
	.restore = vmw_du_connector_restore,

@@ -458,7 +598,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
	sou->base.pref_mode = NULL;
	sou->base.is_implicit = true;

	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
	drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	connector->status = vmw_du_connector_detect(connector, true);


@@ -481,7 +621,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
	return 0;
}

int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

@@ -516,7 +656,9 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	DRM_INFO("Screen objects system initialized\n");
	dev_priv->active_display_unit = vmw_du_screen_object;

	DRM_INFO("Screen Objects Display Unit initialized\n");

	return 0;


@@ -529,7 +671,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
	return ret;
}

int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

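The do_dmabuf_define_gmrfb() helper in the next hunk carries a host quirk: a GMRFB color depth of 32 is not accepted by hosts despite what svga_reg.h says, so a depth of 32 is reported as 24. A tiny model of that fixup (illustrative only):

/*
 * GMRFB depth fixup sketch: emulate RGBA by advertising depth 24 for a
 * 32-bit framebuffer, exactly as the diff's comment describes.
 */
#include <stdio.h>

static int gmrfb_depth(int fb_depth)
{
	return fb_depth == 32 ? 24 : fb_depth;
}

int main(void)
{
	printf("depth 32 -> %d\n", gmrfb_depth(32));
	printf("depth 24 -> %d\n", gmrfb_depth(24));
	return 0;
}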
@@ -543,35 +685,369 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
 	return 0;
 }
 
-/**
- * Returns if this unit can be page flipped.
- * Must be called with the mode_config mutex held.
- */
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
-				     struct drm_crtc *crtc)
+static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+				  struct vmw_framebuffer *framebuffer)
 {
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+	struct vmw_dma_buffer *buf =
+		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+			     base)->buffer;
+	int depth = framebuffer->base.depth;
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd;
 
-	if (!sou->base.is_implicit)
-		return true;
+	/* Emulate RGBA support, contrary to svga_reg.h this is not
+	 * supported by hosts. This is only a problem if we are reading
+	 * this value later and expecting what we uploaded back.
+	 */
+	if (depth == 32)
+		depth = 24;
 
-	if (dev_priv->sou_priv->num_implicit != 1)
-		return false;
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (!cmd) {
+		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
+		return -ENOMEM;
+	}
 
-	return true;
+	cmd->header = SVGA_CMD_DEFINE_GMRFB;
+	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+	cmd->body.format.colorDepth = depth;
+	cmd->body.format.reserved = 0;
+	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
+	/* Buffer is reserved in vram or GMR */
+	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
 }
 
 /**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
+ * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
+ * blit surface to screen command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in the command, and translates the cliprects
+ * to match the destination bounding box encoded.
  */
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
-					      struct drm_crtc *crtc)
+static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
 {
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
+	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
+	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+	int i;
 
-	BUG_ON(!sou->base.is_implicit);
+	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+	cmd->header.size = sizeof(cmd->body) + region_size;
 
-	dev_priv->sou_priv->implicit_fb =
-		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+	/*
+	 * Use the destination bounding box to specify destination - and
+	 * source bounding regions.
+	 */
+	cmd->body.destRect.left = sdirty->left;
+	cmd->body.destRect.right = sdirty->right;
+	cmd->body.destRect.top = sdirty->top;
+	cmd->body.destRect.bottom = sdirty->bottom;
+
+	cmd->body.srcRect.left = sdirty->left + trans_x;
+	cmd->body.srcRect.right = sdirty->right + trans_x;
+	cmd->body.srcRect.top = sdirty->top + trans_y;
+	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
+
+	cmd->body.srcImage.sid = sdirty->sid;
+	cmd->body.destScreenId = dirty->unit->unit;
+
+	/* Blits are relative to the destination rect. Translate. */
+	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
+		blit->left -= sdirty->left;
+		blit->right -= sdirty->left;
+		blit->top -= sdirty->top;
+		blit->bottom -= sdirty->top;
+	}
+
+	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
+
+	sdirty->left = sdirty->top = S32_MAX;
+	sdirty->right = sdirty->bottom = S32_MIN;
 }
+
+/**
+ * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a SVGASignedRect cliprect and updates the bounding box of the
+ * BLIT_SURFACE_TO_SCREEN command.
+ */
+static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+
+	/* Destination rect. */
+	blit += dirty->num_hits;
+	blit->left = dirty->unit_x1;
+	blit->top = dirty->unit_y1;
+	blit->right = dirty->unit_x2;
+	blit->bottom = dirty->unit_y2;
+
+	/* Destination bounding box */
+	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips.
+ * @inc: Increment to use when looping over @clips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+				 struct vmw_framebuffer *framebuffer,
+				 struct drm_clip_rect *clips,
+				 struct drm_vmw_rect *vclips,
+				 struct vmw_resource *srf,
+				 s32 dest_x,
+				 s32 dest_y,
+				 unsigned num_clips, int inc,
+				 struct vmw_fence_obj **out_fence)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		container_of(framebuffer, typeof(*vfbs), base);
+	struct vmw_kms_sou_surface_dirty sdirty;
+	int ret;
+
+	if (!srf)
+		srf = &vfbs->surface->res;
+
+	ret = vmw_kms_helper_resource_prepare(srf, true);
+	if (ret)
+		return ret;
+
+	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
+	sdirty.base.clip = vmw_sou_surface_clip;
+	sdirty.base.dev_priv = dev_priv;
+	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
+		sizeof(SVGASignedRect) * num_clips;
+
+	sdirty.sid = srf->id;
+	sdirty.left = sdirty.top = S32_MAX;
+	sdirty.right = sdirty.bottom = S32_MIN;
+	sdirty.dst_x = dest_x;
+	sdirty.dst_y = dest_y;
+
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   dest_x, dest_y, num_clips, inc,
+				   &sdirty.base);
+	vmw_kms_helper_resource_finish(srf, out_fence);
+
+	return ret;
+}
+
+/**
+ * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_dmabuf_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
+ */
+static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+	blit->body.destScreenId = dirty->unit->unit;
+	blit->body.srcOrigin.x = dirty->fb_x;
+	blit->body.srcOrigin.y = dirty->fb_y;
+	blit->body.destRect.left = dirty->unit_x1;
+	blit->body.destRect.top = dirty->unit_y1;
+	blit->body.destRect.right = dirty->unit_x2;
+	blit->body.destRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @clips: Array of clip rects.
+ * @num_clips: Number of clip rects in @clips.
+ * @increment: Increment to use when looping over @clips.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment,
+				bool interruptible,
+				struct vmw_fence_obj **out_fence)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+			     base)->buffer;
+	struct vmw_kms_dirty dirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+					    false);
+	if (ret)
+		return ret;
+
+	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
+	dirty.clip = vmw_sou_dmabuf_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
+				   0, 0, num_clips, increment, &dirty);
+	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+
+	return ret;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(buf);
+
+	return ret;
+}
+
+
+/**
+ * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_readback_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_readback_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
+ */
+static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+	blit->body.srcScreenId = dirty->unit->unit;
+	blit->body.destOrigin.x = dirty->fb_x;
+	blit->body.destOrigin.y = dirty->fb_y;
+	blit->body.srcRect.left = dirty->unit_x1;
+	blit->body.srcRect.top = dirty->unit_y1;
+	blit->body.srcRect.right = dirty->unit_x2;
+	blit->body.srcRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_readback - Perform a readback from the screen object system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+			 struct drm_file *file_priv,
+			 struct vmw_framebuffer *vfb,
+			 struct drm_vmw_fence_rep __user *user_fence_rep,
+			 struct drm_vmw_rect *vclips,
+			 uint32_t num_clips)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+	struct vmw_kms_dirty dirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+	if (ret)
+		return ret;
+
+	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
+	dirty.clip = vmw_sou_readback_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
+				   0, 0, num_clips, 1, &dirty);
+	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+				     user_fence_rep);
+
+	return ret;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(buf);
+
+	return ret;
+}

drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,12 +27,15 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_shader {
 	struct vmw_resource res;
 	SVGA3dShaderType type;
 	uint32_t size;
+	uint8_t num_input_sig;
+	uint8_t num_output_sig;
 };
 
 struct vmw_user_shader {
@@ -40,8 +43,18 @@ struct vmw_user_shader {
 	struct vmw_shader shader;
 };
 
+struct vmw_dx_shader {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	struct vmw_resource *cotable;
+	u32 id;
+	bool committed;
+	struct list_head cotable_head;
+};
+
 static uint64_t vmw_user_shader_size;
 static uint64_t vmw_shader_size;
+static size_t vmw_shader_dx_size;
 
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
 				struct ttm_validate_buffer *val_buf);
 static int vmw_gb_shader_destroy(struct vmw_resource *res);
 
+static int vmw_dx_shader_create(struct vmw_resource *res);
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf);
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				bool readback,
+				struct ttm_validate_buffer *val_buf);
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state);
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
-static uint64_t vmw_user_shader_size;
 
 static const struct vmw_user_resource_conv user_shader_conv = {
 	.object_type = VMW_RES_SHADER,
 	.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = {
 	.unbind = vmw_gb_shader_unbind
 };
 
+static const struct vmw_res_func vmw_dx_shader_func = {
+	.res_type = vmw_res_shader,
+	.needs_backup = true,
+	.may_evict = false,
+	.type_name = "dx shaders",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_shader_create,
+	/*
+	 * The destroy callback is only called with a committed resource on
+	 * context destroy, in which case we destroy the cotable anyway,
+	 * so there's no need to destroy DX shaders separately.
+	 */
+	.destroy = NULL,
+	.bind = vmw_dx_shader_bind,
+	.unbind = vmw_dx_shader_unbind,
+	.commit_notify = vmw_dx_shader_commit_notify,
+};
+
 /**
  * Shader management:
  */
@@ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res)
 	return container_of(res, struct vmw_shader, res);
 }
 
+/**
+ * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
+ * struct vmw_dx_shader
+ *
+ * @res: Pointer to the struct vmw_resource.
+ */
+static inline struct vmw_dx_shader *
+vmw_res_to_dx_shader(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_dx_shader, res);
+}
+
 static void vmw_hw_shader_destroy(struct vmw_resource *res)
 {
-	(void) vmw_gb_shader_destroy(res);
+	if (likely(res->func->destroy))
+		(void) res->func->destroy(res);
+	else
+		res->id = -1;
 }
 
 
 static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 			      struct vmw_resource *res,
 			      uint32_t size,
 			      uint64_t offset,
 			      SVGA3dShaderType type,
+			      uint8_t num_input_sig,
+			      uint8_t num_output_sig,
 			      struct vmw_dma_buffer *byte_code,
 			      void (*res_free) (struct vmw_resource *res))
 {
 	struct vmw_shader *shader = vmw_res_to_shader(res);
 	int ret;
 
-	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_shader_func);
+	ret = vmw_resource_init(dev_priv, res, true, res_free,
				&vmw_gb_shader_func);
 
 	if (unlikely(ret != 0)) {
 		if (res_free)
@@ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 	}
 	shader->size = size;
 	shader->type = type;
+	shader->num_input_sig = num_input_sig;
+	shader->num_output_sig = num_output_sig;
 
 	vmw_resource_activate(res, vmw_hw_shader_destroy);
 	return 0;
 }
 
+/*
+ * GB shader code:
+ */
+
 static int vmw_gb_shader_create(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
@@ -165,7 +231,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
 	cmd->body.type = shader->type;
 	cmd->body.sizeInBytes = shader->size;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
 
@@ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_scrub(&res->binding_head);
+	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -275,11 +341,326 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
 
+/*
+ * DX shader code:
+ */
+
+/**
+ * vmw_dx_shader_commit_notify - Notify that a shader operation has been
+ * committed to hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the shader resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (state == VMW_CMDBUF_RES_ADD) {
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_cotable_add_resource(shader->cotable,
+					 &shader->cotable_head);
+		shader->committed = true;
+		res->id = shader->id;
+		mutex_unlock(&dev_priv->binding_mutex);
+	} else {
+		mutex_lock(&dev_priv->binding_mutex);
+		list_del_init(&shader->cotable_head);
+		shader->committed = false;
+		res->id = -1;
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+}
+
+/**
+ * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function reverts a scrub operation.
+ */
+static int vmw_dx_shader_unscrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
+	if (!list_empty(&shader->cotable_head) || !shader->committed)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+				  shader->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "scrubbing.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = shader->id;
+	cmd->body.mobid = res->backup->base.mem.start;
+	cmd->body.offsetInBytes = res->backup_offset;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_create - The DX shader create callback
+ *
+ * @res: The DX shader resource
+ *
+ * The create callback is called as part of resource validation and
+ * makes sure that we unscrub the shader if it's previously been scrubbed.
+ */
+static int vmw_dx_shader_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	int ret = 0;
+
+	WARN_ON_ONCE(!shader->committed);
+
+	if (!list_empty(&res->mob_head)) {
+		mutex_lock(&dev_priv->binding_mutex);
+		ret = vmw_dx_shader_unscrub(res);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+
+	res->id = shader->id;
+	return ret;
+}
+
+/**
+ * vmw_dx_shader_bind - The DX shader bind callback
+ *
+ * @res: The DX shader resource
+ * @val_buf: Pointer to the validate buffer.
+ *
+ */
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_shader_unscrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function unbinds a MOB from the DX shader without requiring the
+ * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
+ * However, once the driver eventually decides to unbind the MOB, it doesn't
+ * need to access the context.
+ */
+static int vmw_dx_shader_scrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
+	if (list_empty(&shader->cotable_head))
+		return 0;
+
+	WARN_ON_ONCE(!shader->committed);
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "scrubbing.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = res->id;
+	cmd->body.mobid = SVGA3D_INVALID_ID;
+	cmd->body.offsetInBytes = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	res->id = -1;
+	list_del_init(&shader->cotable_head);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_unbind - The dx shader unbind callback.
+ *
+ * @res: The shader resource
+ * @readback: Whether this is a readback unbind. Currently unused.
+ * @val_buf: MOB buffer information.
+ */
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				bool readback,
+				struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	ret = vmw_dx_shader_scrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	if (ret)
+		return ret;
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
+ * DX shaders.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ *
+ * Scrubs all shader MOBs so that any subsequent shader unbind or shader
+ * destroy operation won't need to swap in the context.
+ */
+void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+				      struct list_head *list,
+				      bool readback)
+{
+	struct vmw_dx_shader *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, cotable_head) {
+		WARN_ON(vmw_dx_shader_scrub(&entry->res));
+		if (!readback)
+			entry->committed = false;
+	}
+}
+
+/**
+ * vmw_dx_shader_res_free - The DX shader free callback
+ *
+ * @res: The shader resource
+ *
+ * Frees the DX shader resource and updates memory accounting.
+ */
+static void vmw_dx_shader_res_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+
+	vmw_resource_unreference(&shader->cotable);
+	kfree(shader);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+}
+
+/**
+ * vmw_dx_shader_add - Add a shader resource as a command buffer managed
+ * resource.
+ *
+ * @man: The command buffer resource manager.
+ * @ctx: Pointer to the context resource.
+ * @user_key: The id used for this shader.
+ * @shader_type: The shader type.
+ * @list: The list of staged command buffer managed resources.
+ */
+int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+		      struct vmw_resource *ctx,
+		      u32 user_key,
+		      SVGA3dShaderType shader_type,
+		      struct list_head *list)
+{
+	struct vmw_dx_shader *shader;
+	struct vmw_resource *res;
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	int ret;
+
+	if (!vmw_shader_dx_size)
+		vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
+		return -EINVAL;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
+				   false, true);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		return ret;
+	}
+
+	shader = kmalloc(sizeof(*shader), GFP_KERNEL);
+	if (!shader) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+		return -ENOMEM;
+	}
+
+	res = &shader->res;
+	shader->ctx = ctx;
+	shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+	shader->id = user_key;
+	shader->committed = false;
+	INIT_LIST_HEAD(&shader->cotable_head);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_dx_shader_res_free, &vmw_dx_shader_func);
+	if (ret)
+		goto out_resource_init;
+
+	/*
+	 * The user_key name-space is not per shader type for DX shaders,
+	 * so when hashing, use a single zero shader type.
+	 */
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, 0),
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = shader->id;
+	vmw_resource_activate(res, vmw_hw_shader_destroy);
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+
+
 /**
  * User-space shader management:
  */
@@ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 				 size_t shader_size,
 				 size_t offset,
 				 SVGA3dShaderType shader_type,
+				 uint8_t num_input_sig,
+				 uint8_t num_output_sig,
 				 struct ttm_object_file *tfile,
 				 u32 *handle)
 {
@@ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	 */
 
 	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-				 offset, shader_type, buffer,
+				 offset, shader_type, num_input_sig,
+				 num_output_sig, buffer,
 				 vmw_user_shader_free);
 	if (unlikely(ret != 0))
 		goto out;
@@ -407,11 +791,11 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 }
 
 
-struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
-				      struct vmw_dma_buffer *buffer,
-				      size_t shader_size,
-				      size_t offset,
-				      SVGA3dShaderType shader_type)
+static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+					     struct vmw_dma_buffer *buffer,
+					     size_t shader_size,
+					     size_t offset,
+					     SVGA3dShaderType shader_type)
 {
 	struct vmw_shader *shader;
 	struct vmw_resource *res;
@@ -449,7 +833,7 @@ struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
 	 * From here on, the destructor takes over resource freeing.
 	 */
 	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-				 offset, shader_type, buffer,
+				 offset, shader_type, 0, 0, buffer,
 				 vmw_shader_free);
 
 out_err:
@@ -457,19 +841,20 @@ struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
 }
 
 
-int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv)
+static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+			     enum drm_vmw_shader_type shader_type_drm,
+			     u32 buffer_handle, size_t size, size_t offset,
+			     uint8_t num_input_sig, uint8_t num_output_sig,
+			     uint32_t *shader_handle)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_shader_create_arg *arg =
-		(struct drm_vmw_shader_create_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_dma_buffer *buffer = NULL;
 	SVGA3dShaderType shader_type;
 	int ret;
 
-	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
-		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+	if (buffer_handle != SVGA3D_INVALID_ID) {
+		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
 					     &buffer);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not find buffer for shader "
@@ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 		}
 
 		if ((u64)buffer->base.num_pages * PAGE_SIZE <
-		    (u64)arg->size + (u64)arg->offset) {
+		    (u64)size + (u64)offset) {
 			DRM_ERROR("Illegal buffer- or shader size.\n");
 			ret = -EINVAL;
 			goto out_bad_arg;
 		}
 	}
 
-	switch (arg->shader_type) {
+	switch (shader_type_drm) {
 	case drm_vmw_shader_type_vs:
 		shader_type = SVGA3D_SHADERTYPE_VS;
 		break;
 	case drm_vmw_shader_type_ps:
 		shader_type = SVGA3D_SHADERTYPE_PS;
 		break;
-	case drm_vmw_shader_type_gs:
-		shader_type = SVGA3D_SHADERTYPE_GS;
-		break;
 	default:
 		DRM_ERROR("Illegal shader type.\n");
 		ret = -EINVAL;
@@ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0))
 		goto out_bad_arg;
 
-	ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
-				    shader_type, tfile, &arg->shader_handle);
+	ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
+				    shader_type, num_input_sig,
+				    num_output_sig, tfile, shader_handle);
 
 	ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
@@ -515,7 +898,7 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
- * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * vmw_shader_id_ok - Check whether a compat shader user key and
 * shader type are within valid bounds.
 *
 * @user_key: User space id of the shader.
@@ -523,13 +906,13 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 *
 * Returns true if valid, false if not.
 */
-static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
 {
 	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
 }
 
 /**
- * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
+ * vmw_shader_key - Compute a hash key suitable for a compat shader.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
@@ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
 * Returns a hash key suitable for a command buffer managed resource
 * manager hash table.
 */
-static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
 {
 	return user_key | (shader_type << 20);
 }
 
 /**
- * vmw_compat_shader_remove - Stage a compat shader for removal.
+ * vmw_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
@@ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
 * @shader_type: Shader type.
 * @list: Caller's list of staged command buffer resource actions.
 */
-int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-			     u32 user_key, SVGA3dShaderType shader_type,
-			     struct list_head *list)
+int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+		      u32 user_key, SVGA3dShaderType shader_type,
+		      struct list_head *list)
 {
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	struct vmw_resource *dummy;
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
-	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
-				     vmw_compat_shader_key(user_key,
-							   shader_type),
-				     list);
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
				     vmw_shader_key(user_key, shader_type),
				     list, &dummy);
 }
 
 /**
@@ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 	int ret;
 	struct vmw_resource *res;
 
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
 	/* Allocate and pin a DMA buffer */
@@ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		goto no_reserve;
 
-	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
-				 vmw_compat_shader_key(user_key, shader_type),
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, shader_type),
 				 res, list);
 	vmw_resource_unreference(&res);
 no_reserve:
@@ -639,7 +1023,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_compat_shader_lookup - Look up a compat shader
+ * vmw_shader_lookup - Look up a compat shader
 *
 * @man: Pointer to the command buffer managed resource manager identifying
 * the shader namespace.
@@ -650,14 +1034,26 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 * found. An error pointer otherwise.
 */
 struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-			 u32 user_key,
-			 SVGA3dShaderType shader_type)
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key,
+		  SVGA3dShaderType shader_type)
 {
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return ERR_PTR(-EINVAL);
 
-	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
-				     vmw_compat_shader_key(user_key,
-							   shader_type));
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
				     vmw_shader_key(user_key, shader_type));
 }
 
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_vmw_shader_create_arg *arg =
+		(struct drm_vmw_shader_create_arg *)data;
+
+	return vmw_shader_define(dev, file_priv, arg->shader_type,
+				 arg->buffer_handle,
+				 arg->size, arg->offset,
+				 0, 0,
+				 &arg->shader_handle);
+}

555
drivers/gpu/drm/vmwgfx/vmwgfx_so.c
Normal file
@ -0,0 +1,555 @@
|
|||
/**************************************************************************
|
||||
* Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include "vmwgfx_resource_priv.h"
|
||||
#include "vmwgfx_so.h"
|
||||
#include "vmwgfx_binding.h"
|
||||
|
||||
/*
|
||||
* The currently only reason we need to keep track of views is that if we
|
||||
* destroy a hardware surface, all views pointing to it must also be destroyed,
|
||||
* otherwise the device will error.
|
||||
* So in particuar if a surface is evicted, we must destroy all views pointing
|
||||
* to it, and all context bindings of that view. Similarly we must restore
|
||||
* the view bindings, views and surfaces pointed to by the views when a
|
||||
* context is referenced in the command stream.
|
||||
*/
|
||||
|
||||
/**
|
||||
* struct vmw_view - view metadata
|
||||
*
|
||||
* @res: The struct vmw_resource we derive from
|
||||
* @ctx: Non-refcounted pointer to the context this view belongs to.
|
||||
* @srf: Refcounted pointer to the surface pointed to by this view.
|
||||
* @cotable: Refcounted pointer to the cotable holding this view.
|
||||
* @srf_head: List head for the surface-to-view list.
|
||||
* @cotable_head: List head for the cotable-to_view list.
|
||||
* @view_type: View type.
|
||||
* @view_id: User-space per context view id. Currently used also as per
|
||||
* context device view id.
|
||||
* @cmd_size: Size of the SVGA3D define view command that we've copied from the
|
||||
* command stream.
|
||||
* @committed: Whether the view is actually created or pending creation at the
|
||||
* device level.
|
||||
* @cmd: The SVGA3D define view command copied from the command stream.
|
||||
*/
|
||||
struct vmw_view {
|
||||
struct rcu_head rcu;
|
||||
struct vmw_resource res;
|
||||
struct vmw_resource *ctx; /* Immutable */
|
||||
struct vmw_resource *srf; /* Immutable */
|
||||
struct vmw_resource *cotable; /* Immutable */
|
||||
struct list_head srf_head; /* Protected by binding_mutex */
|
||||
struct list_head cotable_head; /* Protected by binding_mutex */
|
||||
unsigned view_type; /* Immutable */
|
||||
unsigned view_id; /* Immutable */
|
||||
u32 cmd_size; /* Immutable */
|
||||
bool committed; /* Protected by binding_mutex */
|
||||
u32 cmd[1]; /* Immutable */
|
||||
};
|
||||
|
||||
static int vmw_view_create(struct vmw_resource *res);
|
||||
static int vmw_view_destroy(struct vmw_resource *res);
|
||||
static void vmw_hw_view_destroy(struct vmw_resource *res);
|
||||
static void vmw_view_commit_notify(struct vmw_resource *res,
|
||||
enum vmw_cmdbuf_res_state state);
|
||||
|
||||
static const struct vmw_res_func vmw_view_func = {
|
||||
.res_type = vmw_res_view,
|
||||
.needs_backup = false,
|
||||
.may_evict = false,
|
||||
.type_name = "DX view",
|
||||
.backup_placement = NULL,
|
||||
.create = vmw_view_create,
|
||||
.commit_notify = vmw_view_commit_notify,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct vmw_view - view define command body stub
|
||||
*
|
||||
* @view_id: The device id of the view being defined
|
||||
* @sid: The surface id of the view being defined
|
||||
*
|
||||
* This generic struct is used by the code to change @view_id and @sid of a
|
||||
* saved view define command.
|
||||
*/
|
||||
struct vmw_view_define {
|
||||
uint32 view_id;
|
||||
uint32 sid;
|
||||
};
|
||||
|
||||
/**
|
||||
* vmw_view - Convert a struct vmw_resource to a struct vmw_view
|
||||
*
|
||||
* @res: Pointer to the resource to convert.
|
||||
*
|
||||
* Returns a pointer to a struct vmw_view.
|
||||
*/
|
||||
static struct vmw_view *vmw_view(struct vmw_resource *res)
|
||||
{
|
||||
return container_of(res, struct vmw_view, res);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_view_commit_notify - Notify that a view operation has been committed to
|
||||
* hardware from a user-supplied command stream.
|
||||
*
|
||||
* @res: Pointer to the view resource.
|
||||
* @state: Indicating whether a creation or removal has been committed.
|
||||
*
|
||||
*/
|
||||
static void vmw_view_commit_notify(struct vmw_resource *res,
|
||||
enum vmw_cmdbuf_res_state state)
|
||||
{
|
||||
struct vmw_view *view = vmw_view(res);
|
||||
struct vmw_private *dev_priv = res->dev_priv;
|
||||
|
||||
mutex_lock(&dev_priv->binding_mutex);
|
||||
if (state == VMW_CMDBUF_RES_ADD) {
|
||||
struct vmw_surface *srf = vmw_res_to_srf(view->srf);
|
||||
|
||||
list_add_tail(&view->srf_head, &srf->view_list);
|
||||
vmw_cotable_add_resource(view->cotable, &view->cotable_head);
|
||||
view->committed = true;
|
||||
res->id = view->view_id;
|
||||
|
||||
} else {
|
||||
list_del_init(&view->cotable_head);
|
||||
list_del_init(&view->srf_head);
|
||||
view->committed = false;
|
||||
res->id = -1;
|
||||
}
|
||||
mutex_unlock(&dev_priv->binding_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_view_create - Create a hardware view.
|
||||
*
|
||||
* @res: Pointer to the view resource.
|
||||
*
|
||||
* Create a hardware view. Typically used if that view has previously been
|
||||
* destroyed by an eviction operation.
|
||||
*/
|
||||
static int vmw_view_create(struct vmw_resource *res)
|
||||
{
|
||||
struct vmw_view *view = vmw_view(res);
|
||||
struct vmw_surface *srf = vmw_res_to_srf(view->srf);
|
||||
struct vmw_private *dev_priv = res->dev_priv;
|
||||
struct {
|
||||
SVGA3dCmdHeader header;
|
||||
struct vmw_view_define body;
|
||||
} *cmd;
|
||||
|
||||
mutex_lock(&dev_priv->binding_mutex);
|
||||
if (!view->committed) {
|
||||
mutex_unlock(&dev_priv->binding_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
|
||||
view->ctx->id);
|
||||
if (!cmd) {
|
||||
DRM_ERROR("Failed reserving FIFO space for view creation.\n");
|
||||
mutex_unlock(&dev_priv->binding_mutex);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memcpy(cmd, &view->cmd, view->cmd_size);
|
||||
WARN_ON(cmd->body.view_id != view->view_id);
|
||||
/* Sid may have changed due to surface eviction. */
|
||||
WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
|
||||
cmd->body.sid = view->srf->id;
|
||||
vmw_fifo_commit(res->dev_priv, view->cmd_size);
|
||||
res->id = view->view_id;
|
||||
list_add_tail(&view->srf_head, &srf->view_list);
|
||||
vmw_cotable_add_resource(view->cotable, &view->cotable_head);
|
||||
mutex_unlock(&dev_priv->binding_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_view_destroy - Destroy a hardware view.
|
||||
*
|
||||
* @res: Pointer to the view resource.
|
||||
*
|
||||
* Destroy a hardware view. Typically used on unexpected termination of the
|
||||
* owning process or if the surface the view is pointing to is destroyed.
|
||||
*/
|
||||
static int vmw_view_destroy(struct vmw_resource *res)
|
||||
{
|
||||
struct vmw_private *dev_priv = res->dev_priv;
|
||||
struct vmw_view *view = vmw_view(res);
|
||||
struct {
|
||||
SVGA3dCmdHeader header;
|
||||
union vmw_view_destroy body;
|
||||
} *cmd;
|
||||
|
||||
WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
|
||||
vmw_binding_res_list_scrub(&res->binding_head);
|
||||
|
||||
if (!view->committed || res->id == -1)
|
||||
return 0;
|
||||
|
||||
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
|
||||
if (!cmd) {
|
||||
DRM_ERROR("Failed reserving FIFO space for view "
|
||||
"destruction.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
cmd->header.id = vmw_view_destroy_cmds[view->view_type];
|
||||
cmd->header.size = sizeof(cmd->body);
|
||||
cmd->body.view_id = view->view_id;
|
||||
vmw_fifo_commit(dev_priv, sizeof(*cmd));
|
||||
res->id = -1;
|
||||
list_del_init(&view->cotable_head);
|
||||
list_del_init(&view->srf_head);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
|
||||
*
|
||||
* @res: Pointer to the view resource.
|
||||
*
|
||||
* Destroy a hardware view if it's still present.
|
||||
*/
|
||||
static void vmw_hw_view_destroy(struct vmw_resource *res)
|
||||
{
|
||||
struct vmw_private *dev_priv = res->dev_priv;
|
||||
|
||||
mutex_lock(&dev_priv->binding_mutex);
|
||||
WARN_ON(vmw_view_destroy(res));
|
||||
res->id = -1;
|
||||
mutex_unlock(&dev_priv->binding_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
|
||||
*
|
||||
* @user_key: The user-space id used for the view.
|
||||
* @view_type: The view type.
|
||||
*
|
||||
* Destroy a hardware view if it's still present.
|
||||
*/
|
||||
static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
|
||||
{
|
||||
return user_key | (view_type << 20);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_view_id_ok - Basic view id and type range checks.
|
||||
*
|
||||
* @user_key: The user-space id used for the view.
|
||||
* @view_type: The view type.
|
||||
*
|
||||
* Checks that the view id and type (typically provided by user-space) is
|
||||
* valid.
|
||||
*/
|
||||
static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
|
||||
{
|
||||
return (user_key < SVGA_COTABLE_MAX_IDS &&
|
||||
view_type < vmw_view_max);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_view_res_free - resource res_free callback for view resources
|
||||
*
|
||||
* @res: Pointer to a struct vmw_resource
|
||||
*
|
||||
* Frees memory and memory accounting held by a struct vmw_view.
|
||||
*/
|
||||
static void vmw_view_res_free(struct vmw_resource *res)
|
||||
{
|
||||
struct vmw_view *view = vmw_view(res);
|
||||
size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
|
||||
struct vmw_private *dev_priv = res->dev_priv;
|
||||
|
||||
vmw_resource_unreference(&view->cotable);
|
||||
vmw_resource_unreference(&view->srf);
|
||||
kfree_rcu(view, rcu);
|
||||
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_view_add - Create a view resource and stage it for addition
|
||||
* as a command buffer managed resource.
|
||||
*
|
||||
* @man: Pointer to the compat shader manager identifying the shader namespace.
|
||||
* @ctx: Pointer to a struct vmw_resource identifying the active context.
|
||||
* @srf: Pointer to a struct vmw_resource identifying the surface the view
|
||||
* points to.
|
||||
* @view_type: The view type deduced from the view create command.
|
||||
* @user_key: The key that is used to identify the shader. The key is
|
||||
* unique to the view type and to the context.
|
||||
* @cmd: Pointer to the view create command in the command stream.
|
||||
* @cmd_size: Size of the view create command in the command stream.
|
||||
* @list: Caller's list of staged command buffer resource actions.
|
||||
*/
|
||||
int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
|
||||
struct vmw_resource *ctx,
|
||||
struct vmw_resource *srf,
|
||||
enum vmw_view_type view_type,
|
||||
u32 user_key,
|
||||
const void *cmd,
|
||||
size_t cmd_size,
|
||||
struct list_head *list)
|
||||
{
|
||||
static const size_t vmw_view_define_sizes[] = {
|
||||
[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
|
||||
[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
|
||||
[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
|
||||
};
|
||||
|
||||
struct vmw_private *dev_priv = ctx->dev_priv;
|
||||
struct vmw_resource *res;
|
||||
struct vmw_view *view;
|
||||
size_t size;
|
||||
int ret;
|
||||
|
||||
if (cmd_size != vmw_view_define_sizes[view_type] +
|
||||
sizeof(SVGA3dCmdHeader)) {
|
||||
DRM_ERROR("Illegal view create command size.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!vmw_view_id_ok(user_key, view_type)) {
|
||||
DRM_ERROR("Illegal view add view id.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
size = offsetof(struct vmw_view, cmd) + cmd_size;
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
|
||||
if (ret) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for view"
|
||||
" creation.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
view = kmalloc(size, GFP_KERNEL);
|
||||
if (!view) {
|
||||
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
res = &view->res;
|
||||
view->ctx = ctx;
|
||||
view->srf = vmw_resource_reference(srf);
|
||||
view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
|
||||
view->view_type = view_type;
|
||||
view->view_id = user_key;
|
||||
view->cmd_size = cmd_size;
|
||||
view->committed = false;
|
||||
INIT_LIST_HEAD(&view->srf_head);
|
||||
INIT_LIST_HEAD(&view->cotable_head);
|
||||
memcpy(&view->cmd, cmd, cmd_size);
|
||||
ret = vmw_resource_init(dev_priv, res, true,
|
||||
vmw_view_res_free, &vmw_view_func);
|
||||
if (ret)
|
||||
		goto out_resource_init;

	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
				 vmw_view_key(user_key, view_type),
				 res, list);
	if (ret)
		goto out_resource_init;

	res->id = view->view_id;
	vmw_resource_activate(res, vmw_hw_view_destroy);

out_resource_init:
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_view_remove - Stage a view for removal.
 *
 * @man: Pointer to the view manager identifying the shader namespace.
 * @user_key: The key that is used to identify the view. The key is
 * unique to the view type.
 * @view_type: View type
 * @list: Caller's list of staged command buffer resource actions.
 * @res_p: If the resource is in an already committed state, points to the
 * struct vmw_resource on successful return. The pointer will be
 * non ref-counted.
 */
int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
		    u32 user_key, enum vmw_view_type view_type,
		    struct list_head *list,
		    struct vmw_resource **res_p)
{
	if (!vmw_view_id_ok(user_key, view_type)) {
		DRM_ERROR("Illegal view remove view id.\n");
		return -EINVAL;
	}

	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
				     vmw_view_key(user_key, view_type),
				     list, res_p);
}

/**
 * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
 *
 * @dev_priv: Pointer to a device private struct.
 * @list: List of views belonging to a cotable.
 * @readback: Unused. Needed for function interface only.
 *
 * This function evicts all views belonging to a cotable.
 * It must be called with the binding_mutex held, and the caller must hold
 * a reference to the view resource. This is typically called before the
 * cotable is paged out.
 */
void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
				   struct list_head *list,
				   bool readback)
{
	struct vmw_view *entry, *next;

	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));

	list_for_each_entry_safe(entry, next, list, cotable_head)
		WARN_ON(vmw_view_destroy(&entry->res));
}

/**
 * vmw_view_surface_list_destroy - Evict all views pointing to a surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @list: List of views pointing to a surface.
 *
 * This function evicts all views pointing to a surface. This is typically
 * called before the surface is evicted.
 */
void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
				   struct list_head *list)
{
	struct vmw_view *entry, *next;

	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));

	list_for_each_entry_safe(entry, next, list, srf_head)
		WARN_ON(vmw_view_destroy(&entry->res));
}

/**
 * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
 * pointing to.
 *
 * @res: pointer to a view resource.
 *
 * Note that the view itself is holding a reference, so as long as the
 * view resource is alive, the surface resource will be.
 */
struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
{
	return vmw_view(res)->srf;
}

/**
 * vmw_view_lookup - Look up a view.
 *
 * @man: The context's cmdbuf ref manager.
 * @view_type: The view type.
 * @user_key: The view user id.
 *
 * Returns a refcounted pointer to a view or an error pointer if not found.
 */
struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
				     enum vmw_view_type view_type,
				     u32 user_key)
{
	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
				     vmw_view_key(user_key, view_type));
}
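Because vmw_view_lookup() returns an error pointer rather than NULL when the view is not found, callers test the result with the usual kernel idiom. A minimal caller-side sketch (an editor's illustration, not taken from the driver source; the error value is whatever vmw_cmdbuf_res_lookup() propagates):

/* Hedged example: look up a shader-resource view and drop the reference. */
static int example_use_view(struct vmw_cmdbuf_res_manager *man, u32 user_key)
{
	struct vmw_resource *res;

	res = vmw_view_lookup(man, vmw_view_sr, user_key);
	if (IS_ERR(res))
		return PTR_ERR(res);	/* propagated from the cmdbuf manager */

	/* ... use the refcounted view here ... */

	vmw_resource_unreference(&res);
	return 0;
}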

const u32 vmw_view_destroy_cmds[] = {
	[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
	[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
	[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
};

const SVGACOTableType vmw_view_cotables[] = {
	[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
	[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
	[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
};

const SVGACOTableType vmw_so_cotables[] = {
	[vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
	[vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
	[vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
	[vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
	[vmw_so_ss] = SVGA_COTABLE_SAMPLER,
	[vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
};

/* To remove unused function warning */
static void vmw_so_build_asserts(void) __attribute__((used));

/*
 * This function is unused at run-time, and only used to dump various build
 * asserts important for code optimization assumptions.
 */
static void vmw_so_build_asserts(void)
{
	/* Assert that our vmw_view_cmd_to_type() function is correct. */
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);

	/* Assert that our "one body fits all" assumption is valid */
	BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));

	/* Assert that the view key space can hold all view ids. */
	BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));

	/*
	 * Assert that the offset of sid in all view define commands
	 * is what we assume it to be.
	 */
	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
		     offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
		     offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
		     offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
}
160
drivers/gpu/drm/vmwgfx/vmwgfx_so.h
Normal file
@@ -0,0 +1,160 @@
/**************************************************************************
 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#ifndef VMW_SO_H
#define VMW_SO_H

enum vmw_view_type {
	vmw_view_sr,
	vmw_view_rt,
	vmw_view_ds,
	vmw_view_max,
};

enum vmw_so_type {
	vmw_so_el,
	vmw_so_bs,
	vmw_so_ds,
	vmw_so_rs,
	vmw_so_ss,
	vmw_so_so,
	vmw_so_max,
};

/**
 * union vmw_view_destroy - view destruction command body
 *
 * @rtv: RenderTarget view destruction command body
 * @srv: ShaderResource view destruction command body
 * @dsv: DepthStencil view destruction command body
 * @view_id: A single u32 view id.
 *
 * The assumption here is that all union members are really represented by a
 * single u32 in the command stream. If that's not the case, the size of this
 * union will not equal the size of a u32, the assumption is invalid, and we
 * detect that at compile time in the vmw_so_build_asserts() function.
 */
union vmw_view_destroy {
	struct SVGA3dCmdDXDestroyRenderTargetView rtv;
	struct SVGA3dCmdDXDestroyShaderResourceView srv;
	struct SVGA3dCmdDXDestroyDepthStencilView dsv;
	u32 view_id;
};
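The compile-time guarantee that the comment above relies on can be reproduced in isolation. A minimal sketch of the same pattern with stand-in types (every name below is illustrative, not from the driver):

#include <stdint.h>

struct fake_destroy_a { uint32_t view_id; };
struct fake_destroy_b { uint32_t view_id; };

union fake_view_destroy {
	struct fake_destroy_a a;
	struct fake_destroy_b b;
	uint32_t view_id;
};

/* Stand-in for BUILD_BUG_ON(): the negative array size fails the build
 * as soon as any member grows past a single u32. */
typedef char fake_destroy_fits_u32
	[(sizeof(union fake_view_destroy) == sizeof(uint32_t)) ? 1 : -1];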

/* Map enum vmw_view_type to view destroy command ids */
extern const u32 vmw_view_destroy_cmds[];

/* Map enum vmw_view_type to SVGACOTableType */
extern const SVGACOTableType vmw_view_cotables[];

/* Map enum vmw_so_type to SVGACOTableType */
extern const SVGACOTableType vmw_so_cotables[];

/*
 * vmw_view_cmd_to_type - Return the view type for a create or destroy command
 *
 * @id: The SVGA3D command id.
 *
 * For a given view create or destroy command id, return the corresponding
 * enum vmw_view_type. If the command is unknown, return vmw_view_max.
 * The validity of the simplified calculation is verified in the
 * vmw_so_build_asserts() function.
 */
static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
{
	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;

	if (tmp > (u32)vmw_view_max)
		return vmw_view_max;

	return (enum vmw_view_type) tmp;
}
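The division by two works because the device lays the view commands out as DEFINE/DESTROY pairs, one pair per view type, starting at the shader-resource define; the BUILD_BUG_ON() block in vmwgfx_so.c pins that layout down. A self-contained sketch of the same mapping with made-up command ids (all names below are illustrative):

enum { DEF_SR = 1100, DST_SR, DEF_RT, DST_RT, DEF_DS, DST_DS };
enum fake_view_type { FAKE_SR, FAKE_RT, FAKE_DS, FAKE_MAX };

static enum fake_view_type fake_cmd_to_type(unsigned int id)
{
	/* Pair index: DEF_SR/DST_SR -> 0, DEF_RT/DST_RT -> 1, ... */
	unsigned int tmp = (id - DEF_SR) / 2;

	/* Unknown ids (including anything below DEF_SR, which wraps to a
	 * huge unsigned value) fall through to FAKE_MAX. */
	return (tmp < FAKE_MAX) ? (enum fake_view_type)tmp : FAKE_MAX;
}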
/*
 * vmw_so_cmd_to_type - Return the state object type for a
 * create or destroy command
 *
 * @id: The SVGA3D command id.
 *
 * For a given state object create or destroy command id,
 * return the corresponding enum vmw_so_type. If the command is unknown,
 * return vmw_so_max. We should perhaps optimize this function using
 * a similar strategy as vmw_view_cmd_to_type().
 */
static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
{
	switch (id) {
	case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
	case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
		return vmw_so_el;
	case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
	case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
		return vmw_so_bs;
	case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
	case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
		return vmw_so_ds;
	case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
	case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
		return vmw_so_rs;
	case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
	case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
		return vmw_so_ss;
	case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
	case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
		return vmw_so_so;
	default:
		break;
	}
	return vmw_so_max;
}
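The optimization that the comment hints at would mirror vmw_view_cmd_to_type(). A sketch of what it could look like, assuming (and this is only an assumption; the switch above deliberately does not rely on it) that the six DEFINE/DESTROY state-object command ids form contiguous pairs in enum vmw_so_type order:

/* Editor's sketch, valid only under the contiguous-pairs assumption. */
static inline enum vmw_so_type vmw_so_cmd_to_type_fast(u32 id)
{
	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT) / 2;

	return (tmp < (u32)vmw_so_max) ? (enum vmw_so_type)tmp : vmw_so_max;
}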
/*
 * View management - vmwgfx_so.c
 */
extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
			struct vmw_resource *ctx,
			struct vmw_resource *srf,
			enum vmw_view_type view_type,
			u32 user_key,
			const void *cmd,
			size_t cmd_size,
			struct list_head *list);

extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key, enum vmw_view_type view_type,
			   struct list_head *list,
			   struct vmw_resource **res_p);

extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
					  struct list_head *view_list);
extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
					  struct list_head *list,
					  bool readback);
extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
					    enum vmw_view_type view_type,
					    u32 user_key);

#endif
1266
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
Normal file
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,8 +27,11 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 #include <ttm/ttm_placement.h>
-#include "svga3d_surfacedefs.h"
+#include "device_include/svga3d_surfacedefs.h"
 
 
 /**
  * struct vmw_user_surface - User-space visible surface resource
@@ -36,7 +39,7 @@
  * @base: The TTM base object handling user-space visibility.
  * @srf: The surface metadata.
  * @size: TTM accounting size for the surface.
- * @master: master of the creating client. Used for security check.
+ * @master: master of the creating client. Used for security check.
  */
 struct vmw_user_surface {
 	struct ttm_prime_object prime;
@@ -220,7 +223,7 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
 	cmd->header.size = cmd_len;
 	cmd->body.sid = srf->res.id;
 	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
+	cmd->body.format = srf->format;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
 		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
@@ -340,7 +343,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
 		dev_priv->used_memory_size -= res->backup_size;
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 	}
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 /**
@@ -576,14 +579,14 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 
 	BUG_ON(res_free == NULL);
 	if (!dev_priv->has_mob)
-		(void) vmw_3d_resource_inc(dev_priv, false);
+		vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
 				(dev_priv->has_mob) ? &vmw_gb_surface_func :
 				&vmw_legacy_surface_func);
 
 	if (unlikely(ret != 0)) {
 		if (!dev_priv->has_mob)
-			vmw_3d_resource_dec(dev_priv, false);
+			vmw_fifo_resource_dec(dev_priv);
 		res_free(res);
 		return ret;
 	}
@@ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 	 * surface validate.
 	 */
 
+	INIT_LIST_HEAD(&srf->view_list);
 	vmw_resource_activate(res, vmw_hw_surface_destroy);
 	return ret;
 }
@@ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	desc = svga3dsurface_get_desc(req->format);
 	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
 		DRM_ERROR("Invalid surface format for surface creation.\n");
+		DRM_ERROR("Format requested is: %d\n", req->format);
 		return -EINVAL;
 	}
 
@@ -1018,17 +1023,21 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_surface *srf = vmw_res_to_srf(res);
-	uint32_t cmd_len, submit_len;
+	uint32_t cmd_len, cmd_id, submit_len;
 	int ret;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDefineGBSurface body;
 	} *cmd;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface_v2 body;
+	} *cmd2;
 
 	if (likely(res->id != -1))
 		return 0;
 
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_alloc_id(res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed to allocate a surface id.\n");
@@ -1040,9 +1049,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd_len = sizeof(cmd->body);
-	submit_len = sizeof(*cmd);
+	if (srf->array_size > 0) {
+		/* has_dx checked on creation time. */
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+		cmd_len = sizeof(cmd2->body);
+		submit_len = sizeof(*cmd2);
+	} else {
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+		cmd_len = sizeof(cmd->body);
+		submit_len = sizeof(*cmd);
+	}
 
 	cmd = vmw_fifo_reserve(dev_priv, submit_len);
+	cmd2 = (typeof(cmd2))cmd;
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
@@ -1050,17 +1069,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
-	cmd->header.size = cmd_len;
-	cmd->body.sid = srf->res.id;
-	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
-	cmd->body.numMipLevels = srf->mip_levels[0];
-	cmd->body.multisampleCount = srf->multisample_count;
-	cmd->body.autogenFilter = srf->autogen_filter;
-	cmd->body.size.width = srf->base_size.width;
-	cmd->body.size.height = srf->base_size.height;
-	cmd->body.size.depth = srf->base_size.depth;
+	if (srf->array_size > 0) {
+		cmd2->header.id = cmd_id;
+		cmd2->header.size = cmd_len;
+		cmd2->body.sid = srf->res.id;
+		cmd2->body.surfaceFlags = srf->flags;
+		cmd2->body.format = cpu_to_le32(srf->format);
+		cmd2->body.numMipLevels = srf->mip_levels[0];
+		cmd2->body.multisampleCount = srf->multisample_count;
+		cmd2->body.autogenFilter = srf->autogen_filter;
+		cmd2->body.size.width = srf->base_size.width;
+		cmd2->body.size.height = srf->base_size.height;
+		cmd2->body.size.depth = srf->base_size.depth;
+		cmd2->body.arraySize = srf->array_size;
+	} else {
+		cmd->header.id = cmd_id;
+		cmd->header.size = cmd_len;
+		cmd->body.sid = srf->res.id;
+		cmd->body.surfaceFlags = srf->flags;
+		cmd->body.format = cpu_to_le32(srf->format);
+		cmd->body.numMipLevels = srf->mip_levels[0];
+		cmd->body.multisampleCount = srf->multisample_count;
+		cmd->body.autogenFilter = srf->autogen_filter;
+		cmd->body.size.width = srf->base_size.width;
+		cmd->body.size.height = srf->base_size.height;
+		cmd->body.size.depth = srf->base_size.depth;
+	}
 
 	vmw_fifo_commit(dev_priv, submit_len);
 
 	return 0;
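Both branches above fill the same FIFO reservation; cmd2 simply aliases cmd, so only the size reserved up front differs between the v1 and v2 layouts. A stand-alone sketch of that pattern (all types and ids below are illustrative stand-ins, not device definitions):

#include <stdint.h>

struct hdr { uint32_t id; uint32_t size; };
struct body_v1 { uint32_t sid; };
struct body_v2 { uint32_t sid; uint32_t array_size; };

struct cmd_v1 { struct hdr header; struct body_v1 body; };
struct cmd_v2 { struct hdr header; struct body_v2 body; };

/* One reservation sized for whichever layout is needed; the second
 * pointer is just a different view of the same memory. */
static void encode(void *fifo, int use_v2, uint32_t sid, uint32_t array_size)
{
	struct cmd_v1 *cmd = fifo;
	struct cmd_v2 *cmd2 = fifo;

	if (use_v2) {
		cmd2->header.id = 2;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = sid;
		cmd2->body.array_size = array_size;
	} else {
		cmd->header.id = 1;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.sid = sid;
	}
}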
@@ -1068,7 +1103,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 out_no_fifo:
 	vmw_resource_release_id(res);
 out_no_id:
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 	return ret;
 }
 
@@ -1188,6 +1223,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 static int vmw_gb_surface_destroy(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBSurface body;
@@ -1197,7 +1233,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_scrub(&res->binding_head);
+	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -1213,11 +1250,12 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
 
+
 /**
  * vmw_gb_surface_define_ioctl - Ioctl function implementing
  * the user surface define functionality.
@@ -1241,77 +1279,51 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	const struct svga3d_surface_desc *desc;
 	uint32_t backup_handle;
 
-
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
 			128;
 
 	size = vmw_user_surface_size + 128;
 
-	desc = svga3dsurface_get_desc(req->format);
-	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
-		DRM_ERROR("Invalid surface format for surface creation.\n");
-		return -EINVAL;
-	}
+	/* Define a surface based on the parameters. */
+	ret = vmw_surface_gb_priv_define(dev,
+			size,
+			req->svga3d_flags,
+			req->format,
+			req->drm_surface_flags & drm_vmw_surface_flag_scanout,
+			req->mip_levels,
+			req->multisample_count,
+			req->array_size,
+			req->base_size,
+			&srf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	user_srf = container_of(srf, struct vmw_user_surface, srf);
+	if (drm_is_primary_client(file_priv))
+		user_srf->master = drm_master_get(file_priv->master);
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   size, false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for surface"
-				  " creation.\n");
-		goto out_unlock;
-	}
+	res = &user_srf->srf.res;
 
-	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_user_srf;
-	}
-
-	srf = &user_srf->srf;
-	res = &srf->res;
-
-	srf->flags = req->svga3d_flags;
-	srf->format = req->format;
-	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
-	srf->mip_levels[0] = req->mip_levels;
-	srf->num_sizes = 1;
-	srf->sizes = NULL;
-	srf->offsets = NULL;
-	user_srf->size = size;
-	srf->base_size = req->base_size;
-	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-	srf->multisample_count = req->multisample_count;
-	res->backup_size = svga3dsurface_get_serialized_size
-		(srf->format, srf->base_size, srf->mip_levels[0],
-		 srf->flags & SVGA3D_SURFACE_CUBEMAP);
-
-	user_srf->prime.base.shareable = false;
-	user_srf->prime.base.tfile = NULL;
-	if (drm_is_primary_client(file_priv))
-		user_srf->master = drm_master_get(file_priv->master);
-
-	/**
-	 * From this point, the generic resource management functions
-	 * destroy the object on failure.
-	 */
-
-	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
-	if (unlikely(ret != 0))
-		goto out_unlock;
 
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
 					     &res->backup);
-	} else if (req->drm_surface_flags &
-		   drm_vmw_surface_flag_create_buffer)
+		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
+		    res->backup_size) {
+			DRM_ERROR("Surface backup buffer is too small.\n");
+			vmw_dmabuf_unreference(&res->backup);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
 					    res->backup_size,
 					    req->drm_surface_flags &
@@ -1324,7 +1336,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	tmp = vmw_resource_reference(&srf->res);
+	tmp = vmw_resource_reference(res);
 	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
 				    req->drm_surface_flags &
 				    drm_vmw_surface_flag_shareable,
@@ -1337,7 +1349,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	rep->handle = user_srf->prime.base.hash.key;
+	rep->handle = user_srf->prime.base.hash.key;
 	rep->backup_size = res->backup_size;
 	if (res->backup) {
 		rep->buffer_map_handle =
@@ -1352,10 +1364,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	vmw_resource_unreference(&res);
 
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return 0;
-out_no_user_srf:
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
@@ -1415,6 +1423,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 	rep->creq.drm_surface_flags = 0;
 	rep->creq.multisample_count = srf->multisample_count;
 	rep->creq.autogen_filter = srf->autogen_filter;
+	rep->creq.array_size = srf->array_size;
 	rep->creq.buffer_handle = backup_handle;
 	rep->creq.base_size = srf->base_size;
 	rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1429,3 +1438,137 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 
 	return ret;
 }
+
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev: Pointer to a struct drm_device
+ * @user_accounting_size: Used to track user-space memory usage, set
+ * to 0 for kernel mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if intended to be used for a scanout buffer
+ * @num_mip_levels: number of MIP levels
+ * @multisample_count:
+ * @array_size: Surface array size.
+ * @size: width, height, depth of the surface requested
+ * @srf_out: allocated surface. Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx. For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	int ret;
+	u32 num_layers;
+
+	*srf_out = NULL;
+
+	if (for_scanout) {
+		if (!svga3dsurface_is_screen_target_format(format)) {
+			DRM_ERROR("Invalid Screen Target surface format.");
+			return -EINVAL;
+		}
+	} else {
+		const struct svga3d_surface_desc *desc;
+
+		desc = svga3dsurface_get_desc(format);
+		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+			DRM_ERROR("Invalid surface format.\n");
+			return -EINVAL;
+		}
+	}
+
+	/* array_size must be zero for a non-GL3 host. */
+	if (array_size > 0 && !dev_priv->has_dx) {
+		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   user_accounting_size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	*srf_out = &user_srf->srf;
+	user_srf->size = user_accounting_size;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile = NULL;
+
+	srf = &user_srf->srf;
+	srf->flags = svga3d_flags;
+	srf->format = format;
+	srf->scanout = for_scanout;
+	srf->mip_levels[0] = num_mip_levels;
+	srf->num_sizes = 1;
+	srf->sizes = NULL;
+	srf->offsets = NULL;
+	srf->base_size = size;
+	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+	srf->array_size = array_size;
+	srf->multisample_count = multisample_count;
+
+	if (array_size)
+		num_layers = array_size;
+	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+		num_layers = SVGA3D_MAX_SURFACE_FACES;
+	else
+		num_layers = 1;
+
+	srf->res.backup_size =
+		svga3dsurface_get_serialized_size(srf->format,
+						  srf->base_size,
+						  srf->mip_levels[0],
+						  num_layers);
+
+	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+		srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+	if (dev_priv->active_display_unit == vmw_du_screen_target &&
+	    for_scanout)
+		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
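The backup-size computation in vmw_surface_gb_priv_define() hinges on the layer count chosen just above the svga3dsurface_get_serialized_size() call: array surfaces contribute array_size layers, cube maps one layer per face, everything else a single layer. The selection in isolation (an editor's sketch; MAX_CUBE_FACES stands in for SVGA3D_MAX_SURFACE_FACES):

#include <stdbool.h>
#include <stdint.h>

#define MAX_CUBE_FACES 6

static uint32_t surface_num_layers(uint32_t array_size, bool is_cubemap)
{
	if (array_size)
		return array_size;	/* DX array surface */
	if (is_cubemap)
		return MAX_CUBE_FACES;	/* one layer per cube face */
	return 1;			/* plain 2D/3D surface */
}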
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -887,6 +887,7 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
 /*@{*/
 
 /* Driver support (drm_drv.h) */
+extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
 extern long drm_ioctl(struct file *filp,
 		      unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
 #define DRM_VMW_GB_SURFACE_CREATE 23
 #define DRM_VMW_GB_SURFACE_REF 24
 #define DRM_VMW_SYNCCPU 25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 
 /*************************************************************************/
 /**
@@ -88,6 +89,8 @@
 #define DRM_VMW_PARAM_3D_CAPS_SIZE 8
 #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
 #define DRM_VMW_PARAM_MAX_MOB_SIZE 10
+#define DRM_VMW_PARAM_SCREEN_TARGET 11
+#define DRM_VMW_PARAM_DX 12
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@ union drm_vmw_surface_reference_arg {
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
 
 struct drm_vmw_execbuf_arg {
 	uint64_t commands;
@@ -305,6 +308,8 @@ struct drm_vmw_execbuf_arg {
 	uint64_t fence_rep;
 	uint32_t version;
 	uint32_t flags;
+	uint32_t context_handle;
+	uint32_t pad64;
 };
 
 /**
@@ -825,7 +830,6 @@ struct drm_vmw_update_layout_arg {
 enum drm_vmw_shader_type {
 	drm_vmw_shader_type_vs = 0,
 	drm_vmw_shader_type_ps,
-	drm_vmw_shader_type_gs
 };
 
 
@@ -907,6 +911,8 @@ enum drm_vmw_surface_flags {
  * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
  * if none.
  * @base_size Size of the base mip level for all faces.
+ * @array_size Must be zero for non-DX hardware, and if non-zero
+ * svga3d_flags must have proper bind flags setup.
  *
  * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
  * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
 	uint32_t multisample_count;
 	uint32_t autogen_filter;
 	uint32_t buffer_handle;
-	uint32_t pad64;
+	uint32_t array_size;
 	struct drm_vmw_size base_size;
 };
 
||||
|
@ -1059,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
|
|||
uint32_t pad64;
|
||||
};
|
||||
|
||||
/*************************************************************************/
|
||||
/**
|
||||
* DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
|
||||
*
|
||||
* Allocates a device unique context id, and queues a create context command
|
||||
* for the host. Does not wait for host completion.
|
||||
*/
|
||||
enum drm_vmw_extended_context {
|
||||
drm_vmw_context_legacy,
|
||||
drm_vmw_context_dx
|
||||
};
|
||||
|
||||
/**
|
||||
* union drm_vmw_extended_context_arg
|
||||
*
|
||||
* @req: Context type.
|
||||
* @rep: Context identifier.
|
||||
*
|
||||
* Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
|
||||
*/
|
||||
union drm_vmw_extended_context_arg {
|
||||
enum drm_vmw_extended_context req;
|
||||
struct drm_vmw_context_arg rep;
|
||||
};
|
||||
#endif
|
||||
|
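The req/rep union follows the same convention as the other vmwgfx ioctl arguments: userspace writes the request member and the kernel overwrites the same storage with the reply. A hedged userspace sketch; the DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT request number and the drm_vmw_context_arg field layout are not part of this diff, so both are assumptions here:

union drm_vmw_extended_context_arg arg = {
	.req = drm_vmw_context_dx,	/* ask the host for a DX context */
};

/* Assumed wrapper macro and reply field name:
 *	if (drmIoctl(fd, DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT, &arg) == 0)
 *		context_id = arg.rep.cid;
 */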