summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLingfeng Yang <lfy@google.com>2020-04-16 12:19:08 -0700
committerAlistair Delva <adelva@google.com>2020-04-22 16:40:28 -0700
commit911adb384d314a82c6e234a3a055b19e680517cd (patch)
tree7847ee169207fddc668a156c7f982125298e72ac
parent6babe2174961ede47ff9d1c2314acedb480c0840 (diff)
downloadcuttlefish-modules-911adb384d314a82c6e234a3a055b19e680517cd.tar.gz
CHROMIUM: drm/virtio: rebase zero-copy patches to virgl/drm-misc-next
* Adds RESOURCE_MAP/RESOURCE_UNMAP * Removes guest_memory_type/guest_caching_type in favor of a bitmask * Removes EXECBUFFER_v2 until Q3 * Renames HOST_COHERENT to HOST_VISIBLE BUG=chromium:924405 TEST=compile Test: - dEQP-VK.smoke* pass w/ gfxstream and host coherent memory enabled - launch_cvd with 2d, virgl, and gfxstream modes work with current - launch_cvd with 2d, virgl, and gfxstream modes work w/ crosvm modified for host coherent memory (https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2035595) Signed-off-by: Lingfeng Yang <lfy@google.com> Bug: 153580313 Change-Id: I04052c3d164c77c713bbc7251c357fd43653fa50
-rw-r--r--uapi/drm/virtgpu_drm.h73
-rw-r--r--uapi/linux/virtio_gpu.h146
-rw-r--r--virtio_gpu/virtgpu_debugfs.c5
-rw-r--r--virtio_gpu/virtgpu_drv.c5
-rw-r--r--virtio_gpu/virtgpu_drv.h56
-rw-r--r--virtio_gpu/virtgpu_gem.c4
-rw-r--r--virtio_gpu/virtgpu_ioctl.c186
-rw-r--r--virtio_gpu/virtgpu_kms.c13
-rw-r--r--virtio_gpu/virtgpu_object.c37
-rw-r--r--virtio_gpu/virtgpu_ttm.c8
-rw-r--r--virtio_gpu/virtgpu_vq.c199
11 files changed, 281 insertions, 451 deletions
diff --git a/uapi/drm/virtgpu_drm.h b/uapi/drm/virtgpu_drm.h
index 144ea5e..5759963 100644
--- a/uapi/drm/virtgpu_drm.h
+++ b/uapi/drm/virtgpu_drm.h
@@ -46,9 +46,7 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
-#define DRM_VIRTGPU_RESOURCE_CREATE_V2 0x0a
-#define DRM_VIRTGPU_ALLOCATION_METADATA_REQUEST 0x0b
-#define DRM_VIRTGPU_ALLOCATION_METADATA_RESPONSE 0x0c
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
@@ -74,19 +72,8 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
-#define VIRTGPU_PARAM_RESOURCE_V2 3
-#define VIRTGPU_PARAM_SHARED_GUEST 4
-#define VIRTGPU_PARAM_HOST_COHERENT 5
-
-#define VIRTGPU_MEMORY_UNDEFINED 0
-#define VIRTGPU_MEMORY_TRANSFER 1
-#define VIRTGPU_MEMORY_SHARED_GUEST 2
-#define VIRTGPU_MEMORY_HOST_COHERENT 3
-
-#define VIRTGPU_UNDEFINED_CACHING 0
-#define VIRTGPU_CACHED 1
-#define VIRTGPU_WRITE_COMBINE 2
-#define VIRTGPU_UNCACHED 3
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4
struct drm_virtgpu_getparam {
__u64 param;
@@ -162,29 +149,27 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
-struct drm_virtgpu_resource_create_v2 {
- __u32 resource_id;
- __u32 guest_memory_type;
- __u32 caching_type;
- __u32 args_size;
- __u32 gem_handle;
- __u64 size;
- __u64 args; /* void */
-};
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_RES_BLOB_GUEST_MASK 0x000f
+#define VIRTGPU_RES_BLOB_GUEST_NONE 0x0000
+#define VIRTGPU_RES_BLOB_GUEST_SYSTEM 0x0001
-struct drm_virtgpu_allocation_metadata_request {
- __u32 request_id;
- __u32 pad;
- __u32 request_size;
- __u32 response_size;
- __u64 request; /* void */
-};
+#define VIRTGPU_RES_BLOB_HOST_MASK 0x00f0
+#define VIRTGPU_RES_BLOB_HOST_NONE 0x0000
+#define VIRTGPU_RES_BLOB_HOST 0x0010
-struct drm_virtgpu_allocation_metadata_response {
- __u32 request_id;
- __u32 pad;
- __u32 response_size;
- __u64 response; /* void */
+#define VIRTGPU_RES_BLOB_USE_MASK 0x0f00
+#define VIRTGPU_RES_BLOB_USE_NONE 0x0000
+#define VIRTGPU_RES_BLOB_USE_MAPPABLE 0x0100
+#define VIRTGPU_RES_BLOB_USE_SHAREABLE 0x0200
+#define VIRTGPU_RES_BLOB_USE_CROSS_DEVICE 0x0400
+ __u32 flags;
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u32 cmd_size;
+ __u64 cmd;
+ __u64 size;
+ __u64 memory_id;
};
#define DRM_IOCTL_VIRTGPU_MAP \
@@ -222,17 +207,9 @@ struct drm_virtgpu_allocation_metadata_response {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
-#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_V2 \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_V2, \
- struct drm_virtgpu_resource_create_v2)
-
-#define DRM_IOCTL_VIRTGPU_ALLOCATION_METADATA_REQUEST \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_ALLOCATION_METADATA_REQUEST, \
- struct drm_virtgpu_allocation_metadata_request)
-
-#define DRM_IOCTL_VIRTGPU_ALLOCATION_METADATA_RESPONSE \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_ALLOCATION_METADATA_RESPONSE, \
- struct drm_virtgpu_allocation_metadata_response)
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
+ struct drm_virtgpu_resource_create_blob)
#if defined(__cplusplus)
}
diff --git a/uapi/linux/virtio_gpu.h b/uapi/linux/virtio_gpu.h
index ba2c36e..c802678 100644
--- a/uapi/linux/virtio_gpu.h
+++ b/uapi/linux/virtio_gpu.h
@@ -54,20 +54,20 @@
* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID
*/
#define VIRTIO_GPU_F_RESOURCE_UUID 2
-
/*
- * VIRTIO_GPU_CMD_ALLOCATION_METADATA
- * VIRTIO_GPU_CMD_RESOURCE_CREATE_V2
+ * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
*/
-#define VIRTIO_GPU_F_RESOURCE_V2 3
+#define VIRTIO_GPU_F_RESOURCE_BLOB 3
/*
- * Ability to turn guest pages into host buffers.
+ * VIRTIO_GPU_CMD_RESOURCE_MAP
+ * VIRTIO_GPU_CMD_RESOURCE_UNMAP
*/
-#define VIRTIO_GPU_F_SHARED_GUEST 4
+#define VIRTIO_GPU_F_HOST_VISIBLE 4
/*
- * Can inject host pages into guest.
+ * VIRTIO_GPU_CMD_CTX_CREATE_V2
*/
-#define VIRTIO_GPU_F_HOST_COHERENT 5
+#define VIRTIO_GPU_F_VULKAN 5
+
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -94,9 +94,9 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
- VIRTIO_GPU_CMD_RESOURCE_CREATE_V2,
- VIRTIO_GPU_CMD_RESOURCE_CREATE_V2_UNREF,
- VIRTIO_GPU_CMD_ALLOCATION_METADATA,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
+ VIRTIO_GPU_CMD_RESOURCE_MAP,
+ VIRTIO_GPU_CMD_RESOURCE_UNMAP,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -109,14 +109,13 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_CAPSET,
VIRTIO_GPU_RESP_OK_EDID,
VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
+ VIRTIO_GPU_RESP_OK_MAP_INFO,
/* CHROMIUM: legacy responses */
VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO_LEGACY = 0x1104,
- VIRTIO_GPU_RESP_OK_ALLOCATION_METADATA_LEGACY = 0x1106,
/* CHROMIUM: success responses */
VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO = 0x11FF,
- VIRTIO_GPU_RESP_OK_ALLOCATION_METADATA = 0x11FE,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -128,30 +127,6 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_ERR_INVALID_MEMORY_ID,
};
-enum virtio_gpu_memory_type {
- VIRTIO_GPU_MEMORY_UNDEFINED = 0,
-
- /*
- * Traditional virtio-gpu memory.
- * Has both host and guest side storage.
- *
- * VIRTIO_GPU_CMD_TRANSFER_* commands are used
- * to copy between guest and host storage.
- *
- * Created using VIRTIO_GPU_CMD_RESOURCE_CREATE_V2.
- */
- VIRTIO_GPU_MEMORY_TRANSFER,
- VIRTIO_GPU_MEMORY_SHARED_GUEST,
- VIRTIO_GPU_MEMORY_HOST_COHERENT,
-};
-
-enum virtio_gpu_caching_type {
- VIRTIO_GPU_UNDEFINED_CACHING = 0,
- VIRTIO_GPU_CACHED,
- VIRTIO_GPU_WRITE_COMBINE,
- VIRTIO_GPU_UNCACHED,
-};
-
#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
struct virtio_gpu_ctrl_hdr {
@@ -287,7 +262,6 @@ struct virtio_gpu_transfer_host_3d {
struct virtio_gpu_resource_create_3d {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
- /* memory_type is VIRTIO_GPU_MEMORY_TRANSFER */
__le32 target;
__le32 format;
__le32 bind;
@@ -328,47 +302,6 @@ struct virtio_gpu_cmd_submit {
__le32 padding;
};
-/* VIRTIO_GPU_CMD_RESOURCE_CREATE_V2 */
-struct virtio_gpu_resource_create_v2 {
- struct virtio_gpu_ctrl_hdr hdr;
- __le32 resource_id;
- __le32 guest_memory_type;
- __le32 caching_type;
- __le32 pad;
- __le64 size;
- __le64 pci_addr;
- __le32 args_size;
- __le32 nr_entries;
- /* ('nr_entries' * struct virtio_gpu_mem_entry) + 'args_size'
- * bytes follow here.
- */
-};
-
-/* VIRTIO_GPU_CMD_RESOURCE_CREATE_V2_UNREF */
-struct virtio_gpu_resource_v2_unref {
- struct virtio_gpu_ctrl_hdr hdr;
- __le32 resource_id;
- __le32 padding;
-};
-
-/* VIRTIO_GPU_CMD_RESOURCE_CREATE_V2 */
-struct virtio_gpu_allocation_metadata {
- struct virtio_gpu_ctrl_hdr hdr;
- __le32 request_id;
- __le32 pad;
- __le32 request_size;
- __le32 response_size;
- /* 'request_size' bytes go here */
-};
-
-/* VIRTIO_GPU_RESP_OK_ALLOCATION_METADATA */
-struct virtio_gpu_resp_allocation_metadata {
- struct virtio_gpu_ctrl_hdr hdr;
- __le32 request_id;
- __le32 response_size;
- /* 'response_size' bytes go here */
-};
-
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
@@ -461,4 +394,59 @@ struct virtio_gpu_resp_resource_uuid {
__u8 uuid[16];
};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
+struct virtio_gpu_resource_create_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+#define VIRTIO_GPU_RES_BLOB_GUEST_MASK 0x000f
+#define VIRTIO_GPU_RES_BLOB_GUEST_NONE 0x0000
+#define VIRTIO_GPU_RES_BLOB_GUEST_SYSTEM 0x0001
+
+#define VIRTIO_GPU_RES_BLOB_HOST_MASK 0x00f0
+#define VIRTIO_GPU_RES_BLOB_HOST_NONE 0x0000
+#define VIRTIO_GPU_RES_BLOB_HOST 0x0010
+
+#define VIRTIO_GPU_RES_BLOB_USE_MASK 0x0f00
+#define VIRTIO_GPU_RES_BLOB_USE_NONE 0x0000
+#define VIRTIO_GPU_RES_BLOB_USE_MAPPABLE 0x0100
+#define VIRTIO_GPU_RES_BLOB_USE_SHAREABLE 0x0200
+#define VIRTIO_GPU_RES_BLOB_USE_CROSS_DEVICE 0x0400
+ __le32 flags;
+ __le64 size;
+ __le64 memory_id;
+ __le32 nr_entries;
+ __le32 padding;
+ /*
+ * nr_entries * sizeof(struct virtio_gpu_mem_entry) bytes follow
+ */
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_MAP */
+struct virtio_gpu_resource_map {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+ __le64 offset;
+};
+
+/* VIRTIO_GPU_RESP_OK_MAP_INFO */
+#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
+#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
+#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
+#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
+#define VIRTIO_GPU_MAP_CACHE_WC 0x03
+struct virtio_gpu_resp_map_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __u32 map_flags;
+ __u32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNMAP */
+struct virtio_gpu_resource_unmap {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
#endif
diff --git a/virtio_gpu/virtgpu_debugfs.c b/virtio_gpu/virtgpu_debugfs.c
index 1054ddc..91128d7 100644
--- a/virtio_gpu/virtgpu_debugfs.c
+++ b/virtio_gpu/virtgpu_debugfs.c
@@ -47,9 +47,8 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
- virtio_add_bool(m, "resource v2", vgdev->has_resource_v2);
- virtio_add_bool(m, "shared guest", vgdev->has_shared_guest);
- virtio_add_bool(m, "host coherent", vgdev->has_host_coherent);
+ virtio_add_bool(m, "resource blob", vgdev->has_resource_blob);
+ virtio_add_bool(m, "host visible", vgdev->has_host_visible);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
return 0;
diff --git a/virtio_gpu/virtgpu_drv.c b/virtio_gpu/virtgpu_drv.c
index 5da5d66..742fbf3 100644
--- a/virtio_gpu/virtgpu_drv.c
+++ b/virtio_gpu/virtgpu_drv.c
@@ -165,9 +165,8 @@ static unsigned int features[] = {
#endif
VIRTIO_GPU_F_EDID,
VIRTIO_GPU_F_RESOURCE_UUID,
- VIRTIO_GPU_F_RESOURCE_V2,
- VIRTIO_GPU_F_SHARED_GUEST,
- VIRTIO_GPU_F_HOST_COHERENT,
+ VIRTIO_GPU_F_RESOURCE_BLOB,
+ VIRTIO_GPU_F_HOST_VISIBLE,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
diff --git a/virtio_gpu/virtgpu_drv.h b/virtio_gpu/virtgpu_drv.h
index fb0c10c..b426fd8 100644
--- a/virtio_gpu/virtgpu_drv.h
+++ b/virtio_gpu/virtgpu_drv.h
@@ -62,9 +62,8 @@ struct virtio_gpu_object_params {
bool dumb;
/* 3d */
bool virgl;
- bool resource_v2;
- enum virtio_gpu_memory_type guest_memory_type;
- enum virtio_gpu_caching_type caching_type;
+ bool blob;
+ uint32_t blob_flags;
uint32_t target;
uint32_t bind;
uint32_t depth;
@@ -89,17 +88,15 @@ struct virtio_gpu_object {
uint32_t mapped;
void *vmap;
bool dumb;
- bool resource_v2;
+ bool blob;
struct ttm_place placement_code;
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
bool created;
- enum virtio_gpu_memory_type guest_memory_type;
- enum virtio_gpu_caching_type caching_type;
-
int uuid_state;
uuid_t uuid;
+ uint32_t blob_flags;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, gem_base)
@@ -193,12 +190,6 @@ struct virtio_gpu_drv_cap_cache {
atomic_t is_valid;
};
-struct virtio_gpu_allocation_metadata_response {
- bool callback_done;
- struct virtio_gpu_resp_allocation_metadata info;
- uint32_t response_data[];
-};
-
struct virtio_gpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -228,9 +219,9 @@ struct virtio_gpu_device {
bool has_virgl_3d;
bool has_edid;
- bool has_resource_v2;
- bool has_shared_guest;
- bool has_host_coherent;
+ bool has_resource_assign_uuid;
+ bool has_resource_blob;
+ bool has_host_visible;
struct work_struct config_changed_work;
@@ -357,24 +348,23 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
struct virtio_gpu_fence *fence);
+
void
-virtio_gpu_cmd_resource_create_v2(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t guest_memory_type,
- uint32_t caching_type, uint64_t size,
- uint64_t pci_addr, uint32_t nents,
- uint32_t args_size, void *data, uint32_t data_size,
- struct virtio_gpu_fence *fence);
-void
-virtio_gpu_cmd_resource_v2_unref(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
- struct virtio_gpu_fence *fence);
-int
-virtio_gpu_cmd_allocation_metadata(struct virtio_gpu_device *vgdev,
- uint32_t request_id,
- uint32_t request_size,
- uint32_t response_size,
- void *request,
- struct virtio_gpu_fence *fence);
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ uint32_t ctx_id, uint32_t flags,
+ uint64_t size, uint64_t memory_id,
+ uint32_t nents,
+ struct virtio_gpu_mem_entry *ents);
+
+void virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ uint64_t offset,
+ struct virtio_gpu_fence *fence);
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id);
+
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
diff --git a/virtio_gpu/virtgpu_gem.c b/virtio_gpu/virtgpu_gem.c
index eb08cf4..2925661 100644
--- a/virtio_gpu/virtgpu_gem.c
+++ b/virtio_gpu/virtgpu_gem.c
@@ -139,7 +139,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
int r;
- if (!vgdev->has_virgl_3d || qobj->resource_v2)
+ if (!vgdev->has_virgl_3d)
return 0;
r = virtio_gpu_object_reserve(qobj, false);
@@ -160,7 +160,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
int r;
- if (!vgdev->has_virgl_3d || qobj->resource_v2)
+ if (!vgdev->has_virgl_3d)
return;
r = virtio_gpu_object_reserve(qobj, false);
diff --git a/virtio_gpu/virtgpu_ioctl.c b/virtio_gpu/virtgpu_ioctl.c
index 9ec9fc4..6224777 100644
--- a/virtio_gpu/virtgpu_ioctl.c
+++ b/virtio_gpu/virtgpu_ioctl.c
@@ -262,6 +262,12 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
value = 1;
break;
+ case VIRTGPU_PARAM_RESOURCE_BLOB:
+ value = vgdev->has_resource_blob == true ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_HOST_VISIBLE:
+ value = vgdev->has_host_visible == true ? 1 : 0;
+ break;
default:
return -EINVAL;
}
@@ -576,29 +582,44 @@ copy_exit:
return 0;
}
-static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
+static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
void *buf;
int ret, si, nents;
uint32_t handle = 0;
- uint64_t pci_addr = 0;
struct scatterlist *sg;
- size_t total_size, offset;
struct virtio_gpu_object *obj;
struct virtio_gpu_fence *fence;
struct virtio_gpu_mem_entry *ents;
- struct drm_virtgpu_resource_create_v2 *rc_v2 = data;
+ struct drm_virtgpu_resource_create_blob *rc_blob = data;
struct virtio_gpu_object_params params = { 0 };
struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
- void __user *args = u64_to_user_ptr(rc_v2->args);
+ bool mappable = rc_blob->flags & VIRTGPU_RES_BLOB_USE_MAPPABLE;
+ bool guest = rc_blob->flags & VIRTGPU_RES_BLOB_GUEST_MASK;
+
+ params.size = rc_blob->size;
+ params.blob_flags = rc_blob->flags;
+ params.blob = true;
- ret = total_size = offset = 0;
- params.size = rc_v2->size;
- params.guest_memory_type = rc_v2->guest_memory_type;
- params.resource_v2 = true;
- params.caching_type = rc_v2->caching_type;
+ if (rc_blob->cmd_size && vfpriv) {
+ void *buf;
+ void __user *cmd = u64_to_user_ptr(rc_blob->cmd);
+
+ buf = kzalloc(rc_blob->cmd_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, cmd, rc_blob->cmd_size)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
+ vfpriv->ctx_id, NULL);
+ }
obj = virtio_gpu_alloc_object(dev, &params, NULL);
if (IS_ERR(obj))
@@ -610,7 +631,7 @@ static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
goto err_free_obj;
}
- if (rc_v2->guest_memory_type == VIRTGPU_MEMORY_HOST_COHERENT) {
+ if (!guest) {
nents = 0;
} else if (use_dma_api) {
obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
@@ -621,34 +642,14 @@ static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
nents = obj->pages->nents;
}
- total_size = nents * sizeof(struct virtio_gpu_mem_entry) +
- rc_v2->args_size;
-
- buf = kzalloc(total_size, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto err_free_obj;
- }
-
- ents = buf;
- if (rc_v2->guest_memory_type == VIRTGPU_MEMORY_HOST_COHERENT) {
- pci_addr = vgdev->caddr + obj->tbo.offset;
- } else {
+ ents = kzalloc(nents * sizeof(struct virtio_gpu_mem_entry), GFP_KERNEL);
+ if (guest) {
for_each_sg(obj->pages->sgl, sg, nents, si) {
ents[si].addr = cpu_to_le64(use_dma_api
? sg_dma_address(sg)
: sg_phys(sg));
ents[si].length = cpu_to_le32(sg->length);
ents[si].padding = 0;
- offset += sizeof(struct virtio_gpu_mem_entry);
- }
- }
-
- if (rc_v2->args_size) {
- if (copy_from_user(buf + offset, args,
- rc_v2->args_size)) {
- ret = -EFAULT;
- goto err_free_buf;
}
}
@@ -658,15 +659,18 @@ static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
goto err_free_buf;
}
+ virtio_gpu_cmd_resource_create_blob(vgdev, obj, vfpriv->ctx_id,
+ rc_blob->flags, rc_blob->size,
+ rc_blob->memory_id, nents,
+ ents);
+
ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
if (ret)
goto err_fence_put;
- virtio_gpu_cmd_resource_create_v2(vgdev, obj->hw_res_handle,
- rc_v2->guest_memory_type,
- rc_v2->caching_type, rc_v2->size,
- pci_addr, nents, rc_v2->args_size,
- buf, total_size, fence);
+ if (!guest && mappable) {
+ virtio_gpu_cmd_map(vgdev, obj, obj->tbo.offset, fence);
+ }
/*
* No need to call virtio_gpu_object_reserve since the buffer is not
@@ -678,8 +682,8 @@ static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
dma_fence_put(&fence->f);
drm_gem_object_put_unlocked(&obj->gem_base);
- rc_v2->resource_id = obj->hw_res_handle;
- rc_v2->gem_handle = handle;
+ rc_blob->res_handle = obj->hw_res_handle;
+ rc_blob->bo_handle = handle;
return 0;
err_fence_put:
@@ -691,94 +695,6 @@ err_free_obj:
return ret;
}
-static int virtio_gpu_allocation_metadata_request_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file)
-{
- void *request;
- uint32_t request_id = 0;
- struct drm_virtgpu_allocation_metadata_request *amr = data;
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_allocation_metadata_response *response;
- void __user *params = u64_to_user_ptr(amr->request);
-
- if (!amr->request_size)
- return -EINVAL;
-
- request = kzalloc(amr->request_size, GFP_KERNEL);
- if (!request) {
- return -ENOMEM;
- }
-
- if (copy_from_user(request, params,
- amr->request_size)) {
- kfree(request);
- return -EFAULT;
- }
-
- if (amr->response_size) {
- response = kzalloc(sizeof(struct virtio_gpu_allocation_metadata_response) +
- amr->response_size, GFP_KERNEL);
- if (!response) {
- kfree(request);
- return -ENOMEM;
- }
-
- response->callback_done = false;
- idr_preload(GFP_KERNEL);
- spin_lock(&vgdev->request_idr_lock);
- request_id = idr_alloc(&vgdev->request_idr, response, 1, 0,
- GFP_NOWAIT);
- spin_unlock(&vgdev->request_idr_lock);
- idr_preload_end();
- amr->request_id = request_id;
- }
-
- virtio_gpu_cmd_allocation_metadata(vgdev, request_id,
- amr->request_size,
- amr->response_size,
- request,
- NULL);
- return 0;
-}
-
-static int virtio_gpu_allocation_metadata_response_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file)
-{
- int ret = -EINVAL;
- struct virtio_gpu_allocation_metadata_response *response;
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct drm_virtgpu_allocation_metadata_response *rcr = data;
- void __user *user_data = u64_to_user_ptr(rcr->response);
-
- spin_lock(&vgdev->request_idr_lock);
- response = idr_find(&vgdev->request_idr, rcr->request_id);
- spin_unlock(&vgdev->request_idr_lock);
-
- if (!response)
- goto out;
-
- ret = wait_event_interruptible(vgdev->resp_wq,
- response->callback_done);
- if (ret)
- goto out_remove;
-
- if (copy_to_user(user_data, &response->response_data,
- rcr->response_size)) {
- ret = -EFAULT;
- goto out_remove;
- }
-
- ret = 0;
-
-out_remove:
- spin_lock(&vgdev->request_idr_lock);
- response = idr_remove(&vgdev->request_idr, rcr->request_id);
- spin_unlock(&vgdev->request_idr_lock);
- kfree(response);
-out:
- return ret;
-}
-
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_RENDER_ALLOW),
@@ -812,15 +728,7 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_V2,
- virtio_gpu_resource_create_v2_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
-
- DRM_IOCTL_DEF_DRV(VIRTGPU_ALLOCATION_METADATA_REQUEST,
- virtio_gpu_allocation_metadata_request_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
-
- DRM_IOCTL_DEF_DRV(VIRTGPU_ALLOCATION_METADATA_RESPONSE,
- virtio_gpu_allocation_metadata_response_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
+ virtio_gpu_resource_create_blob_ioctl,
+ DRM_RENDER_ALLOW)
};
diff --git a/virtio_gpu/virtgpu_kms.c b/virtio_gpu/virtgpu_kms.c
index c417122..f0f3023 100644
--- a/virtio_gpu/virtgpu_kms.c
+++ b/virtio_gpu/virtgpu_kms.c
@@ -168,8 +168,8 @@ int virtio_gpu_init(struct drm_device *dev)
DRM_INFO("Virtio cross device support available.\n");
}
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_V2)) {
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_HOST_COHERENT)) {
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_HOST_VISIBLE)) {
vgdev->cbar = 4;
vgdev->caddr = pci_resource_start(dev->pdev, vgdev->cbar);
vgdev->csize = pci_resource_len(dev->pdev, vgdev->cbar);
@@ -181,14 +181,13 @@ int virtio_gpu_init(struct drm_device *dev)
"at 0x%lx, size %ld MB", dev_name(&dev->pdev->dev),
vgdev->cbar, vgdev->caddr, vgdev->csize >> 20);
- vgdev->has_host_coherent = true;
+ vgdev->has_host_visible = true;
}
}
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_SHARED_GUEST))
- vgdev->has_shared_guest = true;
-
- vgdev->has_resource_v2 = true;
+ vgdev->has_resource_blob = true;
+ DRM_INFO("resource_v2: %u, host visible %u\n",
+ vgdev->has_resource_blob, vgdev->has_host_visible);
}
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
diff --git a/virtio_gpu/virtgpu_object.c b/virtio_gpu/virtgpu_object.c
index 7c522c4..2b76c47 100644
--- a/virtio_gpu/virtgpu_object.c
+++ b/virtio_gpu/virtgpu_object.c
@@ -29,6 +29,7 @@
#include <linux/dma-buf.h>
#include <linux/uuid.h>
#include "virtgpu_drv.h"
+#include <drm/virtgpu_drm.h>
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
@@ -83,44 +84,47 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
+// define internally for testing purposes
+#define VIRTGPU_RESOURCE_CACHE_MASK 0xf000
+#define VIRTGPU_RESOURCE_CACHE_CACHED 0x1000
+#define VIRTGPU_RESOURCE_CACHE_UNCACHED 0x2000
+#define VIRTGPU_RESOURCE_CACHE_WC 0x3000
+
static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
{
u32 c = 1;
u32 ttm_caching_flags = 0;
+ u32 cache_type = (vgbo->blob_flags & VIRTGPU_RESOURCE_CACHE_MASK);
+ u32 guest = (vgbo->blob_flags & VIRTGPU_RES_BLOB_GUEST_MASK);
+
vgbo->placement.placement = &vgbo->placement_code;
vgbo->placement.busy_placement = &vgbo->placement_code;
vgbo->placement_code.fpfn = 0;
vgbo->placement_code.lpfn = 0;
- switch (vgbo->caching_type) {
- case VIRTIO_GPU_CACHED:
+ switch (cache_type) {
+ case VIRTGPU_RESOURCE_CACHE_CACHED:
ttm_caching_flags = TTM_PL_FLAG_CACHED;
break;
- case VIRTIO_GPU_WRITE_COMBINE:
+ case VIRTGPU_RESOURCE_CACHE_WC:
ttm_caching_flags = TTM_PL_FLAG_WC;
break;
- case VIRTIO_GPU_UNCACHED:
+ case VIRTGPU_RESOURCE_CACHE_UNCACHED:
ttm_caching_flags = TTM_PL_FLAG_UNCACHED;
break;
default:
ttm_caching_flags = TTM_PL_MASK_CACHING;
}
-
- switch (vgbo->guest_memory_type) {
- case VIRTIO_GPU_MEMORY_UNDEFINED:
- case VIRTIO_GPU_MEMORY_TRANSFER:
- case VIRTIO_GPU_MEMORY_SHARED_GUEST:
+ if (!guest && vgbo->blob) {
vgbo->placement_code.flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
+ ttm_caching_flags | TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_NO_EVICT;
- break;
- case VIRTIO_GPU_MEMORY_HOST_COHERENT:
+ } else {
vgbo->placement_code.flags =
- ttm_caching_flags | TTM_PL_FLAG_VRAM |
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
TTM_PL_FLAG_NO_EVICT;
- break;
}
vgbo->placement.num_placement = c;
vgbo->placement.num_busy_placement = c;
@@ -157,9 +161,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
return ret;
}
bo->dumb = params->dumb;
- bo->resource_v2 = params->resource_v2;
- bo->guest_memory_type = params->guest_memory_type;
- bo->caching_type = params->caching_type;
+ bo->blob = params->blob;
+ bo->blob_flags = params->blob_flags;
if (params->virgl) {
virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
diff --git a/virtio_gpu/virtgpu_ttm.c b/virtio_gpu/virtgpu_ttm.c
index 351b7a3..831d474 100644
--- a/virtio_gpu/virtgpu_ttm.c
+++ b/virtio_gpu/virtgpu_ttm.c
@@ -223,7 +223,7 @@ static int virtio_gpu_ttm_vram_unbind(struct ttm_tt *ttm)
virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
struct virtio_gpu_object *obj = gtt->obj;
- virtio_gpu_cmd_resource_v2_unref(vgdev, obj->hw_res_handle, NULL);
+ virtio_gpu_cmd_unmap(vgdev, obj->hw_res_handle);
return 0;
}
@@ -277,6 +277,7 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
struct virtio_gpu_device *vgdev;
struct virtio_gpu_object *obj;
struct virtio_gpu_ttm_tt *gtt;
+ uint32_t guest;
vgdev = virtio_gpu_get_vgdev(bo->bdev);
obj = container_of(bo, struct virtio_gpu_object, tbo);
@@ -285,8 +286,9 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
if (gtt == NULL)
return NULL;
gtt->obj = obj;
+ guest = (obj->blob_flags & VIRTGPU_RES_BLOB_GUEST_MASK);
- if (obj->guest_memory_type == VIRTIO_GPU_MEMORY_HOST_COHERENT) {
+ if (!guest && obj->blob) {
gtt->ttm.ttm.func = &virtio_gpu_vram_func;
if (ttm_tt_init(&gtt->ttm.ttm, bo, page_flags)) {
kfree(gtt);
@@ -345,7 +347,7 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
goto err_mm_init;
}
- if (vgdev->has_host_coherent) {
+ if (vgdev->has_host_visible) {
r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_VRAM,
vgdev->csize >> PAGE_SHIFT);
if (r) {
diff --git a/virtio_gpu/virtgpu_vq.c b/virtio_gpu/virtgpu_vq.c
index b80e981..9ccc95d 100644
--- a/virtio_gpu/virtgpu_vq.c
+++ b/virtio_gpu/virtgpu_vq.c
@@ -533,54 +533,6 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
-void
-virtio_gpu_cmd_resource_create_v2(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
- uint32_t guest_memory_type,
- uint32_t caching_type, uint64_t size,
- uint64_t pci_addr, uint32_t nents,
- uint32_t args_size, void *data,
- uint32_t data_size,
- struct virtio_gpu_fence *fence)
-{
- struct virtio_gpu_resource_create_v2 *cmd_p;
- struct virtio_gpu_vbuffer *vbuf;
-
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
- memset(cmd_p, 0, sizeof(*cmd_p));
-
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_V2);
- cmd_p->resource_id = cpu_to_le32(resource_id);
- cmd_p->guest_memory_type = cpu_to_le32(guest_memory_type);
- cmd_p->caching_type = cpu_to_le32(caching_type);
- cmd_p->size = cpu_to_le64(size);
- cmd_p->pci_addr = cpu_to_le64(pci_addr);
- cmd_p->args_size = cpu_to_le32(args_size);
- cmd_p->nr_entries = cpu_to_le32(nents);
-
- vbuf->data_buf = data;
- vbuf->data_size = data_size;
-
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
-}
-
-void
-virtio_gpu_cmd_resource_v2_unref(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
- struct virtio_gpu_fence *fence)
-{
- struct virtio_gpu_resource_v2_unref *cmd_p;
- struct virtio_gpu_vbuffer *vbuf;
-
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
- memset(cmd_p, 0, sizeof(*cmd_p));
-
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_V2_UNREF);
- cmd_p->resource_id = cpu_to_le32(resource_id);
-
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
-}
-
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -943,34 +895,6 @@ finish_pending:
wake_up_all(&vgdev->resp_wq);
}
-static void virtio_gpu_cmd_allocation_metadata_cb(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
-{
- struct virtio_gpu_allocation_metadata_response *response;
- struct virtio_gpu_resp_allocation_metadata *resp =
- (struct virtio_gpu_resp_allocation_metadata *)vbuf->resp_buf;
- uint32_t resp_type = le32_to_cpu(resp->hdr.type);
- uint32_t handle = le32_to_cpu(resp->request_id);
- size_t total_size = sizeof(struct virtio_gpu_resp_allocation_metadata) +
- le32_to_cpu(resp->response_size);
-
- spin_lock(&vgdev->request_idr_lock);
- response = idr_find(&vgdev->request_idr, handle);
- spin_unlock(&vgdev->request_idr_lock);
-
- if (!response)
- return;
-
- switch (resp_type) {
- case VIRTIO_GPU_RESP_OK_ALLOCATION_METADATA:
- case VIRTIO_GPU_RESP_OK_ALLOCATION_METADATA_LEGACY:
- memcpy(&response->info, resp, total_size);
- }
-
- response->callback_done = true;
- wake_up_all(&vgdev->resp_wq);
-}
-
int
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
@@ -1015,47 +939,6 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
return 0;
}
-int
-virtio_gpu_cmd_allocation_metadata(struct virtio_gpu_device *vgdev,
- uint32_t request_id,
- uint32_t request_size,
- uint32_t response_size,
- void *request,
- struct virtio_gpu_fence *fence)
-{
- struct virtio_gpu_vbuffer *vbuf;
- struct virtio_gpu_allocation_metadata *cmd_p;
-
- if (response_size) {
- struct virtio_gpu_resp_allocation_metadata *resp_buf;
- size_t resp_size = sizeof(struct virtio_gpu_resp_allocation_metadata) +
- response_size;
- resp_buf = kzalloc(resp_size, GFP_KERNEL);
- if (!resp_buf)
- return -ENOMEM;
-
- cmd_p = virtio_gpu_alloc_cmd_resp(vgdev,
- &virtio_gpu_cmd_allocation_metadata_cb, &vbuf,
- sizeof(*cmd_p), resp_size,
- resp_buf);
- resp_buf->request_id = cpu_to_le32(request_id);
- } else {
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
- }
-
- memset(cmd_p, 0, sizeof(*cmd_p));
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_ALLOCATION_METADATA);
- cmd_p->request_id = cpu_to_le32(request_id);
- cmd_p->request_size = cpu_to_le32(request_size);
- cmd_p->response_size = cpu_to_le32(response_size);
-
- vbuf->data_buf = request;
- vbuf->data_size = request_size;
-
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
- return 0;
-}
-
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
uint32_t ctx_id,
@@ -1136,6 +1019,9 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct scatterlist *sg;
int si, nents;
+ if (obj->blob)
+ return 0;
+
if (WARN_ON_ONCE(!obj->created))
return -EINVAL;
@@ -1272,3 +1158,82 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
return 0;
}
+static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ /*
+ * No-op for v5.4.
+ */
+}
+
+void virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ uint64_t offset,
+ struct virtio_gpu_fence *fence)
+{
+ struct virtio_gpu_resource_map *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_resp_map_info *resp_buf;
+
+ resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+ if (!resp_buf) {
+ DRM_ERROR("allocation failure\n");
+ return;
+ }
+
+ cmd_p = virtio_gpu_alloc_cmd_resp(vgdev,
+ virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
+ sizeof(struct virtio_gpu_resp_map_info), resp_buf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->offset = offset;
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id)
+{
+ struct virtio_gpu_resource_unmap *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ uint32_t ctx_id, uint32_t flags,
+ uint64_t size, uint64_t memory_id,
+ uint32_t nents,
+ struct virtio_gpu_mem_entry *ents)
+{
+ struct virtio_gpu_resource_create_blob *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
+ cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->flags = cpu_to_le32(flags);
+ cmd_p->size = cpu_to_le64(size);
+ cmd_p->memory_id = cpu_to_le64(memory_id);
+ cmd_p->nr_entries = cpu_to_le32(nents);
+
+ vbuf->data_buf = ents;
+ vbuf->data_size = sizeof(*ents) * nents;
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ bo->created = true;
+}
+