virtio_gpu: initial driver

Sample qemu options: -device virtio-vga,edid=on,xres=1024,yres=768 -display sdl
The display mode can be set in the Screen preferences.

Change-Id: If1d6aeecb208ce7c62c42eea1a95c71237c4375a
Reviewed-on: https://review.haiku-os.org/c/haiku/+/7038
Tested-by: Commit checker robot <no-reply+buildbot@haiku-os.org>
Reviewed-by: Jérôme Duval <jerome.duval@gmail.com>
Jérôme Duval 2023-10-10 18:11:18 +02:00
parent a2270c7035
commit 0b733c9c80
12 changed files with 1928 additions and 0 deletions


@@ -0,0 +1,43 @@
/*
* Copyright 2005-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
#ifndef VIRTIO_INFO_H
#define VIRTIO_INFO_H
#include <Drivers.h>
#include <Accelerant.h>
#include <edid.h>
struct virtio_gpu_shared_info {
area_id mode_list_area; // area containing display mode list
uint32 mode_count;
display_mode current_mode;
uint32 bytes_per_row;
area_id frame_buffer_area; // area of frame buffer
uint8* frame_buffer;
// pointer to frame buffer (visible by all apps!)
edid1_raw edid_raw;
bool has_edid;
uint32 dpms_capabilities;
char name[32];
uint32 vram_size;
};
//----------------- ioctl() interface ----------------
// list ioctls
enum {
VIRTIO_GPU_GET_PRIVATE_DATA = B_DEVICE_OP_CODES_END + 1,
VIRTIO_GPU_GET_DEVICE_NAME,
VIRTIO_GPU_SET_DISPLAY_MODE,
};
#endif /* VIRTIO_INFO_H */


@@ -15,3 +15,4 @@ SubInclude HAIKU_TOP src add-ons accelerants radeon_hd ;
SubInclude HAIKU_TOP src add-ons accelerants s3 ;
SubInclude HAIKU_TOP src add-ons accelerants vesa ;
SubInclude HAIKU_TOP src add-ons accelerants via ;
SubInclude HAIKU_TOP src add-ons accelerants virtio ;


@@ -0,0 +1,13 @@
SubDir HAIKU_TOP src add-ons accelerants virtio ;
UsePrivateHeaders graphics ;
UsePrivateHeaders [ FDirName graphics virtio ] ;
UsePrivateHeaders [ FDirName graphics common ] ;
UsePrivateHeaders shared ;
Addon virtio_gpu.accelerant :
accelerant.cpp
hooks.cpp
mode.cpp
: libaccelerantscommon.a
;


@@ -0,0 +1,202 @@
/*
* Copyright 2005-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2016, Jessica Hamilton, jessica.l.hamilton@gmail.com.
* Distributed under the terms of the MIT License.
*/
#include "accelerant_protos.h"
#include "accelerant.h"
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <syslog.h>
#include <AutoDeleterOS.h>
#define TRACE_ACCELERANT
#ifdef TRACE_ACCELERANT
extern "C" void _sPrintf(const char *format, ...);
# define TRACE(x) _sPrintf x
#else
# define TRACE(x) ;
#endif
struct accelerant_info *gInfo;
// #pragma mark -
/*! This is the common accelerant_info initializer. It is called by
both the primary accelerant and all of its clones.
*/
static status_t
init_common(int device, bool isClone)
{
// initialize global accelerant info structure
gInfo = (accelerant_info *)malloc(sizeof(accelerant_info));
MemoryDeleter infoDeleter(gInfo);
if (gInfo == NULL)
return B_NO_MEMORY;
memset(gInfo, 0, sizeof(accelerant_info));
gInfo->is_clone = isClone;
gInfo->device = device;
gInfo->current_mode = UINT16_MAX;
// get basic info from driver
area_id sharedArea;
if (ioctl(device, VIRTIO_GPU_GET_PRIVATE_DATA, &sharedArea, sizeof(area_id)) != 0)
return B_ERROR;
AreaDeleter sharedDeleter(clone_area("virtio_gpu shared info",
(void **)&gInfo->shared_info, B_ANY_ADDRESS,
B_READ_AREA | B_WRITE_AREA, sharedArea));
status_t status = gInfo->shared_info_area = sharedDeleter.Get();
if (status < B_OK)
return status;
infoDeleter.Detach();
sharedDeleter.Detach();
return B_OK;
}
/*! Cleans up everything done by a successful init_common(). */
static void
uninit_common(void)
{
delete_area(gInfo->shared_info_area);
gInfo->shared_info_area = -1;
gInfo->shared_info = NULL;
// close the file handle ONLY if we're the clone
// (this is what Be tells us ;)
if (gInfo->is_clone)
close(gInfo->device);
free(gInfo);
}
// #pragma mark - public accelerant functions
/*! Init primary accelerant */
status_t
virtio_gpu_init_accelerant(int device)
{
TRACE(("virtio_gpu_init_accelerant()\n"));
status_t status = init_common(device, false);
if (status != B_OK)
return status;
status = create_mode_list();
if (status != B_OK) {
uninit_common();
return status;
}
return B_OK;
}
ssize_t
virtio_gpu_accelerant_clone_info_size(void)
{
// clone info is device name, so return its maximum size
return B_PATH_NAME_LENGTH;
}
void
virtio_gpu_get_accelerant_clone_info(void *info)
{
ioctl(gInfo->device, VIRTIO_GPU_GET_DEVICE_NAME, info, B_PATH_NAME_LENGTH);
}
status_t
virtio_gpu_clone_accelerant(void *info)
{
TRACE(("virtio_gpu_clone_accelerant()\n"));
// create full device name
char path[MAXPATHLEN];
strcpy(path, "/dev/");
strcat(path, (const char *)info);
int fd = open(path, B_READ_WRITE);
if (fd < 0)
return errno;
status_t status = init_common(fd, true);
if (status != B_OK)
goto err1;
// get read-only clone of supported display modes
status = gInfo->mode_list_area = clone_area(
"virtio_gpu cloned modes", (void **)&gInfo->mode_list,
B_ANY_ADDRESS, B_READ_AREA, gInfo->shared_info->mode_list_area);
if (status < B_OK)
goto err2;
return B_OK;
err2:
uninit_common();
err1:
close(fd);
return status;
}
/*! This function is called for both the primary accelerant and all of
its clones.
*/
void
virtio_gpu_uninit_accelerant(void)
{
TRACE(("virtio_gpu_uninit_accelerant()\n"));
// delete accelerant instance data
delete_area(gInfo->mode_list_area);
gInfo->mode_list = NULL;
uninit_common();
}
status_t
virtio_gpu_get_accelerant_device_info(accelerant_device_info *info)
{
info->version = B_ACCELERANT_VERSION;
strcpy(info->name, "VirtioGpu Driver");
strcpy(info->chipset, "Virtio");
// ToDo: provide some more insight here...
strcpy(info->serial_no, "None");
#if 0
info->memory = ???
info->dac_speed = ???
#endif
return B_OK;
}
sem_id
virtio_gpu_accelerant_retrace_semaphore()
{
return -1;
}


@@ -0,0 +1,30 @@
/*
* Copyright 2005-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2016, Jessica Hamilton, jessica.l.hamilton@gmail.com.
* Distributed under the terms of the MIT License.
*/
#ifndef VIRTIO_GPU_ACCELERANT_H
#define VIRTIO_GPU_ACCELERANT_H
#include "virtio_info.h"
typedef struct accelerant_info {
int device;
bool is_clone;
area_id shared_info_area;
virtio_gpu_shared_info* shared_info;
area_id mode_list_area;
// cloned list of standard display modes
display_mode *mode_list;
uint16 current_mode;
} accelerant_info;
extern accelerant_info *gInfo;
extern status_t create_mode_list(void);
#endif /* VIRTIO_GPU_ACCELERANT_H */


@@ -0,0 +1,42 @@
/*
* Copyright 2005-2008, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2016, Jessica Hamilton, jessica.l.hamilton@gmail.com.
* All rights reserved. Distributed under the terms of the MIT License.
*/
#ifndef _ACCELERANT_PROTOS_H
#define _ACCELERANT_PROTOS_H
#include <Accelerant.h>
#include "video_overlay.h"
#ifdef __cplusplus
extern "C" {
#endif
// general
status_t virtio_gpu_init_accelerant(int fd);
ssize_t virtio_gpu_accelerant_clone_info_size(void);
void virtio_gpu_get_accelerant_clone_info(void *data);
status_t virtio_gpu_clone_accelerant(void *data);
void virtio_gpu_uninit_accelerant(void);
status_t virtio_gpu_get_accelerant_device_info(accelerant_device_info *adi);
sem_id virtio_gpu_accelerant_retrace_semaphore(void);
// modes & constraints
uint32 virtio_gpu_accelerant_mode_count(void);
status_t virtio_gpu_get_mode_list(display_mode *dm);
status_t virtio_gpu_get_preferred_mode(display_mode *mode);
status_t virtio_gpu_set_display_mode(display_mode *modeToSet);
status_t virtio_gpu_get_display_mode(display_mode *currentMode);
status_t virtio_gpu_get_edid_info(void *info, size_t size, uint32 *_version);
status_t virtio_gpu_get_frame_buffer_config(frame_buffer_config *config);
status_t virtio_gpu_get_pixel_clock_limits(display_mode *dm, uint32 *low,
uint32 *high);
#ifdef __cplusplus
}
#endif
#endif /* _ACCELERANT_PROTOS_H */


@@ -0,0 +1,55 @@
/*
* Copyright 2005-2012, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2016, Jessica Hamilton, jessica.l.hamilton@gmail.com
* All rights reserved. Distributed under the terms of the MIT License.
*/
#include "accelerant_protos.h"
#include "accelerant.h"
#include <new>
extern "C" void*
get_accelerant_hook(uint32 feature, void* data)
{
switch (feature) {
/* general */
case B_INIT_ACCELERANT:
return (void*)virtio_gpu_init_accelerant;
case B_UNINIT_ACCELERANT:
return (void*)virtio_gpu_uninit_accelerant;
case B_CLONE_ACCELERANT:
return (void*)virtio_gpu_clone_accelerant;
case B_ACCELERANT_CLONE_INFO_SIZE:
return (void*)virtio_gpu_accelerant_clone_info_size;
case B_GET_ACCELERANT_CLONE_INFO:
return (void*)virtio_gpu_get_accelerant_clone_info;
case B_GET_ACCELERANT_DEVICE_INFO:
return (void*)virtio_gpu_get_accelerant_device_info;
case B_ACCELERANT_RETRACE_SEMAPHORE:
return (void*)virtio_gpu_accelerant_retrace_semaphore;
/* mode configuration */
case B_ACCELERANT_MODE_COUNT:
return (void*)virtio_gpu_accelerant_mode_count;
case B_GET_MODE_LIST:
return (void*)virtio_gpu_get_mode_list;
case B_GET_PREFERRED_DISPLAY_MODE:
return (void*)virtio_gpu_get_preferred_mode;
case B_SET_DISPLAY_MODE:
return (void*)virtio_gpu_set_display_mode;
case B_GET_DISPLAY_MODE:
return (void*)virtio_gpu_get_display_mode;
case B_GET_EDID_INFO:
return (void*)virtio_gpu_get_edid_info;
case B_GET_FRAME_BUFFER_CONFIG:
return (void*)virtio_gpu_get_frame_buffer_config;
case B_GET_PIXEL_CLOCK_LIMITS:
return (void*)virtio_gpu_get_pixel_clock_limits;
}
return NULL;
}


@@ -0,0 +1,184 @@
/*
* Copyright 2005-2015, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2016, Jessica Hamilton, jessica.l.hamilton@gmail.com.
* Distributed under the terms of the MIT License.
*/
#include <stdlib.h>
#include <string.h>
#include <compute_display_timing.h>
#include <create_display_modes.h>
#include "accelerant_protos.h"
#include "accelerant.h"
#define TRACE_MODE
#ifdef TRACE_MODE
extern "C" void _sPrintf(const char* format, ...);
# define TRACE(x) _sPrintf x
#else
# define TRACE(x) ;
#endif
bool
operator==(const display_mode &lhs, const display_mode &rhs)
{
return lhs.space == rhs.space
&& lhs.virtual_width == rhs.virtual_width
&& lhs.virtual_height == rhs.virtual_height
&& lhs.h_display_start == rhs.h_display_start
&& lhs.v_display_start == rhs.v_display_start;
}
/*! Checks whether the specified \a mode can be set by the virtio_gpu driver. */
static bool
is_mode_supported(display_mode* mode)
{
return (mode != NULL) && (*mode == gInfo->shared_info->current_mode);
}
/*! Creates the initial mode list of the primary accelerant.
It's called from virtio_gpu_init_accelerant(). When EDID data is available,
the mode list is built from it; otherwise a single mode matching the current
display size is generated.
*/
status_t
create_mode_list(void)
{
const color_space colorspace[] = {
(color_space)gInfo->shared_info->current_mode.space
};
if (!gInfo->shared_info->has_edid) {
display_mode mode = gInfo->shared_info->current_mode;
compute_display_timing(mode.virtual_width, mode.virtual_height, 60, false,
&mode.timing);
fill_display_mode(mode.virtual_width, mode.virtual_height, &mode);
gInfo->mode_list_area = create_display_modes("virtio_gpu modes",
NULL, &mode, 1, colorspace, 1, is_mode_supported, &gInfo->mode_list,
&gInfo->shared_info->mode_count);
} else {
edid1_info edidInfo;
edid_decode(&edidInfo, &gInfo->shared_info->edid_raw);
gInfo->mode_list_area = create_display_modes("virtio_gpu modes",
&edidInfo, NULL, 0, colorspace, 1, NULL, &gInfo->mode_list,
&gInfo->shared_info->mode_count);
}
if (gInfo->mode_list_area < 0)
return gInfo->mode_list_area;
gInfo->shared_info->mode_list_area = gInfo->mode_list_area;
return B_OK;
}
// #pragma mark -
uint32
virtio_gpu_accelerant_mode_count(void)
{
TRACE(("virtio_gpu_accelerant_mode_count() = %d\n",
gInfo->shared_info->mode_count));
return gInfo->shared_info->mode_count;
}
status_t
virtio_gpu_get_mode_list(display_mode* modeList)
{
TRACE(("virtio_gpu_get_mode_info()\n"));
memcpy(modeList, gInfo->mode_list,
gInfo->shared_info->mode_count * sizeof(display_mode));
return B_OK;
}
status_t
virtio_gpu_get_preferred_mode(display_mode* _mode)
{
TRACE(("virtio_gpu_get_preferred_mode()\n"));
*_mode = gInfo->shared_info->current_mode;
return B_OK;
}
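/*! Forwards the requested display mode to the kernel driver via
VIRTIO_GPU_SET_DISPLAY_MODE; re-setting the currently active mode is a no-op.
*/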
status_t
virtio_gpu_set_display_mode(display_mode* _mode)
{
TRACE(("virtio_gpu_set_display_mode()\n"));
if (_mode != NULL && *_mode == gInfo->shared_info->current_mode)
return B_OK;
return ioctl(gInfo->device, VIRTIO_GPU_SET_DISPLAY_MODE,
_mode, sizeof(display_mode));
}
status_t
virtio_gpu_get_display_mode(display_mode* _currentMode)
{
TRACE(("virtio_gpu_get_display_mode()\n"));
*_currentMode = gInfo->shared_info->current_mode;
return B_OK;
}
status_t
virtio_gpu_get_edid_info(void* info, size_t size, uint32* _version)
{
TRACE(("virtio_gpu_get_edid_info()\n"));
if (!gInfo->shared_info->has_edid)
return B_ERROR;
if (size < sizeof(struct edid1_info))
return B_BUFFER_OVERFLOW;
edid_decode((edid1_info*)info, &gInfo->shared_info->edid_raw);
*_version = EDID_VERSION_1;
edid_dump((edid1_info*)info);
return B_OK;
}
status_t
virtio_gpu_get_frame_buffer_config(frame_buffer_config* config)
{
TRACE(("virtio_gpu_get_frame_buffer_config()\n"));
config->frame_buffer = gInfo->shared_info->frame_buffer;
TRACE(("virtio_gpu_get_frame_buffer_config() = %" B_PRIxADDR "\n",
config->frame_buffer));
//config->frame_buffer_dma = gInfo->shared_info->physical_frame_buffer;
config->bytes_per_row = gInfo->shared_info->bytes_per_row;
TRACE(("virtio_gpu_get_frame_buffer_config() %p\n", config->frame_buffer));
return B_OK;
}
status_t
virtio_gpu_get_pixel_clock_limits(display_mode* mode, uint32* _low, uint32* _high)
{
TRACE(("virtio_gpu_get_pixel_clock_limits()\n"));
// TODO: do some real stuff here (taken from radeon driver)
uint32 totalPixel = (uint32)mode->timing.h_total
* (uint32)mode->timing.v_total;
uint32 clockLimit = 2000000;
// lower limit of about 48Hz vertical refresh
*_low = totalPixel * 48L / 1000L;
if (*_low > clockLimit)
return B_ERROR;
*_high = clockLimit;
return B_OK;
}


@@ -16,3 +16,5 @@ SubInclude HAIKU_TOP src add-ons kernel drivers graphics s3 ;
SubInclude HAIKU_TOP src add-ons kernel drivers graphics skeleton ;
SubInclude HAIKU_TOP src add-ons kernel drivers graphics vesa ;
SubInclude HAIKU_TOP src add-ons kernel drivers graphics via ;
SubInclude HAIKU_TOP src add-ons kernel drivers graphics virtio ;


@@ -0,0 +1,10 @@
SubDir HAIKU_TOP src add-ons kernel drivers graphics virtio ;
UsePrivateKernelHeaders ;
UsePrivateHeaders [ FDirName graphics common ] ;
UsePrivateHeaders [ FDirName graphics virtio ] ;
UsePrivateHeaders drivers graphics virtio ;
KernelAddon virtio_gpu :
virtio_gpu.cpp
;


@@ -0,0 +1,450 @@
/*
* Virtio GPU Device
*
* Copyright Red Hat, Inc. 2013-2014
*
* Authors:
* Dave Airlie <airlied@redhat.com>
* Gerd Hoffmann <kraxel@redhat.com>
*
* This header is BSD licensed so anyone can use the definitions
* to implement compatible drivers/servers:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef VIRTIO_GPU_HW_H
#define VIRTIO_GPU_HW_H
#include <sys/types.h>
#define __u8 uint8_t
#define __u32 uint32_t
#define __le16 uint16_t
#define __le32 uint32_t
#define __le64 uint64_t
/*
* VIRTIO_GPU_CMD_CTX_*
* VIRTIO_GPU_CMD_*_3D
*/
#define VIRTIO_GPU_F_VIRGL (1ULL << 0)
/*
* VIRTIO_GPU_CMD_GET_EDID
*/
#define VIRTIO_GPU_F_EDID (1ULL << 1)
/*
* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID
*/
#define VIRTIO_GPU_F_RESOURCE_UUID (1ULL << 2)
/*
* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
*/
#define VIRTIO_GPU_F_RESOURCE_BLOB (1ULL << 3)
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
/* 2d commands */
VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
VIRTIO_GPU_CMD_RESOURCE_UNREF,
VIRTIO_GPU_CMD_SET_SCANOUT,
VIRTIO_GPU_CMD_RESOURCE_FLUSH,
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
VIRTIO_GPU_CMD_GET_EDID,
VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
VIRTIO_GPU_CMD_CTX_DESTROY,
VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE,
VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE,
VIRTIO_GPU_CMD_RESOURCE_CREATE_3D,
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
VIRTIO_GPU_CMD_MOVE_CURSOR,
/* success responses */
VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
VIRTIO_GPU_RESP_OK_EDID,
VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
VIRTIO_GPU_RESP_OK_MAP_INFO,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
enum virtio_gpu_shm_id {
VIRTIO_GPU_SHM_ID_UNDEFINED = 0,
/*
* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB
* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB
*/
VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
};
#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
struct virtio_gpu_ctrl_hdr {
__le32 type;
__le32 flags;
__le64 fence_id;
__le32 ctx_id;
__le32 padding;
} _PACKED;
/* data passed in the cursor vq */
struct virtio_gpu_cursor_pos {
__le32 scanout_id;
__le32 x;
__le32 y;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
struct virtio_gpu_update_cursor {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_cursor_pos pos; /* update & move */
__le32 resource_id; /* update only */
__le32 hot_x; /* update only */
__le32 hot_y; /* update only */
__le32 padding;
} _PACKED;
/* data passed in the control vq, 2d related */
struct virtio_gpu_rect {
__le32 x;
__le32 y;
__le32 width;
__le32 height;
} _PACKED;
/* VIRTIO_GPU_CMD_RESOURCE_UNREF */
struct virtio_gpu_resource_unref {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */
struct virtio_gpu_resource_create_2d {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
__le32 format;
__le32 width;
__le32 height;
} _PACKED;
/* VIRTIO_GPU_CMD_SET_SCANOUT */
struct virtio_gpu_set_scanout {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
__le32 scanout_id;
__le32 resource_id;
} _PACKED;
/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */
struct virtio_gpu_resource_flush {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
__le32 resource_id;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */
struct virtio_gpu_transfer_to_host_2d {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
__le64 offset;
__le32 resource_id;
__le32 padding;
} _PACKED;
struct virtio_gpu_mem_entry {
__le64 addr;
__le32 length;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */
struct virtio_gpu_resource_attach_backing {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
__le32 nr_entries;
} _PACKED;
/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */
struct virtio_gpu_resource_detach_backing {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */
#define VIRTIO_GPU_MAX_SCANOUTS 16
struct virtio_gpu_resp_display_info {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_display_one {
struct virtio_gpu_rect r;
__le32 enabled;
__le32 flags;
} pmodes[VIRTIO_GPU_MAX_SCANOUTS];
} _PACKED;
/* data passed in the control vq, 3d related */
struct virtio_gpu_box {
__le32 x, y, z;
__le32 w, h, d;
} _PACKED;
/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */
struct virtio_gpu_transfer_host_3d {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_box box;
__le64 offset;
__le32 resource_id;
__le32 level;
__le32 stride;
__le32 layer_stride;
} _PACKED;
/* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */
#define VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP (1 << 0)
struct virtio_gpu_resource_create_3d {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
__le32 target;
__le32 format;
__le32 bind;
__le32 width;
__le32 height;
__le32 depth;
__le32 array_size;
__le32 last_level;
__le32 nr_samples;
__le32 flags;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_CMD_CTX_CREATE */
struct virtio_gpu_ctx_create {
struct virtio_gpu_ctrl_hdr hdr;
__le32 nlen;
__le32 padding;
char debug_name[64];
} _PACKED;
/* VIRTIO_GPU_CMD_CTX_DESTROY */
struct virtio_gpu_ctx_destroy {
struct virtio_gpu_ctrl_hdr hdr;
} _PACKED;
/* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */
struct virtio_gpu_ctx_resource {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_CMD_SUBMIT_3D */
struct virtio_gpu_cmd_submit {
struct virtio_gpu_ctrl_hdr hdr;
__le32 size;
__le32 padding;
} _PACKED;
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
struct virtio_gpu_ctrl_hdr hdr;
__le32 capset_index;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_RESP_OK_CAPSET_INFO */
struct virtio_gpu_resp_capset_info {
struct virtio_gpu_ctrl_hdr hdr;
__le32 capset_id;
__le32 capset_max_version;
__le32 capset_max_size;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_CMD_GET_CAPSET */
struct virtio_gpu_get_capset {
struct virtio_gpu_ctrl_hdr hdr;
__le32 capset_id;
__le32 capset_version;
} _PACKED;
/* VIRTIO_GPU_RESP_OK_CAPSET */
struct virtio_gpu_resp_capset {
struct virtio_gpu_ctrl_hdr hdr;
__u8 capset_data[];
} _PACKED;
/* VIRTIO_GPU_CMD_GET_EDID */
struct virtio_gpu_cmd_get_edid {
struct virtio_gpu_ctrl_hdr hdr;
__le32 scanout;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_RESP_OK_EDID */
struct virtio_gpu_resp_edid {
struct virtio_gpu_ctrl_hdr hdr;
__le32 size;
__le32 padding;
__u8 edid[1024];
} _PACKED;
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
struct virtio_gpu_config {
__le32 events_read;
__le32 events_clear;
__le32 num_scanouts;
__le32 num_capsets;
} _PACKED;
/* simple formats for fbcon/X use */
enum virtio_gpu_formats {
VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1,
VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2,
VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3,
VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4,
VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67,
VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68,
VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121,
VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
};
/* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */
struct virtio_gpu_resource_assign_uuid {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
__le32 padding;
} _PACKED;
/* VIRTIO_GPU_RESP_OK_RESOURCE_UUID */
struct virtio_gpu_resp_resource_uuid {
struct virtio_gpu_ctrl_hdr hdr;
__u8 uuid[16];
} _PACKED;
/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
struct virtio_gpu_resource_create_blob {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001
#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002
#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003
#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001
#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002
#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
/* zero is invalid blob mem */
__le32 blob_mem;
__le32 blob_flags;
__le32 nr_entries;
__le64 blob_id;
__le64 size;
/*
* sizeof(nr_entries * virtio_gpu_mem_entry) bytes follow
*/
} _PACKED;
/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */
struct virtio_gpu_set_scanout_blob {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
__le32 scanout_id;
__le32 resource_id;
__le32 width;
__le32 height;
__le32 format;
__le32 padding;
__le32 strides[4];
__le32 offsets[4];
} _PACKED;
/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */
struct virtio_gpu_resource_map_blob {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
__le32 padding;
__le64 offset;
} _PACKED;
/* VIRTIO_GPU_RESP_OK_MAP_INFO */
#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
#define VIRTIO_GPU_MAP_CACHE_WC 0x03
struct virtio_gpu_resp_map_info {
struct virtio_gpu_ctrl_hdr hdr;
__u32 map_info;
__u32 padding;
} _PACKED;
/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */
struct virtio_gpu_resource_unmap_blob {
struct virtio_gpu_ctrl_hdr hdr;
__le32 resource_id;
__le32 padding;
} _PACKED;
#endif


@@ -0,0 +1,896 @@
/*
* Copyright 2023, Jérôme Duval, jerome.duval@gmail.com.
* Distributed under the terms of the MIT License.
*/
#include <new>
#include <graphic_driver.h>
#include <lock.h>
#include <virtio.h>
#include <virtio_info.h>
#include <util/AutoLock.h>
#include "viogpu.h"
#define VIRTIO_GPU_DRIVER_MODULE_NAME "drivers/graphics/virtio_gpu/driver_v1"
#define VIRTIO_GPU_DEVICE_MODULE_NAME "drivers/graphics/virtio_gpu/device_v1"
#define VIRTIO_GPU_DEVICE_ID_GENERATOR "virtio_gpu/device_id"
typedef struct {
device_node* node;
::virtio_device virtio_device;
virtio_device_interface* virtio;
uint64 features;
::virtio_queue controlQueue;
mutex commandLock;
area_id commandArea;
addr_t commandBuffer;
phys_addr_t commandPhysAddr;
sem_id commandDone;
uint64 fenceId;
::virtio_queue cursorQueue;
int displayResourceId;
uint32 framebufferWidth;
uint32 framebufferHeight;
area_id framebufferArea;
addr_t framebuffer;
size_t framebufferSize;
uint32 displayWidth;
uint32 displayHeight;
thread_id updateThread;
bool updateThreadRunning;
area_id sharedArea;
virtio_gpu_shared_info* sharedInfo;
} virtio_gpu_driver_info;
typedef struct {
virtio_gpu_driver_info* info;
} virtio_gpu_handle;
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <fs/devfs.h>
#define ROUND_TO_PAGE_SIZE(x) (((x) + (B_PAGE_SIZE) - 1) & ~((B_PAGE_SIZE) - 1))
#define DEVICE_NAME "virtio_gpu"
#define ACCELERANT_NAME "virtio_gpu.accelerant"
#define TRACE_VIRTIO_GPU
#ifdef TRACE_VIRTIO_GPU
# define TRACE(x...) dprintf(DEVICE_NAME ": " x)
#else
# define TRACE(x...) ;
#endif
#define ERROR(x...) dprintf("\33[33m" DEVICE_NAME ":\33[0m " x)
#define CALLED() TRACE("CALLED %s\n", __PRETTY_FUNCTION__)
static device_manager_info* sDeviceManager;
static void virtio_gpu_vqwait(void* driverCookie, void* cookie);
const char*
get_feature_name(uint64 feature)
{
switch (feature) {
case VIRTIO_GPU_F_VIRGL:
return "virgl";
case VIRTIO_GPU_F_EDID:
return "edid";
case VIRTIO_GPU_F_RESOURCE_UUID:
return "res_uuid";
case VIRTIO_GPU_F_RESOURCE_BLOB:
return "res_blob";
}
return NULL;
}
static status_t
virtio_gpu_drain_queues(virtio_gpu_driver_info* info)
{
while (info->virtio->queue_dequeue(info->controlQueue, NULL, NULL))
;
while (info->virtio->queue_dequeue(info->cursorQueue, NULL, NULL))
;
return B_OK;
}
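/*! Copies \a cmd into the physically contiguous command buffer, queues it on
the control queue together with a response descriptor placed right behind it,
and blocks until the device signals completion. A fence id is attached to each
command and checked against the response. The caller must hold the command
lock.
*/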
status_t
virtio_gpu_send_cmd(virtio_gpu_driver_info* info, void *cmd, size_t cmdSize, void *response,
size_t responseSize)
{
struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *)info->commandBuffer;
struct virtio_gpu_ctrl_hdr *responseHdr = (struct virtio_gpu_ctrl_hdr *)response;
memcpy((void*)info->commandBuffer, cmd, cmdSize);
memset((void*)(info->commandBuffer + cmdSize), 0, responseSize);
hdr->flags |= VIRTIO_GPU_FLAG_FENCE;
hdr->fence_id = ++info->fenceId;
physical_entry entries[] {
{ info->commandPhysAddr, cmdSize },
{ info->commandPhysAddr + cmdSize, responseSize },
};
if (!info->virtio->queue_is_empty(info->controlQueue))
return B_ERROR;
status_t status = info->virtio->queue_request_v(info->controlQueue, entries, 1, 1, NULL);
if (status != B_OK)
return status;
acquire_sem(info->commandDone);
while (!info->virtio->queue_dequeue(info->controlQueue, NULL, NULL))
spin(10);
memcpy(response, (void*)(info->commandBuffer + cmdSize), responseSize);
if (responseHdr->fence_id != info->fenceId) {
ERROR("response fence id not right\n");
}
return B_OK;
}
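/*! Queries the host for the scanout configuration and records the size of
scanout 0 as the current display resolution.
*/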
status_t
virtio_gpu_get_display_info(virtio_gpu_driver_info* info)
{
CALLED();
struct virtio_gpu_ctrl_hdr hdr = {};
struct virtio_gpu_resp_display_info displayInfo = {};
hdr.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;
virtio_gpu_send_cmd(info, &hdr, sizeof(hdr), &displayInfo, sizeof(displayInfo));
if (displayInfo.hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO) {
ERROR("failed getting display info\n");
return B_ERROR;
}
if (!displayInfo.pmodes[0].enabled) {
ERROR("pmodes[0] is not enabled\n");
return B_BAD_VALUE;
}
info->displayWidth = displayInfo.pmodes[0].r.width;
info->displayHeight = displayInfo.pmodes[0].r.height;
TRACE("virtio_gpu_get_display_info width %" B_PRIu32 " height %" B_PRIu32 "\n",
info->displayWidth, info->displayHeight);
return B_OK;
}
status_t
virtio_gpu_get_edids(virtio_gpu_driver_info* info, int scanout)
{
CALLED();
struct virtio_gpu_cmd_get_edid getEdid = {};
struct virtio_gpu_resp_edid response = {};
getEdid.hdr.type = VIRTIO_GPU_CMD_GET_EDID;
getEdid.scanout = scanout;
virtio_gpu_send_cmd(info, &getEdid, sizeof(getEdid), &response, sizeof(response));
if (response.hdr.type != VIRTIO_GPU_RESP_OK_EDID) {
ERROR("failed getting edids %d\n", response.hdr.type);
return B_ERROR;
}
info->sharedInfo->has_edid = true;
memcpy(&info->sharedInfo->edid_raw, response.edid, sizeof(edid1_raw));
TRACE("virtio_gpu_get_edids success\n");
return B_OK;
}
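/*! Creates a host-side 2D resource of the given size, always in B8G8R8X8
format to match the B_RGB32 framebuffer.
*/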
status_t
virtio_gpu_create_2d(virtio_gpu_driver_info* info, int resourceId, int width, int height)
{
CALLED();
struct virtio_gpu_resource_create_2d resource = {};
struct virtio_gpu_ctrl_hdr response = {};
resource.hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
resource.resource_id = resourceId;
resource.format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
resource.width = width;
resource.height = height;
virtio_gpu_send_cmd(info, &resource, sizeof(resource), &response, sizeof(response));
if (response.type != VIRTIO_GPU_RESP_OK_NODATA) {
ERROR("viogpu_create_2d: failed %d\n", response.type);
return B_ERROR;
}
return B_OK;
}
status_t
virtio_gpu_unref(virtio_gpu_driver_info* info, int resourceId)
{
CALLED();
struct virtio_gpu_resource_unref resource = {};
struct virtio_gpu_ctrl_hdr response = {};
resource.hdr.type = VIRTIO_GPU_CMD_RESOURCE_UNREF;
resource.resource_id = resourceId;
virtio_gpu_send_cmd(info, &resource, sizeof(resource), &response, sizeof(response));
if (response.type != VIRTIO_GPU_RESP_OK_NODATA) {
ERROR("virtio_gpu_unref: failed %d\n", response.type);
return B_ERROR;
}
return B_OK;
}
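/*! Hands the physical pages of the framebuffer area to the host as backing
store for \a resourceId, using up to 16 scatter/gather entries.
*/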
status_t
virtio_gpu_attach_backing(virtio_gpu_driver_info* info, int resourceId)
{
CALLED();
struct virtio_gpu_resource_attach_backing_entries {
struct virtio_gpu_resource_attach_backing backing;
struct virtio_gpu_mem_entry entries[16];
} _PACKED backing = {};
struct virtio_gpu_ctrl_hdr response = {};
physical_entry entries[16] = {};
status_t status = get_memory_map((void*)info->framebuffer, info->framebufferSize, entries, 16);
if (status != B_OK) {
ERROR("virtio_gpu_attach_backing get_memory_map failed: %s\n", strerror(status));
return status;
}
backing.backing.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
backing.backing.resource_id = resourceId;
for (int i = 0; i < 16; i++) {
if (entries[i].size == 0)
break;
TRACE("virtio_gpu_attach_backing %d %lx %lx\n", i, entries[i].address, entries[i].size);
backing.entries[i].addr = entries[i].address;
backing.entries[i].length = entries[i].size;
backing.backing.nr_entries++;
}
virtio_gpu_send_cmd(info, &backing, sizeof(backing), &response, sizeof(response));
if (response.type != VIRTIO_GPU_RESP_OK_NODATA) {
ERROR("virtio_gpu_attach_backing failed: %d\n", response.type);
return B_ERROR;
}
return B_OK;
}
status_t
virtio_gpu_detach_backing(virtio_gpu_driver_info* info, int resourceId)
{
CALLED();
struct virtio_gpu_resource_detach_backing backing;
struct virtio_gpu_ctrl_hdr response = {};
backing.hdr.type = VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING;
backing.resource_id = resourceId;
virtio_gpu_send_cmd(info, &backing, sizeof(backing), &response, sizeof(response));
if (response.type != VIRTIO_GPU_RESP_OK_NODATA) {
ERROR("virtio_gpu_detach_backing failed: %d\n", response.type);
return B_ERROR;
}
return B_OK;
}
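/*! Points scanout \a scanoutId at \a resourceId; a resource id of 0 disables
the scanout, which is done while switching display modes.
*/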
status_t
virtio_gpu_set_scanout(virtio_gpu_driver_info* info, int scanoutId, int resourceId,
uint32 width, uint32 height)
{
CALLED();
struct virtio_gpu_set_scanout set_scanout = {};
struct virtio_gpu_ctrl_hdr response = {};
set_scanout.hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT;
set_scanout.scanout_id = scanoutId;
set_scanout.resource_id = resourceId;
set_scanout.r.width = width;
set_scanout.r.height = height;
virtio_gpu_send_cmd(info, &set_scanout, sizeof(set_scanout), &response, sizeof(response));
if (response.type != VIRTIO_GPU_RESP_OK_NODATA) {
ERROR("virtio_gpu_set_scanout failed %d\n", response.type);
return B_ERROR;
}
return B_OK;
}
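/*! Copies the given region of the guest framebuffer into the host-side
resource.
*/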
status_t
virtio_gpu_transfer_to_host_2d(virtio_gpu_driver_info* info, int resourceId,
uint32 width, uint32 height)
{
struct virtio_gpu_transfer_to_host_2d transferToHost = {};
struct virtio_gpu_ctrl_hdr response = {};
transferToHost.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
transferToHost.resource_id = resourceId;
transferToHost.r.width = width;
transferToHost.r.height = height;
virtio_gpu_send_cmd(info, &transferToHost, sizeof(transferToHost), &response,
sizeof(response));
if (response.type != VIRTIO_GPU_RESP_OK_NODATA) {
ERROR("virtio_gpu_transfer_to_host_2d failed %d\n", response.type);
return B_ERROR;
}
return B_OK;
}
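/*! Asks the host to redraw the flushed region of \a resourceId on any scanout
showing it.
*/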
status_t
virtio_gpu_flush_resource(virtio_gpu_driver_info* info, int resourceId, uint32 width,
uint32 height)
{
struct virtio_gpu_resource_flush resourceFlush = {};
struct virtio_gpu_ctrl_hdr response = {};
resourceFlush.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
resourceFlush.resource_id = resourceId;
resourceFlush.r.width = width;
resourceFlush.r.height = height;
virtio_gpu_send_cmd(info, &resourceFlush, sizeof(resourceFlush), &response, sizeof(response));
if (response.type != VIRTIO_GPU_RESP_OK_NODATA) {
ERROR("virtio_gpu_flush_resource failed %d\n", response.type);
return B_ERROR;
}
return B_OK;
}
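/*! Pushes the guest framebuffer to the host resource and flushes it to the
scanout roughly every 20ms (about 50Hz), as there is no dirty region tracking
yet.
*/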
status_t
virtio_update_thread(void *arg)
{
virtio_gpu_driver_info* info = (virtio_gpu_driver_info*)arg;
while (info->updateThreadRunning) {
bigtime_t start = system_time();
MutexLocker commandLocker(&info->commandLock);
virtio_gpu_transfer_to_host_2d(info, info->displayResourceId, info->displayWidth,
info->displayHeight);
virtio_gpu_flush_resource(info, info->displayResourceId, info->displayWidth, info->displayHeight);
bigtime_t delay = system_time() - start;
if (delay < 20000)
snooze(20000 - delay);
}
return B_OK;
}
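/*! Switches the display mode: creates a new 2D resource with the requested
size, attaches the existing framebuffer memory as its backing store, releases
the previous resource and points scanout 0 at the new one. The framebuffer
area itself is never reallocated.
*/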
status_t
virtio_gpu_set_display_mode(virtio_gpu_driver_info* info, display_mode *mode)
{
CALLED();
int newResourceId = info->displayResourceId + 1;
// create framebuffer area
TRACE("virtio_gpu_set_display_mode %" B_PRIu32 " %" B_PRIu32 "\n", mode->virtual_width,
mode->virtual_height);
status_t status = virtio_gpu_create_2d(info, newResourceId, mode->virtual_width, mode->virtual_height);
if (status != B_OK)
return status;
status = virtio_gpu_attach_backing(info, newResourceId);
if (status != B_OK)
return status;
status = virtio_gpu_unref(info, info->displayResourceId);
if (status != B_OK)
return status;
info->displayResourceId = newResourceId;
info->displayWidth = mode->virtual_width;
info->displayHeight = mode->virtual_height;
status = virtio_gpu_set_scanout(info, 0, 0, 0, 0);
if (status != B_OK)
return status;
status = virtio_gpu_set_scanout(info, 0, info->displayResourceId, info->displayWidth, info->displayHeight);
if (status != B_OK)
return status;
status = virtio_gpu_transfer_to_host_2d(info, info->displayResourceId, info->displayWidth, info->displayHeight);
if (status != B_OK)
return status;
status = virtio_gpu_flush_resource(info, info->displayResourceId, info->displayWidth, info->displayHeight);
if (status != B_OK)
return status;
{
virtio_gpu_shared_info& sharedInfo = *info->sharedInfo;
sharedInfo.frame_buffer_area = info->framebufferArea;
sharedInfo.frame_buffer = (uint8*)info->framebuffer;
sharedInfo.bytes_per_row = info->displayWidth * 4;
sharedInfo.current_mode.virtual_width = info->displayWidth;
sharedInfo.current_mode.virtual_height = info->displayHeight;
sharedInfo.current_mode.space = B_RGB32;
}
return B_OK;
}
// #pragma mark - device module API
static status_t
virtio_gpu_init_device(void* _info, void** _cookie)
{
CALLED();
virtio_gpu_driver_info* info = (virtio_gpu_driver_info*)_info;
device_node* parent = sDeviceManager->get_parent_node(info->node);
sDeviceManager->get_driver(parent, (driver_module_info**)&info->virtio,
(void**)&info->virtio_device);
sDeviceManager->put_node(parent);
info->virtio->negotiate_features(info->virtio_device, VIRTIO_GPU_F_EDID,
&info->features, &get_feature_name);
// TODO read config
// Setup queues
::virtio_queue virtioQueues[2];
status_t status = info->virtio->alloc_queues(info->virtio_device, 2,
virtioQueues);
if (status != B_OK) {
ERROR("queue allocation failed (%s)\n", strerror(status));
return status;
}
info->controlQueue = virtioQueues[0];
info->cursorQueue = virtioQueues[1];
// create command buffer area
info->commandArea = create_area("virtiogpu command buffer", (void**)&info->commandBuffer,
B_ANY_KERNEL_BLOCK_ADDRESS, B_PAGE_SIZE,
B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (info->commandArea < B_OK) {
status = info->commandArea;
goto err1;
}
physical_entry entry;
status = get_memory_map((void*)info->commandBuffer, B_PAGE_SIZE, &entry, 1);
if (status != B_OK)
goto err2;
info->commandPhysAddr = entry.address;
mutex_init(&info->commandLock, "virtiogpu command lock");
// Setup interrupt
status = info->virtio->setup_interrupt(info->virtio_device, NULL, info);
if (status != B_OK) {
ERROR("interrupt setup failed (%s)\n", strerror(status));
goto err3;
}
status = info->virtio->queue_setup_interrupt(info->controlQueue,
virtio_gpu_vqwait, info);
if (status != B_OK) {
ERROR("queue interrupt setup failed (%s)\n", strerror(status));
goto err3;
}
*_cookie = info;
return B_OK;
err3:
err2:
delete_area(info->commandArea);
err1:
return status;
}
static void
virtio_gpu_uninit_device(void* _cookie)
{
CALLED();
virtio_gpu_driver_info* info = (virtio_gpu_driver_info*)_cookie;
info->virtio->free_interrupts(info->virtio_device);
mutex_destroy(&info->commandLock);
delete_area(info->commandArea);
info->commandArea = -1;
info->virtio->free_queues(info->virtio_device);
}
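/*! Opens the device: allocates the shared info and framebuffer areas, queries
the display configuration (and EDID when available), creates the initial 2D
resource and scanout, and starts the thread that periodically updates the
host's view of the framebuffer.
*/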
static status_t
virtio_gpu_open(void* _info, const char* path, int openMode, void** _cookie)
{
CALLED();
virtio_gpu_driver_info* info = (virtio_gpu_driver_info*)_info;
status_t status;
size_t sharedSize = (sizeof(virtio_gpu_shared_info) + 7) & ~7;
MutexLocker commandLocker;
virtio_gpu_handle* handle = (virtio_gpu_handle*)malloc(
sizeof(virtio_gpu_handle));
if (handle == NULL)
return B_NO_MEMORY;
info->commandDone = create_sem(1, "virtio_gpu_command");
if (info->commandDone < B_OK)
goto error;
info->sharedArea = create_area("virtio_gpu shared info",
(void**)&info->sharedInfo, B_ANY_KERNEL_ADDRESS,
ROUND_TO_PAGE_SIZE(sharedSize), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA);
if (info->sharedArea < 0)
goto error;
memset(info->sharedInfo, 0, sizeof(virtio_gpu_shared_info));
commandLocker.SetTo(&info->commandLock, false, true);
status = virtio_gpu_get_display_info(info);
if (status != B_OK)
goto error;
if ((info->features & VIRTIO_GPU_F_EDID) != 0)
virtio_gpu_get_edids(info, 0);
// allocate the framebuffer at the largest resolution we expose (3840x2160)
// so that every mode fits without having to reallocate it
info->framebufferWidth = 3840;
info->framebufferHeight = 2160;
// create framebuffer area
info->framebufferSize = 4 * info->framebufferWidth * info->framebufferHeight;
info->framebufferArea = create_area("virtio_gpu framebuffer", (void**)&info->framebuffer,
B_ANY_KERNEL_ADDRESS, info->framebufferSize,
B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
if (info->framebufferArea < B_OK) {
status = info->framebufferArea;
goto error;
}
info->displayResourceId = 1;
status = virtio_gpu_create_2d(info, info->displayResourceId, info->displayWidth,
info->displayHeight);
if (status != B_OK)
goto error;
status = virtio_gpu_attach_backing(info, info->displayResourceId);
if (status != B_OK)
goto error;
status = virtio_gpu_set_scanout(info, 0, info->displayResourceId, info->displayWidth,
info->displayHeight);
if (status != B_OK)
goto error;
{
virtio_gpu_shared_info& sharedInfo = *info->sharedInfo;
sharedInfo.frame_buffer_area = info->framebufferArea;
sharedInfo.frame_buffer = (uint8*)info->framebuffer;
sharedInfo.bytes_per_row = info->displayWidth * 4;
sharedInfo.current_mode.virtual_width = info->displayWidth;
sharedInfo.current_mode.virtual_height = info->displayHeight;
sharedInfo.current_mode.space = B_RGB32;
}
info->updateThreadRunning = true;
info->updateThread = spawn_kernel_thread(virtio_update_thread, "virtio_gpu update",
B_DISPLAY_PRIORITY, info);
if (info->updateThread < B_OK)
goto error;
resume_thread(info->updateThread);
handle->info = info;
*_cookie = handle;
return B_OK;
error:
delete_area(info->framebufferArea);
info->framebufferArea = -1;
delete_sem(info->commandDone);
info->commandDone = -1;
free(handle);
return B_ERROR;
}
static status_t
virtio_gpu_close(void* cookie)
{
virtio_gpu_handle* handle = (virtio_gpu_handle*)cookie;
CALLED();
virtio_gpu_driver_info* info = handle->info;
info->updateThreadRunning = false;
delete_sem(info->commandDone);
info->commandDone = -1;
return B_OK;
}
static status_t
virtio_gpu_free(void* cookie)
{
CALLED();
virtio_gpu_handle* handle = (virtio_gpu_handle*)cookie;
virtio_gpu_driver_info* info = handle->info;
int32 result;
wait_for_thread(info->updateThread, &result);
info->updateThread = -1;
virtio_gpu_drain_queues(info);
free(handle);
return B_OK;
}
static void
virtio_gpu_vqwait(void* driverCookie, void* cookie)
{
CALLED();
virtio_gpu_driver_info* info = (virtio_gpu_driver_info*)cookie;
release_sem_etc(info->commandDone, 1, B_DO_NOT_RESCHEDULE);
}
static status_t
virtio_gpu_read(void* cookie, off_t pos, void* buffer, size_t* _length)
{
*_length = 0;
return B_NOT_ALLOWED;
}
static status_t
virtio_gpu_write(void* cookie, off_t pos, const void* buffer,
size_t* _length)
{
*_length = 0;
return B_NOT_ALLOWED;
}
static status_t
virtio_gpu_ioctl(void* cookie, uint32 op, void* buffer, size_t length)
{
CALLED();
virtio_gpu_handle* handle = (virtio_gpu_handle*)cookie;
virtio_gpu_driver_info* info = handle->info;
// TRACE("ioctl(op = %lx)\n", op);
switch (op) {
case B_GET_ACCELERANT_SIGNATURE:
dprintf(DEVICE_NAME ": acc: %s\n", ACCELERANT_NAME);
if (user_strlcpy((char*)buffer, ACCELERANT_NAME,
B_FILE_NAME_LENGTH) < B_OK)
return B_BAD_ADDRESS;
return B_OK;
// needed to share data between kernel and accelerant
case VIRTIO_GPU_GET_PRIVATE_DATA:
return user_memcpy(buffer, &info->sharedArea, sizeof(area_id));
case VIRTIO_GPU_SET_DISPLAY_MODE:
{
if (length != sizeof(display_mode))
return B_BAD_VALUE;
display_mode mode;
if (user_memcpy(&mode, buffer, sizeof(display_mode)) != B_OK)
return B_BAD_ADDRESS;
MutexLocker commandLocker(&info->commandLock);
return virtio_gpu_set_display_mode(info, &mode);
}
default:
ERROR("ioctl: unknown message %" B_PRIx32 "\n", op);
break;
}
return B_DEV_INVALID_IOCTL;
}
// #pragma mark - driver module API
static float
virtio_gpu_supports_device(device_node* parent)
{
CALLED();
const char* bus;
uint16 deviceType;
// make sure parent is really the Virtio bus manager
if (sDeviceManager->get_attr_string(parent, B_DEVICE_BUS, &bus, false))
return -1;
if (strcmp(bus, "virtio"))
return 0.0;
// check whether it's really a Virtio GPU device
if (sDeviceManager->get_attr_uint16(parent, VIRTIO_DEVICE_TYPE_ITEM,
&deviceType, true) != B_OK || deviceType != VIRTIO_DEVICE_ID_GPU)
return 0.0;
TRACE("Virtio gpu device found!\n");
return 0.6;
}
static status_t
virtio_gpu_register_device(device_node* node)
{
CALLED();
device_attr attrs[] = {
{ B_DEVICE_PRETTY_NAME, B_STRING_TYPE, {.string = "Virtio GPU"} },
{ NULL }
};
return sDeviceManager->register_node(node, VIRTIO_GPU_DRIVER_MODULE_NAME,
attrs, NULL, NULL);
}
static status_t
virtio_gpu_init_driver(device_node* node, void** cookie)
{
CALLED();
virtio_gpu_driver_info* info = (virtio_gpu_driver_info*)malloc(
sizeof(virtio_gpu_driver_info));
if (info == NULL)
return B_NO_MEMORY;
memset(info, 0, sizeof(*info));
info->node = node;
*cookie = info;
return B_OK;
}
static void
virtio_gpu_uninit_driver(void* _cookie)
{
CALLED();
virtio_gpu_driver_info* info = (virtio_gpu_driver_info*)_cookie;
free(info);
}
static status_t
virtio_gpu_register_child_devices(void* _cookie)
{
CALLED();
virtio_gpu_driver_info* info = (virtio_gpu_driver_info*)_cookie;
status_t status;
int32 id = sDeviceManager->create_id(VIRTIO_GPU_DEVICE_ID_GENERATOR);
if (id < 0)
return id;
char name[64];
snprintf(name, sizeof(name), "graphics/virtio/%" B_PRId32,
id);
status = sDeviceManager->publish_device(info->node, name,
VIRTIO_GPU_DEVICE_MODULE_NAME);
return status;
}
// #pragma mark -
module_dependency module_dependencies[] = {
{B_DEVICE_MANAGER_MODULE_NAME, (module_info**)&sDeviceManager},
{}
};
struct device_module_info sVirtioGpuDevice = {
{
VIRTIO_GPU_DEVICE_MODULE_NAME,
0,
NULL
},
virtio_gpu_init_device,
virtio_gpu_uninit_device,
NULL, // remove,
virtio_gpu_open,
virtio_gpu_close,
virtio_gpu_free,
virtio_gpu_read,
virtio_gpu_write,
NULL, // io
virtio_gpu_ioctl,
NULL, // select
NULL, // deselect
};
struct driver_module_info sVirtioGpuDriver = {
{
VIRTIO_GPU_DRIVER_MODULE_NAME,
0,
NULL
},
virtio_gpu_supports_device,
virtio_gpu_register_device,
virtio_gpu_init_driver,
virtio_gpu_uninit_driver,
virtio_gpu_register_child_devices,
NULL, // rescan
NULL, // removed
};
module_info* modules[] = {
(module_info*)&sVirtioGpuDriver,
(module_info*)&sVirtioGpuDevice,
NULL
};