Commit bcb53e57 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'staging-4.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging

Pull staging driver fixes from Greg KH:
 "Here are some small staging driver fixes for reported issues for
  4.13-rc2.

  Also in here is a new driver, the virtualbox DRM driver. It's
  stand-alone and got acks from the DRM developers to go in through this
  tree. It's a new thing, but it should be fine for this point in the rc
  cycle due to it being independent.

  All of this has been in linux-next for a while with no reported
  issues"

* tag 'staging-4.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging:
  staging: rtl8188eu: add TL-WN722N v2 support
  staging: speakup: safely register and unregister ldisc
  staging: speakup: add functions to register and unregister ldisc
  staging: speakup: safely close tty
  staging: sm750fb: avoid conflicting vesafb
  staging: lustre: ko2iblnd: check copy_from_iter/copy_to_iter return code
  staging: vboxvideo: Add vboxvideo to drivers/staging
  staging: sm750fb: fixed a assignment typo
  staging: rtl8188eu: memory leak in rtw_free_cmd_obj()
  staging: vchiq_arm: fix error codes in probe
  staging: comedi: ni_mio_common: fix AO timer off-by-one regression
parents 82abbea7 5a1d4c5d
......@@ -110,4 +110,6 @@ source "drivers/staging/ccree/Kconfig"
source "drivers/staging/typec/Kconfig"
source "drivers/staging/vboxvideo/Kconfig"
endif # STAGING
......@@ -44,3 +44,4 @@ obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
......@@ -3116,8 +3116,7 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
/* following line: 2-1 per STC */
ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
/* following line: N-1 per STC */
ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
} else { /* TRIG_EXT */
/* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
......
......@@ -1640,8 +1640,13 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
ibmsg = tx->tx_msg;
ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE,
rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob,
&from);
if (rc != payload_nob) {
kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
return -EFAULT;
}
nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
......@@ -1741,8 +1746,14 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
break;
}
copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload,
IBLND_MSG_SIZE, to);
rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen,
to);
if (rc != rlen) {
rc = -EFAULT;
break;
}
rc = 0;
lnet_finalize(ni, lntmsg, 0);
break;
......
......@@ -132,7 +132,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
kfree(pcmd->parmbuf);
}
if (!pcmd->rsp) {
if (pcmd->rsp) {
if (pcmd->rspsz != 0) {
/* free rsp in cmd_obj */
kfree(pcmd->rsp);
......
......@@ -43,6 +43,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{} /* Terminating entry */
};
......
......@@ -40,7 +40,7 @@ static unsigned int get_mxclk_freq(void)
pll_reg = peek32(MXCLK_PLL_CTRL);
M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT;
N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_M_SHIFT;
N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT;
OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT;
POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT;
......
......@@ -1053,6 +1053,26 @@ static int sm750fb_frambuffer_alloc(struct sm750_dev *sm750_dev, int fbidx)
return err;
}
static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
if (!ap)
return -ENOMEM;
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW;
#endif
remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
kfree(ap);
return 0;
}
static int lynxfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
......@@ -1061,6 +1081,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
int fbidx;
int err;
err = lynxfb_kick_out_firmware_fb(pdev);
if (err)
return err;
/* enable device */
err = pcim_enable_device(pdev);
if (err)
......
......@@ -2314,6 +2314,7 @@ static void __exit speakup_exit(void)
mutex_lock(&spk_mutex);
synth_release();
mutex_unlock(&spk_mutex);
spk_ttyio_unregister_ldisc();
speakup_kobj_exit();
......@@ -2376,6 +2377,7 @@ static int __init speakup_init(void)
if (err)
goto error_kobjects;
spk_ttyio_register_ldisc();
synth_init(synth_name);
speakup_register_devsynth();
/*
......
......@@ -48,6 +48,8 @@ void spk_stop_serial_interrupt(void);
int spk_wait_for_xmitr(struct spk_synth *in_synth);
void spk_serial_release(void);
void spk_ttyio_release(void);
void spk_ttyio_register_ldisc(void);
void spk_ttyio_unregister_ldisc(void);
void synth_buffer_skip_nonlatin1(void);
u16 synth_buffer_getc(void);
......
......@@ -154,12 +154,6 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
struct ktermios tmp_termios;
dev_t dev;
ret = tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops);
if (ret) {
pr_err("Error registering line discipline.\n");
return ret;
}
ret = get_dev_to_use(synth, &dev);
if (ret)
return ret;
......@@ -196,10 +190,24 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
tty_unlock(tty);
ret = tty_set_ldisc(tty, N_SPEAKUP);
if (ret)
pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
return ret;
}
void spk_ttyio_register_ldisc(void)
{
if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops))
pr_warn("speakup: Error registering line discipline. Most synths won't work.\n");
}
void spk_ttyio_unregister_ldisc(void)
{
if (tty_unregister_ldisc(N_SPEAKUP))
pr_warn("speakup: Couldn't unregister ldisc\n");
}
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
{
if (in_synth->alive && speakup_tty && speakup_tty->ops->write) {
......@@ -300,7 +308,7 @@ void spk_ttyio_release(void)
tty_ldisc_flush(speakup_tty);
tty_unlock(speakup_tty);
tty_ldisc_release(speakup_tty);
tty_release_struct(speakup_tty, speakup_tty->index);
}
EXPORT_SYMBOL_GPL(spk_ttyio_release);
......
config DRM_VBOXVIDEO
tristate "Virtual Box Graphics Card"
depends on DRM && X86 && PCI
select DRM_KMS_HELPER
help
This is a KMS driver for the virtual Graphics Card used in
Virtual Box virtual machines.
Although it is possible to build this driver into the kernel, it is
advised to build it as a module, so that it can be updated
independently of the kernel. Select M to build this driver as a
module and add support for these devices via drm/kms interfaces.
ccflags-y := -Iinclude/drm
vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
vbox_mode.o vbox_prime.o vbox_ttm.o
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
TODO:
-Move the driver over to the atomic API
-Stop using old load / unload drm_driver hooks
-Get a full review from the drm-maintainers on dri-devel done on this driver
-Extend this TODO with the results of that review
Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Hans de Goede <hdegoede@redhat.com> and
Michael Thayer <michael.thayer@oracle.com>.
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vbox_drv.h"
#include "vbox_err.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_channels.h"
#include "hgsmi_ch_setup.h"
/**
 * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
 * @param ctx       the context of the guest heap to use.
 * @param location  the offset chosen for the flags within guest VRAM.
 * @returns 0 on success, -errno on failure
 */
int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
{
	struct hgsmi_buffer_location *p;

	/* Command buffer is allocated from the guest heap and freed below. */
	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
			       HGSMI_CC_HOST_FLAGS_LOCATION);
	if (!p)
		return -ENOMEM;

	p->buf_location = location;
	p->buf_len = sizeof(struct hgsmi_host_flags);

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);

	return 0;
}
/**
 * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
 * @param ctx   the context of the guest heap to use.
 * @param caps  the capabilities to report, see vbva_caps.
 * @returns 0 on success, -errno on failure
 */
int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps)
{
	struct vbva_caps *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
	if (!p)
		return -ENOMEM;

	/* Pre-set to a failure code; the host overwrites rc when it acts. */
	p->rc = VERR_NOT_IMPLEMENTED;
	p->caps = caps;

	hgsmi_buffer_submit(ctx, p);

	/* A host-side failure is unexpected but not fatal — just warn once. */
	WARN_ON_ONCE(RT_FAILURE(p->rc));

	hgsmi_buffer_free(ctx, p);

	return 0;
}
/**
 * Sanity-check the HGSMI configuration query mechanism by querying a
 * known-unsupported index (U32_MAX) and checking that the host echoes the
 * sentinel value back unchanged.
 * @param ctx  the context containing the heap used.
 * @returns 0 on success, -errno on failure (-EIO if the echo mismatches)
 */
int hgsmi_test_query_conf(struct gen_pool *ctx)
{
	u32 value = 0;
	int ret;

	ret = hgsmi_query_conf(ctx, U32_MAX, &value);
	if (ret)
		return ret;

	return value == U32_MAX ? 0 : -EIO;
}
/**
 * Query the host for an HGSMI configuration parameter via an HGSMI command.
 * @param ctx        the context containing the heap used
 * @param index      the index of the parameter to query,
 *                   @see vbva_conf32::index
 * @param value_ret  where to store the value of the parameter on success
 * @returns 0 on success, -errno on failure
 */
int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
{
	struct vbva_conf32 *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
			       VBVA_QUERY_CONF32);
	if (!p)
		return -ENOMEM;

	p->index = index;
	/* Sentinel: the host replaces this with the real value in-place. */
	p->value = U32_MAX;

	hgsmi_buffer_submit(ctx, p);

	*value_ret = p->value;

	hgsmi_buffer_free(ctx, p);

	return 0;
}
/**
 * Pass the host a new mouse pointer shape via an HGSMI command.
 *
 * @param ctx      the context containing the heap to be used
 * @param flags    cursor flags, @see VMMDevReqMousePointer::flags
 * @param hot_x    horizontal position of the hot spot
 * @param hot_y    vertical position of the hot spot
 * @param width    width in pixels of the cursor
 * @param height   height in pixels of the cursor
 * @param pixels   pixel data, @see VMMDevReqMousePointer for the format
 * @param len      size in bytes of the pixel data
 * @returns 0 on success, -errno on failure
 */
int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
			       u32 hot_x, u32 hot_y, u32 width, u32 height,
			       u8 *pixels, u32 len)
{
	struct vbva_mouse_pointer_shape *p;
	u32 pixel_len = 0;
	int rc;

	if (flags & VBOX_MOUSE_POINTER_SHAPE) {
		/*
		 * Size of the pointer data:
		 * sizeof (AND mask) + sizeof (XOR_MASK)
		 * The AND mask is 1 bpp, each scan line padded to a 4-byte
		 * multiple; the XOR mask is 32 bpp.
		 */
		pixel_len = ((((width + 7) / 8) * height + 3) & ~3) +
			    width * 4 * height;
		/* Reject a caller-supplied buffer shorter than the shape. */
		if (pixel_len > len)
			return -EINVAL;

		/*
		 * If shape is supplied, then always create the pointer visible.
		 * See comments in 'vboxUpdatePointerShape'
		 */
		flags |= VBOX_MOUSE_POINTER_VISIBLE;
	}

	/* Variable-length command: fixed header followed by the pixel data. */
	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
			       VBVA_MOUSE_POINTER_SHAPE);
	if (!p)
		return -ENOMEM;

	p->result = VINF_SUCCESS;
	p->flags = flags;
	/*
	 * NOTE(review): "hot_X" (capital X) presumably mirrors the field name
	 * in struct vbva_mouse_pointer_shape — confirm against the header.
	 */
	p->hot_X = hot_x;
	p->hot_y = hot_y;
	p->width = width;
	p->height = height;
	if (pixel_len)
		memcpy(p->data, pixels, pixel_len);

	hgsmi_buffer_submit(ctx, p);

	/* Translate the VBox status code written by the host into -errno. */
	switch (p->result) {
	case VINF_SUCCESS:
		rc = 0;
		break;
	case VERR_NO_MEMORY:
		rc = -ENOMEM;
		break;
	case VERR_NOT_SUPPORTED:
		rc = -EBUSY;
		break;
	default:
		rc = -EINVAL;
	}

	hgsmi_buffer_free(ctx, p);

	return rc;
}
/**
 * Report the guest cursor position. The host may wish to use this information
 * to re-position its own cursor (though this is currently unlikely). The
 * current host cursor position is returned.
 * @param ctx              The context containing the heap used.
 * @param report_position  Are we reporting a position?
 * @param x                Guest cursor X position.
 * @param y                Guest cursor Y position.
 * @param x_host           Host cursor X position is stored here. Optional.
 * @param y_host           Host cursor Y position is stored here. Optional.
 * @returns 0 on success, -errno on failure
 */
int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
			  u32 x, u32 y, u32 *x_host, u32 *y_host)
{
	struct vbva_cursor_position *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
			       VBVA_CURSOR_POSITION);
	if (!p)
		return -ENOMEM;

	p->report_position = report_position;
	p->x = x;
	p->y = y;

	hgsmi_buffer_submit(ctx, p);

	/* The host writes its own cursor position back into the buffer. */
	*x_host = p->x;
	*y_host = p->y;

	hgsmi_buffer_free(ctx, p);

	return 0;
}
/**
* @todo Mouse pointer position to be read from VMMDev memory, address of the
* memory region can be queried from VMMDev via an IOCTL. This VMMDev memory
* region will contain host information which is needed by the guest.
*
* Reading will not cause a switch to the host.
*
* Have to take into account:
* * synchronization: host must write to the memory only from EMT,
* large structures must be read under flag, which tells the host
* that the guest is currently reading the memory (OWNER flag?).
* * guest writes: may be allocate a page for the host info and make
* the page readonly for the guest.
* * the information should be available only for additions drivers.
* * VMMDev additions driver will inform the host which version of the info
* it expects, host must support all versions.
*/
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __HGSMI_CH_SETUP_H__
#define __HGSMI_CH_SETUP_H__
/*
* Tell the host the location of hgsmi_host_flags structure, where the host
* can write information about pending buffers, etc, and which can be quickly
* polled by the guest without a need to port IO.
*/
#define HGSMI_CC_HOST_FLAGS_LOCATION 0
struct hgsmi_buffer_location {
u32 buf_location;
u32 buf_len;
} __packed;
/* HGSMI setup and configuration data structures. */
/* host->guest commands pending, should be accessed under FIFO lock only */
#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u
/* IRQ is fired, should be accessed under VGAState::lock only */
#define HGSMIHOSTFLAGS_IRQ 0x02u
/* vsync interrupt flag, should be accessed under VGAState::lock only */
#define HGSMIHOSTFLAGS_VSYNC 0x10u
/** monitor hotplug flag, should be accessed under VGAState::lock only */
#define HGSMIHOSTFLAGS_HOTPLUG 0x20u
/**
* Cursor capability state change flag, should be accessed under
* VGAState::lock only. @see vbva_conf32.
*/
#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u
struct hgsmi_host_flags {
/*
* Host flags can be accessed and modified in multiple threads
* concurrently, e.g. CrOpenGL HGCM and GUI threads when completing
* HGSMI 3D and Video Accel respectively, EMT thread when dealing with
* HGSMI command processing, etc.
* Besides settings/cleaning flags atomically, some flags have their
* own special sync restrictions, see comments for flags above.
*/
u32 host_flags;
u32 reserved[3];
} __packed;
#endif
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __HGSMI_CHANNELS_H__
#define __HGSMI_CHANNELS_H__
/*
* Each channel has an 8 bit identifier. There are a number of predefined
* (hardcoded) channels.
*
* HGSMI_CH_HGSMI channel can be used to map a string channel identifier
* to a free 16 bit numerical value. values are allocated in range
* [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
*/
/* A reserved channel value */
#define HGSMI_CH_RESERVED 0x00
/* HGCMI: setup and configuration */
#define HGSMI_CH_HGSMI 0x01
/* Graphics: VBVA */
#define HGSMI_CH_VBVA 0x02
/* Graphics: Seamless with a single guest region */
#define HGSMI_CH_SEAMLESS 0x03
/* Graphics: Seamless with separate host windows */
#define HGSMI_CH_SEAMLESS2 0x04
/* Graphics: OpenGL HW acceleration */
#define HGSMI_CH_OPENGL 0x05
/* The first channel index to be used for string mappings (inclusive) */
#define HGSMI_CH_STRING_FIRST 0x20
/* The last channel index for string mappings (inclusive) */
#define HGSMI_CH_STRING_LAST 0xff
#endif
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __HGSMI_DEFS_H__
#define __HGSMI_DEFS_H__
/* Buffer sequence type mask. */
#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03
/* Single buffer, not a part of a sequence. */
#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00
/* The first buffer in a sequence. */
#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01
/* A middle buffer in a sequence. */
#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02
/* The last buffer in a sequence. */
#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03
/* 16 bytes buffer header. */
struct hgsmi_buffer_header {
u32 data_size; /* Size of data that follows the header. */
u8 flags; /* HGSMI_BUFFER_HEADER_F_* */
u8 channel; /* The channel the data must be routed to. */
u16 channel_info; /* Opaque to the HGSMI, used by the channel. */
union {
/* Opaque placeholder to make the union 8 bytes. */
u8 header_data[8];
/* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
struct {
u32 reserved1; /* A reserved field, initialize to 0. */
u32 reserved2; /* A reserved field, initialize to 0. */
} buffer;
/* HGSMI_BUFFER_HEADER_F_SEQ_START */
struct {
/* Must be the same for all buffers in the sequence. */
u32 sequence_number;
/* The total size of the sequence. */
u32 sequence_size;
} sequence_start;
/*
* HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and
* HGSMI_BUFFER_HEADER_F_SEQ_END
*/
struct {
/* Must be the same for all buffers in the sequence. */
u32 sequence_number;
/* Data offset in the entire sequence. */
u32 sequence_offset;
} sequence_continue;
} u;
} __packed;
/* 8 bytes buffer tail. */
struct hgsmi_buffer_tail {
/* Reserved, must be initialized to 0. */
u32 reserved;
/*
* One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html
* Over the header, offset and for first 4 bytes of the tail.
*/
u32 checksum;
} __packed;
/*
* The size of the array of channels. Array indexes are u8.
* Note: the value must not be changed.
*/
#define HGSMI_NUMBER_OF_CHANNELS 0x100
#endif
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vbox_drv.h"
#include "vbox_err.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_channels.h"
/**
 * Set a video mode via an HGSMI request. The views must have been
 * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being
 * set on the first display then it must be set first using registers.
 * @param ctx           The context containing the heap to use
 * @param display       The screen number
 * @param origin_x      The horizontal displacement relative to the first scrn
 * @param origin_y      The vertical displacement relative to the first screen
 * @param start_offset  The offset of the visible area of the framebuffer
 *                      relative to the framebuffer start
 * @param pitch         The offset in bytes between the starts of two adjecent
 *                      scan lines in video RAM
 * @param width         The mode width
 * @param height        The mode height
 * @param bpp           The colour depth of the mode
 * @param flags         Flags
 */
void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
				s32 origin_x, s32 origin_y, u32 start_offset,
				u32 pitch, u32 width, u32 height,
				u16 bpp, u16 flags)
{
	struct vbva_infoscreen *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
			       VBVA_INFO_SCREEN);
	/* void return: an allocation failure silently drops the request. */
	if (!p)
		return;

	p->view_index = display;
	p->origin_x = origin_x;
	p->origin_y = origin_y;
	p->start_offset = start_offset;
	p->line_size = pitch;
	p->width = width;
	p->height = height;
	p->bits_per_pixel = bpp;
	p->flags = flags;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);
}
/**
 * Report the rectangle relative to which absolute pointer events should be
 * expressed. This information remains valid until the next VBVA resize event
 * for any screen, at which time it is reset to the bounding rectangle of all
 * virtual screens.
 * @param ctx       The context containing the heap to use.
 * @param origin_x  Upper left X co-ordinate relative to the first screen.
 * @param origin_y  Upper left Y co-ordinate relative to the first screen.
 * @param width     Rectangle width.
 * @param height    Rectangle height.
 * @returns 0 on success, -errno on failure
 */
int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
			       u32 width, u32 height)
{
	struct vbva_report_input_mapping *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
			       VBVA_REPORT_INPUT_MAPPING);
	if (!p)
		return -ENOMEM;

	p->x = origin_x;
	p->y = origin_y;
	p->cx = width;
	p->cy = height;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);

	return 0;
}
/**
 * Get most recent video mode hints.
 * @param ctx      The context containing the heap to use.
 * @param screens  The number of screens to query hints for, starting at 0.
 * @param hints    Array of vbva_modehint structures for receiving the hints.
 * @returns 0 on success, -errno on failure
 */
int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
			 struct vbva_modehint *hints)
{
	struct vbva_query_mode_hints *p;
	size_t size;

	if (WARN_ON(!hints))
		return -EINVAL;

	/* The host writes one vbva_modehint per screen directly after *p. */
	size = screens * sizeof(struct vbva_modehint);
	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
			       VBVA_QUERY_MODE_HINTS);
	if (!p)
		return -ENOMEM;

	p->hints_queried_count = screens;
	p->hint_structure_guest_size = sizeof(struct vbva_modehint);
	/* Pre-set to a failure code; the host overwrites rc on success. */
	p->rc = VERR_NOT_SUPPORTED;

	hgsmi_buffer_submit(ctx, p);

	if (RT_FAILURE(p->rc)) {
		hgsmi_buffer_free(ctx, p);
		return -EIO;
	}

	memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
	hgsmi_buffer_free(ctx, p);

	return 0;
}
/*
* Copyright (C) 2013-2017 Oracle Corporation
* This file is based on ast_drv.c
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* Authors: Dave Airlie <airlied@redhat.com>
* Michael Thayer <michael.thayer@oracle.com>,
* Hans de Goede <hdegoede@redhat.com>
*/
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vt_kern.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "vbox_drv.h"
int vbox_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, vbox_modeset, int, 0400);
static struct drm_driver driver;
static const struct pci_device_id pciidlist[] = {
{ 0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, 0, 0},
};
MODULE_DEVICE_TABLE(pci, pciidlist);
/* PCI probe: hand the device over to the DRM core, which calls .load. */
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}
/* PCI remove: tear down the DRM device registered in vbox_pci_probe(). */
static void vbox_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
/*
 * Common suspend-side work shared by the PM callbacks: stop connector
 * polling, save PCI state and suspend the fbdev emulation.
 */
static int vbox_drm_freeze(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, true);

	return 0;
}
/*
 * Common resume-side work shared by the PM callbacks: re-establish the mode
 * configuration and wake the fbdev emulation.
 */
static int vbox_drm_thaw(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	drm_mode_config_reset(dev);
	drm_helper_resume_force_mode(dev);
	drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, false);

	return 0;
}
/* Re-enable the PCI device, restore the mode and restart connector polling. */
static int vbox_drm_resume(struct drm_device *dev)
{
	int ret;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	ret = vbox_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);

	return 0;
}
/* dev_pm_ops.suspend: freeze the DRM state, then put the device in D3hot. */
static int vbox_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *ddev = pci_get_drvdata(pdev);
	int error;

	error = vbox_drm_freeze(ddev);
	if (error)
		return error;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/* dev_pm_ops.resume (also .restore): full resume including PCI re-enable. */
static int vbox_pm_resume(struct device *dev)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));

	return vbox_drm_resume(ddev);
}
/* dev_pm_ops.freeze (hibernation): freeze DRM state, leave the device on. */
static int vbox_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *ddev = pci_get_drvdata(pdev);

	/* Guard against being called before .load has filled in drvdata. */
	if (!ddev || !ddev->dev_private)
		return -ENODEV;

	return vbox_drm_freeze(ddev);
}
/* dev_pm_ops.thaw (hibernation): restore DRM state, no PCI re-enable. */
static int vbox_pm_thaw(struct device *dev)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));

	return vbox_drm_thaw(ddev);
}
/* dev_pm_ops.poweroff (hibernation): same freeze path as vbox_pm_freeze. */
static int vbox_pm_poweroff(struct device *dev)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));

	return vbox_drm_freeze(ddev);
}
static const struct dev_pm_ops vbox_pm_ops = {
.suspend = vbox_pm_suspend,
.resume = vbox_pm_resume,
.freeze = vbox_pm_freeze,
.thaw = vbox_pm_thaw,
.poweroff = vbox_pm_poweroff,
.restore = vbox_pm_resume,
};
static struct pci_driver vbox_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = vbox_pci_probe,
.remove = vbox_pci_remove,
.driver.pm = &vbox_pm_ops,
};
static const struct file_operations vbox_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = vbox_mmap,
.poll = drm_poll,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
.read = drm_read,
};
/* Called by the DRM core when a process acquires DRM master. */
static int vbox_master_set(struct drm_device *dev,
			   struct drm_file *file_priv, bool from_open)
{
	struct vbox_private *vbox = dev->dev_private;

	/*
	 * We do not yet know whether the new owner can handle hotplug, so we
	 * do not advertise dynamic modes on the first query and send a
	 * tentative hotplug notification after that to see if they query again.
	 */
	vbox->initial_mode_queried = false;

	mutex_lock(&vbox->hw_mutex);
	/*
	 * Disable VBVA when someone releases master in case the next person
	 * tries to do VESA.
	 */
	/** @todo work out if anyone is likely to and whether it will work. */
	/*
	 * Update: we also disable it because if the new master does not do
	 * dirty rectangle reporting (e.g. old versions of Plymouth) then at
	 * least the first screen will still be updated. We enable it as soon
	 * as we receive a dirty rectangle report.
	 */
	vbox_disable_accel(vbox);
	mutex_unlock(&vbox->hw_mutex);

	return 0;
}
/*
 * Called when the current DRM master goes away.  Same policy as
 * vbox_master_set(): reset hotplug probing and disable VBVA under the
 * hardware mutex.
 */
static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vbox_private *vbox = dev->dev_private;

	/* See vbox_master_set() */
	vbox->initial_mode_queried = false;

	mutex_lock(&vbox->hw_mutex);
	vbox_disable_accel(vbox);
	mutex_unlock(&vbox->hw_mutex);
}
/* Main DRM driver description: KMS, GEM, shared IRQ and PRIME support. */
static struct drm_driver driver = {
	.driver_features =
	    DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	    DRIVER_PRIME,
	.dev_priv_size = 0,

	.load = vbox_driver_load,
	.unload = vbox_driver_unload,
	.lastclose = vbox_driver_lastclose,
	.master_set = vbox_master_set,
	.master_drop = vbox_master_drop,
	.set_busid = drm_pci_set_busid,

	.fops = &vbox_fops,
	.irq_handler = vbox_irq_handler,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,

	/* GEM / dumb-buffer helpers backed by TTM. */
	.gem_free_object = vbox_gem_free_object,
	.dumb_create = vbox_dumb_create,
	.dumb_map_offset = vbox_dumb_mmap_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	/* PRIME buffer sharing. */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = vbox_gem_prime_pin,
	.gem_prime_unpin = vbox_gem_prime_unpin,
	.gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
	.gem_prime_vmap = vbox_gem_prime_vmap,
	.gem_prime_vunmap = vbox_gem_prime_vunmap,
	.gem_prime_mmap = vbox_gem_prime_mmap,
};
/*
 * Module entry point.  Refuses to load when KMS was disabled on the
 * kernel command line (nomodeset / vbox_modeset=0), otherwise registers
 * the PCI driver.
 */
static int __init vbox_init(void)
{
#ifdef CONFIG_VGA_CONSOLE
	/* "nomodeset" forces the text console unless explicitly overridden. */
	if (vgacon_text_force() && vbox_modeset == -1)
		return -EINVAL;
#endif

	if (vbox_modeset == 0)
		return -EINVAL;

	return drm_pci_init(&driver, &vbox_pci_driver);
}
/* Module exit point: unregister the PCI driver. */
static void __exit vbox_exit(void)
{
	drm_pci_exit(&driver, &vbox_pci_driver);
}

module_init(vbox_init);
module_exit(vbox_exit);

MODULE_AUTHOR("Oracle Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
/*
* Copyright (C) 2013-2017 Oracle Corporation
* This file is based on ast_drv.h
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* Authors: Dave Airlie <airlied@redhat.com>
 * Michael Thayer <michael.thayer@oracle.com>,
* Hans de Goede <hdegoede@redhat.com>
*/
#ifndef __VBOX_DRV_H__
#define __VBOX_DRV_H__
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/version.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_ch_setup.h"
#define DRIVER_NAME "vboxvideo"
#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
#define DRIVER_DATE "20130823"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#define VBOX_MAX_CURSOR_WIDTH 64
#define VBOX_MAX_CURSOR_HEIGHT 64
#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
#define VBOX_MAX_SCREENS 32
#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
VBVA_ADAPTER_INFORMATION_SIZE)
#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE
#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
sizeof(struct hgsmi_host_flags))
#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
struct vbox_fbdev;
/** Per-device driver state, stored in drm_device.dev_private. */
struct vbox_private {
	struct drm_device *dev;

	/* MMIO mappings into VRAM: HGSMI heap and per-CRTC VBVA buffers. */
	u8 __iomem *guest_heap;
	u8 __iomem *vbva_buffers;
	struct gen_pool *guest_pool;
	struct vbva_buf_ctx *vbva_info;
	bool any_pitch;
	u32 num_crtcs;
	/** Amount of available VRAM, including space used for buffers. */
	u32 full_vram_size;
	/** Amount of available VRAM, not including space used for buffers. */
	u32 available_vram_size;
	/** Array of structures for receiving mode hints. */
	struct vbva_modehint *last_mode_hints;

	struct vbox_fbdev *fbdev;
	int fb_mtrr;

	/* TTM memory-manager state for the VRAM-backed buffer objects. */
	struct {
		struct drm_global_reference mem_global_ref;
		struct ttm_bo_global_ref bo_global_ref;
		struct ttm_bo_device bdev;
	} ttm;

	struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
	/**
	 * We decide whether or not user-space supports display hot-plug
	 * depending on whether they react to a hot-plug event after the initial
	 * mode query.
	 */
	bool initial_mode_queried;
	struct work_struct hotplug_work;
	u32 input_mapping_width;
	u32 input_mapping_height;
	/**
	 * Is user-space using an X.Org-style layout of one large frame-buffer
	 * encompassing all screen ones or is the fbdev console active?
	 */
	bool single_framebuffer;
	u32 cursor_width;
	u32 cursor_height;
	u32 cursor_hot_x;
	u32 cursor_hot_y;
	size_t cursor_data_size;
	u8 cursor_data[CURSOR_DATA_SIZE];
};
#undef CURSOR_PIXEL_COUNT
#undef CURSOR_DATA_SIZE
int vbox_driver_load(struct drm_device *dev, unsigned long flags);
void vbox_driver_unload(struct drm_device *dev);
void vbox_driver_lastclose(struct drm_device *dev);
struct vbox_gem_object;
/* Connector wrapper; caches the most recent mode hint from the host. */
struct vbox_connector {
	struct drm_connector base;
	char name[32];
	struct vbox_crtc *vbox_crtc;
	struct {
		u16 width;
		u16 height;
		bool disconnected;
	} mode_hint;
};
/* CRTC wrapper; tracks per-screen state reported to/from the host. */
struct vbox_crtc {
	struct drm_crtc base;
	bool blanked;
	bool disconnected;
	unsigned int crtc_id;
	u32 fb_offset;
	bool cursor_enabled;
	/* Position hint from the host (see vbox_update_mode_hints()). */
	u16 x_hint;
	u16 y_hint;
};
struct vbox_encoder {
struct drm_encoder base;
};
struct vbox_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
};
struct vbox_fbdev {
struct drm_fb_helper helper;
struct vbox_framebuffer afb;
int size;
struct ttm_bo_kmap_obj mapping;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
};
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
int vbox_mode_init(struct drm_device *dev);
void vbox_mode_fini(struct drm_device *dev);
#define DRM_MODE_FB_CMD drm_mode_fb_cmd2
#define CRTC_FB(crtc) ((crtc)->primary->fb)
void vbox_enable_accel(struct vbox_private *vbox);
void vbox_disable_accel(struct vbox_private *vbox);
void vbox_report_caps(struct vbox_private *vbox);
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
struct drm_clip_rect *rects,
unsigned int num_rects);
int vbox_framebuffer_init(struct drm_device *dev,
struct vbox_framebuffer *vbox_fb,
const struct DRM_MODE_FB_CMD *mode_cmd,
struct drm_gem_object *obj);
int vbox_fbdev_init(struct drm_device *dev);
void vbox_fbdev_fini(struct drm_device *dev);
void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr);
struct vbox_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
struct drm_gem_object gem;
struct ttm_place placements[3];
int pin_count;
};
#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
/* Convert an embedded ttm_buffer_object back to its vbox_bo container. */
static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vbox_bo, bo);
}
#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
int vbox_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
void vbox_gem_free_object(struct drm_gem_object *obj);
int vbox_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
u32 handle, u64 *offset);
#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
int vbox_bo_create(struct drm_device *dev, int size, int align,
u32 flags, struct vbox_bo **pvboxbo);
int vbox_gem_create(struct drm_device *dev,
u32 size, bool iskernel, struct drm_gem_object **obj);
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr);
int vbox_bo_unpin(struct vbox_bo *bo);
/*
 * Reserve a buffer object, optionally without waiting.  Returns 0 on
 * success or the ttm_bo_reserve() error; -ERESTARTSYS and -EBUSY are
 * expected outcomes and therefore not logged.
 */
static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
{
	int rc = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);

	if (!rc)
		return 0;

	if (rc != -ERESTARTSYS && rc != -EBUSY)
		DRM_ERROR("reserve failed %p\n", bo);

	return rc;
}
/* Release a reservation taken with vbox_bo_reserve(). */
static inline void vbox_bo_unreserve(struct vbox_bo *bo)
{
	ttm_bo_unreserve(&bo->bo);
}
void vbox_ttm_placement(struct vbox_bo *bo, int domain);
int vbox_bo_push_sysram(struct vbox_bo *bo);
int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
/* vbox_prime.c */
int vbox_gem_prime_pin(struct drm_gem_object *obj);
void vbox_gem_prime_unpin(struct drm_gem_object *obj);
struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *vbox_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *table);
void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int vbox_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *area);
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
void vbox_irq_fini(struct vbox_private *vbox);
void vbox_report_hotplug(struct vbox_private *vbox);
irqreturn_t vbox_irq_handler(int irq, void *arg);
/* vbox_hgsmi.c */
void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
u8 channel, u16 channel_info);
void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
/* Write a VBE register: select it via the index port, then write data. */
static inline void vbox_write_ioport(u16 index, u16 data)
{
	outw(index, VBE_DISPI_IOPORT_INDEX);
	outw(data, VBE_DISPI_IOPORT_DATA);
}
#endif
/*
* Copyright (C) 2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __VBOX_ERR_H__
#define __VBOX_ERR_H__
/**
* @name VirtualBox virtual-hardware error macros
* @{
*/
#define VINF_SUCCESS 0
#define VERR_INVALID_PARAMETER (-2)
#define VERR_INVALID_POINTER (-6)
#define VERR_NO_MEMORY (-8)
#define VERR_NOT_IMPLEMENTED (-12)
#define VERR_INVALID_FUNCTION (-36)
#define VERR_NOT_SUPPORTED (-37)
#define VERR_TOO_MUCH_DATA (-42)
#define VERR_INVALID_STATE (-79)
#define VERR_OUT_OF_RESOURCES (-80)
#define VERR_ALREADY_EXISTS (-105)
#define VERR_INTERNAL_ERROR (-225)
#define RT_SUCCESS_NP(rc) ((int)(rc) >= VINF_SUCCESS)
#define RT_SUCCESS(rc) (likely(RT_SUCCESS_NP(rc)))
#define RT_FAILURE(rc) (unlikely(!RT_SUCCESS_NP(rc)))
/** @} */
#endif
/*
* Copyright (C) 2013-2017 Oracle Corporation
* This file is based on ast_fb.c
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* Authors: Dave Airlie <airlied@redhat.com>
 * Michael Thayer <michael.thayer@oracle.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include "vbox_drv.h"
#include "vboxvideo.h"
#define VBOX_DIRTY_DELAY (HZ / 30)
/**
* Tell the host about dirty rectangles to update.
*/
/**
 * Tell the host about dirty rectangles to update.
 *
 * Merges the new rectangle with any damage already stored in the fbdev
 * dirty rect (under dirty_lock), then reports the union to the host via
 * vbox_framebuffer_dirty_rectangles().  If the backing BO cannot be
 * reserved right now (it is busy, or we may not sleep), the damage is
 * only accumulated and reported by a later call.
 */
static void vbox_dirty_update(struct vbox_fbdev *fbdev,
			      int x, int y, int width, int height)
{
	struct drm_gem_object *obj;
	struct vbox_bo *bo;
	int ret = -EBUSY;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;
	struct drm_clip_rect rect;

	obj = fbdev->afb.obj;
	bo = gem_to_vbox_bo(obj);

	/*
	 * try and reserve the BO, if we fail with busy
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = vbox_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
			return;

		store_for_later = true;
	}

	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&fbdev->dirty_lock, flags);

	/* Grow the rectangle to cover any previously stored damage. */
	if (fbdev->y1 < y)
		y = fbdev->y1;
	if (fbdev->y2 > y2)
		y2 = fbdev->y2;
	if (fbdev->x1 < x)
		x = fbdev->x1;
	if (fbdev->x2 > x2)
		x2 = fbdev->x2;

	if (store_for_later) {
		fbdev->x1 = x;
		fbdev->x2 = x2;
		fbdev->y1 = y;
		fbdev->y2 = y2;
		spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
		return;
	}

	/* Reset the stored rect to "empty": x1/y1 at max, x2/y2 at min. */
	fbdev->x1 = INT_MAX;
	fbdev->y1 = INT_MAX;
	fbdev->x2 = 0;
	fbdev->y2 = 0;
	spin_unlock_irqrestore(&fbdev->dirty_lock, flags);

	/*
	 * Not sure why the original code subtracted 1 here, but I will keep
	 * it that way to avoid unnecessary differences.
	 */
	rect.x1 = x;
	rect.x2 = x2 + 1;
	rect.y1 = y;
	rect.y2 = y2 + 1;
	vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1);
	vbox_bo_unreserve(bo);
}
#ifdef CONFIG_FB_DEFERRED_IO
/* fb deferred-io callback: convert touched pages into a dirty-line span. */
static void vbox_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct vbox_fbdev *fbdev = info->par;
	unsigned long start, end, min, max;
	struct page *page;
	int y1, y2;

	/* Find the byte range covered by all touched pages. */
	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;
		/*
		 * NOTE(review): y2 is exclusive, so the height passed below
		 * (y2 - y1 - 1) looks like it may drop the last touched
		 * line -- confirm against the host update semantics.
		 */
		DRM_INFO("%s: Calling dirty update: 0, %d, %d, %d\n",
			 __func__, y1, info->var.xres, y2 - y1 - 1);
		vbox_dirty_update(fbdev, 0, y1, info->var.xres, y2 - y1 - 1);
	}
}
static struct fb_deferred_io vbox_defio = {
.delay = VBOX_DIRTY_DELAY,
.deferred_io = vbox_deferred_io,
};
#endif
/* fb_ops .fb_fillrect: draw via the sysfb helper, then report damage. */
static void vbox_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	sys_fillrect(info, rect);

	vbox_dirty_update(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}
/* fb_ops .fb_copyarea: draw via the sysfb helper, then report damage. */
static void vbox_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	sys_copyarea(info, area);

	vbox_dirty_update(info->par, area->dx, area->dy,
			  area->width, area->height);
}
/* fb_ops .fb_imageblit: draw via the sysfb helper, then report damage. */
static void vbox_imageblit(struct fb_info *info, const struct fb_image *image)
{
	sys_imageblit(info, image);

	vbox_dirty_update(info->par, image->dx, image->dy,
			  image->width, image->height);
}
/* fbdev ops: drawing goes through the dirty-tracking wrappers above. */
static struct fb_ops vboxfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = vbox_fillrect,
	.fb_copyarea = vbox_copyarea,
	.fb_imageblit = vbox_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};
/*
 * Allocate the GEM object backing the fbdev console framebuffer.
 * The size is pitch * height taken from the mode command.  On success
 * the new object is stored in *gobj_p; on failure *gobj_p is untouched.
 */
static int vboxfb_create_object(struct vbox_fbdev *fbdev,
				struct DRM_MODE_FB_CMD *mode_cmd,
				struct drm_gem_object **gobj_p)
{
	struct drm_device *dev = fbdev->helper.dev;
	u32 size = mode_cmd->pitches[0] * mode_cmd->height;
	struct drm_gem_object *gobj;
	int err;

	err = vbox_gem_create(dev, size, true, &gobj);
	if (err)
		return err;

	*gobj_p = gobj;

	return 0;
}
/*
 * drm_fb_helper .fb_probe callback: create the fbdev emulation console.
 * Allocates the backing GEM/TTM object, pins and kmaps it into VRAM, and
 * fills in the fb_info.
 *
 * NOTE(review): error paths after vboxfb_create_object() succeeds do not
 * release the GEM object / pinned BO / cmap here -- confirm that the
 * teardown in vbox_fbdev_fini() always runs in those cases.
 */
static int vboxfb_create(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct vbox_fbdev *fbdev =
	    container_of(helper, struct vbox_fbdev, helper);
	struct drm_device *dev = fbdev->helper.dev;
	struct DRM_MODE_FB_CMD mode_cmd;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	struct device *device = &dev->pdev->dev;
	struct drm_gem_object *gobj;
	struct vbox_bo *bo;
	int size, ret;
	u32 pitch;

	/* Build a mode command from the surface size requested by the core. */
	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	mode_cmd.pitches[0] = pitch;

	size = pitch * mode_cmd.height;

	ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
	if (ret)
		return ret;

	bo = gem_to_vbox_bo(gobj);

	ret = vbox_bo_reserve(bo, false);
	if (ret)
		return ret;

	/* Pin into VRAM and map so the console can scribble on it. */
	ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
	if (ret) {
		vbox_bo_unreserve(bo);
		return ret;
	}

	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
	vbox_bo_unreserve(bo);
	if (ret) {
		DRM_ERROR("failed to kmap fbcon\n");
		return ret;
	}

	info = framebuffer_alloc(0, device);
	if (!info)
		return -ENOMEM;
	info->par = fbdev;

	fbdev->size = size;

	fb = &fbdev->afb.base;
	fbdev->helper.fb = fb;
	fbdev->helper.fbdev = info;

	strcpy(info->fix.id, "vboxdrmfb");

	/*
	 * The last flag forces a mode set on VT switches even if the kernel
	 * does not think it is needed.
	 */
	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT |
		      FBINFO_MISC_ALWAYS_SETPAR;
	info->fbops = &vboxfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret)
		return -ENOMEM;

	/*
	 * This seems to be done for safety checking that the framebuffer
	 * is not registered twice by different drivers.
	 */
	info->apertures = alloc_apertures(1);
	if (!info->apertures)
		return -ENOMEM;
	info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
	info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	info->screen_base = bo->kmap.virtual;
	info->screen_size = size;

#ifdef CONFIG_FB_DEFERRED_IO
	info->fbdefio = &vbox_defio;
	fb_deferred_io_init(info);
#endif

	info->pixmap.flags = FB_PIXMAP_SYSTEM;

	DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);

	return 0;
}
/* Gamma is not supported by the virtual hardware; deliberately a no-op. */
static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			      u16 blue, int regno)
{
}
/* Report an identity gamma ramp (each entry equals its index). */
static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, int regno)
{
	*red = *green = *blue = regno;
}
/* drm_fb_helper callbacks for the fbdev emulation. */
static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
	.gamma_set = vbox_fb_gamma_set,
	.gamma_get = vbox_fb_gamma_get,
	.fb_probe = vboxfb_create,
};
/*
 * Tear down the fbdev emulation: unregister the fb_info, unmap/unpin and
 * release the backing buffer object, then clean up the framebuffer.
 */
void vbox_fbdev_fini(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;
	struct vbox_fbdev *fbdev = vbox->fbdev;
	struct vbox_framebuffer *afb = &fbdev->afb;

	drm_fb_helper_unregister_fbi(&fbdev->helper);

	if (afb->obj) {
		struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);

		if (!vbox_bo_reserve(bo, false)) {
			if (bo->kmap.virtual)
				ttm_bo_kunmap(&bo->kmap);
			/*
			 * QXL does this, but is it really needed before
			 * freeing?
			 */
			if (bo->pin_count)
				vbox_bo_unpin(bo);
			vbox_bo_unreserve(bo);
		}
		drm_gem_object_unreference_unlocked(afb->obj);
		afb->obj = NULL;
	}
	drm_fb_helper_fini(&fbdev->helper);

	drm_framebuffer_unregister_private(&afb->base);
	drm_framebuffer_cleanup(&afb->base);
}
/*
 * Set up the fbdev emulation console.  The vbox_fbdev is device-managed,
 * so it is freed automatically with the device.  Returns 0 or a negative
 * errno.
 */
int vbox_fbdev_init(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;
	struct vbox_fbdev *fbdev;
	int ret;

	fbdev = devm_kzalloc(dev->dev, sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		return -ENOMEM;

	vbox->fbdev = fbdev;
	spin_lock_init(&fbdev->dirty_lock);

	drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
	ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	if (ret)
		goto err_fini;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	/* 32 is the preferred bpp for the initial fbdev mode. */
	ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
	if (ret)
		goto err_fini;

	return 0;

err_fini:
	drm_fb_helper_fini(&fbdev->helper);
	return ret;
}
/* Point the fbdev at its scanout offset within the PCI VRAM aperture. */
void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr)
{
	struct fb_info *fbdev = vbox->fbdev->helper.fbdev;

	fbdev->fix.smem_start = fbdev->apertures->ranges[0].base + gpu_addr;
	fbdev->fix.smem_len = vbox->available_vram_size - gpu_addr;
}
/*
* Copyright (C) 2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* Authors: Hans de Goede <hdegoede@redhat.com>
*/
#include "vbox_drv.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_defs.h"
/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */
static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		hash += data[i];
		hash += hash << 10;
		hash ^= hash >> 6;
	}

	return hash;
}
/* Finalization step of the one-at-a-time hash. */
static u32 hgsmi_hash_end(u32 hash)
{
	u32 h = hash;

	h += h << 3;
	h ^= h >> 11;
	h += h << 15;

	return h;
}
/* Not really a checksum but that is the naming used in all vbox code */
/*
 * Hash the buffer's pool offset, its whole header and the first four
 * bytes of the tail (tail->reserved).  The checksum field itself is
 * excluded so the value can be stored into it afterwards.
 */
static u32 hgsmi_checksum(u32 offset,
			  const struct hgsmi_buffer_header *header,
			  const struct hgsmi_buffer_tail *tail)
{
	u32 checksum;

	checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
	checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
	/* 4 -> Do not checksum the checksum itself */
	checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);

	return hgsmi_hash_end(checksum);
}
/*
 * Allocate a HGSMI buffer from the guest pool and fill in its header and
 * tail.  Returns a pointer to the payload area (just past the header),
 * or NULL when the pool is exhausted.  Release with hgsmi_buffer_free().
 */
void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
			 u8 channel, u16 channel_info)
{
	struct hgsmi_buffer_header *h;
	struct hgsmi_buffer_tail *t;
	size_t total_size;
	dma_addr_t offset;

	total_size = size + sizeof(*h) + sizeof(*t);
	h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
	if (!h)
		return NULL;

	/* The tail sits directly behind the payload. */
	t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);

	h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
	h->data_size = size;
	h->channel = channel;
	h->channel_info = channel_info;
	memset(&h->u.header_data, 0, sizeof(h->u.header_data));

	t->reserved = 0;
	t->checksum = hgsmi_checksum(offset, h, t);

	return (u8 *)h + sizeof(*h);
}
/*
 * Return a buffer obtained from hgsmi_buffer_alloc() to the pool.
 * buf points at the payload; step back over the header to recover the
 * original allocation and its total size.
 */
void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
{
	struct hgsmi_buffer_header *h;
	size_t total_size;

	h = (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h));
	total_size = sizeof(*h) + h->data_size +
		     sizeof(struct hgsmi_buffer_tail);

	gen_pool_free(guest_pool, (unsigned long)h, total_size);
}
/*
 * Submit a filled HGSMI buffer to the host by writing its physical
 * address (of the header, not the payload) to the HGSMI guest port.
 * Always returns 0.
 */
int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
{
	phys_addr_t offset;

	offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
				       sizeof(struct hgsmi_buffer_header));
	outl(offset, VGA_PORT_HGSMI_GUEST);
	/* Make the compiler aware that the host has changed memory. */
	mb();

	return 0;
}
/*
* Copyright (C) 2016-2017 Oracle Corporation
* This file is based on qxl_irq.c
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
 * Michael Thayer <michael.thayer@oracle.com>,
* Hans de Goede <hdegoede@redhat.com>
*/
#include <drm/drm_crtc_helper.h>
#include "vbox_drv.h"
#include "vboxvideo.h"
/* Acknowledge the interrupt by writing all-ones to the HGSMI host port. */
static void vbox_clear_irq(void)
{
	outl((u32)~0, VGA_PORT_HGSMI_HOST);
}
/* Read the host-flags word from the guest heap mapping. */
static u32 vbox_get_flags(struct vbox_private *vbox)
{
	return readl(vbox->guest_heap + HOST_FLAGS_OFFSET);
}
/* Defer hotplug processing to the workqueue; safe to call from IRQ. */
void vbox_report_hotplug(struct vbox_private *vbox)
{
	schedule_work(&vbox->hotplug_work);
}
/*
 * Shared PCI interrupt handler.  Returns IRQ_NONE when the HGSMI IRQ
 * flag is not set (the interrupt belongs to another device), otherwise
 * schedules hotplug work if appropriate and acknowledges the interrupt.
 */
irqreturn_t vbox_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
	u32 host_flags = vbox_get_flags(vbox);

	if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
		return IRQ_NONE;

	/*
	 * Due to a bug in the initial host implementation of hot-plug irqs,
	 * the hot-plug and cursor capability flags were never cleared.
	 * Fortunately we can tell when they would have been set by checking
	 * that the VSYNC flag is not set.
	 */
	if (host_flags &
	    (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) &&
	    !(host_flags & HGSMIHOSTFLAGS_VSYNC))
		vbox_report_hotplug(vbox);

	vbox_clear_irq();

	return IRQ_HANDLED;
}
/**
* Check that the position hints provided by the host are suitable for GNOME
* shell (i.e. all screens disjoint and hints for all enabled screens) and if
* not replace them with default ones. Providing valid hints improves the
* chances that we will get a known screen layout for pointer mapping.
*/
/**
 * Check that the position hints provided by the host are suitable for GNOME
 * shell (i.e. all screens disjoint and hints for all enabled screens) and if
 * not replace them with default ones.  Providing valid hints improves the
 * chances that we will get a known screen layout for pointer mapping.
 */
static void validate_or_set_position_hints(struct vbox_private *vbox)
{
	struct vbva_modehint *hintsi, *hintsj;
	bool valid = true;
	u16 currentx = 0;
	int i, j;

	/*
	 * Pairwise check: hints are invalid if any coordinate is the 0xffff
	 * "unset" sentinel or if two enabled screens overlap.
	 * NOTE(review): the width/height mask 0x8fff is used consistently in
	 * this driver but looks odd (0x7fff would be the usual flag mask) --
	 * confirm against the VBVA mode-hint definition.
	 */
	for (i = 0; i < vbox->num_crtcs; ++i) {
		for (j = 0; j < i; ++j) {
			hintsi = &vbox->last_mode_hints[i];
			hintsj = &vbox->last_mode_hints[j];

			if (hintsi->enabled && hintsj->enabled) {
				if (hintsi->dx >= 0xffff ||
				    hintsi->dy >= 0xffff ||
				    hintsj->dx >= 0xffff ||
				    hintsj->dy >= 0xffff ||
				    (hintsi->dx <
				     hintsj->dx + (hintsj->cx & 0x8fff) &&
				     hintsi->dx + (hintsi->cx & 0x8fff) >
				     hintsj->dx) ||
				    (hintsi->dy <
				     hintsj->dy + (hintsj->cy & 0x8fff) &&
				     hintsi->dy + (hintsi->cy & 0x8fff) >
				     hintsj->dy))
					valid = false;
			}
		}
	}
	/* Fall back to a simple left-to-right strip of enabled screens. */
	if (!valid)
		for (i = 0; i < vbox->num_crtcs; ++i) {
			if (vbox->last_mode_hints[i].enabled) {
				vbox->last_mode_hints[i].dx = currentx;
				vbox->last_mode_hints[i].dy = 0;
				currentx +=
				    vbox->last_mode_hints[i].cx & 0x8fff;
			}
		}
}
/**
* Query the host for the most recent video mode hints.
*/
/**
 * Query the host for the most recent video mode hints and apply them to
 * all connectors, informing the host when a screen's connection state
 * changes.  Takes the modeset locks while updating connector state.
 */
static void vbox_update_mode_hints(struct vbox_private *vbox)
{
	struct drm_device *dev = vbox->dev;
	struct drm_connector *connector;
	struct vbox_connector *vbox_conn;
	struct vbva_modehint *hints;
	u16 flags;
	bool disconnected;
	unsigned int crtc_id;
	int ret;

	ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs,
				   vbox->last_mode_hints);
	if (ret) {
		DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret);
		return;
	}

	validate_or_set_position_hints(vbox);
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		vbox_conn = to_vbox_connector(connector);

		hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
		/* Skip slots the host did not fill in. */
		if (hints->magic != VBVAMODEHINT_MAGIC)
			continue;

		disconnected = !(hints->enabled);
		crtc_id = vbox_conn->vbox_crtc->crtc_id;
		vbox_conn->mode_hint.width = hints->cx & 0x8fff;
		vbox_conn->mode_hint.height = hints->cy & 0x8fff;
		vbox_conn->vbox_crtc->x_hint = hints->dx;
		vbox_conn->vbox_crtc->y_hint = hints->dy;
		vbox_conn->mode_hint.disconnected = disconnected;

		/* Only tell the host when the state actually changed. */
		if (vbox_conn->vbox_crtc->disconnected == disconnected)
			continue;

		if (disconnected)
			flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
		else
			flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK;

		hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0,
					   hints->cx * 4, hints->cx,
					   hints->cy, 0, flags);

		vbox_conn->vbox_crtc->disconnected = disconnected;
	}
	drm_modeset_unlock_all(dev);
}
/* Workqueue handler: refresh mode hints and notify userspace of hotplug. */
static void vbox_hotplug_worker(struct work_struct *work)
{
	struct vbox_private *vbox = container_of(work, struct vbox_private,
						 hotplug_work);

	vbox_update_mode_hints(vbox);
	drm_kms_helper_hotplug_event(vbox->dev);
}
/* Set up hotplug handling and install the PCI interrupt handler. */
int vbox_irq_init(struct vbox_private *vbox)
{
	INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
	vbox_update_mode_hints(vbox);

	return drm_irq_install(vbox->dev, vbox->dev->pdev->irq);
}
/* Remove the interrupt handler and wait for pending hotplug work. */
void vbox_irq_fini(struct vbox_private *vbox)
{
	drm_irq_uninstall(vbox->dev);
	flush_work(&vbox->hotplug_work);
}
/*
* Copyright (C) 2013-2017 Oracle Corporation
* This file is based on ast_main.c
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* Authors: Dave Airlie <airlied@redhat.com>,
 * Michael Thayer <michael.thayer@oracle.com>,
* Hans de Goede <hdegoede@redhat.com>
*/
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include "vbox_drv.h"
#include "vbox_err.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
/* Drop the GEM backing object, unhook the fb from DRM and free it. */
static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct vbox_framebuffer *fb_priv = to_vbox_framebuffer(fb);
	struct drm_gem_object *gem = fb_priv->obj;

	if (gem)
		drm_gem_object_unreference_unlocked(gem);

	drm_framebuffer_cleanup(fb);
	kfree(fb);
}
/*
 * Enable VBVA command submission on every CRTC that does not already have
 * it active.  Logs an error and stops at the first failure.
 */
void vbox_enable_accel(struct vbox_private *vbox)
{
	struct vbva_buffer *buf;
	unsigned int id;

	if (!vbox->vbva_info || !vbox->vbva_buffers) {
		/* Should never happen... */
		DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
		return;
	}

	for (id = 0; id < vbox->num_crtcs; ++id) {
		if (vbox->vbva_info[id].vbva)
			continue;	/* already enabled */

		buf = (void *)vbox->vbva_buffers + id * VBVA_MIN_BUFFER_SIZE;
		if (!vbva_enable(&vbox->vbva_info[id],
				 vbox->guest_pool, buf, id)) {
			/* very old host or driver error. */
			DRM_ERROR("vboxvideo: vbva_enable failed\n");
			return;
		}
	}
}
/* Turn off VBVA command submission for every CRTC. */
void vbox_disable_accel(struct vbox_private *vbox)
{
	unsigned int id;

	for (id = 0; id < vbox->num_crtcs; ++id)
		vbva_disable(&vbox->vbva_info[id], vbox->guest_pool, id);
}
/* Tell the host which capabilities this guest driver supports. */
void vbox_report_caps(struct vbox_private *vbox)
{
	u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
		   VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;

	/* Mode hints are only advertised after the initial mode query. */
	if (vbox->initial_mode_queried)
		caps |= VBVACAPS_VIDEO_MODE_HINTS;

	hgsmi_send_caps_info(vbox->guest_pool, caps);
}
/**
 * Send information about dirty rectangles to VBVA.  If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	struct vbox_private *vbox = fb->dev->dev_private;
	struct drm_crtc *crtc;
	unsigned int r;

	mutex_lock(&vbox->hw_mutex);

	list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
		unsigned int crtc_id;

		/* Only report rectangles for crtcs scanning out this fb. */
		if (CRTC_FB(crtc) != fb)
			continue;

		vbox_enable_accel(vbox);
		crtc_id = to_vbox_crtc(crtc)->crtc_id;

		for (r = 0; r < num_rects; ++r) {
			struct vbva_cmd_hdr cmd_hdr;

			/* Skip rectangles entirely outside this crtc. */
			if ((rects[r].x1 > crtc->x + crtc->hwmode.hdisplay) ||
			    (rects[r].y1 > crtc->y + crtc->hwmode.vdisplay) ||
			    (rects[r].x2 < crtc->x) ||
			    (rects[r].y2 < crtc->y))
				continue;

			cmd_hdr.x = (s16)rects[r].x1;
			cmd_hdr.y = (s16)rects[r].y1;
			cmd_hdr.w = (u16)rects[r].x2 - rects[r].x1;
			cmd_hdr.h = (u16)rects[r].y2 - rects[r].y1;

			if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
						      vbox->guest_pool))
				continue;

			vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
				   &cmd_hdr, sizeof(cmd_hdr));
			vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
		}
	}

	mutex_unlock(&vbox->hw_mutex);
}
/*
 * DRM ->dirty hook: forward user-space dirty rectangles to the host.
 * The flags and color arguments are ignored by this driver.
 */
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned int flags, unsigned int color,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

	return 0;
}
/* Framebuffer hooks: teardown and host dirty-rectangle reporting. */
static const struct drm_framebuffer_funcs vbox_fb_funcs = {
	.destroy = vbox_user_framebuffer_destroy,
	.dirty = vbox_user_framebuffer_dirty,
};
/*
 * Fill in a vbox_framebuffer from the mode command and register it with
 * DRM.  Takes over @obj on success.  Returns 0 or a negative error code.
 */
int vbox_framebuffer_init(struct drm_device *dev,
			  struct vbox_framebuffer *vbox_fb,
			  const struct DRM_MODE_FB_CMD *mode_cmd,
			  struct drm_gem_object *obj)
{
	int err;

	drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
	vbox_fb->obj = obj;

	err = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
	if (err)
		DRM_ERROR("framebuffer init failed %d\n", err);

	return err;
}
/*
 * DRM ->fb_create hook: look up the GEM object named by user space and
 * wrap it in a new vbox_framebuffer.  On any failure the GEM reference
 * taken by the lookup is dropped again.
 */
static struct drm_framebuffer *vbox_user_framebuffer_create(
		struct drm_device *dev,
		struct drm_file *filp,
		const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vbox_framebuffer *fb;
	struct drm_gem_object *gem;
	int err;

	gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (!gem)
		return ERR_PTR(-ENOENT);

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (fb) {
		err = vbox_framebuffer_init(dev, fb, mode_cmd, gem);
		if (!err)
			return &fb->base;
		kfree(fb);
	} else {
		err = -ENOMEM;
	}

	drm_gem_object_unreference_unlocked(gem);
	return ERR_PTR(err);
}
/* Mode-config hooks; only framebuffer creation is implemented. */
static const struct drm_mode_config_funcs vbox_mode_funcs = {
	.fb_create = vbox_user_framebuffer_create,
};
/*
 * Allocate per-crtc VBVA state and carve one host command buffer per
 * screen out of the end of usable VRAM.
 */
static int vbox_accel_init(struct vbox_private *vbox)
{
	unsigned int id;

	vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
				       sizeof(*vbox->vbva_info), GFP_KERNEL);
	if (!vbox->vbva_info)
		return -ENOMEM;

	/* Take a command buffer for each screen from the end of usable VRAM. */
	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

	vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
					     vbox->available_vram_size,
					     vbox->num_crtcs *
					     VBVA_MIN_BUFFER_SIZE);
	if (!vbox->vbva_buffers)
		return -ENOMEM;

	for (id = 0; id < vbox->num_crtcs; ++id)
		vbva_setup_buffer_context(&vbox->vbva_info[id],
					  vbox->available_vram_size +
					  id * VBVA_MIN_BUFFER_SIZE,
					  VBVA_MIN_BUFFER_SIZE);

	return 0;
}
/* Disable VBVA everywhere and unmap the command buffers. */
static void vbox_accel_fini(struct vbox_private *vbox)
{
	vbox_disable_accel(vbox);
	pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
}
/** Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
	u32 have_hints = 0, have_cursor = 0;

	/* Either query failing means the interface is unavailable. */
	if (hgsmi_query_conf(vbox->guest_pool,
			     VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
			     &have_hints) ||
	    hgsmi_query_conf(vbox->guest_pool,
			     VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
			     &have_cursor))
		return false;

	return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}
/* Probe the VBE DISPI interface: write an ID and see whether it reads back. */
static bool vbox_check_supported(u16 id)
{
	vbox_write_ioport(VBE_DISPI_INDEX_ID, id);

	return inw(VBE_DISPI_IOPORT_DATA) == id;
}
/**
 * Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
	int ret = -ENOMEM;

	vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
	vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);
	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

	/* Map guest-heap at end of vram */
	vbox->guest_heap =
		pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_SIZE);
	if (!vbox->guest_heap)
		return -ENOMEM;

	/* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
	vbox->guest_pool = gen_pool_create(4, -1);
	if (!vbox->guest_pool)
		goto err_unmap_guest_heap;

	ret = gen_pool_add_virt(vbox->guest_pool,
				(unsigned long)vbox->guest_heap,
				GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_USABLE_SIZE, -1);
	if (ret)
		goto err_destroy_guest_pool;

	/* Sanity-check that HGSMI communication with the host works at all. */
	ret = hgsmi_test_query_conf(vbox->guest_pool);
	if (ret) {
		DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
		goto err_destroy_guest_pool;
	}

	/* Reduce available VRAM size to reflect the guest heap. */
	vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
	/* Linux drm represents monitors as a 32-bit array. */
	hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
			 &vbox->num_crtcs);
	/* Clamp handles both a failed query and out-of-range host values. */
	vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);

	/* This driver requires the 4.3+ mode-hint reporting interface. */
	if (!have_hgsmi_mode_hints(vbox)) {
		ret = -ENOTSUPP;
		goto err_destroy_guest_pool;
	}

	vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
					     sizeof(struct vbva_modehint),
					     GFP_KERNEL);
	if (!vbox->last_mode_hints) {
		ret = -ENOMEM;
		goto err_destroy_guest_pool;
	}

	ret = vbox_accel_init(vbox);
	if (ret)
		goto err_destroy_guest_pool;

	return 0;

err_destroy_guest_pool:
	gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
	return ret;
}
/* Undo vbox_hw_init() in reverse order. */
static void vbox_hw_fini(struct vbox_private *vbox)
{
	vbox_accel_fini(vbox);
	gen_pool_destroy(vbox->guest_pool);
	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
}
/*
 * Main driver-load entry point: probe for HGSMI support, then bring up the
 * hardware heaps, memory manager, mode setting, IRQ handling and fbdev.
 * On failure everything set up so far is unwound in reverse order.
 */
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct vbox_private *vbox;
	int ret = 0;

	if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
		return -ENODEV;

	/* devm allocation: freed automatically when the device goes away. */
	vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
	if (!vbox)
		return -ENOMEM;

	dev->dev_private = vbox;
	vbox->dev = dev;

	mutex_init(&vbox->hw_mutex);

	ret = vbox_hw_init(vbox);
	if (ret)
		return ret;

	ret = vbox_mm_init(vbox);
	if (ret)
		goto err_hw_fini;

	drm_mode_config_init(dev);

	dev->mode_config.funcs = (void *)&vbox_mode_funcs;
	dev->mode_config.min_width = 64;
	dev->mode_config.min_height = 64;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
	dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

	ret = vbox_mode_init(dev);
	if (ret)
		goto err_drm_mode_cleanup;

	ret = vbox_irq_init(vbox);
	if (ret)
		goto err_mode_fini;

	ret = vbox_fbdev_init(dev);
	if (ret)
		goto err_irq_fini;

	return 0;

err_irq_fini:
	vbox_irq_fini(vbox);
err_mode_fini:
	vbox_mode_fini(dev);
err_drm_mode_cleanup:
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
err_hw_fini:
	vbox_hw_fini(vbox);
	return ret;
}
/* Tear everything down in the reverse order of vbox_driver_load(). */
void vbox_driver_unload(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	vbox_fbdev_fini(dev);
	vbox_irq_fini(vbox);
	vbox_mode_fini(dev);
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
	vbox_hw_fini(vbox);
}
/**
 * @note this is described in the DRM framework documentation.  AST does not
 * have it, but we get an oops on driver unload if it is not present.
 */
void vbox_driver_lastclose(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	/* Restore the fbdev console mode once the last DRM client closes. */
	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
}
/*
 * Create a TTM-backed GEM object of at least @size bytes (rounded up to
 * whole pages).  On success *obj points to the new object; on failure it
 * is left NULL.
 */
int vbox_gem_create(struct drm_device *dev,
		    u32 size, bool iskernel, struct drm_gem_object **obj)
{
	struct vbox_bo *bo;
	int err;

	*obj = NULL;

	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	err = vbox_bo_create(dev, size, 0, 0, &bo);
	if (err) {
		/* -ERESTARTSYS is a normal interrupted wait, not an error. */
		if (err != -ERESTARTSYS)
			DRM_ERROR("failed to allocate GEM object\n");
		return err;
	}

	*obj = &bo->gem;

	return 0;
}
/* DRM dumb-buffer creation: size the buffer, create it, hand out a handle. */
int vbox_dumb_create(struct drm_file *file,
		     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem;
	u32 handle;
	int err;

	/* Bytes per line, rounding bpp up to a whole byte. */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	err = vbox_gem_create(dev, args->size, false, &gem);
	if (err)
		return err;

	err = drm_gem_handle_create(file, gem, &handle);
	/* Drop our reference; the handle now keeps the object alive. */
	drm_gem_object_unreference_unlocked(gem);
	if (err)
		return err;

	args->handle = handle;

	return 0;
}
/*
 * Drop a reference on a vbox_bo via its embedded TTM object and clear the
 * caller's pointer once the object is gone.
 */
static void vbox_bo_unref(struct vbox_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	/* ttm_bo_unref() NULLs its argument; mirror that in the caller. */
	if (!tbo)
		*bo = NULL;
}
/* GEM free callback: release the underlying TTM buffer object. */
void vbox_gem_free_object(struct drm_gem_object *obj)
{
	struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

	vbox_bo_unref(&vbox_bo);
}
/* Fake mmap offset user space passes to mmap() to map this buffer. */
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
/* Look up a dumb-buffer handle and report its fake mmap offset. */
int
vbox_dumb_mmap_offset(struct drm_file *file,
		      struct drm_device *dev,
		      u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	int err = 0;

	mutex_lock(&dev->struct_mutex);
	gem = drm_gem_object_lookup(file, handle);
	if (gem) {
		*offset = vbox_bo_mmap_offset(gem_to_vbox_bo(gem));
		drm_gem_object_unreference(gem);
	} else {
		err = -ENOENT;
	}
	mutex_unlock(&dev->struct_mutex);

	return err;
}
/*
* Copyright (C) 2013-2017 Oracle Corporation
* This file is based on ast_mode.c
* Copyright 2012 Red Hat Inc.
* Parts based on xf86-video-ast
* Copyright (c) 2005 ASPEED Technology Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
* Michael Thayer <michael.thayer@oracle.com,
* Hans de Goede <hdegoede@redhat.com>
*/
#include <linux/export.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include "vbox_drv.h"
#include "vboxvideo.h"
#include "hgsmi_channels.h"
static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
u32 handle, u32 width, u32 height,
s32 hot_x, s32 hot_y);
static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y);
/**
 * Set a graphics mode.  Poke any required values into registers, do an HGSMI
 * mode set and tell the host we support advanced graphics functions.
 */
static void vbox_do_modeset(struct drm_crtc *crtc,
			    const struct drm_display_mode *mode)
{
	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
	struct vbox_private *vbox;
	int width, height, bpp, pitch;
	unsigned int crtc_id;
	u16 flags;
	s32 x_offset, y_offset;

	vbox = crtc->dev->dev_private;
	/* Fall back to 640x480 when the mode reports no geometry. */
	width = mode->hdisplay ? mode->hdisplay : 640;
	height = mode->vdisplay ? mode->vdisplay : 480;
	crtc_id = vbox_crtc->crtc_id;
	/* With the crtc disabled, assume 32bpp and derive the pitch. */
	bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32;
	pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
	x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint;
	y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint;

	/*
	 * This is the old way of setting graphics modes.  It assumed one screen
	 * and a frame-buffer at the start of video RAM.  On older versions of
	 * VirtualBox, certain parts of the code still assume that the first
	 * screen is programmed this way, so try to fake it.
	 */
	if (vbox_crtc->crtc_id == 0 && crtc->enabled &&
	    vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
	    vbox_crtc->fb_offset % (bpp / 8) == 0) {
		vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
		vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
		vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
		vbox_write_ioport(VBE_DISPI_INDEX_BPP,
				  CRTC_FB(crtc)->format->cpp[0] * 8);
		vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
		vbox_write_ioport(
			VBE_DISPI_INDEX_X_OFFSET,
			vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x);
		vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
				  vbox_crtc->fb_offset / pitch + crtc->y);
	}

	/* Build the screen flags: active, possibly blanked or disconnected. */
	flags = VBVA_SCREEN_F_ACTIVE;
	flags |= (crtc->enabled && !vbox_crtc->blanked) ?
		 0 : VBVA_SCREEN_F_BLANK;
	flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
	hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
				   x_offset, y_offset,
				   crtc->x * bpp / 8 + crtc->y * pitch,
				   pitch, width, height,
				   vbox_crtc->blanked ? 0 : bpp, flags);
}
/*
 * Report this crtc's "view" (frame buffer plus command buffer region) to
 * the host.  Returns 0 on success or -ENOMEM if the HGSMI buffer cannot
 * be allocated.
 */
static int vbox_set_view(struct drm_crtc *crtc)
{
	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
	struct vbox_private *vbox = crtc->dev->dev_private;
	struct vbva_infoview *p;

	/*
	 * Tell the host about the view.  This design originally targeted the
	 * Windows XP driver architecture and assumed that each screen would
	 * have a dedicated frame buffer with the command buffer following it,
	 * the whole being a "view".  The host works out which screen a command
	 * buffer belongs to by checking whether it is in the first view, then
	 * whether it is in the second and so on.  The first match wins.  We
	 * cheat around this by making the first view be the managed memory
	 * plus the first command buffer, the second the same plus the second
	 * buffer and so on.
	 */
	p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
			       HGSMI_CH_VBVA, VBVA_INFO_VIEW);
	if (!p)
		return -ENOMEM;

	p->view_index = vbox_crtc->crtc_id;
	p->view_offset = vbox_crtc->fb_offset;
	p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
		       vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
	p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;

	hgsmi_buffer_submit(vbox->guest_pool, p);
	hgsmi_buffer_free(vbox->guest_pool, p);

	return 0;
}
/* No gamma/LUT hardware to program; required stub for the helper. */
static void vbox_crtc_load_lut(struct drm_crtc *crtc)
{
}
/* DPMS hook: track the blanked state and push it to the host. */
static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct vbox_crtc *vcrtc = to_vbox_crtc(crtc);
	struct vbox_private *vbox = crtc->dev->dev_private;

	/* Only DPMS_ON unblanks; standby, suspend and off all blank. */
	if (mode == DRM_MODE_DPMS_ON)
		vcrtc->blanked = false;
	else if (mode == DRM_MODE_DPMS_STANDBY ||
		 mode == DRM_MODE_DPMS_SUSPEND ||
		 mode == DRM_MODE_DPMS_OFF)
		vcrtc->blanked = true;

	mutex_lock(&vbox->hw_mutex);
	vbox_do_modeset(crtc, &crtc->hwmode);
	mutex_unlock(&vbox->hw_mutex);
}
/* All modes are accepted unchanged; required stub for the helper. */
static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc,
				 const struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode)
{
	return true;
}
/*
 * Try to map the layout of virtual screens to the range of the input device.
 * Return true if we need to re-set the crtc modes due to screen offset
 * changes.
 */
static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
{
	struct drm_crtc *crtci;
	struct drm_connector *connectori;
	struct drm_framebuffer *fb1 = NULL;
	bool single_framebuffer = true;
	bool old_single_framebuffer = vbox->single_framebuffer;
	u16 width = 0, height = 0;

	/*
	 * Are we using an X.Org-style single large frame-buffer for all crtcs?
	 * If so then screen layout can be deduced from the crtc offsets.
	 * Same fall-back if this is the fbdev frame-buffer.
	 */
	list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
		if (!fb1) {
			fb1 = CRTC_FB(crtci);
			if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb)
				break;
		} else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) {
			single_framebuffer = false;
		}
	}
	if (single_framebuffer) {
		list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
				    head) {
			/* The mapping is taken from crtc 0's frame-buffer. */
			if (to_vbox_crtc(crtci)->crtc_id != 0)
				continue;

			/*
			 * NOTE(review): CRTC_FB(crtci) is dereferenced here
			 * without a NULL check - confirm crtc 0 always has a
			 * frame-buffer attached when this runs.
			 */
			vbox->single_framebuffer = true;
			vbox->input_mapping_width = CRTC_FB(crtci)->width;
			vbox->input_mapping_height = CRTC_FB(crtci)->height;
			return old_single_framebuffer !=
			       vbox->single_framebuffer;
		}
	}
	/* Otherwise calculate the total span of all screens. */
	list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list,
			    head) {
		struct vbox_connector *vbox_connector =
		    to_vbox_connector(connectori);
		struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;

		width = max_t(u16, width, vbox_crtc->x_hint +
					  vbox_connector->mode_hint.width);
		height = max_t(u16, height, vbox_crtc->y_hint +
					    vbox_connector->mode_hint.height);
	}

	vbox->single_framebuffer = false;
	vbox->input_mapping_width = width;
	vbox->input_mapping_height = height;

	return old_single_framebuffer != vbox->single_framebuffer;
}
/*
 * Switch a crtc to a new frame-buffer: unpin the old fb, pin the new one
 * into VRAM, record its offset and, if the input mapping changed, re-send
 * the view and mode information for every crtc.
 */
static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
				 struct drm_framebuffer *old_fb, int x, int y)
{
	struct vbox_private *vbox = crtc->dev->dev_private;
	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
	struct drm_gem_object *obj;
	struct vbox_framebuffer *vbox_fb;
	struct vbox_bo *bo;
	int ret;
	u64 gpu_addr;

	/* Unpin the previous fb. */
	if (old_fb) {
		vbox_fb = to_vbox_framebuffer(old_fb);
		obj = vbox_fb->obj;
		bo = gem_to_vbox_bo(obj);

		ret = vbox_bo_reserve(bo, false);
		if (ret)
			return ret;

		vbox_bo_unpin(bo);
		vbox_bo_unreserve(bo);
	}

	/* Pin the new fb into VRAM and find out where it landed. */
	vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc));
	obj = vbox_fb->obj;
	bo = gem_to_vbox_bo(obj);

	ret = vbox_bo_reserve(bo, false);
	if (ret)
		return ret;

	ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
	if (ret) {
		vbox_bo_unreserve(bo);
		return ret;
	}

	if (&vbox->fbdev->afb == vbox_fb)
		vbox_fbdev_set_base(vbox, gpu_addr);

	vbox_bo_unreserve(bo);

	/* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */
	vbox_crtc->fb_offset = gpu_addr;
	if (vbox_set_up_input_mapping(vbox)) {
		struct drm_crtc *crtci;

		list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
				    head) {
			/*
			 * NOTE(review): this passes the outer "crtc" on every
			 * iteration while the loop variable is "crtci" -
			 * looks like it was meant to be vbox_set_view(crtci).
			 * Confirm against the VirtualBox reference driver
			 * before changing.
			 */
			vbox_set_view(crtc);
			vbox_do_modeset(crtci, &crtci->mode);
		}
	}

	return 0;
}
/* Thin adapter matching the DRM mode_set_base signature. */
static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
				   struct drm_framebuffer *old_fb)
{
	return vbox_crtc_do_set_base(crtc, old_fb, x, y);
}
/*
 * Full mode set: swap in the frame-buffer, then (under the hardware lock)
 * report the view, program the mode and refresh the host input mapping.
 */
static int vbox_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y, struct drm_framebuffer *old_fb)
{
	struct vbox_private *vbox = crtc->dev->dev_private;
	int err;

	vbox_crtc_mode_set_base(crtc, x, y, old_fb);

	mutex_lock(&vbox->hw_mutex);
	err = vbox_set_view(crtc);
	if (!err)
		vbox_do_modeset(crtc, mode);
	hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
				   vbox->input_mapping_width,
				   vbox->input_mapping_height);
	mutex_unlock(&vbox->hw_mutex);

	return err;
}
/* No hardware action needed when a crtc is disabled. */
static void vbox_crtc_disable(struct drm_crtc *crtc)
{
}

/* Nothing to do before a mode set; required stub for the helper. */
static void vbox_crtc_prepare(struct drm_crtc *crtc)
{
}

/* Nothing to do after a mode set; required stub for the helper. */
static void vbox_crtc_commit(struct drm_crtc *crtc)
{
}
/* CRTC helper hooks; mode_set does the real work via vbox_do_modeset(). */
static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
	.dpms = vbox_crtc_dpms,
	.mode_fixup = vbox_crtc_mode_fixup,
	.mode_set = vbox_crtc_mode_set,
	/* .mode_set_base = vbox_crtc_mode_set_base, */
	.disable = vbox_crtc_disable,
	.load_lut = vbox_crtc_load_lut,
	.prepare = vbox_crtc_prepare,
	.commit = vbox_crtc_commit,
};
/* No crtc state to reset; required stub. */
static void vbox_crtc_reset(struct drm_crtc *crtc)
{
}
/* Unhook the crtc from DRM and free the containing vbox_crtc. */
static void vbox_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
/* CRTC entry points: hardware cursor support plus helper-based config. */
static const struct drm_crtc_funcs vbox_crtc_funcs = {
	.cursor_move = vbox_cursor_move,
	.cursor_set2 = vbox_cursor_set2,
	.reset = vbox_crtc_reset,
	.set_config = drm_crtc_helper_set_config,
	/* .gamma_set = vbox_crtc_gamma_set, */
	.destroy = vbox_crtc_destroy,
};
/* Allocate and register the crtc for screen @i.  Returns NULL on OOM. */
static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
{
	struct vbox_crtc *crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);

	if (!crtc)
		return NULL;

	crtc->crtc_id = i;

	drm_crtc_init(dev, &crtc->base, &vbox_crtc_funcs);
	drm_mode_crtc_set_gamma_size(&crtc->base, 256);
	drm_crtc_helper_add(&crtc->base, &vbox_crtc_helper_funcs);

	return crtc;
}
/* Unhook the encoder from DRM and free the containing vbox_encoder. */
static void vbox_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
/* Each connector has exactly one encoder; return it if registered. */
static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
						    *connector)
{
	int enc_id = connector->encoder_ids[0];

	/* pick the encoder ids */
	return enc_id ? drm_encoder_find(connector->dev, enc_id) : NULL;
}
/* Encoder entry points; only destruction needs driver code. */
static const struct drm_encoder_funcs vbox_enc_funcs = {
	.destroy = vbox_encoder_destroy,
};
/* The virtual encoder has no power states; required stub. */
static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}

/* All modes pass through unchanged; required stub. */
static bool vbox_mode_fixup(struct drm_encoder *encoder,
			    const struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode)
{
	return true;
}

/* No encoder hardware to program; required stub. */
static void vbox_encoder_mode_set(struct drm_encoder *encoder,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
}

/* Nothing to do before a mode set; required stub. */
static void vbox_encoder_prepare(struct drm_encoder *encoder)
{
}

/* Nothing to do after a mode set; required stub. */
static void vbox_encoder_commit(struct drm_encoder *encoder)
{
}
/* Encoder helper hooks; all are no-op stubs for this virtual device. */
static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = {
	.dpms = vbox_encoder_dpms,
	.mode_fixup = vbox_mode_fixup,
	.prepare = vbox_encoder_prepare,
	.commit = vbox_encoder_commit,
	.mode_set = vbox_encoder_mode_set,
};
/* Allocate and register the encoder for screen @i.  Returns NULL on OOM. */
static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
					     unsigned int i)
{
	struct vbox_encoder *enc = kzalloc(sizeof(*enc), GFP_KERNEL);

	if (!enc)
		return NULL;

	drm_encoder_init(dev, &enc->base, &vbox_enc_funcs,
			 DRM_MODE_ENCODER_DAC, NULL);
	drm_encoder_helper_add(&enc->base, &vbox_enc_helper_funcs);
	/* Encoder i drives crtc i only. */
	enc->base.possible_crtcs = 1 << i;

	return &enc->base;
}
/**
* Generate EDID data with a mode-unique serial number for the virtual
* monitor to try to persuade Unity that different modes correspond to
* different monitors and it should not try to force the same resolution on
* them.
*/
static void vbox_set_edid(struct drm_connector *connector, int width,
int height)
{
enum { EDID_SIZE = 128 };
unsigned char edid[EDID_SIZE] = {
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
0x58, 0x58, /* manufacturer (VBX) */
0x00, 0x00, /* product code */
0x00, 0x00, 0x00, 0x00, /* serial number goes here */
0x01, /* week of manufacture */
0x00, /* year of manufacture */
0x01, 0x03, /* EDID version */
0x80, /* capabilities - digital */
0x00, /* horiz. res in cm, zero for projectors */
0x00, /* vert. res in cm */
0x78, /* display gamma (120 == 2.2). */
0xEE, /* features (standby, suspend, off, RGB, std */
/* colour space, preferred timing mode) */
0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
/* chromaticity for standard colour space. */
0x00, 0x00, 0x00, /* no default timings */
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01,
0x01, 0x01, 0x01, 0x01, /* no standard timings */
0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
0x02, 0x02,
/* descriptor block 1 goes below */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* descriptor block 2, monitor ranges */
0x00, 0x00, 0x00, 0xFD, 0x00,
0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
0x20, 0x20,
/* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */
0x20,
/* descriptor block 3, monitor name */
0x00, 0x00, 0x00, 0xFC, 0x00,
'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
'\n',
/* descriptor block 4: dummy data */
0x00, 0x00, 0x00, 0x10, 0x00,
0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20,
0x00, /* number of extensions */
0x00 /* checksum goes here */
};
int clock = (width + 6) * (height + 6) * 60 / 10000;
unsigned int i, sum = 0;
edid[12] = width & 0xff;
edid[13] = width >> 8;
edid[14] = height & 0xff;
edid[15] = height >> 8;
edid[54] = clock & 0xff;
edid[55] = clock >> 8;
edid[56] = width & 0xff;
edid[58] = (width >> 4) & 0xf0;
edid[59] = height & 0xff;
edid[61] = (height >> 4) & 0xf0;
for (i = 0; i < EDID_SIZE - 1; ++i)
sum += edid[i];
edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
}
/*
 * Connector ->get_modes hook: report capabilities to the host on first
 * use, then build a mode list seeded with the host's preferred size.
 * Returns the number of modes added.
 */
static int vbox_get_modes(struct drm_connector *connector)
{
	struct vbox_connector *vbox_connector = NULL;
	struct drm_display_mode *mode = NULL;
	struct vbox_private *vbox = NULL;
	unsigned int num_modes = 0;
	int preferred_width, preferred_height;

	vbox_connector = to_vbox_connector(connector);
	vbox = connector->dev->dev_private;
	/*
	 * Heuristic: we do not want to tell the host that we support dynamic
	 * resizing unless we feel confident that the user space client using
	 * the video driver can handle hot-plug events.  So the first time modes
	 * are queried after a "master" switch we tell the host that we do not,
	 * and immediately after we send the client a hot-plug notification as
	 * a test to see if they will respond and query again.
	 * That is also the reason why capabilities are reported to the host at
	 * this place in the code rather than elsewhere.
	 * We need to report the flags location before reporting the IRQ
	 * capability.
	 */
	hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
				    HOST_FLAGS_OFFSET);
	if (vbox_connector->vbox_crtc->crtc_id == 0)
		vbox_report_caps(vbox);
	if (!vbox->initial_mode_queried) {
		if (vbox_connector->vbox_crtc->crtc_id == 0) {
			vbox->initial_mode_queried = true;
			vbox_report_hotplug(vbox);
		}
		/* Minimal mode list until the client proves hot-plug works. */
		return drm_add_modes_noedid(connector, 800, 600);
	}
	num_modes = drm_add_modes_noedid(connector, 2560, 1600);
	/* Fall back to 1024x768 if the host gave no hint. */
	preferred_width = vbox_connector->mode_hint.width ?
			  vbox_connector->mode_hint.width : 1024;
	preferred_height = vbox_connector->mode_hint.height ?
			   vbox_connector->mode_hint.height : 768;
	mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
			    60, false, false, false);
	if (mode) {
		mode->type |= DRM_MODE_TYPE_PREFERRED;
		drm_mode_probed_add(connector, mode);
		++num_modes;
	}
	vbox_set_edid(connector, preferred_width, preferred_height);
	/* Pass the host's screen-placement hints through to user space. */
	drm_object_property_set_value(
		&connector->base, vbox->dev->mode_config.suggested_x_property,
		vbox_connector->vbox_crtc->x_hint);
	drm_object_property_set_value(
		&connector->base, vbox->dev->mode_config.suggested_y_property,
		vbox_connector->vbox_crtc->y_hint);

	return num_modes;
}
/* The virtual monitor accepts any mode; required stub for the helper. */
static int vbox_mode_valid(struct drm_connector *connector,
			   struct drm_display_mode *mode)
{
	return MODE_OK;
}
/*
 * Free a connector: unregister it from user space, detach it from DRM and
 * release the containing vbox_connector allocation.
 *
 * The original computed to_vbox_connector(connector) into a local that was
 * never used; the dead local has been removed.
 */
static void vbox_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
/* Report connection state from the host's last mode hint. */
static enum drm_connector_status
vbox_connector_detect(struct drm_connector *connector, bool force)
{
	struct vbox_connector *conn = to_vbox_connector(connector);

	if (conn->mode_hint.disconnected)
		return connector_status_disconnected;

	return connector_status_connected;
}
/* Drop every stale probed mode, then re-probe the connector. */
static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
			   u32 max_y)
{
	struct drm_device *dev = to_vbox_connector(connector)->base.dev;
	struct drm_display_mode *mode, *tmp;

	list_for_each_entry_safe(mode, tmp, &connector->modes, head) {
		list_del(&mode->head);
		drm_mode_destroy(dev, mode);
	}

	return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
}
/* Connector helper hooks: mode probing and encoder selection. */
static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
	.mode_valid = vbox_mode_valid,
	.get_modes = vbox_get_modes,
	.best_encoder = vbox_best_single_encoder,
};
/* Connector entry points; fill_modes flushes stale modes before probing. */
static const struct drm_connector_funcs vbox_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = vbox_connector_detect,
	.fill_modes = vbox_fill_modes,
	.destroy = vbox_connector_destroy,
};
/*
 * Create the VGA connector for one screen, wire it to @vbox_crtc and
 * attach it to @encoder.  Returns 0 or -ENOMEM.
 */
static int vbox_connector_init(struct drm_device *dev,
			       struct vbox_crtc *vbox_crtc,
			       struct drm_encoder *encoder)
{
	struct vbox_connector *conn;
	struct drm_connector *base;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return -ENOMEM;

	base = &conn->base;
	conn->vbox_crtc = vbox_crtc;

	drm_connector_init(dev, base, &vbox_connector_funcs,
			   DRM_MODE_CONNECTOR_VGA);
	drm_connector_helper_add(base, &vbox_connector_helper_funcs);

	base->interlace_allowed = 0;
	base->doublescan_allowed = 0;

	/* Let the host suggest where each virtual screen should be placed. */
	drm_mode_create_suggested_offset_properties(dev);
	drm_object_attach_property(&base->base,
				   dev->mode_config.suggested_x_property, -1);
	drm_object_attach_property(&base->base,
				   dev->mode_config.suggested_y_property, -1);
	drm_connector_register(base);

	drm_mode_connector_attach_encoder(base, encoder);

	return 0;
}
/* Create a crtc, an encoder and a connector for every virtual screen. */
int vbox_mode_init(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;
	unsigned int id;
	int err;

	/* vbox_cursor_init(dev); */
	for (id = 0; id < vbox->num_crtcs; ++id) {
		struct vbox_crtc *crtc;
		struct drm_encoder *enc;

		crtc = vbox_crtc_init(dev, id);
		if (!crtc)
			return -ENOMEM;

		enc = vbox_encoder_init(dev, id);
		if (!enc)
			return -ENOMEM;

		err = vbox_connector_init(dev, crtc, enc);
		if (err)
			return err;
	}

	return 0;
}
/* CRTCs/encoders/connectors are torn down by drm_mode_config_cleanup(). */
void vbox_mode_fini(struct drm_device *dev)
{
	/* vbox_cursor_fini(dev); */
}
/**
 * Copy the ARGB image and generate the mask, which is needed in case the host
 * does not support ARGB cursors.  The mask is a 1BPP bitmap with the bit set
 * if the corresponding alpha value in the ARGB image is greater than 0xF0.
 *
 * The destination buffer is reused between calls (it is the persistent
 * vbox->cursor_data), and the loop below only ever ORs mask bits in, so the
 * mask region must be cleared first - otherwise stale bits from a previous
 * cursor shape survive and corrupt the new mask.
 */
static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
			      size_t mask_size)
{
	size_t line_size = (width + 7) / 8;
	u32 i, j;

	/* Start from a clean mask; bits are only ever set below. */
	memset(dst, 0, mask_size);
	/* ARGB pixel data follows the mask. */
	memcpy(dst + mask_size, src, width * height * 4);

	for (i = 0; i < height; ++i)
		for (j = 0; j < width; ++j)
			if (((u32 *)src)[i * width + j] > 0xf0000000)
				dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
}
/*
 * DRM cursor_set2 hook: upload a new cursor shape (or hide the cursor when
 * @handle is zero) to the host via HGSMI.
 *
 * The shape sent to the host is a 1 bpp AND mask followed by the ARGB image
 * (see copy_cursor_image()).  The hot-spot is clamped to the cursor size.
 * Returns 0 on success or a negative errno; -EBUSY is returned when the host
 * lacks hardware-cursor support so that user space falls back gracefully.
 */
static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
			    u32 handle, u32 width, u32 height,
			    s32 hot_x, s32 hot_y)
{
	struct vbox_private *vbox = crtc->dev->dev_private;
	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
	struct ttm_bo_kmap_obj uobj_map;
	size_t data_size, mask_size;
	struct drm_gem_object *obj;
	u32 flags, caps = 0;
	struct vbox_bo *bo;
	bool src_isiomem;
	u8 *dst = NULL;
	u8 *src;
	int ret;

	/*
	 * Re-set this regularly as in 5.0.20 and earlier the information was
	 * lost on save and restore.
	 */
	hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
				   vbox->input_mapping_width,
				   vbox->input_mapping_height);

	if (!handle) {
		bool cursor_enabled = false;
		struct drm_crtc *crtci;

		/*
		 * Hide cursor - but only tell the host once no CRTC has a
		 * cursor any more, as the host cursor is global, not
		 * per-CRTC.
		 */
		vbox_crtc->cursor_enabled = false;
		list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
				    head) {
			if (to_vbox_crtc(crtci)->cursor_enabled)
				cursor_enabled = true;
		}

		if (!cursor_enabled)
			hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
						   0, 0, NULL, 0);
		return 0;
	}

	vbox_crtc->cursor_enabled = true;

	if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
	    width == 0 || height == 0)
		return -EINVAL;

	/* Query host cursor capabilities each time; they may change. */
	ret = hgsmi_query_conf(vbox->guest_pool,
			       VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
	if (ret)
		return ret;

	if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
		/*
		 * -EINVAL means cursor_set2() not supported, -EAGAIN means
		 * retry at once.
		 */
		return -EBUSY;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
		return -ENOENT;
	}

	bo = gem_to_vbox_bo(obj);
	ret = vbox_bo_reserve(bo, false);
	if (ret)
		goto out_unref_obj;

	/*
	 * The mask must be calculated based on the alpha
	 * channel, one bit per ARGB word, and must be 32-bit
	 * padded.
	 */
	mask_size = ((width + 7) / 8 * height + 3) & ~3;
	data_size = width * height * 4 + mask_size;
	/* Clamp the hot-spot into the cursor rectangle. */
	vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width);
	vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height);
	vbox->cursor_width = width;
	vbox->cursor_height = height;
	vbox->cursor_data_size = data_size;
	dst = vbox->cursor_data;

	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
	if (ret) {
		/* Mark the cached shape invalid so it is not re-sent. */
		vbox->cursor_data_size = 0;
		goto out_unreserve_bo;
	}

	src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
	if (src_isiomem) {
		DRM_ERROR("src cursor bo not in main memory\n");
		ret = -EIO;
		goto out_unmap_bo;
	}

	copy_cursor_image(src, dst, width, height, mask_size);

	flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
		VBOX_MOUSE_POINTER_ALPHA;
	ret = hgsmi_update_pointer_shape(vbox->guest_pool, flags,
					 vbox->cursor_hot_x, vbox->cursor_hot_y,
					 width, height, dst, data_size);
	/* Fall through to the cleanup chain; ret is the final result. */
out_unmap_bo:
	ttm_bo_kunmap(&uobj_map);
out_unreserve_bo:
	vbox_bo_unreserve(bo);
out_unref_obj:
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
/*
 * DRM cursor_move hook: report the guest cursor position to the host and,
 * if the host reports its own cursor position, derive an updated hot-spot
 * from the difference and re-send the cached shape with it.
 */
static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vbox_private *vbox = crtc->dev->dev_private;
	u32 flags = VBOX_MOUSE_POINTER_VISIBLE |
	    VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA;
	/* CRTC offset within the global virtual screen. */
	s32 crtc_x =
	    vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint;
	s32 crtc_y =
	    vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint;
	u32 host_x, host_y;
	u32 hot_x = 0;
	u32 hot_y = 0;
	int ret;

	/*
	 * We compare these to unsigned later and don't
	 * need to handle negative.
	 */
	if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0)
		return 0;

	ret = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x,
				    y + crtc_y, &host_x, &host_y);

	/*
	 * The only reason we have vbox_cursor_move() is that some older clients
	 * might use DRM_IOCTL_MODE_CURSOR instead of DRM_IOCTL_MODE_CURSOR2 and
	 * use DRM_MODE_CURSOR_MOVE to set the hot-spot.
	 *
	 * However VirtualBox 5.0.20 and earlier has a bug causing it to return
	 * 0,0 as host cursor location after a save and restore.
	 *
	 * To work around this we ignore a 0, 0 return, since missing the odd
	 * time when it legitimately happens is not going to hurt much.
	 */
	if (ret || (host_x == 0 && host_y == 0))
		return ret;

	/* Host position minus guest position gives the implied hot-spot. */
	if (x + crtc_x < host_x)
		hot_x = min(host_x - x - crtc_x, vbox->cursor_width);
	if (y + crtc_y < host_y)
		hot_y = min(host_y - y - crtc_y, vbox->cursor_height);

	/* Nothing to do if the hot-spot did not change. */
	if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
		return 0;

	vbox->cursor_hot_x = hot_x;
	vbox->cursor_hot_y = hot_y;

	/* Re-send the cached shape with the updated hot-spot. */
	return hgsmi_update_pointer_shape(vbox->guest_pool, flags,
					  hot_x, hot_y, vbox->cursor_width,
					  vbox->cursor_height,
					  vbox->cursor_data,
					  vbox->cursor_data_size);
}
/*
* Copyright (C) 2017 Oracle Corporation
* Copyright 2017 Canonical
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Andreas Pokorny
*/
#include "vbox_drv.h"
/*
* Based on qxl_prime.c:
* Empty Implementations as there should not be any other driver for a virtual
* device that might share buffers with vboxvideo
*/
/* PRIME export is not supported for a virtual device; warn once and fail. */
int vbox_gem_prime_pin(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return -ENOSYS;
}
/* Counterpart of vbox_gem_prime_pin(); intentionally unimplemented. */
void vbox_gem_prime_unpin(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
}
/* PRIME scatter-gather export stub; always fails with -ENOSYS. */
struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
/* PRIME scatter-gather import stub; always fails with -ENOSYS. */
struct drm_gem_object *vbox_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
/* PRIME kernel-mapping stub; always fails with -ENOSYS. */
void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
/* Counterpart of vbox_gem_prime_vmap(); intentionally unimplemented. */
void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	WARN_ONCE(1, "not implemented");
}
/* PRIME user-space mapping stub; always fails with -ENOSYS. */
int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
{
	WARN_ONCE(1, "not implemented");
	return -ENOSYS;
}
/*
* Copyright (C) 2013-2017 Oracle Corporation
* This file is based on ast_ttm.c
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
* Authors: Dave Airlie <airlied@redhat.com>
* Michael Thayer <michael.thayer@oracle.com>
*/
#include "vbox_drv.h"
#include <ttm/ttm_page_alloc.h>
/* Recover the driver-private structure from its embedded TTM BO device. */
static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct vbox_private, ttm.bdev);
}
/* drm_global_reference init callback for the TTM memory accounting object. */
static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}
/* drm_global_reference release callback matching vbox_ttm_mem_global_init. */
static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
/**
 * Register this driver with the global TTM memory-accounting and BO
 * subsystems.  The BO reference depends on the memory reference, so they
 * are taken in that order and the first is dropped if the second fails.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
	struct drm_global_reference *mem_ref = &vbox->ttm.mem_global_ref;
	struct drm_global_reference *bo_ref = &vbox->ttm.bo_global_ref.ref;
	int rc;

	mem_ref->global_type = DRM_GLOBAL_TTM_MEM;
	mem_ref->size = sizeof(struct ttm_mem_global);
	mem_ref->init = &vbox_ttm_mem_global_init;
	mem_ref->release = &vbox_ttm_mem_global_release;
	rc = drm_global_item_ref(mem_ref);
	if (rc) {
		DRM_ERROR("Failed setting up TTM memory subsystem.\n");
		return rc;
	}

	vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
	bo_ref->global_type = DRM_GLOBAL_TTM_BO;
	bo_ref->size = sizeof(struct ttm_bo_global);
	bo_ref->init = &ttm_bo_global_init;
	bo_ref->release = &ttm_bo_global_release;
	rc = drm_global_item_ref(bo_ref);
	if (rc) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		/* Unwind the memory reference taken above. */
		drm_global_item_unref(mem_ref);
		return rc;
	}

	return 0;
}
/**
 * Drop the global TTM references taken in vbox_ttm_global_init(), in
 * reverse order of acquisition (BO first, then memory).
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
	drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
	drm_global_item_unref(&vbox->ttm.mem_global_ref);
}
/* TTM destroy callback: release the embedded GEM object and free the BO. */
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct vbox_bo *vbox_bo = container_of(tbo, struct vbox_bo, bo);

	drm_gem_object_release(&vbox_bo->gem);
	kfree(vbox_bo);
}
/*
 * Report whether @bo was created by this driver (and therefore embeds a
 * struct vbox_bo).  Our BOs are identified by their destroy callback.
 */
static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
	/* Return the comparison directly instead of if/return true/false. */
	return bo->destroy == &vbox_bo_ttm_destroy;
}
/*
 * TTM init_mem_type callback: describe the memory domains we support.
 * System memory is plain mappable cached RAM; VRAM is a fixed, mappable
 * PCI aperture managed by the range manager and mapped write-combined.
 */
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* VRAM is a fixed aperture; use the range manager. */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
		return -EINVAL;
	}

	return 0;
}
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
struct vbox_bo *vboxbo = vbox_bo(bo);
if (!vbox_ttm_bo_is_vbox_bo(bo))
return;
vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
*pl = vboxbo->placement;
}
/* TTM verify_access callback: no per-file access restrictions. */
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
				 struct file *filp)
{
	return 0;
}
/*
 * TTM io_mem_reserve callback: fill in the bus-address description for a
 * memory region.  System memory needs no bus mapping; VRAM maps into PCI
 * BAR 0 at the page offset recorded in the region.
 */
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				   struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vbox_private *vbox = vbox_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		/* VRAM lives in PCI BAR 0. */
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* TTM io_mem_free callback: nothing was reserved, so nothing to free. */
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
				 struct ttm_mem_reg *mem)
{
}
/* TTM move callback: no DMA engine on this device, so always memcpy. */
static int vbox_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
}
/* Backend destroy callback matching the kzalloc in vbox_ttm_tt_create(). */
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}
/* Backend ops for our plain (non-DMA) TTM pages; only destroy is needed. */
static struct ttm_backend_func vbox_tt_backend_func = {
	.destroy = &vbox_ttm_backend_destroy,
};
/*
 * TTM ttm_tt_create callback: allocate and initialize a page table for a
 * BO backed by system pages.  Returns NULL on allocation or init failure.
 */
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
					 unsigned long size,
					 u32 page_flags,
					 struct page *dummy_read_page)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	tt->func = &vbox_tt_backend_func;
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page) != 0) {
		kfree(tt);
		tt = NULL;
	}

	return tt;
}
/* Populate callback: back the ttm_tt with pages from the common pool. */
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}
/* Unpopulate callback: return the ttm_tt's pages to the common pool. */
static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}
/* TTM driver vtable tying the callbacks above together. */
struct ttm_bo_driver vbox_bo_driver = {
	.ttm_tt_create = vbox_ttm_tt_create,
	.ttm_tt_populate = vbox_ttm_tt_populate,
	.ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
	.init_mem_type = vbox_bo_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vbox_bo_evict_flags,
	.move = vbox_bo_move,
	.verify_access = vbox_bo_verify_access,
	.io_mem_reserve = vbox_ttm_io_mem_reserve,
	.io_mem_free = vbox_ttm_io_mem_free,
	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
/*
 * Set up TTM memory management: global references, the BO device, the VRAM
 * range manager, and a write-combining mapping for the framebuffer BAR.
 * Unwinds on failure via the goto chain at the bottom.
 *
 * Returns 0 on success or a negative errno.
 */
int vbox_mm_init(struct vbox_private *vbox)
{
	int ret;
	struct drm_device *dev = vbox->dev;
	struct ttm_bo_device *bdev = &vbox->ttm.bdev;

	ret = vbox_ttm_global_init(vbox);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&vbox->ttm.bdev,
				 vbox->ttm.bo_global_ref.ref.object,
				 &vbox_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET, true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver; %d\n", ret);
		goto err_ttm_global_release;
	}

	/* All of VRAM (PCI BAR 0) is managed by the VRAM range manager. */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     vbox->available_vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		goto err_device_release;
	}

	/* Map the framebuffer BAR write-combined for performance. */
#ifdef DRM_MTRR_WC
	vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
				     pci_resource_len(dev->pdev, 0),
				     DRM_MTRR_WC);
#else
	vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
					 pci_resource_len(dev->pdev, 0));
#endif
	return 0;

err_device_release:
	ttm_bo_device_release(&vbox->ttm.bdev);
err_ttm_global_release:
	vbox_ttm_global_release(vbox);
	return ret;
}
/* Tear down everything set up by vbox_mm_init(), in reverse order. */
void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
	drm_mtrr_del(vbox->fb_mtrr,
		     pci_resource_start(vbox->dev->pdev, 0),
		     pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
#else
	arch_phys_wc_del(vbox->fb_mtrr);
#endif
	ttm_bo_device_release(&vbox->ttm.bdev);
	vbox_ttm_global_release(vbox);
}
/*
 * Fill in @bo's placement list from a mask of TTM_PL_FLAG_* domains.
 * VRAM entries are write-combined/uncached; system entries allow any
 * caching.  An empty mask falls back to system memory.
 */
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
	u32 num = 0;
	unsigned int idx;

	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;

	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[num++].flags =
		    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[num++].flags =
		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (num == 0)
		bo->placements[num++].flags =
		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	bo->placement.num_placement = num;
	bo->placement.num_busy_placement = num;

	/* No page-range restrictions on any placement. */
	for (idx = 0; idx < num; ++idx) {
		bo->placements[idx].fpfn = 0;
		bo->placements[idx].lpfn = 0;
	}
}
/*
 * Allocate and initialize a vbox_bo of @size bytes, placeable in VRAM or
 * system memory.  On success *@pvboxbo owns the new object.
 *
 * Returns 0 on success or a negative errno.
 */
int vbox_bo_create(struct drm_device *dev, int size, int align,
		   u32 flags, struct vbox_bo **pvboxbo)
{
	struct vbox_private *vbox = dev->dev_private;
	struct vbox_bo *vboxbo;
	size_t acc_size;
	int ret;

	vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
	if (!vboxbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &vboxbo->gem, size);
	if (ret) {
		kfree(vboxbo);
		return ret;
	}

	vboxbo->bo.bdev = &vbox->ttm.bdev;

	vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
				       sizeof(struct vbox_bo));

	ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
			  ttm_bo_type_device, &vboxbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, NULL, vbox_bo_ttm_destroy);
	if (ret)
		/*
		 * On failure ttm_bo_init() already called the destroy
		 * callback (vbox_bo_ttm_destroy), which released the GEM
		 * object and freed vboxbo - do NOT free it again here, that
		 * would be a double free.
		 */
		return ret;

	*pvboxbo = vboxbo;

	return 0;
}
/* Offset of the BO within its memory domain, as seen by the device. */
static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
	return bo->bo.offset;
}
/*
 * Pin @bo into the domain given by @pl_flag, preventing eviction, and
 * optionally return its device offset in *@gpu_addr.  Pins are counted;
 * only the first pin validates the placement.
 *
 * Returns 0 on success or the ttm_bo_validate() error.
 */
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	/* Already pinned: just bump the count. */
	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = vbox_bo_gpu_offset(bo);

		return 0;
	}

	vbox_ttm_placement(bo, pl_flag);

	/* Mark every placement non-evictable before validating. */
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;

	if (gpu_addr)
		*gpu_addr = vbox_bo_gpu_offset(bo);

	return 0;
}
/*
 * Drop one pin reference on @bo.  When the count reaches zero the BO is
 * made evictable again.  Unbalanced unpins are logged and ignored.
 *
 * Returns 0 on success or the ttm_bo_validate() error.
 */
int vbox_bo_unpin(struct vbox_bo *bo)
{
	int idx;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}

	if (--bo->pin_count)
		return 0;

	/* Last pin gone: allow eviction again and revalidate. */
	for (idx = 0; idx < bo->placement.num_placement; idx++)
		bo->placements[idx].flags &= ~TTM_PL_FLAG_NO_EVICT;

	return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
}
/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned. The caller must have pinned it previously, and this call will
 * release the caller's pin.
 *
 * Returns 0 on success or the ttm_bo_validate() error.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	/* Drop any kernel mapping before the pages move. */
	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret) {
		/*
		 * Fixed message: we are moving the BO to system memory
		 * here, not to VRAM.
		 */
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}

	return 0;
}
/* File-operations mmap hook: hand BO mappings off to TTM. */
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct vbox_private *vbox;

	/* Offsets below DRM_FILE_PAGE_OFFSET are not TTM BO mappings. */
	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	priv = filp->private_data;
	vbox = priv->minor->dev->dev_private;

	return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}
/*
* Copyright (C) 2006-2016 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*/
#ifndef __VBOXVIDEO_H__
#define __VBOXVIDEO_H__
/*
* This should be in sync with monitorCount <xsd:maxInclusive value="64"/> in
* src/VBox/Main/xml/VirtualBox-settings-common.xsd
*/
#define VBOX_VIDEO_MAX_SCREENS 64
/*
* The last 4096 bytes of the guest VRAM contains the generic info for all
* DualView chunks: sizes and offsets of chunks. This is filled by miniport.
*
* Last 4096 bytes of each chunk contain chunk specific data: framebuffer info,
* etc. This is used exclusively by the corresponding instance of a display
* driver.
*
* The VRAM layout:
* Last 4096 bytes - Adapter information area.
4096 bytes aligned miniport heap (value specified in the config rounded up).
Slack - what is left after dividing the VRAM.
* 4096 bytes aligned framebuffers:
* last 4096 bytes of each framebuffer is the display information area.
*
* The Virtual Graphics Adapter information in the guest VRAM is stored by the
* guest video driver using structures prepended by VBOXVIDEOINFOHDR.
*
* When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
* the host starts to process the info. The first element at the start of
* the 4096 bytes region should be normally be a LINK that points to
* actual information chain. That way the guest driver can have some
* fixed layout of the information memory block and just rewrite
* the link to point to relevant memory chain.
*
* The processing stops at the END element.
*
* The host can access the memory only when the port IO is processed.
* All data that will be needed later must be copied from these 4096 bytes.
* But other VRAM can be used by host until the mode is disabled.
*
* The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
* to disable the mode.
*
* VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
* from the host and issue commands to the host.
*
The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the
* following operations with the VBE data register can be performed:
*
* Operation Result
* write 16 bit value NOP
* read 16 bit value count of monitors
* write 32 bit value set the vbox cmd value and the cmd processed by the host
* read 32 bit value result of the last vbox command is returned
*/
/**
* VBVA command header.
*
* @todo Where does this fit in?
*/
struct vbva_cmd_hdr {
/** Coordinates of affected rectangle. */
s16 x;
s16 y;
u16 w;
u16 h;
} __packed;
/** @name VBVA ring defines.
*
* The VBVA ring buffer is suitable for transferring large (< 2GB) amount of
* data. For example big bitmaps which do not fit to the buffer.
*
* Guest starts writing to the buffer by initializing a record entry in the
* records queue. VBVA_F_RECORD_PARTIAL indicates that the record is being
* written. As data is written to the ring buffer, the guest increases
* free_offset.
*
* The host reads the records on flushes and processes all completed records.
* When host encounters situation when only a partial record presents and
* len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
* VBVA_RING_BUFFER_THRESHOLD, the host fetched all record data and updates
* data_offset. After that on each flush the host continues fetching the data
* until the record is completed.
*
*/
#define VBVA_RING_BUFFER_SIZE (4194304 - 1024)
#define VBVA_RING_BUFFER_THRESHOLD (4096)
#define VBVA_MAX_RECORDS (64)
#define VBVA_F_MODE_ENABLED 0x00000001u
#define VBVA_F_MODE_VRDP 0x00000002u
#define VBVA_F_MODE_VRDP_RESET 0x00000004u
#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u
#define VBVA_F_STATE_PROCESSING 0x00010000u
#define VBVA_F_RECORD_PARTIAL 0x80000000u
/**
* VBVA record.
*/
struct vbva_record {
/** The length of the record. Changed by guest. */
u32 len_and_flags;
} __packed;
/*
* The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of
* the runtime heapsimple API. Use minimum 2 pages here, because the info area
* also may contain other data (for example hgsmi_host_flags structure).
*/
#define VBVA_ADAPTER_INFORMATION_SIZE 65536
#define VBVA_MIN_BUFFER_SIZE 65536
/* The value for port IO to let the adapter to interpret the adapter memory. */
#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF
/* The value for port IO to let the adapter to interpret the adapter memory. */
#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000
/* The value for port IO to let the adapter to interpret the display memory.
* The display number is encoded in low 16 bits.
*/
#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
/* Host-written flags at the start of the VBVA buffer (shared with host). */
struct vbva_host_flags {
	/* Events the host wants to signal to the guest. */
	u32 host_events;
	/* VRDP orders the host supports (see VBVA_F_MODE_VRDP*). */
	u32 supported_orders;
} __packed;
/*
 * The VBVA ring buffer as laid out in guest VRAM and shared with the host.
 * Layout must stay in sync with the host; hence __packed.
 */
struct vbva_buffer {
	struct vbva_host_flags host_flags;

	/* The offset where the data start in the buffer. */
	u32 data_offset;
	/* The offset where next data must be placed in the buffer. */
	u32 free_offset;

	/* The queue of record descriptions. */
	struct vbva_record records[VBVA_MAX_RECORDS];
	u32 record_first_index;
	u32 record_free_index;

	/* Space to leave free when large partial records are transferred. */
	/* NOTE(review): field name spells "tresh" (sic) - part of the shared
	 * layout/ABI, so it cannot be renamed here.
	 */
	u32 partial_write_tresh;

	u32 data_len;
	/* variable size for the rest of the vbva_buffer area in VRAM. */
	u8 data[0];
} __packed;
#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
/* guest->host commands */
#define VBVA_QUERY_CONF32 1
#define VBVA_SET_CONF32 2
#define VBVA_INFO_VIEW 3
#define VBVA_INFO_HEAP 4
#define VBVA_FLUSH 5
#define VBVA_INFO_SCREEN 6
#define VBVA_ENABLE 7
#define VBVA_MOUSE_POINTER_SHAPE 8
/* informs host about HGSMI caps. see vbva_caps below */
#define VBVA_INFO_CAPS 12
/* configures scanline, see VBVASCANLINECFG below */
#define VBVA_SCANLINE_CFG 13
/* requests scanline info, see VBVASCANLINEINFO below */
#define VBVA_SCANLINE_INFO 14
/* inform host about VBVA Command submission */
#define VBVA_CMDVBVA_SUBMIT 16
/* inform host about VBVA Command submission */
#define VBVA_CMDVBVA_FLUSH 17
/* G->H DMA command */
#define VBVA_CMDVBVA_CTL 18
/* Query most recent mode hints sent */
#define VBVA_QUERY_MODE_HINTS 19
/**
* Report the guest virtual desktop position and size for mapping host and
* guest pointer positions.
*/
#define VBVA_REPORT_INPUT_MAPPING 20
/** Report the guest cursor position and query the host position. */
#define VBVA_CURSOR_POSITION 21
/* host->guest commands */
#define VBVAHG_EVENT 1
#define VBVAHG_DISPLAY_CUSTOM 2
/* vbva_conf32::index */
#define VBOX_VBVA_CONF32_MONITOR_COUNT 0
#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1
/**
* Returns VINF_SUCCESS if the host can report mode hints via VBVA.
* Set value to VERR_NOT_SUPPORTED before calling.
*/
#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2
/**
* Returns VINF_SUCCESS if the host can report guest cursor enabled status via
* VBVA. Set value to VERR_NOT_SUPPORTED before calling.
*/
#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3
/**
* Returns the currently available host cursor capabilities. Available if
* vbva_conf32::VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
* @see VMMDevReqMouseStatus::mouseFeatures.
*/
#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4
/** Returns the supported flags in vbva_infoscreen::flags. */
#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5
/** Returns the max size of VBVA record. */
#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6
struct vbva_conf32 {
u32 index;
u32 value;
} __packed;
/** Reserved for historical reasons. */
#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0)
/**
* Guest cursor capability: can the host show a hardware cursor at the host
* pointer location?
*/
#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1)
/** Reserved for historical reasons. */
#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2)
/** Reserved for historical reasons. Must always be unset. */
#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3)
/** Reserved for historical reasons. */
#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4)
/** Reserved for historical reasons. */
#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5)
struct vbva_infoview {
/* Index of the screen, assigned by the guest. */
u32 view_index;
/* The screen offset in VRAM, the framebuffer starts here. */
u32 view_offset;
/* The size of the VRAM memory that can be used for the view. */
u32 view_size;
/* The recommended maximum size of the VRAM memory for the screen. */
u32 max_screen_size;
} __packed;
struct vbva_flush {
u32 reserved;
} __packed;
/* vbva_infoscreen::flags */
#define VBVA_SCREEN_F_NONE 0x0000
#define VBVA_SCREEN_F_ACTIVE 0x0001
/**
* The virtual monitor has been disabled by the guest and should be removed
* by the host and ignored for purposes of pointer position calculation.
*/
#define VBVA_SCREEN_F_DISABLED 0x0002
/**
* The virtual monitor has been blanked by the guest and should be blacked
* out by the host using width, height, etc values from the vbva_infoscreen
* request.
*/
#define VBVA_SCREEN_F_BLANK 0x0004
/**
* The virtual monitor has been blanked by the guest and should be blacked
* out by the host using the previous mode values for width. height, etc.
*/
#define VBVA_SCREEN_F_BLANK2 0x0008
struct vbva_infoscreen {
/* Which view contains the screen. */
u32 view_index;
/* Physical X origin relative to the primary screen. */
s32 origin_x;
/* Physical Y origin relative to the primary screen. */
s32 origin_y;
/* Offset of visible framebuffer relative to the framebuffer start. */
u32 start_offset;
/* The scan line size in bytes. */
u32 line_size;
/* Width of the screen. */
u32 width;
/* Height of the screen. */
u32 height;
/* Color depth. */
u16 bits_per_pixel;
/* VBVA_SCREEN_F_* */
u16 flags;
} __packed;
/* vbva_enable::flags */
#define VBVA_F_NONE 0x00000000
#define VBVA_F_ENABLE 0x00000001
#define VBVA_F_DISABLE 0x00000002
/* extended VBVA to be used with WDDM */
#define VBVA_F_EXTENDED 0x00000004
/* vbva offset is absolute VRAM offset */
#define VBVA_F_ABSOFFSET 0x00000008
struct vbva_enable {
u32 flags;
u32 offset;
s32 result;
} __packed;
struct vbva_enable_ex {
struct vbva_enable base;
u32 screen_id;
} __packed;
struct vbva_mouse_pointer_shape {
/* The host result. */
s32 result;
/* VBOX_MOUSE_POINTER_* bit flags. */
u32 flags;
/* X coordinate of the hot spot. */
u32 hot_X;
/* Y coordinate of the hot spot. */
u32 hot_y;
/* Width of the pointer in pixels. */
u32 width;
/* Height of the pointer in scanlines. */
u32 height;
/* Pointer data.
*
****
* The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
* mask.
*
* For pointers without alpha channel the XOR mask pixels are 32 bit
* values: (lsb)BGR0(msb). For pointers with alpha channel the XOR mask
* consists of (lsb)BGRA(msb) 32 bit values.
*
* Guest driver must create the AND mask for pointers with alpha chan.,
* so if host does not support alpha, the pointer could be displayed as
* a normal color pointer. The AND mask can be constructed from alpha
* values. For example alpha value >= 0xf0 means bit 0 in the AND mask.
*
* The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
* mask, therefore, is and_len = (width + 7) / 8 * height. The padding
* bits at the end of any scanline are undefined.
*
* The XOR mask follows the AND mask on the next 4 bytes aligned offset:
* u8 *xor = and + (and_len + 3) & ~3
* Bytes in the gap between the AND and the XOR mask are undefined.
* XOR mask scanlines have no gap between them and size of XOR mask is:
* xor_len = width * 4 * height.
****
*
* Preallocate 4 bytes for accessing actual data as p->data.
*/
u8 data[4];
} __packed;
/**
* @name vbva_mouse_pointer_shape::flags
* @note The VBOX_MOUSE_POINTER_* flags are used in the guest video driver,
* values must be <= 0x8000 and must not be changed. (try make more sense
* of this, please).
* @{
*/
/** pointer is visible */
#define VBOX_MOUSE_POINTER_VISIBLE 0x0001
/** pointer has alpha channel */
#define VBOX_MOUSE_POINTER_ALPHA 0x0002
/** pointerData contains new pointer shape */
#define VBOX_MOUSE_POINTER_SHAPE 0x0004
/** @} */
/*
The guest driver can handle asynchronous guest cmd completion by reading the
* command offset from io port.
*/
#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001
/* the guest driver can handle video adapter IRQs */
#define VBVACAPS_IRQ 0x00000002
/** The guest can read video mode hints sent via VBVA. */
#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004
/** The guest can switch to a software cursor on demand. */
#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008
/** The guest does not depend on host handling the VBE registers. */
#define VBVACAPS_USE_VBVA_ONLY 0x00000010
struct vbva_caps {
s32 rc;
u32 caps;
} __packed;
/** Query the most recent mode hints received from the host. */
struct vbva_query_mode_hints {
/** The maximum number of screens to return hints for. */
u16 hints_queried_count;
/** The size of the mode hint structures directly following this one. */
u16 hint_structure_guest_size;
/** Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
s32 rc;
} __packed;
/**
* Structure in which a mode hint is returned. The guest allocates an array
* of these immediately after the vbva_query_mode_hints structure.
* To accommodate future extensions, the vbva_query_mode_hints structure
* specifies the size of the vbva_modehint structures allocated by the guest,
* and the host only fills out structure elements which fit into that size. The
* host should fill any unused members (e.g. dx, dy) or structure space on the
* end with ~0. The whole structure can legally be set to ~0 to skip a screen.
*/
struct vbva_modehint {
u32 magic;
u32 cx;
u32 cy;
u32 bpp; /* Which has never been used... */
u32 display;
u32 dx; /**< X offset into the virtual frame-buffer. */
u32 dy; /**< Y offset into the virtual frame-buffer. */
u32 enabled; /* Not flags. Add new members for new flags. */
} __packed;
#define VBVAMODEHINT_MAGIC 0x0801add9u
/**
 * Report the rectangle relative to which absolute pointer events should be
 * expressed. This information remains valid until the next VBVA resize event
 * for any screen, at which time it is reset to the bounding rectangle of all
 * virtual screens and must be re-set.
 * The origin is signed, so the rectangle may start left of / above the
 * first screen's origin.
 * @see VBVA_REPORT_INPUT_MAPPING.
 */
struct vbva_report_input_mapping {
	s32 x; /**< Upper left X co-ordinate relative to the first screen. */
	s32 y; /**< Upper left Y co-ordinate relative to the first screen. */
	u32 cx; /**< Rectangle width. */
	u32 cy; /**< Rectangle height. */
} __packed;
/**
 * Report the guest cursor position and query the host one. The host may wish
 * to use the guest information to re-position its own cursor (though this is
 * currently unlikely).
 * @see VBVA_CURSOR_POSITION
 */
struct vbva_cursor_position {
	u32 report_position; /**< Are we reporting a position? (0/1, presumably) */
	u32 x; /**< Guest cursor X position */
	u32 y; /**< Guest cursor Y position */
} __packed;
#endif
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __VBOXVIDEO_GUEST_H__
#define __VBOXVIDEO_GUEST_H__
#include <linux/genalloc.h>
#include "vboxvideo.h"
/**
 * Structure grouping the context needed for sending graphics acceleration
 * information to the host via VBVA. Each screen has its own VBVA buffer.
 */
struct vbva_buf_ctx {
	/** Offset of the buffer in the VRAM section for the screen */
	u32 buffer_offset;
	/** Length of the buffer in bytes */
	u32 buffer_length;
	/**
	 * Set if we wrote to the buffer faster than the host could read it;
	 * cleared again by vbva_buffer_end_update() and vbva_disable().
	 */
	bool buffer_overflow;
	/** VBVA record that we are currently preparing for the host, or NULL */
	struct vbva_record *record;
	/**
	 * Pointer to the VBVA buffer mapped into the current address space.
	 * Will be NULL if VBVA is not enabled.
	 */
	struct vbva_buffer *vbva;
};
/**
* @name Base HGSMI APIs
* @{
*/
int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location);
int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps);
int hgsmi_test_query_conf(struct gen_pool *ctx);
int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
u32 hot_x, u32 hot_y, u32 width, u32 height,
u8 *pixels, u32 len);
int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
u32 x, u32 y, u32 *x_host, u32 *y_host);
/** @} */
/**
* @name VBVA APIs
* @{
*/
bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
struct vbva_buffer *vbva, s32 screen);
void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
s32 screen);
bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
struct gen_pool *ctx);
void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx);
bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
const void *p, u32 len);
void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
u32 buffer_offset, u32 buffer_length);
/** @} */
/**
* @name Modesetting APIs
* @{
*/
void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
s32 origin_x, s32 origin_y, u32 start_offset,
u32 pitch, u32 width, u32 height,
u16 bpp, u16 flags);
int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
u32 width, u32 height);
int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
struct vbva_modehint *hints);
/** @} */
#endif
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __VBOXVIDEO_VBE_H__
#define __VBOXVIDEO_VBE_H__
/* GUEST <-> HOST Communication API */
/**
 * @todo FIXME: Either dynamically ask host for this or put somewhere high in
* physical memory like 0xE0000000.
*/
#define VBE_DISPI_BANK_ADDRESS 0xA0000
#define VBE_DISPI_BANK_SIZE_KB 64
#define VBE_DISPI_MAX_XRES 16384
#define VBE_DISPI_MAX_YRES 16384
#define VBE_DISPI_MAX_BPP 32
#define VBE_DISPI_IOPORT_INDEX 0x01CE
#define VBE_DISPI_IOPORT_DATA 0x01CF
#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
#define VBE_DISPI_INDEX_ID 0x0
#define VBE_DISPI_INDEX_XRES 0x1
#define VBE_DISPI_INDEX_YRES 0x2
#define VBE_DISPI_INDEX_BPP 0x3
#define VBE_DISPI_INDEX_ENABLE 0x4
#define VBE_DISPI_INDEX_BANK 0x5
#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
#define VBE_DISPI_INDEX_X_OFFSET 0x8
#define VBE_DISPI_INDEX_Y_OFFSET 0x9
#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
#define VBE_DISPI_ID0 0xB0C0
#define VBE_DISPI_ID1 0xB0C1
#define VBE_DISPI_ID2 0xB0C2
#define VBE_DISPI_ID3 0xB0C3
#define VBE_DISPI_ID4 0xB0C4
#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
/* The VBOX interface id. Indicates support for VBVA shared memory interface. */
#define VBE_DISPI_ID_HGSMI 0xBE01
#define VBE_DISPI_ID_ANYX 0xBE02
#define VBE_DISPI_DISABLED 0x00
#define VBE_DISPI_ENABLED 0x01
#define VBE_DISPI_GETCAPS 0x02
#define VBE_DISPI_8BIT_DAC 0x20
/**
* @note this definition is a BOCHS legacy, used only in the video BIOS
* code and ignored by the emulated hardware.
*/
#define VBE_DISPI_LFB_ENABLED 0x40
#define VBE_DISPI_NOCLEARMEM 0x80
#define VGA_PORT_HGSMI_HOST 0x3b0
#define VGA_PORT_HGSMI_GUEST 0x3d0
#endif
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vbox_drv.h"
#include "vbox_err.h"
#include "vboxvideo_guest.h"
#include "hgsmi_channels.h"
/*
* There is a hardware ring buffer in the graphics device video RAM, formerly
* in the VBox VMMDev PCI memory space.
 * All graphics commands go there serialized by vbva_buffer_begin_update
 * and vbva_buffer_end_update.
*
* free_offset is writing position. data_offset is reading position.
* free_offset == data_offset means buffer is empty.
 * There must always be a gap between data_offset and free_offset when data
 * are in the buffer.
* Guest only changes free_offset, host changes data_offset.
*/
/*
 * Number of bytes the guest may still write before catching up with the
 * host's read position (data_offset). Handles ring-buffer wrap-around.
 */
static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
{
	s32 free_bytes = vbva->data_offset - vbva->free_offset;

	if (free_bytes > 0)
		return free_bytes;

	/* Wrapped: free space spans the end and the start of the buffer. */
	return vbva->data_len + free_bytes;
}
/*
 * Copy @len bytes into the ring buffer at @offset, splitting the copy in
 * two when it crosses the end of the data area.
 */
static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
				      const void *p, u32 len, u32 offset)
{
	struct vbva_buffer *vbva = vbva_ctx->vbva;
	u32 head_room = vbva->data_len - offset;
	s32 wrapped = len - head_room;

	if (wrapped <= 0) {
		/* Fits without wrapping. */
		memcpy(&vbva->data[offset], p, len);
		return;
	}

	/* Crosses the boundary: the tail lands at the start of the buffer. */
	memcpy(&vbva->data[offset], p, head_room);
	memcpy(&vbva->data[0], (u8 *)p + head_room, wrapped);
}
/* Ask the host to read out whatever is pending in the VBVA buffer. */
static void vbva_buffer_flush(struct gen_pool *ctx)
{
	struct vbva_flush *req;

	req = hgsmi_buffer_alloc(ctx, sizeof(*req), HGSMI_CH_VBVA, VBVA_FLUSH);
	if (!req)
		return;

	req->reserved = 0;

	hgsmi_buffer_submit(ctx, req);
	hgsmi_buffer_free(ctx, req);
}
/**
 * vbva_write() - append data to the currently open VBVA record.
 * @vbva_ctx: per-screen VBVA buffer context
 * @ctx:      HGSMI guest pool used to send flush requests to the host
 * @p:        data to copy into the ring buffer
 * @len:      number of bytes to copy
 *
 * Must be called between vbva_buffer_begin_update() and
 * vbva_buffer_end_update(), i.e. while a record is still flagged
 * VBVA_F_RECORD_PARTIAL.
 *
 * Return: true if all @len bytes were placed in the buffer; false if VBVA
 * is not enabled, a previous write overflowed, no record is open, or the
 * host could not drain the buffer (buffer_overflow is then latched).
 */
bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		const void *p, u32 len)
{
	struct vbva_record *record;
	struct vbva_buffer *vbva;
	u32 available;
	vbva = vbva_ctx->vbva;
	record = vbva_ctx->record;
	if (!vbva || vbva_ctx->buffer_overflow ||
	    !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
		return false;
	available = vbva_buffer_available(vbva);
	while (len > 0) {
		u32 chunk = len;
		/* Not enough room: ask the host to read out what it can. */
		if (chunk >= available) {
			vbva_buffer_flush(ctx);
			available = vbva_buffer_available(vbva);
		}
		if (chunk >= available) {
			/*
			 * Still no room after flushing. If free space has
			 * shrunk to the partial-write threshold, give up and
			 * latch the overflow condition.
			 */
			if (WARN_ON(available <= vbva->partial_write_tresh)) {
				vbva_ctx->buffer_overflow = true;
				return false;
			}
			/* Write what fits, keeping the threshold gap free. */
			chunk = available - vbva->partial_write_tresh;
		}
		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
					  vbva->free_offset);
		/* Advance the write position, wrapping around the ring. */
		vbva->free_offset = (vbva->free_offset + chunk) %
				    vbva->data_len;
		record->len_and_flags += chunk;
		available -= chunk;
		len -= chunk;
		p += chunk;
	}
	return true;
}
/*
 * Tell the host to enable or disable VBVA for @screen. For enable requests
 * the host's result code decides success; disables always report success.
 */
static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
			     struct gen_pool *ctx, s32 screen, bool enable)
{
	struct vbva_enable_ex *req;
	bool success = true;

	req = hgsmi_buffer_alloc(ctx, sizeof(*req), HGSMI_CH_VBVA,
				 VBVA_ENABLE);
	if (!req)
		return false;

	req->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
	req->base.offset = vbva_ctx->buffer_offset;
	req->base.result = VERR_NOT_SUPPORTED;
	if (screen >= 0) {
		/* Address an individual screen rather than the whole VRAM. */
		req->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
		req->screen_id = screen;
	}

	hgsmi_buffer_submit(ctx, req);

	if (enable)
		success = RT_SUCCESS(req->base.result);

	hgsmi_buffer_free(ctx, req);

	return success;
}
/*
 * Initialise the per-screen VBVA buffer and register it with the host.
 * On host refusal all state is rolled back via vbva_disable().
 */
bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		 struct vbva_buffer *vbva, s32 screen)
{
	bool accepted;

	/* Start from a clean buffer; the header precedes the data area. */
	memset(vbva, 0, sizeof(*vbva));
	vbva->partial_write_tresh = 256;
	vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
	vbva_ctx->vbva = vbva;

	accepted = vbva_inform_host(vbva_ctx, ctx, screen, true);
	if (!accepted)
		vbva_disable(vbva_ctx, ctx, screen);

	return accepted;
}
/* Tear down guest-side VBVA state for @screen and notify the host. */
void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		  s32 screen)
{
	/* Drop all local state first; the host call result is ignored. */
	vbva_ctx->record = NULL;
	vbva_ctx->vbva = NULL;
	vbva_ctx->buffer_overflow = false;

	vbva_inform_host(vbva_ctx, ctx, screen, false);
}
bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
struct gen_pool *ctx)
{
struct vbva_record *record;
u32 next;
if (!vbva_ctx->vbva ||
!(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
return false;
WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);
next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;
/* Flush if all slots in the records queue are used */
if (next == vbva_ctx->vbva->record_first_index)
vbva_buffer_flush(ctx);
/* If even after flush there is no place then fail the request */
if (next == vbva_ctx->vbva->record_first_index)
return false;
record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
record->len_and_flags = VBVA_F_RECORD_PARTIAL;
vbva_ctx->vbva->record_free_index = next;
/* Remember which record we are using. */
vbva_ctx->record = record;
return true;
}
/**
 * vbva_buffer_end_update() - close the record opened by
 * vbva_buffer_begin_update() so the host may process it, and reset the
 * per-screen write state for the next begin/end cycle.
 * @vbva_ctx: per-screen VBVA buffer context
 */
void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
{
	struct vbva_record *record = vbva_ctx->record;
	WARN_ON(!vbva_ctx->vbva || !record ||
		!(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
	/* Mark the record completed. */
	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
}
/**
 * vbva_setup_buffer_context() - record where a screen's VBVA buffer lives.
 * @vbva_ctx:      per-screen VBVA buffer context to initialise
 * @buffer_offset: buffer offset, later reported to the host as base.offset
 *                 by vbva_inform_host()
 * @buffer_length: total buffer size in bytes; vbva_enable() derives the
 *                 usable data length from it
 */
void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
			       u32 buffer_offset, u32 buffer_length)
{
	vbva_ctx->buffer_offset = buffer_offset;
	vbva_ctx->buffer_length = buffer_length;
}
......@@ -3391,7 +3391,6 @@ static int vchiq_probe(struct platform_device *pdev)
struct device_node *fw_node;
struct rpi_firmware *fw;
int err;
void *ptr_err;
fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
if (!fw_node) {
......@@ -3427,14 +3426,14 @@ static int vchiq_probe(struct platform_device *pdev)
/* create sysfs entries */
vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
ptr_err = vchiq_class;
if (IS_ERR(ptr_err))
err = PTR_ERR(vchiq_class);
if (IS_ERR(vchiq_class))
goto failed_class_create;
vchiq_dev = device_create(vchiq_class, NULL,
vchiq_devid, NULL, "vchiq");
ptr_err = vchiq_dev;
if (IS_ERR(ptr_err))
err = PTR_ERR(vchiq_dev);
if (IS_ERR(vchiq_dev))
goto failed_device_create;
/* create debugfs entries */
......@@ -3455,7 +3454,6 @@ static int vchiq_probe(struct platform_device *pdev)
class_destroy(vchiq_class);
failed_class_create:
cdev_del(&vchiq_cdev);
err = PTR_ERR(ptr_err);
failed_cdev_add:
unregister_chrdev_region(vchiq_devid, 1);
failed_platform_init:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment